parse_ast <- function(kind, resolve) {
  fn <- function(obj, schema) {
    if (inherits(obj, kind)) {
      resolve(obj$value, schema)
    } else {
      NULL
    }
  }
  pryr_unenclose(fn)
}

for_onload(function() {

  coerce_int <- function(value, ...) {
    MAX_INT <- 2147483647
    MIN_INT <- -2147483648
    num <- suppressWarnings(as.integer(value))
    if (!is.na(num)) {
      if (num <= MAX_INT && num >= MIN_INT) {
        return(num)
      }
    }
    return(NULL)
  }
  Int <- ScalarTypeDefinition$new(
    name = Name$new(value = "Int"),
    description = paste0(
      "The Int scalar type represents a signed 32-bit numeric non-fractional value. ",
      "Response formats that support a 32-bit integer or a number type should use that ",
      "type to represent this scalar."
    ),
    .resolve = coerce_int,
    .parse_ast = parse_ast("IntValue", coerce_int)
  )

  coerce_float <- function(value, ...) {
    num <- suppressWarnings(as.numeric(value))
    if (is.numeric(num)) {
      return(num)
    } else {
      return(NULL)
    }
  }
  Float <- ScalarTypeDefinition$new(
    name = Name$new(value = "Float"),
    description = collapse(
      "The `Float` scalar type represents signed double-precision fractional ",
      "values as specified by ",
      "[IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point)."
    ),
    .resolve = coerce_float,
    .parse_ast = pryr_unenclose(function(obj, schema) {
      if (
        inherits(obj, "IntValue") ||
        inherits(obj, "FloatValue")
      ) {
        coerce_float(obj$value, schema)
      } else {
        NULL
      }
    })
  )

  coerce_string <- function(value, ...) {
    char <- suppressWarnings(as.character(value))
    if (is.character(char)) {
      return(char)
    } else {
      return(NULL)
    }
  }
  String <- ScalarTypeDefinition$new(
    name = Name$new(value = "String"),
    description = collapse(
      "The `String` scalar type represents textual data, represented as UTF-8 ",
      "character sequences. The String type is most often used by GraphQL to ",
      "represent free-form human-readable text."
    ),
    .resolve = coerce_string,
    .parse_ast = parse_ast("StringValue", coerce_string)
  )

  coerce_boolean <- function(value, ...) {
    val <- suppressWarnings(as.logical(value))
    if (is.logical(val)) {
      if (!is_nullish(val)) {
        return(val)
      }
    }
    return(NULL)
  }
  Boolean <- ScalarTypeDefinition$new(
    name = Name$new(value = "Boolean"),
    description = "The `Boolean` scalar type represents `TRUE` or `FALSE`.",
    .resolve = coerce_boolean,
    .parse_ast = parse_ast("BooleanValue", coerce_boolean)
  )
})
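A minimal sketch of how the coercers defined above behave on their own. They are local to the for_onload() callback, so this assumes they have been pulled out to the top level; is_nullish() is a package-internal helper assumed to be available.

coerce_int("42")        # 42L   -- parses and lies within the 32-bit range
coerce_int(3e10)        # NULL  -- as.integer() overflows to NA, so NULL is returned
coerce_float("1.5")     # 1.5
coerce_boolean("TRUE")  # TRUE
coerce_boolean("maybe") # NULL  -- as.logical() yields NA, which is_nullish() treats as nullish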
cellMap = function(D, R, indcells = NULL, indrows = NULL, standOD=NULL,showVals=NULL,rowlabels="", columnlabels="",mTitle="", rowtitle="", columntitle="",showrows=NULL, showcolumns=NULL, nrowsinblock=1, ncolumnsinblock=1,autolabel=TRUE, columnangle=90,sizetitles=1.1,adjustrowlabels=1, adjustcolumnlabels=1, colContrast=1,outlyingGrad=TRUE, darkestColor = sqrt(qchisq(0.999,1)), drawCircles = TRUE) { funcSqueeze = function(Xin,n,d,ncolumnsinblock,nrowsinblock,colContrast) { Xblock = matrix(0,nrow=n,ncol=d) Xblockgrad = matrix(0,nrow=n,ncol=d) for (i in seq_len(n)) { for (j in seq_len(d)) { Xsel = Xin[(1+((i-1)*nrowsinblock)):(i*nrowsinblock), (1+((j-1)*ncolumnsinblock)):(j*ncolumnsinblock)] seltable = tabulate(Xsel,nbins=4) if (sum(seltable) > 0) { indmax = which(seltable==max(seltable))[1] cntmax = seltable[indmax] gradmax = (cntmax / (ncolumnsinblock*nrowsinblock))^(1/colContrast) } else { indmax = 0 gradmax = 1 } Xblock[i,j] = indmax Xblockgrad[i,j] = gradmax } } return(list(X=Xblock,Xgrad=Xblockgrad)) } variable <- rownr <- rescaleoffset <- x <- y <- NULL type = "cell" n = nrow(R) d = ncol(R) blockMap = FALSE if (ncolumnsinblock > 1 | nrowsinblock > 1 ){ blockMap = TRUE if (ncolumnsinblock > d) stop('Input argument ncolumnsinblock cannot be larger than d') if (nrowsinblock > n) stop('Input argument nrowsinblock cannot be larger than n') if (!is.null(showVals)) warning('The option showVals=D or showVals=R cannot be combined with ncolumnsinblock or nrowsinblock greater than 1, so showVals is set to NULL here.') showVals = NULL } if(!blockMap){ if (!all(dim(R) == dim(D))) stop('Dimensions of D and R must match') } if(!(blockMap & autolabel==FALSE)){ if (length(columnlabels) > 0 & length(columnlabels)!= d) { stop(paste('Number of columnlabels does not match d = ',d,sep=""))} if (length(rowlabels) > 0 & length(rowlabels)!= n) { stop(paste('Number of rowlabels does not match n = ',n,sep=""))} } if (!is.null(showVals)) { if (!showVals %in% c("D", "R")) { stop(paste("Invalid \"showVals\" argument. 
Should be one of: NULL, \"D\", \"R\"")) } } if(is.null(indcells)) indcells = which(abs(R) > sqrt(qchisq(0.99,1))) if(!(is.null(showcolumns) & is.null(showrows))){ if(is.null(showcolumns)) { showcolumns = seq_len(d) } else { if(!(all(showcolumns %in% seq_len(d)))) stop(" showcolumns goes out of bounds")} if(is.null(showrows)) { showrows = seq_len(n) } else { if(!(all(showrows %in% seq_len(n)))) stop(" showrows goes out of bounds")} tempMat = matrix(0,n,d) tempMat[indcells] = 1 tempMat = tempMat[showrows,showcolumns] indcells = which(tempMat == 1) tempVec = rep(0,n) tempVec[indrows] = 1 tempVec = tempVec[showrows] indrows = which(tempVec == 1) rm(tempMat,tempVec) if(!blockMap) D = D[showrows,showcolumns] R = R[showrows,showcolumns] if (!(blockMap & autolabel==FALSE)) columnlabels = columnlabels[showcolumns] if (!(blockMap & autolabel==FALSE)) rowlabels = rowlabels[showrows] n = nrow(R) d = ncol(R) if(!is.null(standOD)) standOD = standOD[showrows] } if (type=="residual") outlyingGrad=1 X = matrix(0,n,d) Xrow = matrix(0,n,1) Xrow[indrows,1] = 3 if (type=="cell" | blockMap){ pcells = indcells[indcells %in% which(R>=0)] ncells = indcells[indcells %in% which(R<0)] } else { pcells = which(R>=0) ncells = which(R<0) } X[ncells] = 1 X[pcells] = 2 X[is.na(R)] = 4 if (blockMap) { n = floor(n/nrowsinblock) d = floor(d/ncolumnsinblock) result = funcSqueeze(X,n,d,ncolumnsinblock,nrowsinblock,colContrast) X = result$X Xgrad = result$Xgrad result = funcSqueeze(Xrow,n,1,1,nrowsinblock,colContrast) Xrowgrad = result$Xgrad Xrowgrad[result$X==0] = 0 if (autolabel==TRUE) { if (ncolumnsinblock>1 & length(columnlabels)>0) { labx = columnlabels columnlabels = rep(0,d) for(ind in seq_len(d)) { columnlabels[ind] = paste(labx[(1+((ind-1)*ncolumnsinblock))],"-", labx[(ind*ncolumnsinblock)],sep="") } } if (nrowsinblock>1 & length(rowlabels)>0) { laby = rowlabels rowlabels = rep(0,n) for(ind in seq_len(n)) { rowlabels[ind] = paste(laby[(1+((ind-1)*nrowsinblock))],"-", laby[(ind*nrowsinblock) ]) } } } else { if (length(columnlabels) > 0 & length(columnlabels)!= d) { stop(paste(' autolabel=FALSE and number of columnlabels is ', length(columnlabels),' but should be ',d,sep=""))} if (length(rowlabels) > 0 & length(rowlabels)!= n) { stop(paste(' autolabel=FALSE and number of rowlabels is ', length(rowlabels),' but should be ',n,sep=""))} } Xdf = data.frame(cbind(seq(1,n,1),X)) colnames(Xdf) = c("rownr",seq(1,d,1)) rownames(Xdf) = NULL Xdf$rownr = with(Xdf, reorder(rownr, seq(n,1,-1))) mX = melt(Xdf,id.var="rownr", value.name = "CatNr") Xgraddf = data.frame(cbind(seq(1,n,1),Xgrad)) colnames(Xgraddf) = c("rownr",seq(1,d,1)) rownames(Xgraddf) = NULL Xgraddf$rownr = with(Xgraddf, reorder(rownr, seq(n,1,-1))) mXgrad = melt(Xgraddf,id.var="rownr", value.name = "grad") mX$grad = mXgrad$grad mX$rescaleoffset = mXgrad$grad + 10*mX$CatNr mXrow = data.frame(rownr=seq_len(n),rescaleoffset=Xrowgrad+ 10*3) scalerange = c(0,1) gradientends = scalerange + rep(c(0,10,20,30,40), each=2) if (type=="cell") colorends = c("yellow", "yellow", "yellow", "blue", "yellow", "red","white", "black", "yellow", "white") if (type=="residual") colorends = c("white", "white", "white", "blue", "white", "red","white", "black", "white", "white") } else { Ddf = data.frame(cbind(seq(1,n,1),D)) colnames(Ddf) = c("rownr",seq(1,d,1)) rownames(Ddf) = NULL Ddf$rownr = with(Ddf, reorder(rownr, seq(n,1,-1))) mD = melt(Ddf,id.var="rownr") Rdf = data.frame(cbind(seq(1,n,1),R)) colnames(Rdf) = c("rownr",seq(1,d,1)) rownames(Rdf) = NULL Rdf$rownr = with(Rdf, reorder(rownr, 
seq(n,1,-1))) mR = melt(Rdf,id.var="rownr") Xdf = data.frame(cbind(seq(1,n,1),X)) colnames(Xdf) = c("rownr",seq(1,d,1)) rownames(Xdf) = NULL Xdf$rownr = with(Xdf, reorder(rownr, seq(n,1,-1))) mX = melt(Xdf,id.var="rownr", value.name = "CatNr") if (!is.null(showVals)) { if (showVals=="D") mX$data = mD$value if (showVals=="R") mX$data = mR$value } if (!outlyingGrad) { mX$rescaleoffset = 10*mX$CatNr scalerange = c(0,1) gradientends = scalerange + rep(c(0,10,20,30,40), each=2) gradientends colorends = c("yellow", "yellow", "blue", "blue", "red", "red", "white", "black","white", "white") } else { Xgrad = matrix(NA,n,d) if (type=="cell") { Xgrad[indcells] = abs(R[indcells]) limL = sqrt(qchisq(0.9,1)) } else { Xgrad = abs(R) limL = 0 } limH = darkestColor Xgrad[Xgrad>limH] = limH Xgrad = ( (Xgrad -limL) / (limH - limL) )^colContrast Xgrad[is.na(Xgrad)] = 0 Xgraddf = data.frame(cbind(seq(1,n,1),Xgrad)) colnames(Xgraddf) = c("rownr",seq(1,d,1)) rownames(Xgraddf) = NULL Xgraddf$rownr = with(Xgraddf, reorder(rownr, seq(n,1,-1))) mXgrad = melt(Xgraddf,id.var="rownr", value.name = "grad") mX$grad = mXgrad$grad mX$rescaleoffset = mXgrad$grad + 10*mX$CatNr scalerange = c(0,1) gradientends = scalerange + rep(c(0,10,20,30,40), each=2) if (type=="cell") colorends = c("yellow", "yellow", "yellow", "blue", "yellow", "red", "white", "black", "white", "white") if (type=="residual") colorends = c("white", "white", "white", "blue", "white", "red", "white", "black", "white", "white") } tempVec = rep(0,n) tempVec[indrows] = 1 mXrow = data.frame(rownr=seq_len(n),rescaleoffset=40-(10*tempVec) ) rm(tempVec) if (is.null(standOD)) { mXrow$rescaleoffset[indrows] = mXrow$rescaleoffset[indrows] + 1 } else { limL = 1 limH = 3 standOD[standOD>limH] = limH standOD = ( (standOD - limL) / (limH - limL) )^colContrast mXrow$rescaleoffset[indrows] = mXrow$rescaleoffset[indrows] + standOD[indrows] } } rowlabels = rev(rowlabels) base_size = 10 columnlabels = c(columnlabels,"","") circleFun = function(centerx, centery, r, npoints) { tt = seq(0, 2 * pi, length.out = npoints) xx = centerx + r * cos(tt) yy = centery + r * sin(tt) return(c(xx, yy)) } if (drawCircles) { centerx = d + 1 centery = n:1 radius = 0.4 npoints = 100 circlePoints = mapply(circleFun, centerx, centery, radius, npoints) positions = data.frame(rownr = rep(seq_len(n), each = npoints), x = c(circlePoints[seq_len(npoints), ]), y = c(circlePoints[(npoints +1):(2*npoints), ])) datapoly = merge(mXrow, positions, by = c("rownr")) } ggp = ggplot(data = mX, aes(variable, rownr)) + { if (blockMap) geom_tile(aes(fill = rescale(rescaleoffset, from = range(gradientends))), color = "white") } + { if (!blockMap & outlyingGrad) geom_tile(aes(fill = rescale(rescaleoffset, from = range(gradientends))), color = "white") } + { if (!blockMap & !outlyingGrad) geom_tile(aes(fill = rescale(rescaleoffset, from = range(gradientends))), colour = "white") } + { if (drawCircles) geom_polygon(data = datapoly, aes(x = x, y = y, fill = rescale(rescaleoffset, from = range(gradientends)), group = rownr), colour = "black") } + scale_fill_gradientn(colours = colorends, values = rescale(gradientends), rescaler = function(x, ...) 
x, oob = scales::squish) + ggtitle(mTitle) + coord_fixed() + theme_classic(base_size = base_size * 1) + labs(x = columntitle, y = rowtitle) + scale_x_discrete(expand = c(0, 0), limits = as.factor(seq(1, d + 2, 1)), labels = columnlabels) + scale_y_discrete(expand = c(0, 0), labels = rowlabels) + theme(legend.position = "none", axis.ticks = element_blank(), plot.title = element_text(size = base_size * 2, hjust = 0.5, vjust = 1, face = "bold"), axis.text.x = element_text(size = base_size * 1.8, angle = columnangle, hjust = adjustcolumnlabels, vjust = 0.5, colour = "black"), axis.text.y = element_text(size = base_size * 1.8, angle = 0, hjust = adjustrowlabels, colour = "black"), axis.title.x = element_text(colour = "black", size = base_size * sizetitles, vjust = 1), axis.title.y = element_text(colour = "black", size = base_size * sizetitles, vjust = 0), axis.line.x = element_blank(), panel.border = element_blank()) + annotate(geom = "segment", x = 0.5, xend = d + 0.5, y = 0.5, yend = 0.5) + annotate(geom = "segment", x = 0.5, xend = d + 0.5, y = n + 0.5, yend = n + 0.5) + annotate(geom = "segment", x = d + 0.5, xend = d + 0.5, y = 0.5, yend = n + 0.5) if (!is.null(showVals)) { txtcol = mX$CatNr txtcol[txtcol==0] = "black" txtcol[txtcol==1] = "white" txtcol[txtcol==2] = "white" if (type=="residual") { txtcol[]="black" txtcol[mXgrad$grad>0.5] = "white" } txtcol[txtcol==4] = "black" ggp = ggp + geom_text(aes(label = ifelse(is.na(data), sprintf("%1.0f",data), round(data,1))),size = base_size*0.5, colour=txtcol, na.rm = TRUE) } return(ggp) }
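A hedged usage sketch for cellMap() as defined above; the toy matrices are invented for illustration, and the call assumes ggplot2, reshape2 (for melt) and scales (for rescale) are attached, since the function body uses them.

set.seed(1)
D <- matrix(rnorm(50), nrow = 10, ncol = 5)   # data values
R <- matrix(rnorm(50), nrow = 10, ncol = 5)   # standardized residuals
R[2, 3] <- 6                                  # one clearly outlying cell
p <- cellMap(D, R,
             rowlabels = paste0("row", 1:10),
             columnlabels = paste0("V", 1:5),
             mTitle = "Toy cell map")
# print(p)   # draws the map: positive outliers in red, negative in blue, missing cells in white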
test_that("binary - uses user defined costs", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), A = c(1, .80, .51) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "B", 2, "B", "A", 3 ) exp_cost <- 1.00 * 3 + 0.00 * 0 + 0.80 * 0 + 0.20 * 2 + 0.51 * 3 + 0.49 * 0 expect_equal( classification_cost(df, obs, A, costs = costs)[[".estimate"]], exp_cost / nrow(df) ) }) test_that("binary - respects `event_first`", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), B = c(1, .80, .51) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "B", 2, "B", "A", 3 ) exp_cost <- 1.00 * 0 + 0.00 * 3 + 0.80 * 2 + 0.20 * 0 + 0.51 * 0 + 0.49 * 3 expect_equal( classification_cost(df, obs, B, costs = costs, event_level = "second")[[".estimate"]], exp_cost / nrow(df) ) }) test_that("costs$truth can be factor", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), A = c(1, .80, .51) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "B", 2, "B", "A", 3 ) costs2 <- costs costs2$truth <- as.factor(costs2$truth) costs2$estimate <- as.factor(costs2$estimate) expect_identical( classification_cost(df, obs, A, costs = costs), classification_cost(df, obs, A, costs = costs2) ) }) test_that("binary - requires 1 column of probabilities", { expect_error(classification_cost(two_class_example, truth, Class1:Class2), "`binary` metric") }) test_that("multiclass - uses equal costs by default", { df <- data.frame( obs = factor(c("A", "A", "A", "B", "B", "C")), A = c(1, .80, .51, .1, .2, .3), B = c(0, .05, .29, .8, .6, .3), C = c(0, .15, .20, .1, .2, .4) ) A <- df$B[df$obs == "A"] + df$C[df$obs == "A"] B <- df$A[df$obs == "B"] + df$C[df$obs == "B"] C <- df$A[df$obs == "C"] + df$B[df$obs == "C"] estimate <- mean(c(A, B, C)) expect_equal( classification_cost(df, obs, A:C)[[".estimate"]], estimate ) }) test_that("multiclass - respects user defined costs", { df <- data.frame( obs = factor(c("A", "A", "A", "B", "B", "C")), A = c(1, .80, .51, .1, .2, .3), B = c(0, .05, .29, .8, .6, .3), C = c(0, .15, .20, .1, .2, .4) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "A", 0, "A", "B", 1, "A", "C", 2, "B", "A", 3, "B", "B", 0, "B", "C", 4, "C", "A", 5, "C", "B", 6, "C", "C", 0, ) exp_cost <- 1.00 * 0 + 0.00 * 1 + 0.00 * 2 + 0.80 * 0 + 0.05 * 1 + 0.15 * 2 + 0.51 * 0 + 0.29 * 1 + 0.20 * 2 + 0.10 * 3 + 0.80 * 0 + 0.10 * 4 + 0.20 * 3 + 0.60 * 0 + 0.20 * 4 + 0.30 * 5 + 0.30 * 6 + 0.40 * 0 expect_equal( classification_cost(df, obs, A:C, costs = costs)[[".estimate"]], exp_cost / nrow(df) ) }) test_that("multiclass - fills in missing combinations with zero cost", { df <- data.frame( obs = factor(c("A", "A", "A", "B", "B", "C")), A = c(1, .80, .51, .1, .2, .3), B = c(0, .05, .29, .8, .6, .3), C = c(0, .15, .20, .1, .2, .4) ) costs_partial <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "A", 0, "A", "C", 2, "B", "A", 3, "B", "C", 4, "C", "A", 5, "C", "C", 0, ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "A", 0, "A", "B", 0, "A", "C", 2, "B", "A", 3, "B", "B", 0, "B", "C", 4, "C", "A", 5, "C", "B", 0, "C", "C", 0, ) expect_identical( classification_cost(df, obs, A:C, costs = costs_partial), classification_cost(df, obs, A:C, costs = costs) ) }) test_that("costs must be a data frame with the right column names", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), A = c(1, .80, .51) ) expect_error(classification_cost(df, obs, A, costs = 1), "`NULL` or a data.frame") expect_error(classification_cost(df, obs, A, costs = 
data.frame()), "3 columns") expect_error(classification_cost(df, obs, A, costs = data.frame(x = 1, y = 2, z = 3)), "'truth', 'estimate', and 'cost'") }) test_that("costs$estimate must contain the right levels", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), A = c(1, .80, .51) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "b", 2, "B", "A", 3 ) expect_error(classification_cost(df, obs, A, costs = costs), "can only contain 'A', 'B'") }) test_that("costs$truth must contain the right levels", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), A = c(1, .80, .51) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "a", "B", 2, "B", "A", 3 ) expect_error(classification_cost(df, obs, A, costs = costs), "can only contain 'A', 'B'") }) test_that("costs$truth, costs$estimate, and costs$cost must have the right type", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), A = c(1, .80, .51) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, 1, "B", 2, 2, "A", 3 ) expect_error(classification_cost(df, obs, A, costs = costs), "character or factor") costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", 1, 2, "B", 2, 3 ) expect_error(classification_cost(df, obs, A, costs = costs), "character or factor") costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "B", "1", "B", "A", "2" ) expect_error(classification_cost(df, obs, A, costs = costs), "numeric column") }) test_that("costs$truth and costs$estimate cannot contain duplicate pairs", { df <- data.frame( obs = factor(c("B", "A", "B"), levels = c("A", "B")), A = c(1, .80, .51) ) costs <- dplyr::tribble( ~truth, ~estimate, ~cost, "A", "B", 2, "A", "B", 3 ) expect_error( classification_cost(df, obs, A, costs = costs), "cannot have duplicate 'truth' / 'estimate' combinations" ) })
PipeOpMCBoost = R6Class("PipeOpMCBoost",
  inherit = mlr3pipelines::PipeOp,
  public = list(
    initialize = function(id = "mcboost", param_vals = list()) {
      param_set = paradox::ParamSet$new(list(
        paradox::ParamInt$new("max_iter", lower = 0L, upper = Inf, default = 5L, tags = "train"),
        paradox::ParamDbl$new("alpha", lower = 0, upper = 1, default = 1e-4, tags = "train"),
        paradox::ParamDbl$new("eta", lower = 0, upper = 1, default = 1, tags = "train"),
        paradox::ParamLgl$new("partition", tags = "train", default = TRUE),
        paradox::ParamInt$new("num_buckets", lower = 1, upper = Inf, default = 2L, tags = "train"),
        paradox::ParamLgl$new("rebucket", default = FALSE, tags = "train"),
        paradox::ParamLgl$new("multiplicative", default = TRUE, tags = "train"),
        paradox::ParamUty$new("auditor_fitter", default = NULL, tags = "train"),
        paradox::ParamUty$new("subpops", default = NULL, tags = "train"),
        paradox::ParamUty$new("default_model_class", default = ConstantPredictor, tags = "train"),
        paradox::ParamUty$new("init_predictor", default = NULL, tags = "train")
      ))
      super$initialize(id,
        param_set = param_set, param_vals = param_vals, packages = character(0),
        input = data.table(name = c("data", "prediction"),
          train = c("TaskClassif", "TaskClassif"),
          predict = c("TaskClassif", "TaskClassif")),
        output = data.table(name = "output", train = "NULL", predict = "PredictionClassif"),
        tags = "target transform")
    }
  ),
  private = list(
    .train = function(inputs) {
      d = inputs$data$data(cols = inputs$data$feature_names)
      l = inputs$data$data(cols = inputs$data$target_names)
      args = self$param_set$get_values(tags = "train")
      if (is.null(args$init_predictor)) {
        init_predictor = function(data, prediction) {
          if (length(prediction$feature_names) > 1L) {
            prds = prediction$data(cols = prediction$feature_names)
            as.matrix(prds)
          } else {
            prds = prediction$data(cols = prediction$feature_names)[[1]]
            one_hot(prds)
          }
        }
        args$init_predictor = init_predictor
      }
      mc = invoke(MCBoost$new, .args = args)
      mc$multicalibrate(d, l, predictor_args = inputs$prediction)
      self$state = list("mc" = mc)
      list(NULL)
    },
    .predict = function(inputs) {
      d = inputs$data$data(cols = inputs$data$feature_names)
      prob = self$state$mc$predict_probs(d, predictor_args = inputs$prediction)
      prob = cbind(1 - prob, prob)
      lvls = c(inputs$prediction$negative, inputs$prediction$positive)
      colnames(prob) = lvls
      list(PredictionClassif$new(inputs$prediction,
        row_ids = inputs$prediction$row_ids,
        truth = inputs$prediction$truth(),
        prob = prob))
    }
  ),
  active = list(
    predict_type = function(val) {
      if (!missing(val)) {
        if (!identical(val, private$.learner)) {
          stop("$predict_type for PipeOpMCBoost is read-only.")
        }
      }
      return("prob")
    }
  )
)
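A small, hedged sketch of constructing the PipeOp defined above and overriding two of its hyperparameters; it relies only on the constructor shown here (and on mcboost being loaded, since the default for default_model_class is ConstantPredictor), not on any particular pipeline layout.

po_mc <- PipeOpMCBoost$new(param_vals = list(max_iter = 3L, multiplicative = FALSE))
po_mc$param_set$values   # max_iter = 3, multiplicative = FALSE
po_mc$input              # two input channels, "data" and "prediction", both TaskClassif
po_mc$predict_type       # always "prob" (read-only active binding)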
estimateAUCwithMVNCML <- function(inputData, LOQ, timePoints, isMultiplicative = FALSE,
                                  onlyFitCML = FALSE, printCMLmessage = TRUE,
                                  optimizationMethod = NULL, CMLcontrol = NULL,
                                  na.rm = TRUE, isPairwise = FALSE) {
  if (isPairwise) {
    outputResults <- estimateAUCwithPairwiseCML(inputData, LOQ, timePoints,
                                                isMultiplicative, onlyFitCML,
                                                optimizationMethod = optimizationMethod,
                                                CMLcontrol, na.rm)
  } else {
    outputResults <- estimateAUCwithFullCML(inputData, LOQ, timePoints,
                                            isMultiplicative, onlyFitCML, printCMLmessage,
                                            optimizationMethod = optimizationMethod,
                                            CMLcontrol, na.rm)
  }
  return(outputResults)
}
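A hypothetical call sketch for the dispatcher above; the layout of inputData (a subjects-by-time matrix of concentrations with values below the LOQ censored) is an assumption made for illustration only, since the function merely forwards its arguments to the pairwise or full CML fitter.

conc <- matrix(c(0.10, 0.80, 0.50,
                 0.20, 0.90, 0.40), nrow = 2, byrow = TRUE)  # hypothetical data layout
res <- estimateAUCwithMVNCML(inputData  = conc,
                             LOQ        = 0.15,
                             timePoints = c(0, 1, 2),
                             isPairwise = FALSE)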
startCTT <- function() {
  shiny::runApp(appDir = system.file("Shiny", package = "CTTShiny"))
}
# Scratch script exploring how Rcpp/RcppArmadillo handle NaN entries in index vectors.
require(Rcpp)
require(RcppArmadillo)
require(inline)

src <- '
// [[Rcpp::depends(RcppArmadillo)]]
using namespace Rcpp;
using namespace arma;

// [[Rcpp::export]]
SEXP testRcpp(NumericVector x_) {
  vec x(x_);
  uvec index = find(x);   // indices of non-zero elements (unused, kept for illustration)
  return wrap(x);
}
'
sourceCpp(code = src)

# Build a small index vector with NaN marking the unused positions.
time0 <- c(0, 10, 0, 20)
ind0 <- time0 > 0
map0 <- vector("integer", length(time0))
map0[ind0] <- as.integer(seq_len(sum(ind0)))
map0[!ind0] <- NaN
which0 <- seq_along(time0)
which0[!ind0] <- NaN

testRcpp(map0)

src <- '
// [[Rcpp::depends(RcppArmadillo)]]
using namespace Rcpp;
using namespace arma;

uvec removeNaN(vec x) {
  vec newx = x;
  uvec index = find(newx == newx);  // NaN != NaN, so this keeps the non-NaN positions
  newx = newx(index);
  return conv_to<uvec>::from(newx);
}

// [[Rcpp::export]]
uvec testRcpp(NumericVector x_) {
  vec x = as<vec>(x_);
  uvec index = find(x == x);
  return index;
}
'
sourceCpp(code = src)

testRcpp(map0)
testRcpp(which0)
testRcpp(1:10)

src <- '
// [[Rcpp::depends(RcppArmadillo)]]
using namespace Rcpp;
using namespace arma;

// [[Rcpp::export]]
double testRcpp(double x) {
  return R::lgammafn(x);
}
'
sourceCpp(code = src)

testRcpp(3.0) - log(gamma(3.0))
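The final check can equivalently be written against base R's lgamma(), which wraps the same C routine as R::lgammafn():

all.equal(testRcpp(3.0), lgamma(3.0))   # TRUE, up to floating-point tolerance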
spec_span<-function(from=NA_character_,to=NA_character_,first=NA_integer_,last=NA_integer_, exclFirst=NA_integer_,exclLast=NA_integer_, var=NA_character_){ x.from<-as.Date(from) x.to <- as.Date(to) x.first <-first x.last <- last x.exclFirst <- exclFirst x.exclLast <- exclLast if (sum(!is.na(x.from),!is.na(x.to))==2){ if (x.to <= x.from){ warning(paste(var,".to <= ",var,".from. Provided values will be ignored.",sep=""), call. = FALSE) x.to <-NA_character_ x.from <-NA_character_ } } if (sum(!is.na(x.from),!is.na(x.to),!is.na(x.last),!is.na(x.first),!is.na(x.exclFirst), !is.na(x.exclLast))==0) { z.span <- NA_character_ z.type <- NA_character_ z.d0 <- NA_character_ z.d1 <- NA_character_ z.n0 <- NA_integer_ z.n1 <- NA_integer_ z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) }else if (sum(!is.na(x.from),!is.na(x.to))==2){ z.span <- paste(as.character(x.from),"-",as.character(x.to), sep=" ") z.type <- "Between" z.d0 <- as.character(x.from) z.d1 <- as.character(x.to) z.n0 <- 0 z.n1 <- 0 if (sum(!is.na(x.last),!is.na(x.first),!is.na(x.exclFirst),!is.na(x.exclLast))!=0) { warning(paste("(",var,".to, ",var,".from) used. Remainig variables (",var,".last,",var,".first,",var,".exclFirst,",var,".exclLast) will be ignored.",sep=""), call. = FALSE) } z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) } else if (sum(!is.na(x.from),!is.na(x.to))==1) { if (!is.na(x.from)) { z.span <- paste("From",as.character(x.from), sep=" ") z.type <- "From" z.d0 <- as.character(x.from) z.d1 <- "2020-12-31" z.n0 <- 0 z.n1 <- 0 }else{ z.span <- paste("Until",as.character(x.to), sep=" ") z.type <- "To" z.d0 <- "1900-01-01" z.d1 <- as.character(x.to) z.n0 <- 0 z.n1 <- 0 } if (sum(!is.na(x.last),!is.na(x.first),!is.na(x.exclFirst),!is.na(x.exclLast))!=0) { warning(paste("(",var,".to, ",var,".from) used. Remainig variables (",var,".last,",var,".first,",var,".exclFirst,",var,".exclLast) will be ignored.",sep=""), call. = FALSE) } z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) } else if (!is.na(x.last) & x.last> 0) { z.span <- paste("Last",as.character(x.last),"periods", sep=" ") z.type <- "Last" z.d0 <- "1900-01-01" z.d1 <- "2020-12-31" z.n0 <- 0 z.n1 <- x.last if (sum(!is.na(x.first),!is.na(x.exclFirst),!is.na(x.exclLast))!=0) { warning(paste("(",var,".last used. Remainig variables (",var,".first,",var,".exclFirst,",var,".exclLast) will be ignored.",sep=""), call. = FALSE) } z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) } else if (!is.na(x.first) & x.first> 0) { z.span <- paste("First",as.character(x.first),"periods", sep=" ") z.type <- "First" z.d0 <- "1900-01-01" z.d1 <- "2020-12-31" z.n0 <- x.first z.n1 <- 0 if (sum(!is.na(x.exclFirst),!is.na(x.exclLast))!=0) { warning(paste("(",var,".first used. Remainig variables (",var,".exclFirst,",var,".exclLast) will be ignored.",sep=""), call. 
= FALSE) } z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) } else if (sum(!is.na(x.exclFirst),!is.na(x.exclLast))==2){ if (sum(x.exclFirst,x.exclLast)==0){ z.span <- "All" z.type <- "All" z.d0 <- "1900-01-01" z.d1 <- "2020-12-31" z.n0 <- 0 z.n1 <- 0 }else{ z.type <- "Excluding" z.d0 <- "1900-01-01" z.d1 <- "2020-12-31" z.n0 <- x.exclFirst z.n1 <- x.exclLast if (x.exclFirst==0) { z.span <- paste("All but last",as.character(x.exclLast),"periods", sep=" ") } else if (x.exclLast==0) { z.span <- paste("All but first",as.character(x.exclFirst),"periods", sep=" ") }else{ z.span <- paste("All but first",as.character(x.exclFirst),"periods and last",as.character(x.exclLast),"periods", sep=" ") } } z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) }else if ((is.na(x.exclFirst)& x.exclLast==0)| (is.na(x.exclLast)& x.exclFirst==0)){ z.span <- "All" z.type <- "All" z.d0 <- "1900-01-01" z.d1 <- "2020-12-31" z.n0 <- 0 z.n1 <- 0 z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) } else if (!is.na(x.exclFirst)) { z.span <- paste("All but first",as.character(x.exclFirst),"periods", sep=" ") z.type <- "Excluding" z.d0 <- "1900-01-01" z.d1 <- "2020-12-31" z.n0 <- x.exclFirst z.n1 <- 0 z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) } else if (!is.na(x.exclLast)) { z.span <- paste("All but last",as.character(x.exclLast),"periods", sep=" ") z.type <- "Excluding" z.d0 <- "1900-01-01" z.d1 <- "2020-12-31" z.n0 <- 0 z.n1 <- x.exclLast z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) }else{ z.span <- NA_character_ z.type <- NA_character_ z.d0 <- NA_character_ z.d1 <- NA_character_ z.n0 <- NA_integer_ z.n1 <- NA_integer_ } z<- data.frame(span=z.span,type=z.type,d0=z.d0,d1=z.d1,n0=z.n0,n1=z.n1, row.names= "") return(z) } spec_preOut<-function(outliertype=NA,outlierdate=NA,outliercoef=NA){ outliers.type <- c("AO","LS","TC","SO") if (sum(is.na(outliertype))==0){ if (!is.vector(outliertype)|is.list(outliertype)|length(setdiff(outliertype,outliers.type))>0){ warning("wrong format of the userdef.outliertype. Pre-specified outlier(s) will be ignored.", call. = FALSE) return(NA) }else if (!is.vector(outlierdate)| sum(is.na(as.Date(outlierdate)))>0){ warning("wrong format of the userdef.outlierdate. Pre-specified outlier(s) will be ignored.", call. = FALSE) return(NA) }else if (length(outliertype)!=length(outlierdate)){ warning("userdef.outlierdate is not of the same length as userdef.outliertype. Pre-specified outlier(s) will be ignored.", call. = FALSE) return(NA) }else{ if (sum(is.na(outliercoef))!=0){ outliers <-data.frame(type = outliertype, date = outlierdate, coeff = rep(NA,length(outliertype))) }else if(!is.vector(outliercoef)|!is.numeric(outliercoef)|(length(outliercoef)!= length(outliertype))){ warning("userdef.outliercoef is wrongly specified. The coefficient(s) will be ignored.", call. 
= FALSE) outliers <-data.frame(type = outliertype, date = outlierdate, coeff = rep(NA,length(outliertype))) }else{ outliers <-data.frame(type = outliertype, date = outlierdate, coeff = outliercoef) } } }else{ return(NA) } outliers } spec_preVar<-function(var = NA, vartype = NA, varcoef = NA, tradingdays.option = NA){ variables.type <- c("Undefined","Series","Trend","Seasonal","SeasonallyAdjusted","Irregular","Calendar") nvar <- if (is.mts(var)) {dim(var)[2]} else if (is.ts(var)) {1} else {0} if (all(sapply(vartype,is.na))) vartype <- rep("Undefined", nvar) if (sum(!is.na(var))!=0){ if (!is.ts(var) & !is.mts(var)){ warning("userdef.var must be a time series or a matrix of time series. User-defined variable(s) will be ignored.", call. = FALSE) vars <- list(series = NA, description = NA) return(vars) }else if (!is.vector(vartype)|is.list(vartype)|length(setdiff(vartype,variables.type))>0) { warning("wrong format of the userdef.varType. User-defined variable(s) will be ignored.", call. = FALSE) vars <- list(series = NA, description = NA) return(vars) }else{ if (nvar != length(vartype)) { vartype <- rep(vartype, nvar)[1:nvar] } if (!is.na(tradingdays.option)){ var_calendar <- grep("Calendar", vartype) if(tradingdays.option != "UserDefined" && length(var_calendar) > 0){ warning("userdef.VarType contains Calendar variables but the tradingdays.options isn't UserDefined.\n", "Corresponding variables will be ignored.", call. = FALSE) if(length(var_calendar) == nvar){ return(list(series = NA, description = NA)) }else{ var <- var[,-var_calendar] vartype <- vartype[-var_calendar] if(sum(is.na(varcoef))!=0 | !is.vector(varcoef) | !is.numeric(varcoef)| (length(varcoef)!= length(vartype))){ varcoef <- NA }else{ varcoef <- varcoef[-var_calendar] } } } } if(is.mts(var)){ description_names <- base::make.names(colnames(var), unique = TRUE) description_names <- gsub(".","_", description_names, fixed = TRUE) }else{ description_names <- "userdef" } if (sum(is.na(varcoef)) != 0){ vars <- list(series = var, description = data.frame(type = vartype, coeff = NA, row.names = description_names)) }else if(!is.vector(varcoef)|!is.numeric(varcoef)|(length(varcoef)!= length(vartype))){ warning("userdef.varCoef is wrongly specified. The coefficient(s) will be ignored.", call. = FALSE) vars <- list(series = var, description = data.frame(type = vartype, coeff = NA, row.names = description_names)) }else{ vars <- list(series = var, description = data.frame(type = vartype, coeff = varcoef, row.names = description_names)) } } }else{ vars <- list(series = NA, description = NA) } vars } spec_arimaCoef<-function(coef = NA, coeftype= NA){ coef.types <- c("Fixed", "Initial","Undefined") if (sum(!is.na(coef))!=0){ if (!is.vector(coef)|is.list(coef)| !is.numeric(coef)){ warning("wrong format of the arima.coef. Defined ARIMA coef. variable(s) will be ignored.", call. = FALSE) return(NA) }else if (!is.vector(coeftype)|is.list(coeftype)|length(setdiff(coeftype,coef.types))>0) { warning("wrong format of the arima.coefType. Defined ARIMA coef. variable(s) will be ignored.", call. = FALSE) return(NA) } else if (length(coef)!=length(coeftype)){ warning("arima.coefType is not of the same length as arima.coef. Defined ARIMA coef. variable(s) will be ignored.", call. 
= FALSE) return(NA) }else{ coeff <-data.frame(Type = coeftype, Value = coef ) } }else{ return(NA) } return(coeff) } spec_arimaCoefF <- function(enabled=NA, armaP=NA, armaF=NA , coefP=NA, coefF=NA){ if (enabled == TRUE & sum(!is.na(coefF))==0 & sum(!is.na(coefP))!=0){ if (dim(coefP)[1]== sum(armaP) & identical(armaP,armaF)){ coef <- coefP ena <- TRUE } else { coef <- NA ena<- FALSE } }else if (sum(!is.na(coefF))==0){ coef <- NA ena<- FALSE }else if (dim(coefF)[1]!= sum(armaP)){ coef <- NA ena<- FALSE warning("A wrong number of ARIMA coef was provided (arima.coef and arima.coefType). Defined ARIMA coef. variable(s) will be ignored.", call. = FALSE) } else { dsc <-c() if (armaP[1]!=0){ for (i in 1:armaP[1]){dsc <- c(dsc,paste("Phi(",as.character(i),")",sep=""))} } if (armaP[2]!=0){ for (i in 1:armaP[2]){dsc <-c(dsc,paste("Theta(",as.character(i),")",sep=""))} } if (armaP[3]!=0){ for (i in 1:armaP[3]){dsc <-c(dsc,paste("BPhi(",as.character(i),")",sep=""))} } if (armaP[4]!=0){ for (i in 1:armaP[4]){dsc <-c(dsc,paste("BTheta(",as.character(i),")",sep=""))} } coef <- coefF rownames(coef) <- dsc ena <- enabled } return(list(ena, coef)) } spec_seasma <- function(seasma=NA){ len <- length(seasma) seasma.type <- c("Msr","Stable","X11Default","S3X1","S3X3","S3X5","S3X9","S3X15") if (sum(is.na(seasma)) != 0) { return(NA) } else if (!is.vector(seasma) | is.list(seasma) | length(setdiff(seasma,seasma.type)) > 0) { warning("wrong format of the x11.seasonalma.\nPossibles filters per period: \"Msr\",\"Stable\", \"X11Default\", \"S3X1\", \"S3X3\", \"S3X5\", \"S3X9\" and \"S3X15\".\nPre-specified seasonal filters will be ignored.", call. = FALSE) return(NA) } else if (!(len %in% c(1, 2, 4, 6, 12))) { warning("wrong format of the x11.seasonalma.\nPre-specified seasonal filters will be ignored.", call. = FALSE) return(NA) } else { z <- toString(seasma) return(z) } } spec_calendar_sigma <- function(calendarSigma = NA, sigmaVector = NA){ len <- length(calendarSigma) calendarSigma.type <- c("None","Signif","All","Select") sigmaVector.type <- c("Group1", "Group2") if (identical_na(calendarSigma)) { if (!identical_na(sigmaVector)) { warning("x11.sigmaVector will be ignored: x11.calendarSigma must be set to \"Select\"", call. = FALSE) } calendarSigma <- sigmaVector <- NA } else if (is.list(calendarSigma) || length(calendarSigma) > 1 || !calendarSigma %in% calendarSigma.type) { warning("Wrong format of the x11.calendarSigma.", "\nPossibles values are: ", "\"None\",\"Signif\", \"All\", \"Select\".", "\nParameters will be ignored.", call. = FALSE) calendarSigma <- sigmaVector <- NA } else if (identical(calendarSigma, "Select")) { if (identical_na(sigmaVector)) { warning("x11.sigmaVector must be specified when x11.calendarSigma = \"Select\"." , "\nx11.calendarSigma will be set to ", '"None".', call. = FALSE) sigmaVector <- NA calendarSigma <- "None" }else if (length(setdiff(sigmaVector, sigmaVector.type)) > 0 || !(length(sigmaVector) %in% c(2, 4, 6, 12))) { warning("Wrong format of the x11.sigmaVector." , "\nIt will be ignored and x11.calendarSigma is set to ", '"None".', call. = FALSE) sigmaVector <- NA calendarSigma <- "None" }else{ sigmaVector <- toString(sigmaVector) } } else { sigmaVector <- NA } list(calendarSigma = calendarSigma, sigmaVector = sigmaVector) } spec_trendma <- function(trendma=NA){ if (sum(!is.na(trendma))==0){ return(NA) } else if (!is.numeric(trendma)){ return(NA) warning("The variable x11.trendma should be numeric.\nThe variable will be ignored.", call. 
= FALSE) } else if (trendma <= 1 | trendma > 101 | (floor(trendma/2)==trendma/2)) { warning("The variable x11.trendma should be in the range (1,101] and be an odd number.\nThe variable will be ignored.", call. = FALSE) return(NA) } else { return(trendma) } } spec_estimateX13<-function(est, spanP, spanM){ est[3,"preliminary.check"] <- if(!is.na(est[2,"preliminary.check"])) { est[2,"preliminary.check"] } else { est[1,"preliminary.check"] } span <-spanM est[3,"span"] <- if(!is.na(est[2,"span"])) {est[2,"span"]} else {est[1,"span"]} if (is.na(est[2,"span"])) { span <- rbind(spanP[1,],spanM[2,]) rownames(span) <- c("estimate","outlier") } est[3,"tolerance"] <- if(!is.na(est[2,"tolerance"])) {est[2,"tolerance"]} else {est[1,"tolerance"]} rownames(est) <- c("Predefined","User_modif","Final") return(list(est=est,span=span)) } spec_transformX13<-function(trans){ trans[3,1] <- if(!is.na(trans[2,1])) {trans[2,1]} else {trans[1,1]} if (trans[3,1]== "None"){ trans[3,2] <-"None" trans[3,3] <-trans[1,3] } else if (trans[3,1]== "Log"){ trans[3,3] <-trans[1,3] } else if (trans[3,1]== "Auto"){ trans[3,2] <-"None" } if (is.na(trans[3,2])) trans[3,2] <- if(!is.na(trans[2,2])) {trans[2,2]} else {trans[1,2]} if (is.na(trans[3,3])) trans[3,3] <- if(!is.na(trans[2,3])) {trans[2,3]} else {trans[1,3]} rownames(trans) <- c("Predefined","User_modif","Final") return(trans) } spec_tdX13<-function(td, tf, tadj){ td[3, "option"] <- if(!is.na(td[2, "option"])) {td[2, "option"]} else {td[1, "option"]} td[3, "stocktd"] <- if(!is.na(td[2, "stocktd"])) {td[2, "stocktd"]} else {td[1, "stocktd"]} if (td[3, "option"]=="None" & td[3, "stocktd"]==0) { td[3, "autoadjust"]<- td[1, "autoadjust"] td[3,c("leapyear", "test")]<- "None" } else if (td[3, "option"]=="None") { td[3, "autoadjust"]<- td[1, "autoadjust"] td[3, "leapyear"]<- "None" } else { td[3, "stocktd"] <-0 } if (is.na(td[3, "autoadjust"])){ td[3, "autoadjust"] <- if(!is.na(td[2, "autoadjust"])) {td[2, "autoadjust"]} else {td[1, "autoadjust"]} if (td[3, "autoadjust"] & as.character(tf)=="Auto") { td[3, "leapyear"]<- td[1, "leapyear"] }else{ td[3, "autoadjust"]<-FALSE } } if (is.na(td[3, "leapyear"])){ if (tadj!="None") { td[3, "leapyear"] <- "None" }else{ td[3, "leapyear"] <- if(!is.na(td[2, "leapyear"])) {td[2, "leapyear"]} else {td[1, "leapyear"]} } } if (is.na(td[3, "test"])) td[3, "test"] <- if(!is.na(td[2, "test"])) {td[2, "test"]} else {td[1, "test"]} if(td[3, "option"] == "UserDefined"){ if(any(!is.na(td[1, c("autoadjust", "leapyear", "stocktd")]))){ warning("With tradingdays.option = \"UserDefined\", the parameters tradingdays.autoadjust, tradingdays.leapyear and tradingdays.stocktd are ignored.\n", call. 
= FALSE) } td[3, "leapyear"] <- "None" td[3, "autoadjust"] <- FALSE td[3, "stocktd"] <- 0 } rownames(td) <- c("Predefined","User_modif","Final") return(td) } spec_easterX13<-function(easter){ easter[3,1] <- if(!is.na(easter[2,1])) {easter[2,1]} else {easter[1,1]} if (easter[3,1]== FALSE) { easter[3,2] <-FALSE easter[3,3] <-easter[1,3] easter[3,4] <- "None" } if (is.na(easter[3,2])){ easter[3,2] <- if (!is.na(easter[2,2])) {easter[2,2]} else {easter[1,2]} } if (is.na(easter[3,4])){ easter[3,4] <- if (!is.na(easter[2,4])) {easter[2,4]} else {easter[1,4]} } if (is.na(easter[3,3])){ if (easter[3,4]=="Add") { easter[3,3] <-easter[1,3] }else{ easter[3,3] <- if (!is.na(easter[2,3])) {easter[2,3]} else {easter[1,3]} } } rownames(easter) <- c("Predefined","User_modif","Final") return(easter) } spec_outliersX13<-function(out, spanP, spanM){ span <-spanM out[3,1]<- if (!is.na(out[2,1])) {out[2,1]} else {out[1,1]} out[3,3]<- if (!is.na(out[2,3])) {out[2,3]} else {out[1,3]} out[3,4]<- if (!is.na(out[2,4])) {out[2,4]} else {out[1,4]} out[3,5]<- if (!is.na(out[2,5])) {out[2,5]} else {out[1,5]} out[3,6]<- if (!is.na(out[2,6])) {out[2,6]} else {out[1,6]} if ((out[3,1]==FALSE) | (sum(out[3,3],out[3,4],out[3,5],out[3,6])==0)) { out[3,c(1,3:6)]<- FALSE out[3,c(2,7:10)]<- out[1,c(2,7:10)] span <- rbind(spanM[1,],spanP[2,]) rownames(span) <- c("estimate","outlier") }else{ out[3,2]<- if (!is.na(out[2,2])) {out[2,2]} else {out[1,2]} out[3,7]<- if (!is.na(out[2,7])) {out[2,7]} else {out[1,7]} out[3,8]<- if (!is.na(out[2,8])) {out[2,8]} else {out[1,8]} out[3,9]<- if (!is.na(out[2,9])) {out[2,9]} else {out[1,9]} out[3,10]<- if (!is.na(out[2,10])) {out[2,10]} else {out[1,10]} if (out[3,7]) {out[3,8]<-4} } if (is.na(out[2,2])) { span <- rbind(spanM[1,],spanP[2,]) rownames(span) <- c("estimate","outlier") } rownames(out) <- c("Predefined","User_modif","Final") return(list(out=out,span=span)) } spec_arimaX13 <-function(arimaspc, arimaco){ arimacoF <- arimaco$Final arimacoP <- arimaco$Predefined arimaspc[3,1]<- if (!is.na(arimaspc[2,1])) {arimaspc[2,1]} else {arimaspc[1,1]} if (arimaspc[3,1] == FALSE){ arimaspc[3,2:11] <- arimaspc[1,2:11] arimaspc[3,12]<- if (!is.na(arimaspc[2,12])) {arimaspc[2,12]} else {arimaspc[1,12]} arimaspc[3,13]<- if (!is.na(arimaspc[2,13])) {arimaspc[2,13]} else {arimaspc[1,13]} arimaspc[3,14]<- if (!is.na(arimaspc[2,14])) {arimaspc[2,14]} else {arimaspc[1,14]} arimaspc[3,15]<- if (!is.na(arimaspc[2,15])) {arimaspc[2,15]} else {arimaspc[1,15]} arimaspc[3,16]<- if (!is.na(arimaspc[2,16])) {arimaspc[2,16]} else {arimaspc[1,16]} arimaspc[3,17]<- if (!is.na(arimaspc[2,17])) {arimaspc[2,17]} else {arimaspc[1,17]} arimaspc[3,18]<- if (!is.na(arimaspc[2,18])) {arimaspc[2,18]} else {arimaspc[1,18]} arimaspc[3,19]<- if (!is.na(arimaspc[2,19])) {arimaspc[2,19]} else {arimaspc[1,19]} } else { arimaspc[3,2]<- if (!is.na(arimaspc[2,2])) {arimaspc[2,2]} else {arimaspc[1,2]} arimaspc[3,3]<- if (!is.na(arimaspc[2,3])) {arimaspc[2,3]} else {arimaspc[1,3]} arimaspc[3,4]<- if (!is.na(arimaspc[2,4])) {arimaspc[2,4]} else {arimaspc[1,4]} arimaspc[3,5]<- if (!is.na(arimaspc[2,5])) {arimaspc[2,5]} else {arimaspc[1,5]} arimaspc[3,6]<- if (!is.na(arimaspc[2,6])) {arimaspc[2,6]} else {arimaspc[1,6]} arimaspc[3,7]<- if (!is.na(arimaspc[2,7])) {arimaspc[2,7]} else {arimaspc[1,7]} arimaspc[3,8]<- if (!is.na(arimaspc[2,8])) {arimaspc[2,8]} else {arimaspc[1,8]} arimaspc[3,9]<- if (!is.na(arimaspc[2,9])) {arimaspc[2,9]} else {arimaspc[1,9]} arimaspc[3,10]<- if (!is.na(arimaspc[2,10])) {arimaspc[2,10]} else {arimaspc[1,10]} 
arimaspc[3,11]<- if (!is.na(arimaspc[2,11])) {arimaspc[2,11]} else {arimaspc[1,11]} arimaspc[3,12:18] <- arimaspc[1,12:18] arimaspc[3,19] <- FALSE } arma.cp <- c(arimaspc[3,13],arimaspc[3,15],arimaspc[3,16],arimaspc[3,18]) arma.cf <- c(arimaspc[1,13],arimaspc[1,15],arimaspc[1,16],arimaspc[1,18]) arimacoefFinal <- spec_arimaCoefF(enabled = arimaspc[3,19], armaP = arma.cp, armaF = arma.cf, coefP = arimacoP, coefF = arimacoF) arimaspc[3,19] <- as.logical(arimacoefFinal[1]) arimacoF <- if (is.na(arimacoefFinal[2])) {NA} else {as.data.frame(arimacoefFinal[2])} rownames(arimaspc) <- c("Predefined","User_modif","Final") x <- list(Predefined = arimacoP , Final = arimacoF) y <- list(specification = arimaspc, coefficients = x) return(y) } spec_estimateTS<-function(est, spanP, spanM){ est[3,"preliminary.check"] <- if(!is.na(est[2,"preliminary.check"])) { est[2,"preliminary.check"] } else { est[1,"preliminary.check"] } span <-spanM est[3,"span"] <- if(!is.na(est[2,"span"])) {est[2,"span"]} else {est[1,"span"]} if (is.na(est[2,"span"])) { span <- rbind(spanP[1,],spanM[2,]) rownames(span) <- c("estimate","outlier") } est[3, "tolerance"] <- if(!is.na(est[2, "tolerance"])) {est[2, "tolerance"]} else {est[1, "tolerance"]} est[3, "exact_ml"] <- if(!is.na(est[2, "exact_ml"])) {est[2, "exact_ml"]} else {est[1, "exact_ml"]} est[3, "urfinal"] <- if(!is.na(est[2, "urfinal"])) {est[2, "urfinal"]} else {est[1, "urfinal"]} rownames(est) <- c("Predefined","User_modif","Final") return(list(est=est,span=span)) } spec_transformTS<-function(trans){ trans[3,1] <- if(!is.na(trans[2,1])) {trans[2,1]} else {trans[1,1]} if (trans[3,1]=="Auto") { trans[3,2] <- if(!is.na(trans[2,2])) {trans[2,2]} else {trans[1,2]} }else{ trans[3,2]<-trans[1,2] } rownames(trans) <- c("Predefined","User_modif","Final") return(trans) } spec_tdTS<-function(td){ td[3, "automatic"] <- if(!is.na(td[2, "automatic"])) {td[2, "automatic"]} else {td[1, "automatic"]} if (td[3, "automatic"]== "Unused"){ td[3, "pftd"] <- if(!is.na(td[2, "pftd"])) {td[2, "pftd"]} else {td[1, "pftd"]} td[3, c("option", "leapyear", "stocktd", "test")] <- td[1, c("option", "leapyear", "stocktd", "test")] }else{ td[3, "pftd"] <-td[1, "pftd"] td[3, "option"] <- if(!is.na(td[2, "option"])) {td[2, "option"]} else {td[1, "option"]} td[3, "stocktd"] <- if(!is.na(td[2, "stocktd"])) {td[2, "stocktd"]} else {td[1, "stocktd"]} if (td[3, "option"]=="None" & td[3, "stocktd"]==0){ td[3, "leapyear"] <- FALSE td[3, "test"] <- "None" }else if (td[3, "option"]=="None"){ td[3, "leapyear"] <- FALSE td[3, "test"] <- if(!is.na(td[2, "test"])) {td[2, "test"]} else {td[1, "test"]} }else{ td[3, "stocktd"]<-0 td[3, "leapyear"] <- if(!is.na(td[2, "leapyear"])) {td[2, "leapyear"]} else {td[1, "leapyear"]} td[3, "test"] <- if(!is.na(td[2, "test"])) {td[2, "test"]} else {td[1, "test"]} } } if(td[3, "option"] == "UserDefined"){ if(any(!is.na(td[1, c("automatic","leapyear", "stocktd")]))){ warning("With tradingdays.option = \"UserDefined\", the parameters tradingdays.leapyear and tradingdays.stocktd are ignored.\n", call. 
= FALSE) } td[3, "automatic"] <- td[1, "automatic"] td[3, "leapyear"] <- FALSE td[3, "stocktd"] <- 0 } rownames(td) <- c("Predefined","User_modif","Final") return(td) } spec_easterTS<-function(easter){ easter[3,1] <- if(!is.na(easter[2,1])) {easter[2,1]} else {easter[1,1]} if (easter[3,1] == "Unused"){ easter[3,2] <- FALSE easter[3,3:4] <- easter[1,3:4] }else{ easter[3,2] <- if(!is.na(easter[2,2])) {easter[2,2]} else {easter[1,2]} easter[3,3] <- if(!is.na(easter[2,3])) {easter[2,3]} else {easter[1,3]} easter[3,4] <- if(!is.na(easter[2,4])) {easter[2,4]} else {easter[1,4]} } rownames(easter) <- c("Predefined","User_modif","Final") return(easter) } spec_outliersTS<-function(out, spanP, spanM){ span <-spanM out[3,1]<- if (!is.na(out[2,1])) {out[2,1]} else {out[1,1]} out[3,3]<- if (!is.na(out[2,3])) {out[2,3]} else {out[1,3]} out[3,4]<- if (!is.na(out[2,4])) {out[2,4]} else {out[1,4]} out[3,5]<- if (!is.na(out[2,5])) {out[2,5]} else {out[1,5]} out[3,6]<- if (!is.na(out[2,6])) {out[2,6]} else {out[1,6]} if ((out[3,1]==FALSE) | (sum(out[3,3],out[3,4],out[3,5],out[3,6])==0)) { out[3,c(1,3:6)]<- FALSE out[3,c(2,7:10)]<- out[1,c(2,7:10)] span <- rbind(spanM[1,],spanP[2,]) rownames(span) <- c("estimate","outlier") }else{ out[3,2]<- if (!is.na(out[2,2])) {out[2,2]} else {out[1,2]} out[3,7]<- if (!is.na(out[2,7])) {out[2,7]} else {out[1,7]} out[3,8]<- if (!is.na(out[2,8])) {out[2,8]} else {out[1,8]} out[3,9]<- if (!is.na(out[2,9])) {out[2,9]} else {out[1,9]} out[3,10]<- if (!is.na(out[2,10])) {out[2,10]} else {out[1,10]} if (out[3,7]) {out[3,8]<-3.5} } if (is.na(out[2,2])) { span <- rbind(spanM[1,],spanP[2,]) rownames(span) <- c("estimate","outlier") } rownames(out) <- c("Predefined","User_modif","Final") return(list(out=out,span=span)) } spec_arimaTS <-function(arimaspc, arimaco){ arimacoF <- arimaco$Final arimacoP <- arimaco$Predefined arimaspc[3,1]<- if (!is.na(arimaspc[2,1])) {arimaspc[2,1]} else {arimaspc[1,1]} if (arimaspc[3,1] == FALSE){ arimaspc[3,2:9] <- arimaspc[1,2:9] arimaspc[3,10]<- if (!is.na(arimaspc[2,10])) {arimaspc[2,10]} else {arimaspc[1,10]} arimaspc[3,11]<- if (!is.na(arimaspc[2,11])) {arimaspc[2,11]} else {arimaspc[1,11]} arimaspc[3,12]<- if (!is.na(arimaspc[2,12])) {arimaspc[2,12]} else {arimaspc[1,12]} arimaspc[3,13]<- if (!is.na(arimaspc[2,13])) {arimaspc[2,13]} else {arimaspc[1,13]} arimaspc[3,14]<- if (!is.na(arimaspc[2,14])) {arimaspc[2,14]} else {arimaspc[1,14]} arimaspc[3,15]<- if (!is.na(arimaspc[2,15])) {arimaspc[2,15]} else {arimaspc[1,15]} arimaspc[3,16]<- if (!is.na(arimaspc[2,16])) {arimaspc[2,16]} else {arimaspc[1,16]} arimaspc[3,17]<- if (!is.na(arimaspc[2,17])) {arimaspc[2,17]} else {arimaspc[1,17]} }else{ arimaspc[3,2]<- if (!is.na(arimaspc[2,2])) {arimaspc[2,2]} else {arimaspc[1,2]} arimaspc[3,3]<- if (!is.na(arimaspc[2,3])) {arimaspc[2,3]} else {arimaspc[1,3]} arimaspc[3,4]<- if (!is.na(arimaspc[2,4])) {arimaspc[2,4]} else {arimaspc[1,4]} arimaspc[3,5]<- if (!is.na(arimaspc[2,5])) {arimaspc[2,5]} else {arimaspc[1,5]} arimaspc[3,6]<- if (!is.na(arimaspc[2,6])) {arimaspc[2,6]} else {arimaspc[1,6]} arimaspc[3,7]<- if (!is.na(arimaspc[2,7])) {arimaspc[2,7]} else {arimaspc[1,7]} arimaspc[3,8]<- if (!is.na(arimaspc[2,8])) {arimaspc[2,8]} else {arimaspc[1,8]} arimaspc[3,9]<- if (!is.na(arimaspc[2,9])) {arimaspc[2,9]} else {arimaspc[1,9]} arimaspc[3,10:16] <- arimaspc[1,10:16] arimaspc[3,17] <- FALSE } arma.cp <- c(arimaspc[3,11],arimaspc[3,13],arimaspc[3,14],arimaspc[3,16]) arma.cf <- c(arimaspc[1,11],arimaspc[1,13],arimaspc[1,14],arimaspc[1,16]) arimacoefFinal <- 
spec_arimaCoefF(enabled = arimaspc[3,17], armaP = arma.cp, armaF = arma.cf, coefP = arimacoP, coefF = arimacoF) arimaspc[3,17] <- as.logical(arimacoefFinal[1]) arimacoF <- if (is.na(arimacoefFinal[2])) {NA} else {as.data.frame(arimacoefFinal[2])} rownames(arimaspc) <- c("Predefined","User_modif","Final") x <- list(Predefined = arimacoP , Final = arimacoF) y <- list(specification = arimaspc, coefficients = x) return(y) } spec_userdef <- function(usrspc, out, var, tf) { outF <- out$Final outP <- out$Predefined varF <- var$Final varP <- var$Predefined if(is.na(usrspc [2,1])){ usrspc[3,1] <- usrspc[1,1] if (usrspc[1,1] & (sum(!is.na(outF))==0)){ outF<- outP } }else if (usrspc[2,1] & usrspc[1,1] & sum(!is.na(outF))==0){ usrspc[3,1] <- TRUE outF<- outP }else if(sum(!is.na(outF))==0){ usrspc[3,1]<-FALSE }else{ usrspc[3,1] <- usrspc[2,1] } if (usrspc[3,1]){ if (sum(!is.na(outF[,3]))==0){ usrspc[2:3,2]<- FALSE }else if (tf=="Auto"){ usrspc[2,2]<- TRUE usrspc[3,2]<- FALSE }else{ usrspc[2:3,2]<- TRUE } }else{ usrspc[3,2]<- FALSE } if(is.na(usrspc[2,3])){ usrspc[3,3] <- usrspc[1,3] if (usrspc[1,3] & (sum(!is.na(varF$series))==0)){ varF<- varP } }else if (usrspc[2,3] & usrspc[1,3]==TRUE & sum(!is.na(varF$series))==0){ usrspc[3,3] <- TRUE varF<- varP }else if(sum(!is.na(varF$series))==0){ usrspc[3,3]<-FALSE }else{ usrspc[3,3] <- usrspc[2,3] } if (usrspc[3,3]){ if (sum(!is.na(varF$description[,2]))==0){ usrspc[2:3,4]<- FALSE }else if (tf=="Auto"){ usrspc[2,4]<- TRUE usrspc[3,4]<- FALSE }else{ usrspc[2:3,4]<- TRUE } }else{ usrspc[3,4]<- FALSE } rownames(usrspc) <- c("Predefined","User_modif","Final") outliers <- list(Predefined = outP, Final = outF) variables <- list(Predefined = varP, Final = varF) x <- list(specification = usrspc, outliers = outliers, variables = variables) return(x) } spec_forecast <- function(fcst){ fcst[3,1] <- if (!is.na(fcst[2,1])) {fcst[2,1]} else {fcst[1,1]} rownames(fcst) <- c("Predefined","User_modif","Final") return(fcst) } spec_x11 <- function(x11){ for (i in c("x11.mode", "x11.seasonalComp", "x11.lsigma", "x11.usigma", "x11.trendAuto", "x11.fcasts", "x11.bcasts", "x11.excludeFcasts") ) { x11[3,i] <- if (!is.na(x11[2,i])) {x11[2,i]} else {x11[1,i]} } if (x11[3,"x11.trendAuto"] | is.na(x11[2, "x11.trendma"])) { x11[3, "x11.trendma"] <- x11[1, "x11.trendma"] }else { x11[3,"x11.trendma"] <- x11[2, "x11.trendma"] } if(x11[3,"x11.seasonalComp"] & !is.na(x11[2,"x11.seasonalma"])){ x11[3,"x11.seasonalma"] <- x11[2,"x11.seasonalma"] }else { x11[3,"x11.seasonalma"] <- x11[1,"x11.seasonalma"] } if (is.na(x11[2, "x11.calendarSigma"])) { x11[3, c("x11.calendarSigma","x11.sigmaVector")] <- x11[1, c("x11.calendarSigma","x11.sigmaVector")] } else if (!identical(x11[2, "x11.calendarSigma"], "Select")){ x11[3, "x11.calendarSigma"] <- x11[2, "x11.calendarSigma"] x11[3, "x11.sigmaVector"] <- NA } else{ x11[3, c("x11.calendarSigma","x11.sigmaVector")] <- x11[2, c("x11.calendarSigma","x11.sigmaVector")] } rownames(x11) <- c("Predefined","User_modif","Final") return(x11) } spec_seats <- function(seatspc){ seats <- seatspc for (i in seq_len(ncol(seats))) { seats[3,i] <- if (!is.na(seats[2,i])) {seats[2,i]} else {seats[1,i]} } rownames(seats) <- c("Predefined","User_modif","Final") return(seats) }
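An illustrative call of the spec_span() helper defined at the top of this block (argument values are invented); with both from and to supplied it returns a one-row data frame of type "Between", roughly:

spec_span(from = "2000-01-01", to = "2010-12-31", var = "estimate")
#                     span    type         d0         d1 n0 n1
#  2000-01-01 - 2010-12-31 Between 2000-01-01 2010-12-31  0  0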
context("landscape level lsm_l_te metric") landscapemetrics_landscape_landscape_value <- lsm_l_te(landscape) test_lsm <- matrix(data = NA, nrow = 25, ncol = 30) test_lsm[c(5:7), c(5:7)] <- 1 test_lsm[4, 6] <- 1 test_lsm[6, 8] <- 1 test_lsm[8, 6] <- 1 test_lsm[6, 4] <- 1 test_lsm[6, 6] <- 2 test_lsm <- raster::raster(test_lsm, xmn = 0, xmx = 30, ymn = 0, ymx = 25) test_that("lsm_l_te is typestable", { expect_is(lsm_l_te(landscape), "tbl_df") expect_is(lsm_l_te(landscape_stack), "tbl_df") expect_is(lsm_l_te(landscape_brick), "tbl_df") expect_is(lsm_l_te(landscape_list), "tbl_df") }) test_that("lsm_l_te returns the desired number of columns", { expect_equal(ncol(landscapemetrics_landscape_landscape_value), 6) }) test_that("lsm_l_te returns in every column the correct type", { expect_type(landscapemetrics_landscape_landscape_value$layer, "integer") expect_type(landscapemetrics_landscape_landscape_value$level, "character") expect_type(landscapemetrics_landscape_landscape_value$class, "integer") expect_type(landscapemetrics_landscape_landscape_value$id, "integer") expect_type(landscapemetrics_landscape_landscape_value$metric, "character") expect_type(landscapemetrics_landscape_landscape_value$value, "double") }) test_that("lsm_l_te option count_boundary is working", { te_with_boundary <- lsm_l_te(landscape, count_boundary = TRUE) te_without_boundary <- lsm_l_te(landscape, count_boundary = FALSE) expect_lt(te_without_boundary$value, te_with_boundary$value) }) test_that("lsm_l_te can handle raster with different xy resolution", { expect_is(lsm_l_te(landscape_diff_res), "tbl_df") }) test_that("lsm_l_te is the same if count_boundary = FALSE", { result_cbF <- lsm_c_te(test_lsm, count_boundary = FALSE) result_cbT <- lsm_c_te(test_lsm, count_boundary = TRUE) result_l_cbF <- lsm_l_te(test_lsm, count_boundary = FALSE) result_l_cbT <- lsm_l_te(test_lsm, count_boundary = TRUE) expect_true(all(result_l_cbF$value == result_cbF$value)) expect_true(all(result_l_cbT$value == max(result_cbT$value))) })
ir_parse <- function(x, options, type = NULL) { xp <- odin_preprocess(x, type) root <- xp$root exprs <- xp$exprs base <- xp$base dat <- ir_parse_exprs(exprs) eqs <- dat$eqs source <- dat$source config <- ir_parse_config(eqs, base, root, source, options$read_include, options$config_custom) features <- ir_parse_features(eqs, config, source) variables <- ir_parse_find_variables(eqs, features$discrete, source) eqs <- lapply(eqs, ir_parse_rewrite_initial, variables) eqs <- ir_parse_arrays(eqs, variables, config$include$names, source) eqs <- ir_parse_substitute(eqs, options$substitutions) if (options$rewrite_constants) { eqs <- ir_parse_rewrite_constants(eqs) } else if (options$rewrite_dims && features$has_array) { eqs <- ir_parse_rewrite_dims(eqs) } packing <- ir_parse_packing(eqs, variables, source) eqs <- c(eqs, packing$offsets) packing$offsets <- NULL if (features$has_interpolate) { eqs <- ir_parse_interpolate(eqs, features$discrete, source) i <- vcapply(eqs, "[[", "type") == "alloc_interpolate" f <- function(v) { sort(unique(unlist(unname(lapply(eqs[i], function(eq) eq$control[[v]]))))) %||% character(0) } interpolate <- list(min = f("min"), max = f("max"), critical = f("critical")) } else { interpolate <- list(min = list(), max = list(), critical = list()) } if (features$has_delay) { eqs <- ir_parse_delay(eqs, features$discrete, packing$variables, source) } eqs <- eqs[order(names(eqs))] meta <- ir_parse_meta(features$discrete) dependencies <- ir_parse_dependencies(eqs, variables, meta$time, source) stage <- ir_parse_stage(eqs, dependencies, variables, meta$time, source) eqs_initial <- names_if(vlapply(eqs, function(x) identical(x$lhs$special, "initial"))) features$initial_time_dependent <- features$has_delay || max(stage[eqs_initial]) == STAGE_TIME data <- ir_parse_data(eqs, packing, stage, source) if (features$has_user) { is_user <- vcapply(eqs, "[[", "type") == "user" user <- unname(lapply(eqs[is_user], function(x) list(name = x$name, has_default = !is.null(x$user$default)))) user <- user[order(vlapply(user, "[[", "has_default"))] } else { user <- list() } components <- ir_parse_components(eqs, dependencies, variables, stage, features$discrete, source, options) equations <- ir_parse_equations(eqs) ir_parse_check_functions(eqs, features$discrete, config$include$names, source) ret <- list(version = .odin$version, config = config, meta = meta, features = features, data = data, equations = equations, components = components, user = user, interpolate = interpolate, source = source) ir <- ir_serialise(ret, options$pretty) if (options$validate) { ir_validate(ir, TRUE) } ir } ir_parse_packing <- function(eqs, variables, source) { i <- vlapply(eqs, function(x) identical(x$lhs$special, "initial")) pack_variables <- ir_parse_packing_new(eqs[i], TRUE, "variable") j <- vlapply(eqs, function(x) identical(x$lhs$special, "output")) pack_output <- ir_parse_packing_new(eqs[j], FALSE, "output") output <- vcapply(pack_output$contents, "[[", "name") err <- intersect(output, variables) if (length(err) > 0L) { k <- which(i | j) k <- k[vlapply(eqs[k], function(x) x$lhs$name_data %in% err)] ir_parse_error("output() name cannot be the same as variable name", ir_parse_error_lines(eqs[k]), source) } list(variables = pack_variables, output = pack_output, offsets = c(pack_variables$offsets, pack_output$offsets)) } ir_parse_data <- function(eqs, packing, stage, source) { type <- vcapply(eqs, function(x) x$type, USE.NAMES = FALSE) is_alloc <- vlapply(eqs, function(x) x$type == "alloc" && x$name != x$lhs$name_lhs) i <- 
!(is_alloc | type %in% c("copy", "config")) elements <- lapply(eqs[i], ir_parse_data_element, stage) names(elements) <- vcapply(elements, "[[", "name") elements <- elements[order(names(elements))] list(elements = elements, variable = packing$variables, output = packing$output) } ir_parse_data_element <- function(x, stage) { name <- x$lhs$name_data storage_type <- x$lhs$storage_type %||% "double" if (is.null(x$array)) { rank <- 0L dimnames <- NULL } else { rank <- x$array$rank dimnames <- x$array$dimnames } stage <- stage[[x$name]] if (is.null(x$lhs$special)) { if (rank == 0L && stage == STAGE_TIME) { location <- "transient" } else { location <- "internal" } } else if (x$lhs$special == "initial") { location <- "internal" name <- x$lhs$name_equation } else if (x$lhs$special == "deriv" || x$lhs$special == "update") { location <- "variable" } else if (x$lhs$special == "output") { location <- "output" } else { stop("unclassified data type [odin bug]") } list(name = name, location = location, stage = stage, storage_type = storage_type, rank = rank, dimnames = dimnames) } ir_parse_meta <- function(discrete) { time <- if (discrete) STEP else TIME result <- if (discrete) STATE_NEXT else DSTATEDT list(internal = INTERNAL, user = USER, state = STATE, result = result, output = OUTPUT, time = time, initial_time = initial_name(time)) } ir_parse_find_variables <- function(eqs, discrete, source) { is_special <- vlapply(eqs, function(x) !is.null(x$lhs$special)) special <- vcapply(eqs[is_special], function(x) x$lhs$special) name_data <- vcapply(eqs[is_special], function(x) x$lhs$name_data) rhs_fun <- if (discrete) "update" else "deriv" is_initial <- special == "initial" is_var <- special == rhs_fun vars <- name_data[is_var] vars_initial <- name_data[is_initial] if (!setequal(vars, vars_initial)) { msg <- collector() msg_initial <- setdiff(vars, vars_initial) if (length(msg_initial) > 0L) { msg$add("\tin %s() but not initial(): %s", rhs_fun, paste(msg_initial, collapse = ", ")) } msg_vars <- setdiff(vars_initial, vars) if (length(msg_vars) > 0L) { msg$add("\tin initial() but not %s(): %s", rhs_fun, paste(msg_vars, collapse = ", ")) } tmp <- eqs[is_var | is_initial] ir_parse_error(sprintf( "%s() and initial() must contain same set of equations:\n%s\n", rhs_fun, paste(msg$get(), collapse = "\n")), ir_parse_error_lines(tmp), source) } err <- names(eqs) %in% vars if (any(err)) { ir_parse_error( sprintf("variables on lhs must be within %s() or initial() (%s)", rhs_fun, paste(intersect(vars, names(eqs)), collapse = ", ")), ir_parse_error_lines(eqs[err]), source) } unique(unname(vars)) } ir_parse_find_exclusive_output <- function(eqs, source) { i <- vlapply(eqs, function(x) identical(x$lhs$special, "output")) j <- vlapply(eqs, function(x) is.null(x$lhs$special)) nms <- vcapply(eqs[i], function(x) x$lhs$name_data, USE.NAMES = FALSE) setdiff(nms, names_if(j)) } ir_parse_dependencies <- function(eqs, variables, time_name, source) { implicit <- set_names(rep(list(character(0)), length(variables) + 1), c(variables, time_name)) explicit <- lapply(eqs, function(eq) setdiff(eq$depends$variables, c(eq$name, INDEX, ""))) deps <- c(explicit, implicit) msg <- lapply(deps, setdiff, names(deps)) i <- lengths(msg) > 0L if (any(i)) { msg <- sort(unique(unlist(msg))) fmt <- ngettext(length(msg), "Unknown variable %s", "Unknown variables %s") ir_parse_error(sprintf(fmt, paste(msg, collapse = ", ")), ir_parse_error_lines(eqs[i]), source) } order <- topological_order(deps) recursive_dependencies(order, deps, variables) } 
ir_parse_stage <- function(eqs, dependencies, variables, time_name, source) { stage <- set_names(rep(STAGE_CONSTANT, length(dependencies)), names(dependencies)) is_user <- function(x) { x$type == "user" } is_null <- function(x) { x$type == "null" } is_time_dependent <- function(x) { (!is.null(x$lhs$special) && x$lhs$special %in% c("deriv", "update", "output")) || !is.null(x$rhs$delay) || !is.null(x$rhs$interpolate) || isTRUE(x$stochastic) || x$type == "delay_continuous" } stage[names_if(vlapply(eqs, is_user))] <- STAGE_USER stage[names_if(vlapply(eqs, is_time_dependent))] <- STAGE_TIME stage[time_name] <- STAGE_TIME stage[variables] <- STAGE_TIME for (i in seq_along(stage)) { stage[[i]] <- max(stage[[i]], stage[dependencies[[i]]]) } stage[names_if(vlapply(eqs, is_null))] <- STAGE_NULL i <- vlapply(eqs, function(x) !is.null(x$array)) len <- lapply(eqs[i], function(x) x$array$dimnames$length) len_var <- vcapply(len[!vlapply(len, is.numeric)], as.character) err <- stage[len_var] == STAGE_TIME if (any(err)) { ir_parse_error( "Array extent is determined by time", ir_parse_error_lines(eqs[len_var[err]]), source) } stage } ir_parse_packing_new <- function(eqs, variables, offset_prefix) { eqs <- unname(eqs) len <- lapply(eqs, function(x) x$array$dimnames$length %||% 1L) rank <- viapply(eqs, function(x) x$array$rank %||% 0L) names <- vcapply(eqs, function(x) x$lhs$name_data) ir_parse_packing_internal(names, rank, len, variables, offset_prefix) } ir_parse_packing_internal <- function(names, rank, len, variables, offset_prefix) { i <- order(!vlapply(len, is.numeric), rank) names <- names[i] rank <- rank[i] len <- len[i] is_array <- rank > 0L offset <- vector("list", length(names) + 1L) offset[[1L]] <- 0L for (i in seq_along(names)) { if (!is_array[[i]]) { offset[[i + 1L]] <- i } else { len_i <- if (is.numeric(len[[i]])) len[[i]] else as.name(len[[i]]) offset[[i + 1L]] <- static_eval(call("+", offset[[i]], len_i)) } } length <- offset[[length(names) + 1L]] offset <- offset[seq_along(names)] i <- vlapply(offset[seq_along(names)], is.call) if (any(i)) { eq_offset <- function(name, value) { list(name = name, type = "expression_scalar", implicit = TRUE, source = integer(0), depends = find_symbols(value), lhs = list(name_data = name, name_equation = name, name_lhs = name, storage_type = "int"), rhs = list(value = value)) } offset_name <- sprintf("offset_%s_%s", offset_prefix, names[i]) eqs_offsets <- Map(eq_offset, offset_name, offset[i]) names(eqs_offsets) <- vcapply(eqs_offsets, "[[", "name") offset[i] <- lapply(names(eqs_offsets), as.name) } else { eqs_offsets <- NULL } if (variables) { contents <- unname(Map( list, name = names, offset = offset, initial = initial_name(names))) } else { contents <- unname(Map(list, name = names, offset = offset)) } list(length = length, contents = contents, offsets = eqs_offsets) } ir_parse_features <- function(eqs, config, source) { is_update <- vlapply(eqs, function(x) identical(x$lhs$special, "update")) is_deriv <- vlapply(eqs, function(x) identical(x$lhs$special, "deriv")) is_output <- vlapply(eqs, function(x) identical(x$lhs$special, "output")) is_dim <- vlapply(eqs, function(x) identical(x$lhs$special, "dim")) is_user <- vlapply(eqs, function(x) !is.null(x$user)) is_delay <- vlapply(eqs, function(x) !is.null(x$delay)) is_interpolate <- vlapply(eqs, function(x) !is.null(x$interpolate)) is_stochastic <- vlapply(eqs, function(x) isTRUE(x$stochastic)) if (any(is_update) && any(is_deriv)) { tmp <- eqs[is_deriv | is_update] ir_parse_error("Cannot mix deriv() and 
update()", ir_parse_error_lines(tmp), source) } if (!any(is_update | is_deriv)) { ir_parse_error("Did not find a deriv() or an update() call", NULL, NULL) } list(discrete = any(is_update), has_array = any(is_dim), has_output = any(is_output), has_user = any(is_user), has_delay = any(is_delay), has_interpolate = any(is_interpolate), has_stochastic = any(is_stochastic), has_include = !is.null(config$include), initial_time_dependent = NULL) } ir_parse_components <- function(eqs, dependencies, variables, stage, discrete, source, options) { eqs_constant <- intersect(names_if(stage == STAGE_CONSTANT), names(eqs)) eqs_user <- intersect(names_if(stage == STAGE_USER), names(eqs)) eqs_time <- intersect(names_if(stage == STAGE_TIME), names(eqs)) rhs_special <- if (discrete) "update" else "deriv" rhs <- names_if(vlapply(eqs, function(x) identical(x$lhs$special, rhs_special))) v <- unique(unlist(dependencies[rhs], use.names = FALSE)) eqs_rhs <- intersect(eqs_time, c(rhs, v)) variables_rhs <- intersect(variables, v) output <- names_if(vlapply(eqs, function(x) identical(x$lhs$special, "output"))) v <- unique(c(character(), unlist(dependencies[output], use.names = FALSE))) eqs_output <- intersect(eqs_time, c(output, v)) variables_output <- intersect(variables, v) initial <- names_if(vlapply(eqs, function(x) identical(x$lhs$special, "initial"))) v <- unique(c(initial, unlist(dependencies[initial], use.names = FALSE))) eqs_initial <- intersect(eqs_time, v) type <- vcapply(eqs, "[[", "type") core <- unique(c(initial, rhs, output, eqs_initial, eqs_rhs, eqs_output)) used_in_delay <- unlist(lapply(eqs[type == "delay_continuous"], function(x) x$delay$depends$variables), FALSE, FALSE) core <- union(core, used_in_delay) ignore <- c("config", "null", "delay_index", "alloc") core <- c(core, names(eqs)[type %in% ignore]) if (!options$no_check_unused_equations) { ir_parse_check_unused(eqs, dependencies, core, stage, source) } list( create = list(variables = character(0), equations = eqs_constant), user = list(variables = character(0), equations = eqs_user), initial = list(variables = character(0), equations = eqs_initial), rhs = list(variables = variables_rhs, equations = eqs_rhs), output = list(variables = variables_output, equations = eqs_output)) } ir_parse_check_unused <- function(eqs, dependencies, core, stage, source) { used <- unique(c(core, unlist(dependencies[core], FALSE, FALSE))) check <- names_if(vlapply(eqs, function(x) !isTRUE(x$implicit))) unused <- setdiff(check, used) ignored <- vlapply(eqs[unused], function(x) any(grepl(" USE.NAMES = FALSE) if (length(ignored) > 0) { dropped <- names_if(stage[unused[ignored]] == STAGE_TIME) if (length(dropped) > 0) { ir_parse_note(sprintf( "Unused equation marked as ignored will be dropped: %s", paste(sort(dropped), collapse = ", ")), ir_parse_error_lines(eqs[dropped]), source) } } unused <- unused[!ignored] if (length(unused) > 0L) { what <- ngettext(length(unused), "equation", "equations") ir_parse_note(sprintf("Unused %s: %s", what, paste(sort(unused), collapse = ", ")), ir_parse_error_lines(eqs[unused]), source) } } ir_parse_exprs <- function(exprs) { src <- attr(exprs, "wholeSrcref", exact = TRUE) if (is.null(src)) { src <- vcapply(exprs, deparse_str) lines <- seq_along(exprs) } else { src <- as.character(src) lines0 <- utils::getSrcLocation(exprs, "line", first = TRUE) lines1 <- utils::getSrcLocation(exprs, "line", first = FALSE) lines <- Map(seq.int, lines0, lines1) } expr_is_assignment <- function(x) { length(x) == 3L && (identical(x[[1]], quote(`<-`)) || 
identical(x[[1]], quote(`=`))) } err <- which(!vlapply(exprs, expr_is_assignment)) if (length(err) > 0L) { ir_parse_error("Every line must contain an assignment", unlist(lines[err]), src) } eqs <- Map(ir_parse_expr, exprs, lines, MoreArgs = list(source = src)) names(eqs) <- vcapply(eqs, "[[", "name") list(eqs = eqs, source = src) } ir_parse_expr <- function(expr, line, source) { lhs <- ir_parse_expr_lhs(expr[[2L]], line, source) rhs <- ir_parse_expr_rhs(expr[[3L]], line, source) depends <- join_deps(list(lhs$depends, rhs$depends)) rhs$depends <- NULL if (!is.null(rhs$user)) { type <- "user" } else if (!is.null(rhs$interpolate)) { type <- "interpolate" } else if (!is.null(rhs$delay)) { type <- "delay" } else if (identical(lhs$special, "dim")) { type <- "dim" } else if (identical(lhs$special, "config")) { type <- "config" } else if (lhs$type == "expression_scalar") { type <- "expression_scalar" } else if (lhs$type == "expression_array") { type <- "expression_array" } else { stop("unclassified equation type [odin bug]") } lhs$type <- NULL if (type == "user") { if (!is.null(lhs$special) && !identical(lhs$special, "dim")) { ir_parse_error("user() only valid for non-special variables", line, source) } if (rhs$user$integer) { lhs$storage_type <- "int" } } if (type == "delay" && !is.null(lhs$special)) { ir_parse_error("delay() only valid for non-special variables", line, source) } if (identical(lhs$special, "output")) { copy_expr <- as.name(lhs$name_data) if (type == "expression_array") { copy_expr_index <- lapply(INDEX[seq_along(lhs$index)[[1]]], as.name) copy_expr_array <- as.call(c(as.name("["), copy_expr, copy_expr_index)) } else { copy_expr_array <- copy_expr } is_copy <- isTRUE(rhs$rhs$value) || identical(rhs$rhs$value, copy_expr) || identical(rhs$rhs$value, copy_expr_array) if (is_copy) { type <- "copy" depends <- list(functions = character(0), variables = lhs$name_data) rhs <- NULL } } if (any(names(FUNCTIONS_INPLACE) %in% depends$functions)) { type <- "expression_inplace" ir_parse_expr_rhs_check_inplace(lhs, rhs, line, source) } is_self_ref <- lhs$name_data %in% depends$variables && type != "expression_array" && !identical(lhs$special, "deriv") && !identical(lhs$special, "update") && type != "copy" if (is_self_ref) { ir_parse_error( "Self referencing expressions not allowed (except for arrays)", line, source) } ret <- list(name = lhs$name_equation, type = type, lhs = lhs, depends = depends, source = line) c(ret, rhs) } ir_parse_expr_lhs <- function(lhs, line, source) { is_special <- is_array <- FALSE special <- index <- depends <- NULL if (is.call(lhs)) { fun <- deparse_str(lhs[[1L]]) if (fun %in% SPECIAL_LHS) { if (length(lhs) != 2L) { ir_parse_error("Invalid length special function on lhs", line, source) } is_special <- TRUE special <- fun lhs <- lhs[[2L]] } } if (is_call(lhs, "[")) { if (is_special && special %in% c("dim", "config")) { ir_parse_error("dim() must be applied to a name only (not an array)", line, source) } is_array <- TRUE tmp <- ir_parse_expr_lhs_index(lhs, line, source) index <- tmp$index depends <- tmp$depends lhs <- lhs[[2L]] } name <- ir_parse_expr_check_lhs_name(lhs, special, line, source) type <- if (is_array) "expression_array" else "expression_scalar" name_data <- name name_equation <- if (is_special) sprintf("%s_%s", special, name) else name if (!is_special) { name_lhs <- name_equation } else if (special %in% c("initial", "dim")) { name_lhs <- name_equation } else if (special %in% c("deriv", "output", "update", "config")) { name_lhs <- name_data } else { 
stop("odin bug") } list(type = type, name_data = name_data, name_equation = name_equation, name_lhs = name_lhs, special = special, index = index, depends = depends) } ir_parse_expr_lhs_index <- function(lhs, line, source) { if (!is.name(lhs[[2L]])) { ir_parse_error("array lhs must be a name", line, source) } index <- as.list(lhs[-(1:2)]) is_empty <- vlapply(index, identical, quote(expr = )) if (any(is_empty)) { if (length(index) == 1L) { index[] <- list(bquote(1:length(.(lhs[[2L]])))) } else { index[is_empty] <- lapply(as.numeric(which(is_empty)), function(i) bquote(1:dim(.(lhs[[2L]]), .(i)))) } lhs[-(1:2)] <- index } tmp <- lapply(index, ir_parse_expr_lhs_check_index) ok <- vlapply(tmp, as.logical) if (all(ok)) { extent_max <- lapply(tmp, attr, "value_max", exact = TRUE) extent_min <- lapply(tmp, attr, "value_min", exact = TRUE) is_range <- !vlapply(extent_min, is.null) } else { msg <- paste0("\t\t", vcapply(tmp[!ok], attr, "message"), collapse = "\n") ir_parse_error(sprintf("Invalid array use on lhs:\n%s", msg), line, source) } name <- deparse(lhs[[2L]]) deps <- find_symbols(index) err <- intersect(INDEX, deps$variables) if (length(err) > 0L) { ir_parse_error( sprintf("Special index variable %s may not be used on array lhs", pastec(err)), line, source) } name_dim <- array_dim_name(name) deps$variables <- union(deps$variables, name_dim) deps$functions <- setdiff(deps$functions, ":") list( index = Map(list, value = index, is_range = is_range, index = INDEX[seq_along(index)]), depends = deps) } ir_parse_expr_check_lhs_name <- function(lhs, special, line, source) { if (is.call(lhs)) { fun <- deparse_str(lhs[[1L]]) if (fun %in% SPECIAL_LHS) { msg <- sprintf("Invalid nested lhs function usage for %s", fun) } else { msg <- sprintf("Unhandled expression %s on lhs", fun) } ir_parse_error(msg, line, source) } if (!is.name(lhs)) { if (is.null(special)) { ir_parse_error("Invalid left hand side", line, source) } else { ir_parse_error(sprintf("Argument to %s must be a symbol", special), line, source) } } name <- deparse(lhs) if (name %in% RESERVED) { ir_parse_error(sprintf("Reserved name '%s' for lhs", name), line, source) } re <- sprintf("^(%s)_.*", paste(RESERVED_PREFIX, collapse = "|")) if (grepl(re, name)) { ir_parse_error(sprintf("Variable name cannot start with '%s_'", sub(re, "\\1", name)), line, source) } name } ir_parse_expr_rhs <- function(rhs, line, source) { if (is_call(rhs, quote(delay))) { ir_parse_expr_rhs_delay(rhs, line, source) } else if (is_call(rhs, quote(user))) { ir_parse_expr_rhs_user(rhs, line, source) } else if (is_call(rhs, quote(interpolate))) { ir_parse_expr_rhs_interpolate(rhs, line, source) } else { ir_parse_expr_rhs_expression(rhs, line, source) } } ir_parse_expr_rhs_expression <- function(rhs, line, source) { depends <- find_symbols(rhs) err <- intersect(setdiff(SPECIAL_LHS, "dim"), depends$functions) if (length(err) > 0L) { ir_parse_error(sprintf("Function %s is disallowed on rhs", paste(unique(err), collapse = ", ")), line, source) } err <- intersect(SPECIAL_RHS, depends$functions) if (length(err) > 0L) { ir_parse_error(sprintf("%s() must be the only call on the rhs", err[[1]]), line, source) } err <- intersect(c(SPECIAL_LHS, SPECIAL_RHS), depends$variables) if (length(err) > 0L) { ir_parse_error(sprintf( "Function '%s' is disallowed as symbol on rhs", err[[1L]]), line, source) } ir_parse_expr_rhs_check_usage(rhs, line, source) if ("sum" %in% depends$functions) { rhs <- ir_parse_expr_rhs_expression_sum(rhs, line, source) depends <- find_symbols(rhs) } if (":" %in% 
depends$functions) { ir_parse_error("Range operator ':' may not be used on rhs", line, source) } stochastic <- any(depends$functions %in% names(FUNCTIONS_STOCHASTIC)) list(rhs = list(value = rhs), depends = depends, stochastic = stochastic) } ir_parse_expr_rhs_user <- function(rhs, line, source) { args <- as.list(rhs[-1L]) nms <- names(args) %||% rep("", length(args)) if (any(!nzchar(nms[-1]))) { ir_parse_error("Only first argument to user() may be unnamed", line, source) } m <- match.call(function(default, integer, min, max, ...) NULL, rhs, FALSE) extra <- m[["..."]] if (!is.null(extra)) { ir_parse_error(sprintf("Unknown %s to user(): %s", ngettext(length(extra), "argument", "arguments"), paste(squote(names(extra)), collapse = ", ")), line, source) } deps <- find_symbols(as.list(rhs[-1L])) allowed <- c("+", "/", "-", "*", "^", "(") if (length(setdiff(deps$functions, allowed)) > 0L) { ir_parse_error("user() call must not use functions", line, source) } if (length(deps$variables) > 0L) { ir_parse_error("user() call must not reference variables", line, source) } user <- list(default = m$default, dim = FALSE, integer = m$integer %||% FALSE, min = m$min, max = m$max) list(user = user) } ir_parse_expr_rhs_interpolate <- function(rhs, line, source) { na <- length(rhs) - 1L if (na < 2L || na > 3L) { ir_parse_error("interpolate() requires two or three arguments", line, source) } m <- match.call(function(t, y, type) NULL, rhs, FALSE) type <- m$type %||% "spline" if (!is.character(type)) { ir_parse_error("Expected a string constant for interpolation type", line, source) } if (!(type %in% INTERPOLATION_TYPES)) { ir_parse_error(sprintf( "Invalid interpolation type; must be one: of %s", paste(INTERPOLATION_TYPES, collapse = ", ")), line, source) } if (!is.symbol(m$t)) { ir_parse_error("interpolation time argument must be a symbol", line, source) } if (!is.symbol(m$y)) { ir_parse_error("interpolation target argument must be a symbol", line, source) } t <- as.character(m$t) y <- as.character(m$y) list(interpolate = list(t = t, y = y, type = type), depends = ir_parse_depends(variables = c(t, y))) } ir_parse_expr_rhs_delay <- function(rhs, line, source) { na <- length(rhs) - 1L if (na < 2L || na > 3L) { ir_parse_error("delay() requires two or three arguments", line, source) } delay_expr <- rhs[[2L]] delay_time <- rhs[[3L]] if (na == 3L) { delay_default <- ir_parse_expr_rhs(rhs[[4L]], line, source) } else { delay_default <- NULL } deps_delay_expr <- find_symbols(delay_expr) deps_delay_time <- find_symbols(delay_time) fns <- c(deps_delay_expr$functions, deps_delay_time$functions, delay_default$depends$functions) if ("delay" %in% fns) { ir_parse_error("delay() may not be nested", line, source) } if (TIME %in% deps_delay_expr$variables) { ir_parse_error("delay() may not refer to time as that's confusing", line, source) } depends <- join_deps(list(deps_delay_time, delay_default$depends)) list(delay = list(time = delay_time, default = delay_default$rhs$value, depends = deps_delay_expr), rhs = list(value = delay_expr), depends = depends) } ir_parse_equations <- function(eqs) { type <- vcapply(eqs, "[[", "type") eqs[!(type %in% c("null", "config"))] } ir_parse_depends <- function(functions = character(0), variables = character(0)) { list(functions = functions, variables = variables) } ir_parse_interpolate <- function(eqs, discrete, source) { type <- vcapply(eqs, "[[", "type") for (eq in eqs[type == "interpolate"]) { eqs <- ir_parse_interpolate1(eq, eqs, discrete, source) } eqs } ir_parse_interpolate1 <- 
function(eq, eqs, discrete, source) { nm <- eq$lhs$name_lhs nm_alloc <- sprintf("interpolate_%s", nm) eq_alloc <- eq eq_alloc$name <- nm_alloc eq_alloc$type <- "alloc_interpolate" eq_alloc$lhs$name_lhs <- nm_alloc eq_alloc$lhs$name_data <- nm_alloc eq_alloc$lhs$name_equation <- nm_alloc eq_alloc$lhs$storage_type <- "interpolate_data" msg <- setdiff(c(eq_alloc$interpolate$t, eq_alloc$interpolate$y), names(eqs)) if (length(msg) > 0L) { fmt <- ngettext(length(msg), "Unknown variable %s", "Unknown variables %s") ir_parse_error(sprintf(fmt, paste(msg, collapse = ", ")), eq$source, source) } eq_t <- eqs[[eq_alloc$interpolate$t]] eq_y <- eqs[[eq_alloc$interpolate$y]] rank_t <- eq_t$array$rank rank_y <- eq_y$array$rank rank_z <- eq$array$rank %||% 0L if (eq_t$array$rank != 1L) { ir_parse_error(sprintf("Expected %s to be a vector for interpolation", eq_t$name), eq_t$source, source) } if (eq_y$array$rank != rank_z + 1L) { type <- if (rank_z == 0L) "vector" else paste(rank_z + 1, "dimensional array") ir_parse_error(sprintf("Expected %s to be a %s", eq_y$name, type), eq_y$source, source) } eq_alloc$interpolate$equation <- nm time <- if (discrete) STEP else TIME eq_use <- eq eq_use$type <- "interpolate" eq_use$depends <- ir_parse_depends(variables = c(time, nm_alloc)) eq_use$interpolate <- nm_alloc type <- eq_alloc$interpolate$type eq_alloc$control <- list( min = eq_t$name, max = if (type != "constant") eq_t$name, critical = if (type == "constant") eq_t$name) if (isTRUE(eq$user$dim)) { deps_alloc <- union(eq_alloc$depends$variables, eq$array$dimnames$length) eq_alloc$depends <- ir_parse_depends(variables = deps_alloc) deps <- c(eq$array$dimnames$length, eq$interpolate$t, eq$interpolate$y) eq$depends <- ir_parse_depends(variables = deps) nm_length <- eq$array$dimnames$length eqs[[nm_length]]$type <- "expression_scalar" if (rank_z == 1L) { len <- eq_y$array$dimnames$dim[[2]] eqs[[nm_length]]$rhs$value <- as.name(len) eqs[[nm_length]]$depends <- ir_parse_depends(variables = len) } else { for (j in seq_along(eq$array$dimnames$dim)) { nm <- eq$array$dimnames$dim[[j]] eqs[[nm]]$type <- "expression_scalar" eqs[[nm]]$rhs$value <- as.name(eq_y$array$dimnames$dim[[j + 1]]) eqs[[nm]]$depends$variables <- eq_y$array$dimnames$dim[[j + 1]] } } } extra <- list(eq_alloc, eq_use) names(extra) <- vcapply(extra, "[[", "name") stopifnot(sum(names(eqs) == eq$name) == 1) c(eqs[names(eqs) != eq$name], extra) } ir_parse_rewrite_initial <- function(eq, variables) { needs_rewrite <- identical(eq$lhs$special, "initial") && any(eq$depends$variables %in% variables) if (needs_rewrite) { subs <- set_names(initial_name(variables), variables) env <- as.environment(lapply(subs, as.name)) eq$rhs$value <- substitute_(eq$rhs$value, env) i <- match(eq$depends$variables, names(subs)) j <- !is.na(i) eq$depends$variables[j] <- subs[i][j] } eq } ir_parse_check_functions <- function(eqs, discrete, include, source) { used_functions <- lapply(eqs, function(x) x$depends$functions) all_used_functions <- unique(unlist(used_functions)) if (!discrete) { err <- intersect(all_used_functions, names(FUNCTIONS_STOCHASTIC)) if (length(err) > 0L) { tmp <- eqs[vlapply(used_functions, function(x) any(x %in% err))] ir_parse_error(sprintf( "Stochastic functions not allowed in ODE models (used: %s)", pastec(err)), ir_parse_error_lines(tmp), source) } } allowed <- c(names(FUNCTIONS), names(FUNCTIONS_INFIX), names(FUNCTIONS_UNARY), names(FUNCTIONS_RENAME), "odin_sum", include, if (discrete) names(FUNCTIONS_STOCHASTIC)) err <- setdiff(all_used_functions, allowed) if 
(length(err) > 0L) { tmp <- eqs[vlapply(used_functions, function(x) any(x %in% err))] ir_parse_error(sprintf("Unsupported %s: %s", ngettext(length(err), "function", "functions"), pastec(err)), ir_parse_error_lines(tmp), source) } } ir_parse_delay <- function(eqs, discrete, variables, source) { type <- vcapply(eqs, "[[", "type") for (eq in eqs[type == "delay"]) { if (discrete) { eqs <- ir_parse_delay_discrete(eq, eqs, source) initial_time <- initial_name(STEP) initial_time_type <- "int" } else { eqs <- ir_parse_delay_continuous(eq, eqs, variables, source) initial_time <- initial_name(TIME) initial_time_type <- "double" subs <- unique(unlist(lapply(eqs[names_if(type == "delay")], function(x) x$delay$substitutions), FALSE, FALSE)) f <- function(x) { depends <- if (is.numeric(x$dim)) character(0) else as.character(x$dim) list(name = x$to, type = "alloc", source = integer(0), depends = ir_parse_depends(variables = depends), lhs = list(name_data = x$to, name_lhs = x$to, name_equation = x$to), array = eqs[[x$from]]$array) } arrays <- lapply(subs, f) names(arrays) <- vcapply(arrays, "[[", "name") eqs <- c(eqs, arrays[setdiff(names(arrays), names(eqs))]) } } eq_initial_time <- list(list( name = initial_time, type = "null", lhs = list(name_data = initial_time, name_equation = initial_time, special = "initial", storage_type = initial_time_type), line = integer(0), depends = NULL)) names(eq_initial_time) <- initial_time c(eqs, eq_initial_time) } ir_parse_delay_discrete <- function(eq, eqs, source) { nm <- eq$name nm_ring <- sprintf("delay_ring_%s", nm) len <- eq$array$dimnames$length depends_ring <- list( functions = character(0), variables = if (is.character(len)) len else character(0)) lhs_ring <- list(name_data = nm_ring, name_equation = nm_ring, name_lhs = nm_ring, storage_type = "ring_buffer") eq_ring <- list( name = nm_ring, type = "alloc_ring", source = eq$source, depends = depends_ring, lhs = lhs_ring, delay = nm) lhs_use <- eq$lhs[c("name_data", "name_equation", "name_lhs", "special")] depends_use <- join_deps(list(eq$depends, eq$delay$depends)) depends_use$variables <- c(nm_ring, depends_use$variables) eq_use <- list( name = nm, type = "delay_discrete", source = eq$source, depends = depends_use, lhs = lhs_use, rhs = list(value = eq$rhs$value, index = eq$rhs$index), array = eq$array, delay = list(ring = nm_ring, time = eq$delay$time, default = eq$delay$default)) extra <- list(eq_ring, eq_use) names(extra) <- vcapply(extra, "[[", "name") stopifnot(sum(names(eqs) == eq$name) == 1) c(eqs[names(eqs) != eq$name], extra) } ir_parse_delay_continuous <- function(eq, eqs, variables, source) { variable_names <- vcapply(variables$contents, "[[", "name") nm <- eq$name nm_state <- sprintf("delay_state_%s", nm) nm_index <- sprintf("delay_index_%s", nm) nm_dim <- sprintf("dim_delay_%s", nm) graph <- ir_parse_delay_continuous_graph(eq, eqs, variable_names, source) arrays <- names_if( vcapply(eqs[graph$equations], "[[", "type") == "expression_array") if (length(arrays) > 0L) { substitutions <- lapply(arrays, function(x) list(from = x, to = sprintf("delay_array_%s", x), dim = eqs[[x]]$array$dimnames$length)) } else { substitutions <- list() } if (is.numeric(graph$packing$length)) { eq_len <- NULL val_len <- graph$packing$length dep_len <- character(0) } else { eq_len <- list( name = nm_dim, type = "expression_scalar", source = eq$source, depends = find_symbols(graph$packing$length), lhs = list(name_data = nm_dim, name_equation = nm_dim, name_lhs = nm_dim, storage_type = "int"), rhs = list(value = 
graph$packing$length)) val_len <- nm_dim dep_len <- nm_dim } lhs_use <- eq$lhs[c("name_data", "name_equation", "name_lhs", "special")] subs_from <- vcapply(substitutions, "[[", "to") depends_use <- join_deps(list( eq$depends, ir_parse_depends(variables = c(dep_len, subs_from, TIME)))) eq_use <- list( name = nm, type = "delay_continuous", source = eq$source, depends = depends_use, lhs = lhs_use, rhs = list(value = eq$rhs$value, index = eq$rhs$index), delay = list( state = nm_state, index = nm_index, substitutions = substitutions, variables = list(length = val_len, contents = graph$packing$contents), equations = graph$equations, default = eq$delay$default, time = eq$delay$time, depends = eq$delay$depends), array = eq$array) array <- list(dimnames = list(length = val_len, dim = NULL, mult = NULL), rank = 1L) lhs_index <- list(name_data = nm_index, name_equation = nm_index, name_lhs = nm_index, storage_type = "int") offsets <- lapply(variables$contents[match(graph$variables, variable_names)], "[[", "offset") depends_index <- join_deps(lapply(offsets, find_symbols)) depends_index$variables <- union(depends_index$variables, dep_len) eq_index <- list( name = nm_index, type = "delay_index", source = eq$source, depends = depends_index, lhs = lhs_index, delay = nm, array = array) lhs_state <- list(name_data = nm_state, name_equation = nm_state, name_lhs = nm_state, storage_type = "double") eq_state <- list( name = nm_state, type = "null", source = eq$source, depends = ir_parse_depends(variables = dep_len), lhs = lhs_state, array = array) offsets <- graph$packing$offsets if (!is.null(offsets)) { eq_index$depends$variables <- c(eq_index$depends$variables, names(offsets)) } extra <- c(if (is.null(eq_len)) NULL else list(eq_len), list(eq_index, eq_state, eq_use), offsets) names(extra) <- vcapply(extra, "[[", "name") stopifnot(sum(names(eqs) == eq$name) == 1) c(eqs[names(eqs) != eq$name], extra) } ir_parse_delay_continuous_graph <- function(eq, eqs, variables, source) { used <- eq$delay$depends$variables exclude <- c(variables, TIME, INDEX) v <- setdiff(used, exclude) deps <- list() while (length(v) > 0L) { err <- setdiff(v, names(eqs)) if (length(err) > 0L) { pos <- intersect(union(names(deps), unlist(deps, FALSE, FALSE)), names(eqs)) msg <- sprintf("Missing %s in delay expression: %s (for delay %s)", ngettext(length(err), "variable", "variables"), paste(err, collapse = ", "), eq$name) ir_parse_error(msg, ir_parse_error_lines(eqs[union(pos, eq$name)]), source) } tmp <- lapply(eqs[v], function(x) x$depends$variables) deps <- c(deps, tmp) v <- setdiff(unlist(tmp, use.names = FALSE), c(exclude, names(deps))) } used_vars <- intersect(variables, union(used, unlist(deps, use.names = FALSE))) used_eqs <- topological_order(deps) %||% character(0) include <- set_names(logical(length(used_eqs)), used_eqs) for (v in used_eqs) { d <- deps[[v]] include[[v]] <- any(d %in% used_vars) || any(d == TIME) || eqs[[v]]$type == "delay" || any(include[intersect(d, names(deps))]) } used_eqs <- used_eqs[include] i <- vlapply(eqs, function(x) identical(x$lhs$special, "deriv") && x$lhs$name_data %in% used_vars) packing <- ir_parse_packing_new(eqs[i], FALSE, eq$name) list(equations = used_eqs, variables = used_vars, packing = packing) } ir_parse_expr_rhs_check_usage <- function(rhs, line, source) { len <- c(FUNCTIONS, setNames(FUNCTIONS[FUNCTIONS_RENAME], names(FUNCTIONS_RENAME))) throw <- function(...) 
{ ir_parse_error(sprintf(...), line, source) } check_usage <- function(x) { if (is.recursive(x)) { fn <- x[[1L]] if (!is.name(fn)) { throw("Cannot process statement") } nm <- deparse(fn) n <- len[[nm]] nargs <- length(x) - 1L if (nm == "function") { throw("Cannot define R functions in odin model") } if (length(n) > 1L) { if (nargs < n[[1L]] || nargs > n[[2L]]) { if (is.finite(n[[2L]])) { throw("Expected %d-%d arguments in %s call, but received %d", n[[1L]], n[[2L]], nm, nargs) } else { throw("Expected %d or more arguments in %s call, but received %d", n[[1L]], nm, nargs) } } } else if (!is.null(n) && is.finite(n)) { if (nargs != n) { if (nm == "if") { throw("All if statements must have an else clause") } else { throw("Expected %d %s in %s call, but received %d", n, ngettext(n, "argument", "arguments"), nm, nargs) } } } lapply(as.list(x[-1L]), check_usage) } } check_usage(rhs) } ir_parse_expr_rhs_check_inplace <- function(lhs, rhs, line, source) { fn <- deparse(rhs$rhs$value[[1]]) depends <- join_deps(lapply(rhs$rhs$value[-1], find_symbols)) if (!(fn %in% names(FUNCTIONS_INPLACE)) || length(depends$functions) > 0L) { ir_parse_error(sprintf( "At present, inplace function '%s' must use no functions", fn), line, source) } if (is.null(lhs$index)) { ir_parse_error(sprintf( "Expected an array on the lhs of inplace function '%s'", fn), line, source) } } ir_parse_substitute <- function(eqs, subs) { if (is.null(subs)) { return(eqs) } f <- function(nm) { eq <- eqs[[nm]] if (is.null(eq)) { stop(sprintf("Substitution failed: '%s' is not an equation", nm), call. = FALSE) } if (eq$type != "user") { stop(sprintf("Substitution failed: '%s' is not a user() equation", nm), call. = FALSE) } if (!is.null(eq$array)) { stop(sprintf("Substitution failed: '%s' is an array", nm), call.
= FALSE) } value <- support_coerce_mode(subs[[nm]], eq$user$integer, eq$user$min, eq$user$max, nm) eq$type <- "expression_scalar" eq$rhs <- list(value = value) eq$stochastic <- FALSE eq } eqs[names(subs)] <- lapply(names(subs), f) eqs } ir_parse_rewrite_dims <- function(eqs) { nms <- names_if(vlapply(eqs, function(x) isTRUE(x$lhs$dim))) ir_parse_rewrite(nms, eqs) } ir_parse_rewrite_constants <- function(eqs) { nms <- names_if(vlapply(eqs, function(x) x$type == "expression_scalar")) ir_parse_rewrite(nms, eqs) } ir_parse_rewrite_compute_eqs <- function(nms, eqs) { cache <- new_empty_env() lapply(eqs[nms], function(eq) static_eval(ir_parse_rewrite_compute(eq$rhs$value, eqs, cache))) } ir_parse_rewrite_compute <- function(x, eqs, cache) { key <- deparse_str(x) if (key %in% names(cache)) { return(cache[[key]]) } if (!is.numeric(x)) { if (is.symbol(x)) { x_eq <- eqs[[deparse_str(x)]] if (identical(x_eq$type, "expression_scalar")) { x <- ir_parse_rewrite_compute(x_eq$rhs$value, eqs, cache) } } else if (is_call(x, "length")) { length_name <- as.name(array_dim_name(as.character(x[[2]]))) x <- ir_parse_rewrite_compute(length_name, eqs, cache) } else if (is_call(x, "dim")) { dim_name <- as.name(array_dim_name(as.character(x[[2]]), x[[3]])) x <- ir_parse_rewrite_compute(dim_name, eqs, cache) } else if (is.recursive(x)) { x[-1] <- lapply(x[-1], ir_parse_rewrite_compute, eqs, cache) } } cache[[key]] <- x x } ir_parse_rewrite <- function(nms, eqs) { val <- tryCatch( ir_parse_rewrite_compute_eqs(nms, eqs), error = function(e) { message("Rewrite failure: ", e$message) list() }) rewrite <- vlapply(val, function(x) is.symbol(x) || is.numeric(x)) subs <- val[rewrite] copy_self <- unlist(lapply(eqs, function(x) if (x$type == "copy") x$lhs$name_data), FALSE) subs <- subs[setdiff(names(subs), copy_self)] is_dim <- vlapply(eqs, function(x) isTRUE(x$lhs$dim)) check <- val[intersect(names_if(!rewrite), names_if(is_dim))] if (length(check) > 0) { dup <- duplicated(check) & !vlapply(check, is.null) if (any(dup)) { i <- match(check[dup], check) subs <- c(subs, set_names(lapply(names(check)[i], as.name), names(check)[dup])) } } subs_env <- list2env(subs, parent = emptyenv()) subs_dep <- vcapply(subs, function(x) if (is.numeric(x)) NA_character_ else deparse_str(x)) replace <- function(x, y) { i <- match(vcapply(x, function(x) x %||% ""), names(y)) j <- which(!is.na(i)) x[j] <- unname(y)[i[j]] na_drop(x) } rewrite_eq_array_part <- function(el) { el$value <- substitute_(el$value, subs_env) for (i in seq_along(el$index)) { el$index[[i]]$value <- substitute_(el$index[[i]]$value, subs_env) } el } rewrite_eq <- function(eq) { if (eq$type == "expression_array") { eq$rhs <- lapply(eq$rhs, rewrite_eq_array_part) } else if (eq$name %in% names(subs)) { eq$rhs$value <- subs[[eq$name]] } else { eq$rhs$value <- substitute_(eq$rhs$value, subs_env) } eq$depends$variables <- replace(eq$depends$variables, subs_dep) eq$lhs$depends$variables <- replace(eq$lhs$depends$variables, subs_dep) if (!is.null(eq$array$dimnames)) { eq$array$dimnames$length <- replace(eq$array$dimnames$length, subs)[[1]] eq$array$dimnames$dim <- replace(eq$array$dimnames$dim, subs) eq$array$dimnames$mult <- replace(eq$array$dimnames$mult, subs) } if (!is.null(eq$delay)) { eq$delay$time <- substitute_(eq$delay$time, subs_env) eq$delay$depends$variables <- replace(eq$delay$depends$variables, subs_dep) } eq } keep <- names_if(!vlapply(eqs, function(x) is.null(x$lhs$special))) i <- setdiff(names(eqs), setdiff(names(subs), keep)) lapply(eqs[i], rewrite_eq) }
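## A minimal sketch of the odin DSL that ir_parse() above consumes: deriv()/
## initial() declare the system and user() declares parameters, i.e. the
## "special" lhs functions handled by ir_parse_expr_lhs(). Left commented out
## because compiling it needs the odin package plus a working C toolchain, and
## the generator interface (gen$new() below) assumes a recent odin version.
# gen <- odin::odin({
#   deriv(N) <- r * N * (1 - N / K)
#   initial(N) <- N0
#   r <- user(0.5)
#   K <- user(100)
#   N0 <- user(1)
# })
# mod <- gen$new()
# mod$run(seq(0, 10, length.out = 11))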
unify.owin <- function(W){ if(!is.owin(W)) stop("'W' must be of spatstat class 'owin'") xr <- W$xrange yr <- W$yrange dx <- diff(xr) dy <- diff(yr) return(affine(W,matrix(c(1/dx,0,0,1/dy),2,2),c(-xr[1]/dx,-yr[1]/dy))) }
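## Minimal usage sketch for unify.owin(), assuming spatstat.geom (which supplies
## owin(), is.owin() and affine()) is attached: the window is rescaled so that
## both ranges become the unit interval.
library(spatstat.geom)
W <- owin(xrange = c(2, 5), yrange = c(1, 4))
unify.owin(W)  # same window, rescaled to xrange = yrange = c(0, 1)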
eloscoring <- function(data, id, block, item, choice, K = 30, iter = 100, wide = FALSE) { get_checks(data, id, block, item, choice, nonbibd = TRUE) out <- lapply(unique(data[[id]]), function(cid) { get_eloresults(data[data[[id]] == cid, ], block, item, choice) %>% get_eloscores(K, iter) %>% dplyr::mutate(id = cid) }) out <- do.call(dplyr::bind_rows, out) out <- out %>% dplyr::select(id, item, elo) colnames(out) <- c(id, item, "elo") if (wide) { out <- out %>% tidyr::spread(!!sym(item), elo) } return(out) }
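## Hypothetical input sketch for eloscoring(): one row per item shown within a
## block, per respondent. The column names and the 0/1 coding of 'chosen' are
## illustrative assumptions, not taken from the source, so the call is left
## commented out.
# bcd <- data.frame(person = rep(1:10, each = 6),
#                   block  = rep(rep(1:2, each = 3), times = 10),
#                   item   = rep(c("A", "B", "C"), times = 20),
#                   chosen = rbinom(60, 1, 0.5))
# eloscoring(bcd, id = "person", block = "block", item = "item",
#            choice = "chosen", K = 30, iter = 100)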
get_latent_model <- function(){ " data { K = length(n) Sn <- sum(n[]) Sm <- sum(m[]) } model { for (i in 1:2){ z0[i] <- (i-1)*0.0001 phi0[i] <- theta[i]*z0[i] } theta[1] <- 1-St eltheta[1] ~ dgamma(1.0,1.0) theta[2] <- eltheta[2]/(1+Sr) eltheta[2] ~ dgamma(1.0E-3,1.0E-3) for (i in 3:K){ phi0[i] <- theta[i]*z0[i] eltheta[i] ~ dgamma(1.0E-3,1.0E-3) theta[i] <- eltheta[i]/(1+Sr) z0[i] <- z0[i-1]/q[i] } Sr <- sum(eltheta[2:K]) St <- sum(theta[2:K]) Sp <-sum(p0[]) Sphi0 <- sum(phi0[]) for (i in 1:K){ phi[i] <- phi0[i]/Sphi0 z[i] <- z0[i]/Sphi0 q[i] ~ dunif(0.001,0.999) p0[i] <- theta[i]*(1-lambda)+lambda*phi[i] p[i] <- p0[i]/Sp lami[i] <- lambda*phi[i]/p0[i] sens[i] <- sum(phi[i:K]) P[i] <- (1-lami[i])*p0[i] spec[i] <- sum(P[1:i])/sum(P) Q[i] <- lami[i]*p0[i] ppv[i] <- sum(Q[i:K])/sum(p0[i:K]) npv[i] = spec[i] * (1 - lambda) /(spec[i] * (1 - lambda) + ((1 - sens[i]) * lambda)) } m[1:K] ~ dmulti(theta[1:K], Sm) n[1:K] ~ dmulti(p[1:K], Sn) lambda~ dunif(0.00001,0.99999) } " }
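## Sketch: get_latent_model() only returns the JAGS model code as a string. A
## typical (hypothetical) use is to pass it to rjags together with count vectors
## n and m of common length K; the data values below are placeholders, not from
## the source, so the call is left commented out.
# model_code <- get_latent_model()
# cat(model_code)
# jm <- rjags::jags.model(textConnection(model_code),
#                         data = list(n = c(50, 30, 15, 5), m = c(60, 25, 10, 5)))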
NNS.part = function(x, y, Voronoi = FALSE, type = NULL, order = NULL, obs.req = 8, min.obs.stop = TRUE, noise.reduction = "off"){ noise.reduction <- tolower(noise.reduction) if (!any(noise.reduction %in% c("mean", "median", "mode", "off", "mode_class"))) { stop("Please ensure noise.reduction is from 'mean', 'median', 'mode' or 'off'") } if(any(class(x)==c("tbl", "data.table"))) x <- as.vector(unlist(x)) if(any(class(y)==c("tbl", "data.table"))) y <- as.vector(unlist(y)) if (is.null(obs.req)) obs.req <- 8 if (!is.null(order) && order == 0) order <- 1 if (Voronoi) { x.label <- deparse(substitute(x)) y.label <- deparse(substitute(y)) } x <- as.numeric(x) y <- as.numeric(y) PART <- data.table::data.table(x, y, quadrant = "q", prior.quadrant = "pq")[, `:=`(counts, .N), by = "quadrant"][, `:=`(old.counts, .N), by = "prior.quadrant"] if(Voronoi) plot(x, y, col = "steelblue", cex.lab = 1.5, xlab = x.label, ylab = y.label) if (length(x) <= 8) { if(is.null(order)){ order <- 1 hard.stop <- max(ceiling(log(length(x), 2)), 1) } else { obs.req <- 0 hard.stop <- length(x) } } if(is.null(order)) order <- max(ceiling(log(length(x), 2)), 1) if(!is.numeric(order)){ obs.req <- 0 hard.stop <- max(ceiling(log(length(x), 2)), 1) + 2 } else { obs.req <- obs.req hard.stop <- 2*max(ceiling(log(length(x), 2)), 1) + 2 } if(is.null(type)) { i <- 0L while (i >= 0) { if(i == order || i == hard.stop) break PART[counts >= obs.req, `:=`(counts, .N), by = quadrant] PART[old.counts >= obs.req, `:=`(old.counts, .N), by = prior.quadrant] l.PART <- max(PART$counts) obs.req.rows <- PART[counts >= obs.req, which = TRUE] old.obs.req.rows <- PART[old.counts >= obs.req, which = TRUE] if(length(obs.req.rows)==0) break if(min.obs.stop && obs.req > 0 && (length(obs.req.rows) < length(old.obs.req.rows))) break if(noise.reduction == "off") { if(Voronoi) { if(l.PART > obs.req) { PART[obs.req.rows, { segments(min(x), gravity(y), max(x), gravity(y), lty = 3) segments(gravity(x), min(y), gravity(x), max(y), lty = 3) }, by = quadrant] } } RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = gravity), .SD), by = quadrant, .SDcols = x:y] } if(noise.reduction == "mean") { if(Voronoi) { if(l.PART > obs.req) { PART[obs.req.rows, { segments(min(x), mean(y), max(x), mean(y), lty = 3) segments(gravity(x), min(y), gravity(x), max(y), lty = 3) }, by = quadrant] } } RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = mean), .SD), by = quadrant, .SDcols = x:y] } if(noise.reduction == "median") { if(Voronoi) { if(l.PART > obs.req) { PART[obs.req.rows, { segments(min(x), median(y), max(x), median(y), lty = 3) segments(gravity(x), min(y), gravity(x), max(y), lty = 3) }, by = quadrant] } } RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = median), .SD), by = quadrant, .SDcols = x:y] } if(noise.reduction == "mode") { if(Voronoi) { if(l.PART > obs.req) { PART[obs.req.rows, { segments(min(x), mode(y), max(x), mode(y), lty = 3) segments(gravity(x), min(y), gravity(x), max(y), lty = 3) }, by = quadrant] } } RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = mode), .SD), by = quadrant, .SDcols = x:y] } if(noise.reduction == "mode_class") { if(Voronoi) { if(l.PART > obs.req) { PART[obs.req.rows, { segments(min(x), mode_class(y), max(x), mode_class(y), lty = 3) segments(gravity_class(x), min(y), gravity_class(x), max(y), lty = 3) }, by = quadrant] } } RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity_class, y = 
mode_class), .SD), by = quadrant, .SDcols = x:y] } RP[, `:=`(prior.quadrant, (quadrant))] PART[obs.req.rows, `:=`(prior.quadrant, (quadrant))] old.parts <- length(unique(PART$quadrant)) PART[RP, on = .(quadrant), `:=`(q_new, { lox = x.x <= i.x loy = x.y <= i.y 1L + lox + loy * 2L })] PART[obs.req.rows, `:=`(quadrant, paste0(quadrant, q_new))] new.parts <- length(unique(PART$quadrant)) if((min(PART$counts) <= obs.req) && i >= 1) break i = i + 1L } if(!exists("RP")) RP <- PART[, c("quadrant", "x", "y")] if(!is.numeric(order) || is.null(dim(RP))) RP <- PART[, c("quadrant", "x", "y")] else RP[, `:=`(prior.quadrant = NULL)] PART[, `:=`(counts = NULL, old.counts = NULL, q_new = NULL)] RP <- data.table::setorder(RP[], quadrant)[] if (Voronoi) { title(main = paste0("NNS Order = ", i), cex.main = 2) if(min.obs.stop) points(RP$x, RP$y, pch = 15, lwd = 2, col = "red") } if(min.obs.stop == FALSE) RP <- NULL return(list(order = i, dt = PART[], regression.points = RP)) } if(!is.null(type)) { i <- 0L while (i >= 0) { if(i == order || i == hard.stop) break PART[counts > obs.req/2, `:=`(counts, .N), by = quadrant] PART[old.counts > obs.req/2, `:=`(old.counts, .N), by = prior.quadrant] obs.req.rows <- PART[counts > obs.req/2, which = TRUE] old.obs.req.rows <- PART[old.counts > obs.req/2, which = TRUE] if(length(obs.req.rows)==0) break if(obs.req > 0 && (length(obs.req.rows) < length(old.obs.req.rows))) break if(noise.reduction == "off") { RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = gravity), .SD), by = quadrant, .SDcols = x:y] } if(noise.reduction == "mean") { RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = mean), .SD), by = quadrant, .SDcols = x:y] } if(noise.reduction == "mode") { RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = mode), .SD), by = quadrant, .SDcols = x:y] } if(noise.reduction == "mode_class") { RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity_class, y = mode_class), .SD), by = quadrant, .SDcols=x:y] } if(noise.reduction == "median") { RP <- PART[obs.req.rows, mapply(function(f,z) as.list(f(z)), list(x = gravity, y = median), .SD), by = quadrant, .SDcols = x:y] } RP[, `:=`(prior.quadrant, (quadrant))] PART[obs.req.rows, `:=`(prior.quadrant, (quadrant))] old.parts <- length(unique(PART$quadrant)) PART[RP, on = .(quadrant), `:=`(q_new, { lox = x.x > i.x 1L + lox })] PART[obs.req.rows, `:=`(quadrant, paste0(quadrant, q_new))] new.parts <- length(unique(PART$quadrant)) if((min(PART$counts) <= obs.req) && i >= 1) break i <- i + 1L } if(!exists("RP")) RP <- PART[, c("quadrant", "x", "y")] if(!is.numeric(order) || is.null(dim(RP))) RP <- PART[, c("quadrant", "x", "y")] else RP[, `:=`(prior.quadrant = NULL)] PART[, `:=`(counts = NULL, old.counts = NULL, q_new = NULL)] RP <- data.table::setorder(RP[], quadrant)[] if(mean(c(length(unique(diff(x))), length(unique(x)))) < .33*length(x)) RP$x <- ifelse(RP$x%%1 < .5, floor(RP$x), ceiling(RP$x)) if(Voronoi) { abline(v = c(PART[ ,min(x), by=prior.quadrant]$V1,max(x)), lty = 3) if(min.obs.stop) points(RP$x, RP$y, pch = 15, lwd = 2, col = "red") title(main = paste0("NNS Order = ", i), cex.main = 2) } if(min.obs.stop == FALSE) RP <- NULL return(list(order = i, dt = PART[], regression.points = RP)) } }
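## Minimal usage sketch for NNS.part(): the function above is from the NNS
## package and relies on its internal helpers (gravity(), mode(), data.table),
## so the packaged version is called here; assumes NNS is installed.
set.seed(123)
x <- runif(200)
y <- x^2 + rnorm(200, sd = 0.05)
part <- NNS::NNS.part(x, y, Voronoi = FALSE)
part$order                    # number of partitioning iterations performed
head(part$regression.points)  # quadrant representative points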
p_functions <- function (package = "base", all = FALSE, character.only = FALSE){ if (!character.only & is.name(substitute(package))) { package <- deparse(substitute(package)) } if (identical(package, character(0))) { package <- "base" } ns <- loadNamespace(package) if(all){ packagefunctions <- ls(ns) }else{ packagefunctions <- getNamespaceExports(ns) } datas <- suppressWarnings(utils::data(package = package)[["results"]][, 3]) packagefunctions <- packagefunctions[!packagefunctions %in% datas] return(sort(packagefunctions)) } p_funs <- p_functions
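## Usage sketch for p_functions() / p_funs(): list the exported (or all) objects
## in a package namespace; the package may be given quoted or unquoted.
p_functions("stats")             # exported functions in stats, sorted
head(p_funs(utils, all = TRUE))  # unquoted name, including non-exported objects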
multi.collinear <- function(x, perm = FALSE, leave.out = FALSE, n = 99, p = 1e-07, na.rm = FALSE) { if (!inherits(x, "data.frame") & !inherits(x, "matrix")) stop("x must be a data.frame or matrix") if ( (dim(x)[2] < 2) == TRUE) stop("Need at least two parameters to test") if(!inherits(x, "matrix")) x <- as.data.frame(x) if(na.rm) { x <- stats::na.omit(x) } qrd <- function(x) { x <- as.matrix(x) n <- ncol(x) m <- nrow(x) q <- matrix(0, m, n) r <- matrix(0, n, n) for (j in 1:n) { v = x[,j] if (j > 1) { for (i in 1:(j-1)) { r[i,j] <- t(q[,i]) %*% x[,j] v <- v - r[i,j] * q[,i] } } r[j,j] <- sqrt(sum(v^2)) q[,j] <- v / r[j,j] } return( list('qr'=q, 'rank'=r) ) } mc.test <- function(v, p) { qrx <- qr(v, tol = p) if (length(names(v)[qrx$pivot[1:qrx$rank]]) != length(v) ) { keep <- names(v)[qrx$pivot[1:qrx$rank]] return(paste(setdiff(names(v), keep))) } return(NULL) } if(perm == TRUE) { freq <- data.frame(variables = names(x), frequency = 0) if(leave.out == TRUE) freq <- data.frame(freq, leave.out = 0) for(i in 1:n) { x.data <- x[,sample(1:length(names(x)))] if(leave.out == TRUE) { x.data <- x.data[,-sample(1:length(names(x.data)),1)] lo.idx <- which(freq$variables %in% setdiff(names(x), names(x.data))) freq[lo.idx,]$leave.out <- freq[lo.idx,]$leave.out + 1 } mc <- mc.test(x.data, p = p) if(!is.null(mc)) { idx <- which(freq$variables %in% mc) freq[idx,]$frequency <- freq[idx,]$frequency + 1 } } return( freq ) } else { mc <- mc.test(x, p = p) if( length(mc) > 0) { return( mc ) } else { return( NULL ) } } }
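## Usage sketch for multi.collinear(): column 'b' is an exact multiple of 'a',
## so the QR-based test should flag one of the pair as redundant.
d <- data.frame(a = 1:10, b = (1:10) * 2, c = rnorm(10))
multi.collinear(d)                      # name(s) of the collinear column(s)
multi.collinear(d, perm = TRUE, n = 9)  # frequency table over permuted column orders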
"ncvs_personal_codebook"
setGeneric(name = ".VerifyTxName", def = function(txName, ...) { standardGeneric(".VerifyTxName") }) setMethod(f = ".VerifyTxName", signature = c(txName = "ANY"), definition = function(txName, ...) { stop("txName must be a vector of character objects", call. = FALSE) }) setMethod(f = ".VerifyTxName", signature = c(txName = "character"), definition = function(txName, ..., data) { if (length(x = txName) == 0L) { stop("txName must be provided", call. = FALSE) } test <- tryCatch(expr = data[,txName,drop = FALSE], error = function(e) { return( NULL ) }) if (is.null(x = test) && length(x = txName) == 1L) { dataNames <- colnames(x = data) cov <- strsplit(x = dataNames, split = ".", fixed = TRUE) areAs <- lapply(X = cov, FUN = function(x){x[[ 1L ]] == txName}) if (sum(areAs) > 0L) { nDP <- sum(areAs) message("detected ", nDP, "decision points") txName <- dataNames[areAs] return( .VerifyTxName(txName = txName, data = data) ) } } test <- tryCatch(expr = data[,txName,drop = FALSE], error = function(e) { stop("unable to retrieve 'txName' from data", e$message, call. = FALSE) }) if (any(sapply(X = test, FUN = is.nan))) { stop("txName cannot include NaN values", call. = FALSE) } for (i in 1L:ncol(x = test)) { if (!is.factor(x = test[,i])) { if (is.numeric(x = test[,i])) { if (!isTRUE(all.equal(target = test[,i], current = round(x = test[,i], digits = 0L)))) { stop("treatment variable must be integer or factor", call. = FALSE) } } else { stop("treatment variable must be integer or factor", call. = FALSE) } } } return( txName ) })
NO_PANEL <- -1L locate_grid <- function(data, panels, rows = NULL, cols = NULL, margins = FALSE) { if (empty(data)) { return(cbind(data, PANEL = integer(0))) } rows <- as.quoted(rows) cols <- as.quoted(cols) vars <- c(names(rows), names(cols)) margin_vars <- list(intersect(names(rows), names(data)), intersect(names(cols), names(data))) data <- reshape2::add_margins(data, margin_vars, margins) facet_vals <- quoted_df(data, c(rows, cols)) missing_facets <- setdiff(vars, names(facet_vals)) if (length(missing_facets) > 0) { to_add <- unique(panels[missing_facets]) data_rep <- rep.int(1:nrow(data), nrow(to_add)) facet_rep <- rep(1:nrow(to_add), each = nrow(data)) data <- plyr::unrowname(data[data_rep, , drop = FALSE]) facet_vals <- plyr::unrowname(cbind( facet_vals[data_rep, , drop = FALSE], to_add[facet_rep, , drop = FALSE])) } if (nrow(facet_vals) == 0) { data$PANEL <- NO_PANEL } else { facet_vals[] <- lapply(facet_vals[], as.factor) facet_vals[] <- lapply(facet_vals[], addNA, ifany = TRUE) keys <- plyr::join.keys(facet_vals, panels, by = vars) data$PANEL <- panels$PANEL[match(keys$x, keys$y)] } data[order(data$PANEL), , drop = FALSE] } locate_wrap <- function(data, panels, vars) { if (empty(data)) { return(cbind(data, PANEL = integer(0))) } vars <- as.quoted(vars) facet_vals <- quoted_df(data, vars) facet_vals[] <- lapply(facet_vals[], as.factor) missing_facets <- setdiff(names(vars), names(facet_vals)) if (length(missing_facets) > 0) { to_add <- unique(panels[missing_facets]) data_rep <- rep.int(1:nrow(data), nrow(to_add)) facet_rep <- rep(1:nrow(to_add), each = nrow(data)) data <- plyr::unrowname(data[data_rep, , drop = FALSE]) facet_vals <- plyr::unrowname(cbind( facet_vals[data_rep, , drop = FALSE], to_add[facet_rep, , drop = FALSE])) } keys <- plyr::join.keys(facet_vals, panels, by = names(vars)) data$PANEL <- panels$PANEL[match(keys$x, keys$y)] data[order(data$PANEL), ] }
checkLEs <- function(filePaths, leDF) { if(length(filePaths) < 2) {stop("At least two data bases must be specified ('filePath' must be a character vector of length 2 or more).")} allV <- list(trendLevel1 = "trendLevel1", trendLevel2 = "trendLevel2", parameter = "parameter", linkingError="linkingError", depVar = "depVar") allN <- lapply(allV, FUN=function(ii) {eatTools::existsBackgroundVariables(dat = leDF, variable=ii, warnIfMissing = TRUE)}) namlis<- lapply(filePaths, eatGADS::namesGADS) depVar<- unique(leDF[,allN[["depVar"]]]) dep_notIn_nam_list <- lapply(seq_along(namlis), function(i) { nam <- namlis[[i]] dep_notIn_nam <- setdiff(depVar, unlist(nam)) if(length(dep_notIn_nam) > 0) {message("The following variables have linking errors but are not variables in data base ", i, ": '", paste0(dep_notIn_nam, collapse = "', '"),"'")} return(dep_notIn_nam) }) tLevel<- unique(unlist(leDF[,c(allN[["trendLevel1"]], allN[["trendLevel2"]])])) if ( length(tLevel) != length(filePaths)) {warning(paste0("Number of trend levels do not match: Expect ",length(filePaths)," trend levels in data base ('filePaths' has length ",length(filePaths),"). ",length(tLevel), " trend levels for linking errors ('",paste(tLevel, collapse="', '"),"') found."))} return(list(dep_notIn_nam = dep_notIn_nam_list)) }
library(dplyr) filter(mtcars, cyl == 8) filter(mtcars, cyl < 6) filter(mtcars, cyl < 6 & vs == 1) filter(mtcars, cyl < 6 | vs == 1) filter(mtcars, cyl < 6, vs == 1) filter(mtcars, row_number() == 1L) filter(mtcars, row_number() == n()) filter(mtcars, between(row_number(), 5, n()-2)) mutate(mtcars, displ_l = disp / 61.0237) transmute(mtcars, displ_l = disp / 61.0237) mutate(mtcars, cyl = NULL) slice(mtcars, 1L) slice(mtcars, n()) slice(mtcars, 5:n()) slice(mtcars, c(2,4,5,10)) (by_cyl <- group_by(mtcars, cyl)) slice(by_cyl, 1:2) tbl_df(mtcars) glimpse(mtcars) View(mtcars) df = tibble::rownames_to_column(df, var='cars') df2 = mtcars names(df) tibble::has_rownames(mtcars) tibble::has_rownames(df) head(df2[1:5]) tibble::remove_rownames(df2) tibble::rowid_to_column(df, var = "rowid") head(df[1:5]) tibble::column_to_rownames(df, var = "cars") mtcars %>% group_by(am) mtcars %>% group_by(am) %>% summarise(mean(mpg), max(wt)) summarise(mtcars, mean(disp)) summarise(group_by(mtcars, cyl), mean(disp)) summarise(group_by(mtcars, cyl), m = mean(disp), sd = sd(disp)) mtcars %>% group_by(am, gear) %>% summarise_all(mean) mtcars %>% group_by(am, gear)%>% summarise_all(c("min", "max")) mtcars %>% group_by(am, gear)%>% summarise_all(funs(med = median)) mtcars %>% summarise(mean(mpg), max(wt)) mtcars %>% summarise_all(mean) mtcars %>% select(wt, gear)%>% summarise_all(c("min", "max")) mtcars %>% summarise_all(funs(med = median)) mtcars %>% summarise_if(is.numeric, mean, na.rm = TRUE) iris %>% summarise_if(is.numeric, mean, na.rm = TRUE) mtcars %>% summarise_at(c("mpg", "wt"), mean, na.rm = TRUE) dplyr::tbl_df(iris) print(dplyr::tbl_df(mtcars), n=20) tbl_df(mtcars) %>% print(n = Inf) tbl_df(mtcars) %>% print(width = Inf) tbl_df(mtcars) %>% as.data.frame(mtcars) glimpse(mtcars) df = mtcars row.names(df) = NULL df %>% select(mpg) select(mtcars, mpg, vs) mtcars %>% dplyr::select(vs, mpg, wt) mtcars %>% group_by(cyl) %>% summarise(avgwt = mean(wt), meanhp = mean(hp)) %>% arrange( desc(meanhp), avgwt) mtcars names(mtcars) filter(mtcars, mpg > 23 | wt < 2) mtcars %>% filter(mpg > 23 & wt > 2) mtcars %>% select(mpg, wt) %>% filter(mpg > 23) mtcars %>% filter(iris, Sepal.Length > 7) filter(mtcars, cyl == 4) distinct(mtcars) df = data.frame(a=c(2,2),b=c(2,2)) df distinct(df) sample_frac(mtcars, 0.2, replace=T) sample_n(mtcars, 60, replace=T) %>% select(mpg) slice(mtcars,10:14) top_n(mtcars,-2, mpg) select(mtcars, mpg) %>% arrange(desc(mpg)) select(mtcars, mpg, wt) select(mtcars, contains('a')) names(mtcars) select(mtcars, contains ='vs') select(mtcars, everything()) mtcars %>% group_by(cyl, am) %>% summarise_all(mean) df = data.frame(marks=c(1,2,3,7,1)) cbind(df, dplyr::mutate_each(df, funs(min_rank))) mtcars %>% lead() %>% lag() dplyr::n(mtcars) select(mtcars, mpg2 = mpg) df = mtcars[1:4] names(df) = c('MPG','C1','C2','C3') df= rename(df, C5=C1) names(df) df rename(df, marks2 = marks) df %>% mutate(marks2 = marks + 2, marks3 = marks + 4) df %>% transmute(marks2 = marks + 2, marks3 = marks + 4) library(nycflights13) data(flights) destinations <- group_by(flights, dest) destinations summarise(destinations, planes = n_distinct(tailnum), flights = n() ) select(iris, -ends_with("Width")) %>% head vars <- c("Petal.Length", "Petal.Width1") select(iris, from=1, to=n()) filter(mtcars, row_number() == n()) filter(mtcars, between(row_number(), 5, n())) mtcars %>% group_by(cyl) %>% filter(1:3) > mtcars %.% group_by(cyl) %.% filter(sample(n(), 10)) group_by( mtcars, cyl ) %>% integer_filter(1:2) ?integer_filter mtcars %>% 
slice(from = 1, to = n(), by = 2) slice(mtcars, from = 2, to = n(), by = 2) slice(mtcars, from = 1, to = 10) slice(mtcars, n()-10: n()) slice(mtcars, 1:4) mtcars df <- tibble( g1 = c(1, 1, 2, 2, 2), g2 = c(1, 2, 1, 2, 1), a = sample(5), b = sample(5) ) df df %>% slice(n()-2:n()) var1 <- quo(letters[1:5]) var1 quo(toupper(!!var1)) quo(toupper(letters[1:5])) quo(toupper(!!letters[1:5])) quo(toupper(UQ(letters[1:5]))) toupper(letters[1:5]) quote(toupper(letters[1:5])) head(mtcars) slice(mtcars, 1:5) slice(mtcars, 1) slice(mtcars, 1L) tail(mtcars,n=5) slice(mtcars, n()-5:n()) slice(mtcars, n()) slice(mtcars, n() - 1) mtcars %>% top_n(2) mtcars %>% top_n(-2) mtcars %>% group_by(cyl) %>% tally(cyl) %>% top_n(1, cyl) dim(mtcars) bind_rows(mtcars, mtcars) bind_cols(mtcars,mtcars) gtable_combine(list(mtcars, mtcars)) dim_desc(mtcars) f1 <- factor("a") f2 <- factor("b") c(f1, f2) unlist(list(f1, f2)) gtable_combine(f1, f2) gtable_combine(list(f1, f2)) slice( mtcars, c(1L,3L,2L,7L)) by_cyl <- mtcars %>% group_by(cyl) mtcars %>% slice(1) by_cyl %>% slice(1) mtcars %>% slice(n()) by_cyl %>% slice(n()) mtcars %>% slice(10) by_cyl %>% slice(10) mtcars %>% slice(1:9) by_cyl %>% slice(1:3) mtcars %>% slice(c(1, 3, 9)) by_cyl %>% slice(c(1, 3, 5)) mtcars %>% slice(seq(2, n(), by = 2)) by_cyl %>% slice(seq(2, n(), by = 2)) %>% select(cyl, everything()) mtcars %>% group_by(cyl, am) %>% slice(1) by_cyl %>% slice(1) by_cyl %>% slice() df <- data.frame(x = c(10, 4, 1, 6, 3, 1, 1)) df %>% top_n(2) df %>% top_n(-2)
library(ggplot2)
library(patchwork)
data(VADeaths)
reshape_VADeaths = transform(
  expand.grid(sex = colnames(VADeaths), age = rownames(VADeaths)),
  rates = as.vector(t(VADeaths))
)
p = ggplot(data = reshape_VADeaths, aes(x = age, y = rates, fill = sex)) +
  labs(x = "Age", y = "Death rate", fill = "Sex") +
  scale_fill_discrete(labels = c("Rural Male", "Rural Female", "Urban Male", "Urban Female"))
p1 = p + geom_col(position = "stack")
p2 = p + geom_col(position = "dodge")
print(p1 / p2)
makeRLearner.regr.nnet = function() { makeRLearnerRegr( cl = "regr.nnet", package = "nnet", par.set = makeParamSet( makeIntegerLearnerParam(id = "size", default = 3L, lower = 0L), makeIntegerLearnerParam(id = "maxit", default = 100L, lower = 1L), makeLogicalLearnerParam(id = "skip", default = FALSE), makeNumericLearnerParam(id = "rang", default = 0.7), makeNumericLearnerParam(id = "decay", default = 0, lower = 0), makeLogicalLearnerParam(id = "Hess", default = FALSE), makeLogicalLearnerParam(id = "trace", default = TRUE, tunable = FALSE), makeIntegerLearnerParam(id = "MaxNWts", default = 1000L, lower = 1L, tunable = FALSE), makeNumericLearnerParam(id = "abstol", default = 1.0e-4), makeNumericLearnerParam(id = "reltol", default = 1.0e-8) ), par.vals = list(size = 3L), properties = c("numerics", "factors", "weights"), name = "Neural Network", short.name = "nnet", note = "`size` has been set to `3` by default.", callees = "nnet" ) } trainLearner.regr.nnet = function(.learner, .task, .subset, .weights = NULL, ...) { if (is.null(.weights)) { f = getTaskFormula(.task) nnet::nnet(f, data = getTaskData(.task, .subset), linout = TRUE, ...) } else { f = getTaskFormula(.task) nnet::nnet(f, data = getTaskData(.task, .subset), linout = TRUE, weights = .weights, ...) } } predictLearner.regr.nnet = function(.learner, .model, .newdata, ...) { predict(.model$learner.model, newdata = .newdata, ...)[, 1L] }
L_regress <- function(y, x, verb=TRUE) { m1=anova(lm(y ~ x)) tss <- sum(m1$`Sum Sq`) N <- (sum(m1$Df)+1) lin_df <- m1$Df[1] S_LN <- -0.5 * N * (log(m1$`Sum Sq`[2]) - log(tss)) k2 <- 1 k1 <- m1$Df[1] + 1 Ac <- k1 - k2 S_LNc <- S_LN - Ac m3 <- anova(lm(y ~ x + I(x^2) + I(x^3))) m3 quad_ss <- m3$`Sum Sq`[2] + m3$`Sum Sq`[1] unex_q_ss <- tss - quad_ss S_QL <- -0.5 * N * (log(unex_q_ss) - log(m1$`Sum Sq`[2])) S_QLc <- S_QL - 1 unex_c_ss <- tss - sum(m3$`Sum Sq`[1:3]) S_QC <- -0.5 * N * (log(unex_q_ss) - log(unex_c_ss)) S_QCc <- S_QC + 1 plot(x, y) fit1<-lm(y ~ poly(x,1,raw=TRUE)) linear = fit1$coefficient[2]*x + fit1$coefficient[1] fit2<-lm(y ~ poly(x,2,raw=TRUE)) quadratic = fit2$coefficient[3]*x^2 + fit2$coefficient[2]*x + fit2$coefficient[1] fit3<-lm(y ~ poly(x,3,raw=TRUE)) cubic = fit3$coefficient[4]*x^3 + fit3$coefficient[3]*x^2 + fit3$coefficient[2]*x + fit3$coefficient[1] lines(x,linear, col="black") lines(x,quadratic, col="red") lines(x,cubic, col="blue",lty=2) if(verb) cat("\nSupport for linear fit over null model ", round(S_LNc,3), sep= "", "\n Support for quadratic versus linear fit = ", round(S_QLc,3), "\n Support for quadratic versus cubic = ", round(S_QCc,3), "\n N = ", N, "\n P values for linear, quadratic and cubic fits = ", m3$`Pr(>F)`[1], " ", m3$`Pr(>F)`[2], " ", m3$`Pr(>F)`[3], "\n ") invisible(list(S.LNc = S_LNc, S.LN = S_LN, S.QLc = S_QLc, S.QL = S_QL, S.QCc = S_QCc, N = N, p.vals = m3$`Pr(>F)`[1:3])) }
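# Minimal usage sketch for L_regress() with simulated data; the coefficients,
# sample size and noise level below are arbitrary choices for illustration.
set.seed(1)
x <- 1:30
y <- 2 + 0.5 * x + rnorm(30, sd = 2)
res <- L_regress(y, x)
res$S.LNc   # corrected support for the linear fit over the null model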
plotTsAnom <- function(x, xlab = NULL, ylab = NULL, strip.labels = colnames(x), ...) { if (!is.ts(x)) stop("x must be of class 'ts'") if (missing(xlab)) xlab <- "" if (missing(ylab)) ylab <- "" if (is.matrix(x)) { strip.labels <- strip.labels colnames(x) <- gsub(' ', '.', colnames(x)) x.mean <- apply(x, 2, mean, na.rm = TRUE) x.mean.df <- data.frame(variable = factor(names(x.mean)), x.mean) d <- data.frame(time=as.Date(time(x)), x) d1 <- melt(d, id = 'time') d2 <- merge(d1, x.mean.df) d3 <- within(d2, variable <- factor(variable, levels = levels(variable), labels = strip.labels)) d3 <- na.omit(d3) d3$ymin. <- with(d3, ifelse(value >= x.mean, x.mean, value)) d3$ymax. <- with(d3, ifelse(value >= x.mean, value, x.mean)) d3$colour. <- with(d3, value >= x.mean) ggplot(d3, aes_string(x="time", y="value", ymin="ymin.", ymax="ymax.", colour="colour.")) + geom_linerange() + geom_hline(aes(yintercept = x.mean), size = 0.25) + labs(x = xlab, y = ylab) + facet_wrap(~ variable, ...) + theme(legend.position='none', panel.grid.minor = element_blank(), axis.text.x = element_text(angle=45, colour="grey50")) } else { x.mean <- mean(x, na.rm = TRUE) d1 <- data.frame(time = as.Date(time(x)), x = as.numeric(x), x.mean) d1 <- na.omit(d1) d1$ymin. <- with(d1, ifelse(x >= x.mean, x.mean, x)) d1$ymax. <- with(d1, ifelse(x >= x.mean, x, x.mean)) d1$colour. <- with(d1, x >= x.mean) ggplot(d1, aes_string(x="time", y="x", ymin="ymin.", ymax="ymax.", colour="colour.")) + geom_linerange() + geom_hline(aes(yintercept = x.mean), size = 0.25) + labs(x = xlab, y = ylab) + theme(legend.position='none', panel.grid.minor = element_blank(), axis.text.x = element_text(angle=45, colour="grey50")) } }
library(chorddiag)
m <- matrix(c(11975, 5871, 8916, 2868,
              1951, 10048, 2060, 6171,
              8010, 16145, 8090, 8045,
              1013, 990, 940, 6907),
            byrow = TRUE, nrow = 4, ncol = 4)
groupNames <- c("black", "blonde", "brown", "red")
row.names(m) <- groupNames
colnames(m) <- groupNames
m
chorddiag(m, showGroupnames = TRUE)
# The hex colour strings were lost from this snippet; the palette below is
# assumed from the standard d3/chorddiag hair-colour example.
groupColors <- c("#000000", "#FFDD89", "#957244", "#F26223")
chorddiag(m, groupColors = groupColors, groupnamePadding = 50, margin = 100,
          tooltipGroupConnector = " prefer ")
chorddiag(t(m), groupColors = groupColors, groupnamePadding = 50, margin = 100,
          tooltipGroupConnector = " preferred by ")
chorddiag(100*m/rowSums(m), groupColors = groupColors, groupnamePadding = 30,
          margin = 60, tickInterval = 5, tooltipGroupConnector = " prefer ",
          tooltipUnit = " %", precision = 2)
if (requireNamespace("dplyr", quietly = TRUE)) {
  library(dplyr)
  titanic_tbl <- tibble::as_tibble(Titanic)
  titanic_tbl <- titanic_tbl %>% mutate(across(where(is.character), as.factor))
  by_class_survival <- titanic_tbl %>%
    group_by(Class, Survived) %>%
    summarise(Count = sum(n)) %>%
    ungroup()
  titanic.mat <- matrix(by_class_survival$Count, nrow = 4, ncol = 2, byrow = TRUE)
  dimnames(titanic.mat) <- list(Class = levels(titanic_tbl$Class),
                                Survival = levels(titanic_tbl$Survived))
  print(titanic.mat)
  # The original colour values were also lost here; six placeholder colours are
  # used (four classes plus two survival groups for the bipartite diagram).
  groupColors <- c("#2171b5", "#6baed6", "#bdd7e7", "#eff3ff", "#74c476", "#238b45")
  chorddiag(titanic.mat, type = "bipartite", groupColors = groupColors, tickInterval = 50)
}
balancedSplit <- function(fac, size) { trainer <- rep(FALSE, length(fac)) for (lev in levels(fac)) { N <- sum(fac==lev) wanted <- max(1, trunc(N*size)) trainer[fac==lev][sample(N, wanted)] <- TRUE } trainer }
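# Minimal usage sketch for balancedSplit() with a made-up factor: draw a
# stratified training indicator that keeps roughly 70% of each level.
set.seed(7)
grp <- factor(rep(c("a", "b", "c"), times = c(10, 6, 4)))
in_train <- balancedSplit(grp, size = 0.7)
table(grp, in_train)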
.node.desc<- function(tr, node) { ee<- tr$edge nT<- length(tr$tip.label) nN<- tr$Nnode d.tips<- numeric() d.nodes<- numeric() desc<- ee[ee[,1]==node,2] d.tips<- append(d.tips, desc[desc<=nT]) d.nodes<- append(d.nodes, desc[desc>nT]) if (node<=nT) d.tips<- node else{ while(!all(desc<=nT)) { desc<- ee[ee[,1] %in% desc,2] d.tips<- append(d.tips, desc[desc<=nT]) d.nodes<- append(d.nodes, desc[desc>nT]) } } return(list(tips=d.tips, nodes=d.nodes, node.label=node)) }
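# Minimal usage sketch for .node.desc(): list the tips and internal nodes that
# descend from an internal node of a random tree. Assumes the ape package.
library(ape)
set.seed(42)
tr <- rtree(5)
.node.desc(tr, node = length(tr$tip.label) + 1)  # descendants of the root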
library(testthat) context("taxmap parsers") test_that("Taxmap can be intialized from complex data", { my_vector <- c("A;B;C;D", "A;E;F;G", "A;B;H;I") my_list_1 <- list("A;B;C;D", "A;E;F;G", c("A;B", "H;I")) my_list_2 <- list(c(Phylum = "A", Class = "B", Order = "C", Family = "D"), c(Phylum = "A", Class = "E", Order = "F", Family = "G"), c(Phylum = "A", Class = "B", Order = "H", Family = "I")) my_frame <- data.frame(tax = c("A;B;C", "A;E;F", "A;B;H"), species = c("D", "G", "I")) my_frames <- list(data.frame(tax = c("A", "B", "C", "D")), data.frame(tax = c("A", "E", "F", "G")), data.frame(tax = c("A", "B", "H", "I"))) vector_result <- parse_tax_data(my_vector, include_tax_data = FALSE) list_1_result <- parse_tax_data(my_list_1, include_tax_data = FALSE) list_2_result <- parse_tax_data(my_list_2, include_tax_data = FALSE) frame_result <- parse_tax_data(my_frame, class_cols = c("tax", "species"), include_tax_data = FALSE) expect_equal(length(vector_result$taxon_ids()), 9) expect_equal(length(vector_result$roots()), 1) expect_equal(vector_result, list_1_result) expect_equal(vector_result, list_2_result) expect_equal(vector_result, frame_result) result <- parse_tax_data(my_list_2, include_tax_data = FALSE, named_by_rank = TRUE) expect_true(all(c("Phylum", "Class", "Order", "Family") %in% result$taxon_ranks())) test_obj <- parse_tax_data(my_vector, list(test = letters[1:3]), mappings = c("{{index}}" = "{{index}}")) expect_equal(test_obj$map_data(test, taxon_names), structure(c("D", "G", "I"), .Names = c("a", "b", "c"))) a_dataset <- data.frame(my_index = c(3, 2), dataset_key = c("key_3", "key_2")) rownames(a_dataset) <- c("name_3", "name_2") a_tax_data <- data.frame(tax = c("A;B;C", "A;E;F", "A;B;H"), species = c("D", "G", "I"), tax_key = c("key_1", "key_2", "key_3")) rownames(a_tax_data) <- c("name_1", "name_2", "name_3") test_obj <- parse_tax_data(a_tax_data, class_cols = c("tax", "species"), datasets = list(my_data = a_dataset), mappings = c("{{index}}" = "my_index")) expect_equal(test_obj$data$my_data$taxon_id, c("j", "i")) test_obj <- parse_tax_data(a_tax_data, class_cols = c("tax", "species"), datasets = list(my_data = a_dataset), mappings = c("{{name}}" = "{{name}}")) expect_equal(test_obj$data$my_data$taxon_id, c("j", "i")) test_obj <- parse_tax_data(a_tax_data, class_cols = c("tax", "species"), datasets = list(my_data = a_dataset), mappings = c("tax_key" = "dataset_key")) expect_equal(test_obj$data$my_data$taxon_id, c("j", "i")) my_frames <- list(data.frame(tax = c("A", "B", "C", "D"), my_rank = c("P", "C", "O", "F")), data.frame(tax = c("A", "E", "F", "G"), my_rank = c("P", "C", "O", "F")), data.frame(tax = c("A", "B", "H", "I"), my_rank = c("P", "C", "O", "F"))) test_obj <- parse_tax_data(my_frames, class_cols = "tax") expect_equal(length(test_obj$taxon_ids()), nrow(test_obj$data$tax_data)) expect_true("my_rank" %in% colnames(test_obj$data$tax_data)) raw_data <- c("K__Mammalia;P__Carnivora;C__Felidae;G__Panthera;S__leo", "K__Mammalia;P__Carnivora;C__Felidae;G__Panthera;S__tigris", "K__Mammalia;P__Carnivora;C__Felidae;G__Ursus;S__americanus") result <- parse_tax_data(raw_data, class_sep = ";", class_regex = "^(.+)__(.+)$", class_key = c(my_rank = "taxon_rank", tax_name = "taxon_name"), include_match = FALSE) expect_true(all(c("K", "P", "C", "G", "S") %in% result$taxon_ranks())) expect_error(parse_tax_data(raw_data, class_sep = ";", class_regex = "^(.+)__(.+)$", class_key = c(my_rank = "taxon_rank", tax_name = "taxon_name"), include_match = FALSE, named_by_rank = TRUE)) result <- 
parse_tax_data(raw_data, class_sep = ";", class_regex = "^(.+)__(.+)$", class_key = c("taxon_rank", "taxon_name")) expect_true(all(c("taxon_rank_match", "taxon_name_match") %in% colnames(result$data$class_data))) result <- parse_tax_data(raw_data, class_sep = ";", class_regex = "^(.+)__(.+)(.+)(.+)$", class_key = c("taxon_rank", "taxon_name", "info", "info")) expect_true("info_match_1" %in% colnames(result$data$class_data)) result <- parse_tax_data(raw_data, class_sep = ";", class_regex = "^(.+)__(.+)(.+)(.+)$", class_key = c("taxon_rank", "taxon_name", "info", x = "info")) expect_true(all(c("info_match", "x") %in% colnames(result$data$class_data))) expect_error(parse_tax_data(raw_data, class_sep = ";", class_regex = "^(.+)_invalid_(.+)$", class_key = c(rank = "info", tax_name = "taxon_name"), include_match = FALSE), "could not be matched by the regex supplied") expect_error(parse_tax_data(raw_data, class_cols = "Not a column"), 'No item') expect_error(parse_tax_data(1:3, class_cols = "Not a column"), 'No item named') expect_error(parse_tax_data(1:3, class_cols = 10), 'out of bounds for inputs:') expect_error(parse_tax_data(1:3, datasets = list(), mappings = 1), 'must have the same number of values') expect_error(parse_tax_data(1:3, datasets = 1, mappings = 1), 'The mapping options must be named.') expect_error(parse_tax_data(1:3, datasets = 1, mappings = c(sdaff = 2)), 'Invalid inputs to the `mappings` found') expect_equal(taxon_names(parse_tax_data(c())), character(0)) expect_equal(taxon_names(parse_tax_data(character(0))), character(0)) expect_equal(taxon_names(parse_tax_data(list())), character(0)) expect_equal(taxon_names(parse_tax_data(data.frame())), character(0)) }) test_that("Taxmap can be intialized from queried data", { skip_on_cran() raw_data <- data.frame(tax = c("Mammalia;Carnivora;Felidae", "Mammalia;Carnivora;Felidae", "Mammalia;Carnivora;Ursidae"), species = c("Panthera leo", "Panthera tigris", "Ursus americanus"), my_tax_id = c("9689", "9694", "9643"), my_seq = c("AB548412", "FJ358423", "DQ334818"), species_id = c("A", "B", "C")) abundance <- data.frame(id = c("A", "B", "C", "A", "B", "C"), sample_id = c(1, 1, 1, 2, 2, 2), counts = c(23, 4, 3, 34, 5, 13)) common_names <- c(A = "Lion", B = "Tiger", C = "Bear", "Oh my!") foods <- list(c("ungulates", "boar"), c("ungulates", "boar"), c("salmon", "fruit", "nuts")) Sys.sleep(1) name_result = lookup_tax_data(raw_data, type = "taxon_name", datasets = list(counts = abundance, my_names = common_names, foods = foods), mappings = c("species_id" = "id", "species_id" = "{{name}}", "{{index}}" = "{{index}}"), column = "species") Sys.sleep(1) expect_equal(lookup_tax_data("poa annus", type = "fuzzy_name")$taxon_names(), lookup_tax_data("Poa annua", type = "taxon_name")$taxon_names()) Sys.sleep(1) id_result = lookup_tax_data(raw_data, type = "taxon_id", datasets = list(counts = abundance, my_names = common_names, foods = foods), mappings = c("species_id" = "id", "species_id" = "{{name}}", "{{index}}" = "{{index}}"), column = "my_tax_id") Sys.sleep(1) seq_result = lookup_tax_data(raw_data, type = "seq_id", datasets = list(counts = abundance, my_names = common_names, foods = foods), mappings = c("species_id" = "id", "species_id" = "{{name}}", "{{index}}" = "{{index}}"), column = "my_seq") Sys.sleep(1) expect_equal(name_result, id_result) expect_equal(name_result, seq_result) expect_error(lookup_tax_data(1:3, type = "seq_id", database = "not valid"), "not a valid database") expect_error(lookup_tax_data(1:3, type = "seq_id", database = 
"bold"), "not a valid database") expect_error(lookup_tax_data(raw_data, column = "Not a column", type = "seq_id"), 'No column "Not a column" in input table') expect_error(lookup_tax_data(1:3, column = "Not a column", type = "seq_id"), 'No item named "Not a column" in the following inputs:') expect_error(lookup_tax_data(1:3, column = 10, type = "seq_id"), 'out of bounds for inputs:') Sys.sleep(1) raw_data <- data.frame(species = c("Panthera leo", "not a taxon", "Ursus americanus"), my_tax_id = c("9689", "not a taxon id 6", "9643"), my_seq = c("AB548412", "777777777777", "DQ334818"), species_id = c("A", "B", "C")) expect_warning(result <- lookup_tax_data(raw_data, type = "taxon_name", column = "species")) expect_warning(result <- lookup_tax_data(raw_data, type = "taxon_name", column = "species"), ask = FALSE) expect_equal(unname(result$data$query_data$taxon_id[2]), "unknown") expect_warning(result <- lookup_tax_data(raw_data, type = "taxon_id", column = "my_tax_id")) expect_equal(unname(result$data$query_data$taxon_id[2]), "unknown") expect_warning(result <- lookup_tax_data(raw_data, type = "seq_id", column = "my_seq")) expect_equal(unname(result$data$query_data$taxon_id[2]), "unknown") expect_equal(taxon_names(lookup_tax_data(c())), character(0)) expect_equal(taxon_names(lookup_tax_data(character(0))), character(0)) expect_equal(taxon_names(lookup_tax_data(list())), character(0)) expect_equal(taxon_names(lookup_tax_data(data.frame())), character(0)) }) test_that("Taxmap can be intialized from raw strings", { raw_data <- c(">var_1:A--var_2:9689--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Panthera;S__leo", ">var_1:B--var_2:9694--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Panthera;S__tigris", ">var_1:C--var_2:9643--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Ursus;S__americanus") result <- extract_tax_data(raw_data, key = c(var_1 = "info", var_2 = "info", tax = "class"), regex = "^>var_1:(.+)--var_2:(.+)--non_target--tax:(.+)$", class_sep = ";", class_regex = "^(.+)__(.+)$", class_key = c(my_rank = "info", tax_name = "taxon_name")) expect_equal(length(result$taxa), 8) expect_equal(result$data$tax_data$var_1, c("A", "B", "C")) expect_true("my_rank" %in% colnames(result$data$class_data)) raw_data <- c("K;Mammalia;P;Carnivora;C;Felidae;G;Panthera;S;leo;", "K;Mammalia;P;Carnivora;C;Felidae;G;Panthera;S;tigris;", "K;Mammalia;P;Carnivora;C;Felidae;G;Ursus;S;americanus;") result <- extract_tax_data(raw_data, key = c(tax = "class"), regex = "(.*)", class_regex = "(.+?);(.*?);", class_key = c(my_rank = "info", tax_name = "taxon_name")) expect_equal(length(result$taxa), 8) expect_equal(length(result$roots()), 1) expect_true("my_rank" %in% colnames(result$data$class_data)) raw_data <- c(">var_1:A--var_2:9689--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Panthera;S__leo", "not a match", ">var_1:C--var_2:9643--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Ursus;S__americanus") expect_warning(extract_tax_data(raw_data, key = c(var_1 = "info", var_2 = "info", tax = "class"), regex = "^>var_1:(.+)--var_2:(.+)--non_target--tax:(.+)$", class_sep = ";", class_regex = "^(.+)__(.+)$", class_key = c(my_rank = "info", tax_name = "taxon_name")), "indexes failed to match the regex supplied") expect_equal(taxon_names(extract_tax_data(c())), character(0)) expect_equal(taxon_names(extract_tax_data(character(0))), character(0)) expect_equal(taxon_names(extract_tax_data(list())), character(0)) }) test_that("Taxmap can be intialized from raw strings and lookup data", { 
skip_on_cran() raw_data <- c(">var_1:A--var_2:9689--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Panthera;S__leo", ">var_1:B--var_2:9694--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Panthera;S__tigris", ">var_1:C--var_2:9643--non_target--tax:K__Mammalia;P__Carnivora;C__Felidae;G__Ursus;S__americanus") Sys.sleep(1) result <- extract_tax_data(raw_data, key = c(var_1 = "info", var_2 = "taxon_id", tax = "info"), regex = "^>var_1:(.+)--var_2:(.+)--non_target--tax:(.+)$") expect_true("ncbi_id" %in% colnames(result$data$tax_data)) expect_equivalent(result$roots(value = "taxon_names"), "cellular organisms") })
# Requires the stringi package for the stri_* helpers.
insertSeparators <- function(dates) {
  # Only reformat when no separators are present and every non-NA value
  # parses as an integer (i.e. looks like a bare "YYYYMMDD" string).
  if (!any(stri_detect_regex(dates[!is.na(dates)], pattern = "[-/]"))) {
    if (all(!is.na(suppressWarnings(as.integer(dates[!is.na(dates)]))))) {
      dates <- vapply(dates, function(x) {
        stri_c(stri_sub(x, from = 1, to = 4), "-",
               stri_sub(x, from = 5, to = 6), "-",
               stri_sub(x, from = 7, to = 8))
      }, character(1))
    }
  }
  dates
}
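# Minimal usage sketch (hypothetical input): bare "YYYYMMDD" strings gain "-"
# separators, values that already contain separators or NAs are left alone.
library(stringi)
insertSeparators(c("20200131", NA, "20210228"))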
elevenprobs <- function(X, parnames = FALSE) { listp <- c(as.numeric(c(0.25, 0.5, 1) %o% 10^(-8:-1)), 0.15, 0.20) ltX <- length(X) p1 <- listp[findInterval(1/ltX, listp) + 1] p2 <- listp[findInterval(1/ltX, listp) + 2] p3 <- listp[findInterval(1/ltX, listp) + 3] p11 <- c(p1, p2, p3, 0.25, 0.35, 0.50, 0.65, 0.75, 1-p3, 1-p2, 1-p1) np11 <- character(length(p11)) for (i in 1:length(np11)) { np11[i] <- format(p11[i], nsmall=2, scientific=FALSE) } names(p11) <- if (parnames) { np11 } else { NULL } return(p11) } sevenprobs <- function(X, parnames = FALSE) { listp <- c(as.numeric(c(0.25, 0.5, 1) %o% 10^(-8:-1)), 0.15, 0.20) ltX <- length(X) p1 <- listp[findInterval(1/ltX, listp) + 1] p2 <- listp[findInterval(1/ltX, listp) + 2] p7 <- c(p1, p2, 0.25, 0.50, 0.75, 1-p2, 1-p1) np7 <- character(length(p7)) for (i in 1:length(np7)) { np7[i] <- format(p7[i], nsmall=2, scientific=FALSE) } names(p7) <- if (parnames) { np7 } else { NULL } return(p7) } fiveprobs <- function (X, i = 4, parnames = FALSE) { X <- sort(as.numeric(X[is.finite(X)])) N <- length(X) if (i == 0) { listp <- c(as.numeric(c(0.25, 0.5, 1) %o% 10^(-8:-1)), 0.15, 0.20) p1 <- listp[findInterval(1/N, listp) + 1] p5 <- c(p1, 0.25, 0.5, 0.75, 1-p1) } else { p5 <- c(i/(N+1), 0.25, 0.50, 0.75, (N+1-i)/(N+1)) } if (parnames) { np5 <- character(length(p5)) for (i in 1:length(np5)) { np5[i] <- format(p5[i], nsmall = 2, scientific = FALSE) } names(p5) <- np5 } return(p5) } checkquantiles <- function(x, proba = FALSE, acceptNA = FALSE, STOP = TRUE) { if (acceptNA) { x <- x[!is.na(x)] } n <- length(x) nt1 <- (n == 0) nt2 <- anyNA(x) nt3 <- (dimdim1(x) != 1) nt4 <- (!is(x, "numeric")) nt5 <- (proba && any(x < 0)) nt6 <- (proba && any(x > 1)) nt7 <- any(nt1, nt2, nt3, nt4, nt5, nt6) nt1;nt2;nt3;nt4;nt5;nt6;nt7 z <- if (nt7) { FALSE } else { if (n == 1) { TRUE } else { (sum(x[2:n] <= x[1:(n-1)]) == 0) } } if (STOP && !z) { stop("Error in checkquantiles(): x is not correct (dim, NA, proba, etc...) or not sorted. Please check.") } return(z) } estimkiener11 <- function(x11, p11, ord = 7, maxk = 10) { if (length(x11) != 11) {stop("length(x11) is of wrong size. Must be 11.")} if (length(p11) != 11) {stop("length(p11) is of wrong size. Must be 11.")} if (!is.element(ord, 1:12)) {stop("ord must be in 1:12")} names(x11) <- NULL if (checkquantiles(x11, STOP = FALSE)) { m <- x11[6] k <- .hestimkappa11(x11, p11, ord, maxk) d <- .hestimdelta11(x11, p11, ord) d <- if (abs(d) > (0.90/k)) {sign(d)*0.90/k} else {d} g <- .hestimgamma11(x11, p11, k, ord) e <- kd2e(k, d) a <- ke2a(k, e) w <- ke2w(k, e) z <- c(m, g, a, k, w, d, e) } else { z <- c(NA, NA, NA, NA, NA, NA, NA) } names(z) <- c("m", "g", "a", "k", "w", "d", "e") return(z) } estimkiener7 <- function(x7, p7, maxk = 10) { if (length(x7) != 7) {stop("length(x7) is of wrong size. Must be 7.")} if (length(p7) != 7) {stop("length(p7) is of wrong size. 
Must be 7.")} names(x7) <- NULL if (checkquantiles(x7, STOP = FALSE)) { dx <- abs(x7 - x7[4]) lp7 <- logit(p7) m <- x7[4] k <- ( .hestimkappa6(lp7[5], lp7[7], dx[1], dx[3], dx[5], dx[7], maxk) +.hestimkappa6(lp7[5], lp7[6], dx[2], dx[3], dx[5], dx[6], maxk))/2 d <- log(dx[7]/dx[1]) /4/lp7[7] + log(dx[6]/dx[2]) /4/lp7[6] d <- if (abs(d) > (0.90/k)) {sign(d)*0.90/k} else {d} g <- sqrt(dx[3]*dx[5]) /2/k /sinh(lp7[5] /k) e <- kd2e(k, d) a <- ke2a(k, e) w <- ke2w(k, e) z <- c(m, g, a, k, w, d, e) } else { z <- c(NA, NA, NA, NA, NA, NA, NA) } names(z) <- c("m", "g", "a", "k", "w", "d", "e") return(z) } estimkiener5 <- function(x5, p5, maxk = 20, maxe = 0.90) { names(x5) <- names(p5) <- NULL if (length(x5) != 5) {stop("length(x5) is wrong. Must be 5.")} if (length(p5) != 5) {stop("length(p5) is wrong. Must be 5.")} if (checkquantiles(x5, STOP = FALSE)) { dx <- abs(x5 - x5[3]) lp <- logit(p5) m <- x5[3] d <- log((x5[5]-x5[3])/(x5[3]-x5[1]))/2/lp[5] Q <- (x5[5]-x5[1])/(x5[4]-x5[2])*cosh(d*lp[4])/cosh(d*lp[5]) fOPT<- function (k, q, p, Q) { (Q - sinh(logit(p)/k)/sinh(logit(q)/k))^2 } k <- if (Q <= sinh(lp[5]/maxk)/sinh(lp[4]/maxk)) { maxk } else { optimize(fOPT, c(0.1, maxk), tol = 0.0001, q = p5[4], p = p5[5], Q = Q)$minimum } k <- min(k, abs(1/d)*maxe) e <- kd2e(k, d) a <- kd2a(k, d) w <- kd2w(k, d) g <- (x5[4]-x5[2])/4/k/sinh(lp[4]/k)/cosh(d*lp[4]) z <- c(m, g, a, k, w, d, e) } else { z <- c(NA, NA, NA, NA, NA, NA, NA) } names(z) <- c("m", "g", "a", "k", "w", "d", "e") return(z) } .hestimdelta11 <- function(x11, p11, ord) { if (length(x11) != 11) {stop("length(x11) is of wrong side. Must be 11.")} if (length(p11) != 11) {stop("length(p11) is of wrong side. Must be 11.")} if (!is.element(ord, 1:12)) {stop("ord must be an integer in 1:12")} lp11 <- logit(p11) dx <- abs(x11 - x11[6]) d1 <- log(dx[11]/dx[1]) /2/lp11[11] d2 <- log(dx[10]/dx[2]) /2/lp11[10] d3 <- log(dx[9] /dx[3]) /2/lp11[9] d12 <- (d1 + d2)/2 d123 <- (d1 + d2 + d3)/3 d <- switch(as.character(ord), "1"=d1, "2"=d2, "3"=d12, "4"=d123, "5"=d1, "6"=d2, "7"=d12, "8"=d123, "9"=d1,"10"=d2,"11"=d12,"12"=d123) names(d) <- NULL return(d) } .hestimkappa11 <- function(x11, p11, ord, maxk = 10) { if (length(x11) != 11) {stop("length(x11) is of wrong size. Must be 11.")} if (length(p11) != 11) {stop("length(p11) is of wrong size. 
Must be 11.")} if (!is.element(ord, 1:12)) {stop("ord must be in 1:12")} dx <- abs(x11 - x11[6]) lp11 <- logit(p11) l65 <- lp11[7] l75 <- lp11[8] l3 <- lp11[9] l2 <- lp11[10] l1 <- lp11[11] k <- switch(as.character(ord), "1"= .hestimkappa6(l65, l1, dx[1], dx[5], dx[7], dx[11], maxk), "2"= .hestimkappa6(l65, l2, dx[2], dx[5], dx[7], dx[10], maxk), "3"= mean(c(.hestimkappa6(l65, l1, dx[1], dx[5], dx[7], dx[11], maxk), .hestimkappa6(l65, l2, dx[2], dx[5], dx[7], dx[10], maxk))), "4"= mean(c(.hestimkappa6(l65, l1, dx[1], dx[5], dx[7], dx[11], maxk), .hestimkappa6(l65, l2, dx[2], dx[5], dx[7], dx[10], maxk), .hestimkappa6(l65, l3, dx[3], dx[5], dx[7], dx[ 9], maxk))), "5"= .hestimkappa6(l75, l1, dx[1], dx[4], dx[8], dx[11], maxk), "6"= .hestimkappa6(l75, l2, dx[2], dx[4], dx[8], dx[10], maxk), "7"= mean(c(.hestimkappa6(l75, l1, dx[1], dx[4], dx[8], dx[11], maxk), .hestimkappa6(l75, l2, dx[2], dx[4], dx[8], dx[10], maxk))), "8"= mean(c(.hestimkappa6(l75, l1, dx[1], dx[4], dx[8], dx[11], maxk), .hestimkappa6(l75, l2, dx[2], dx[4], dx[8], dx[10], maxk), .hestimkappa6(l75, l3, dx[3], dx[4], dx[8], dx[ 9], maxk))), "9"= mean(c(.hestimkappa6(l65, l1, dx[1], dx[5], dx[7], dx[11], maxk), .hestimkappa6(l75, l1, dx[1], dx[4], dx[8], dx[11], maxk))), "10"= mean(c(.hestimkappa6(l65, l2, dx[2], dx[5], dx[7], dx[10], maxk), .hestimkappa6(l75, l2, dx[2], dx[4], dx[8], dx[10], maxk))), "11"= mean(c(.hestimkappa6(l65, l1, dx[1], dx[5], dx[7], dx[11], maxk), .hestimkappa6(l65, l2, dx[2], dx[5], dx[7], dx[10], maxk), .hestimkappa6(l75, l1, dx[1], dx[4], dx[8], dx[11], maxk), .hestimkappa6(l75, l2, dx[2], dx[4], dx[8], dx[10], maxk))), "12"= mean(c(.hestimkappa6(l65, l1, dx[1], dx[5], dx[7], dx[11], maxk), .hestimkappa6(l65, l2, dx[2], dx[5], dx[7], dx[10], maxk), .hestimkappa6(l65, l3, dx[3], dx[5], dx[7], dx[ 9], maxk), .hestimkappa6(l75, l1, dx[1], dx[4], dx[8], dx[11], maxk), .hestimkappa6(l75, l2, dx[2], dx[4], dx[8], dx[10], maxk), .hestimkappa6(l75, l3, dx[3], dx[4], dx[8], dx[ 9], maxk)))) names(k) <- NULL return(k) } .hestimgamma11 <- function(x11, p11, k, ord) { dx <- abs(x11 - x11[6]) lp11 <- logit(p11) g75 <- sqrt(dx[4]*dx[8]) /2/k /sinh(lp11[8] /k) g65 <- sqrt(dx[5]*dx[7]) /2/k /sinh(lp11[7] /k) gmm <- (g65 + g75)/2 g <- switch(as.character(ord), "1"=g65, "2"=g65, "3"=g65, "4"=g65, "5"=g75, "6"=g75, "7"=g75, "8"=g75, "9"=gmm, "10"=gmm, "11"=gmm, "12"=gmm) names(g) <- NULL return(g) } .hestimkappa6 <- function(lg, lp, dx1p, dx1g, dxg, dxp, maxk = 10) { h <- lp/lg lgh <- lg * sqrt((-7 +3*h*h) /30) rss <- 1.2 - 1.6 /(-1 + h*h) psi <- sqrt(dx1p *dxp /dx1g /dxg) / h k <- if (psi <= 1) { maxk } else { lgh /sqrt(-1 + sqrt(1 + rss*(-1 +psi))) } k <- if (is.na(k)) { maxk } else { k } k <- if (k < maxk) { k } else { maxk } names(k) <- NULL return(k) }
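# Minimal usage sketch for the probability-grid helpers defined above
# (elevenprobs/sevenprobs/fiveprobs need only base R); the sample is simulated.
set.seed(1)
X <- rnorm(500)
elevenprobs(X, parnames = TRUE)
fiveprobs(X)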
anthro_zscore_triceps_skinfold_for_age <- function(triskin, age_in_days, age_in_months, sex, flag_threshold = 5, growthstandards = growthstandards_tsanthro) { anthro_zscore_adjusted( name = "ts", measure = triskin, age_in_days = age_in_days, age_in_months = age_in_months, sex = sex, growthstandards = growthstandards, flag_threshold = flag_threshold, allowed_age_range = c(91, 1856) ) }
rate_limit <- function(resource_match = NULL, token = NULL) { json <- TWIT_get(token, "/1.1/application/rate_limit_status") resources <- unlist(unname(json$resources), recursive = FALSE) df <- tibble::tibble( resource = names(resources), limit = unlist(lapply(resources, "[[", "limit"), use.names = FALSE), remaining = unlist(lapply(resources, "[[", "remaining"), use.names = FALSE), reset_at = unlist(lapply(resources, "[[", "reset"), use.names = FALSE), ) df$reset_at <- .POSIXct(df$reset_at) df$reset <- round(difftime(df$reset_at, Sys.time(), units = "mins")) if (!is.null(resource_match)) { df <- df[grepl(resource_match, df$resource), ] } df } rate_limit_reset <- function(endpoint, token = NULL) { endpoint <- gsub("^/", "", endpoint) resource <- strsplit(endpoint, "/")[[1]][[1]] params <- list(resource = resource) json <- TWIT_get(token, "/1.1/application/rate_limit_status", params) info <- json$resources[[resource]][[paste0("/", endpoint)]] if (is.null(info)) { stop("Unrecognised endpoint '", endpoint, "'", call. = FALSE) } if (info$remaining > 0) { Sys.time() } else { .POSIXct(info$reset) } } rate_limit_wait <- function(endpoint, token = NULL) { reset <- unclass(rate_limit_reset(endpoint, token)) wait_until(reset, endpoint) invisible() } wait_until <- function(until, api, fps = 8, verbose = TRUE) { until <- unclass(until) seconds <- until - unclass(Sys.time()) if (!verbose) { Sys.sleep(ceiling(seconds)) return() } if (seconds < 0) { return(invisible()) } pb <- progress::progress_bar$new( total = seconds * fps, format = paste0( "Rate limit exceeded for Twitter endpoint '", api, "'. ", "Waiting for refresh in :mins mins :spin" ) ) withr::defer(pb$terminate()) while(Sys.time() < until) { Sys.sleep(1 / fps) mins <- round((until - unclass(Sys.time())) / 60) pb$tick(tokens = list(mins = mins)) } invisible() }
test_that(". is escaped and surrounded by anchors", {
  expect_equal(escape_path("."), "^\\.$")
})
test_that("strip trailing /", {
  expect_equal(escape_path("./"), "^\\.$")
})
ebic.lmm.bsreg <- function(target, dataset, id, wei = NULL, gam = NULL) { dm <- dim(dataset) n <- dm[1] p <- dm[2] if ( p > n ) { res <- paste("The number of variables is higher than the sample size. No backward procedure was attempted") } else { tic <- proc.time() logn <- log(n) if ( is.null(gam) ) { con <- 2 - log(p) / logn } else con <- 2 * gam if ( (con) < 0 ) con <- 0 tool <- numeric(p + 1) ini <- lme4::lmer( target ~ dataset + (1|id), REML = FALSE, weights = wei ) bic0 <- BIC(ini) tool[1] <- bic0 bic <- numeric(p) M <- dim(dataset)[2] - 1 if ( M == 0 ) { mod <- lme4::lmer( target ~ 1 + (1|id), REML = FALSE, weights = wei) bic <- BIC(mod) if (bic0 - bic < 0 ) { info <- matrix( 0, nrow = 0, ncol = 2 ) mat <- matrix( c(1, bic - bic0), ncol = 2 ) } else { info <- matrix( c(1, bic), ncol = 2 ) mat <- matrix(0, nrow = 0, ncol = 2 ) } runtime <- proc.time() - tic colnames(info) <- c("Variables", "eBIC") colnames(mat) <- c("Variables", "eBIC") res <- list(runtime = runtime, info = info, mat = mat ) } else { for (j in 1:p) { mod <- lme4::lmer( target ~ dataset[, -j, drop = FALSE] + (1|id), REML = FALSE, weights = wei) bic[j] <- BIC(mod) + con * lchoose(p, M) } mat <- cbind(1:p, bic ) sel <- which.min( mat[, 2] ) info <- matrix( c(0, 0), ncol = 2 ) colnames(info) <- c("Variables", "eBIC") colnames(mat) <- c("Variables", "eBIC") if ( bic0 - mat[sel, 2] < 0 ) { runtime <- proc.time() - tic res <- list(runtime = runtime, info = info, mat = mat ) } else { info[1, ] <- mat[sel, ] mat <- mat[-sel, , drop = FALSE] dat <- dataset[, -sel, drop = FALSE] tool[2] <- info[1, 2] i <- 2 if ( tool[2] != 0 ) { while ( tool[i - 1] - tool[i ] > 0 & NCOL(dat) > 0 ) { ini <- lme4::lmer( target ~ dat + (1|id), REML = FALSE, weights = wei ) M <- dim(dat)[2] bic0 <- BIC(mod) + con * lchoose(p, M) i <- i + 1 if ( M == 1 ) { mod <- lme4::lmer(target ~ 1 + (1|id), REML = FALSE, weights = wei ) bic <- BIC(mod) tool[i] <- bic if (bic0 - bic < 0 ) { runtime <- proc.time() - tic res <- list(runtime = runtime, info = info, mat = mat ) } else { runtime <- proc.time() - tic info <- rbind(info, c(mat[, 1], bic) ) mat <- mat[-1, , drop = FALSE] res <- list(runtime = runtime, info = info, mat = mat ) dat <- dataset[, -info[, 1], drop = FALSE ] } } else { bic <- numeric(M) M <- dim(dat)[2] - 1 for ( j in 1:(M + 1) ) { mod <- lme4::lmer( target ~ dat[, -j, drop = FALSE] + (1|id), REML = FALSE, weights = wei ) bic[j] <- BIC(mod) + con * lchoose(p, M) } mat[, 2] <- bic sel <- which.min( mat[, 2] ) tool[i] <- mat[sel, 2] if ( bic0 - mat[sel, 2] < 0 ) { runtime <- proc.time() - tic res <- list(runtime = runtime, info = info, mat = mat ) } else { info <- rbind(info, mat[sel, ] ) mat <- mat[-sel, , drop = FALSE] dat <- dataset[, -info[, 1], drop = FALSE ] } } } } } } runtime <- proc.time() - tic res <- list(runtime = runtime, info = info, mat = mat ) } res }
ref_sequence <- function(alignment) { if(!is_align_mat_class(alignment)) stop('`alignment` should be an object of class align_mat.') m <- remove_align_mat_class(alignment) ref_seq <- m[1, ] return(ref_seq) }
f2si <- function(number, unit = "") {
  sifactor <- c(1e-24, 1e-21, 1e-18, 1e-15, 1e-12, 1e-09, 1e-06, 1e-03, 1e+00,
                1e+03, 1e+06, 1e+09, 1e+12, 1e+15, 1e+18, 1e+21, 1e+24)
  pre <- c(" y", " z", " a", " f", " p", " n", " u", " m", "",
           " k", " M", " G", " T", " P", " E", " Z", " Y")
  absolutenumber <- number * sign(number)
  ix <- findInterval(absolutenumber, sifactor)
  # findInterval() returns 0 when the value is below the smallest factor,
  # so test the index itself rather than its length; the unit is pasted on
  # explicitly instead of being passed as a stray named argument.
  if (ix > 0) {
    sistring <- paste(number / sifactor[ix], pre[ix], unit, sep = "")
  } else {
    sistring <- as.character(number)
  }
  return(sistring)
}
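# Minimal usage sketch (arbitrary values): format numbers with SI prefixes.
f2si(1234, unit = "B")     # "1.234 kB"
f2si(0.00002, unit = "s")  # "20 us"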
cs_per <- function(x, knots = NULL, nk = 5, xmax = max(x, na.rm=TRUE), xmin = min(x, na.rm=TRUE)){ if( is.null(knots) ) { knots <- rcspline.eval(x, nk = nk, knots.only = TRUE) } nk <- length(knots) rcs.out <- matrix(NA, ncol = nk, nrow = length(x)) for( j in 1:nk ){ a_j <- (-1 / (xmax - xmin)) * ( ((xmax^2 + xmin^2 + 4 * xmin * xmax) / 2) * (xmax - knots[j]) - ((3 * (xmax + xmin) / 2) * (xmax - knots[j])^2) + (xmax - knots[j])^3 ) b_j <- ((3 * (xmax + xmin) * (xmax - knots[j])) / (2*(xmax - xmin))) - ((3 * (xmax - knots[j])^2) / (2 * (xmax - xmin))) c_j <- ( - (xmax - knots[j])/(xmax - xmin)) rcs.out[, j] <- (a_j * x) + (b_j * (x^2)) + (c_j * (x^3)) + ifelse( (x - knots[j] > 0), (x - knots[j])^3, 0) } attr(rcs.out, "knots") <- knots return(rcs.out) }
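# Minimal usage sketch for cs_per() with a simulated predictor: build the
# periodic restricted cubic spline basis. Assumes rcspline.eval() from Hmisc.
library(Hmisc)
set.seed(2)
x <- runif(200, 0, 10)
basis <- cs_per(x, nk = 5)
dim(basis)            # 200 rows, one column per knot
attr(basis, "knots")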
plotDivRates <- function(rates, facet = TRUE){ message("Using default time units in x-axis label: Age (Ma)") rates_to_plot <- unique(rates$item)[grep("rate", unique(rates$item))] `%>%` <- dplyr::`%>%` p <- rates %>% subset(grepl("rate", item)) %>% ggplot2::ggplot(ggplot2::aes(time, value, color = item)) + ggplot2::geom_step(ggplot2::aes(time, value), direction = "vh") + geom_stepribbon(ggplot2::aes(x = time, ymin = lower, ymax = upper, fill = item), direction = "vh", alpha = 0.4, color = NA) + ggplot2::scale_x_reverse() + ggplot2::xlab("Age (Ma)") + ggplot2::ylab("Rate") + ggplot2::theme_bw() + ggplot2::theme(legend.title = ggplot2::element_blank(), legend.position = "none", panel.grid.major = ggplot2::element_blank(), panel.grid.minor = ggplot2::element_blank(), strip.background = ggplot2::element_blank()) + ggplot2::scale_color_manual(values = colFun(length(rates_to_plot))) + ggplot2::scale_fill_manual(values = colFun(length(rates_to_plot))) if (facet){ p <- p + ggplot2::facet_wrap(dplyr::vars(item), scales = "free_y", labeller = ggplot2::labeller(item = .titleFormatLabeller)) } return(p) }
rvquantile <- function(x, ...) { UseMethod("rvquantile") } rvquantile.rv <- function(x, probs=c(0.025, 0.10, 0.25, 0.50, 0.75, 0.90, 0.975), ignoreInf=FALSE, ...) { if (ignoreInf) { .f <- function (x) { quantile(x[is.finite(x)], probs=probs, ..., na.rm=TRUE) } t(rvsimapply(x, .f)) } else { t(rvsimapply(x, quantile, probs=probs, ..., na.rm=TRUE)) } } rvquantile.rvsummary <- function(x, probs=c(0.025, 0.10, 0.25, 0.50, 0.75, 0.90, 0.975), ...) { Q <- t(sims(x)) all_probs <- attr(Q, "quantiles") M <- NULL name <- character(0) for (p in probs) { ix <- (all_probs==p) if (any(ix)) { M <- cbind(M, Q[,ix,drop=FALSE]) } else { name <- paste(p*100, "%", sep="") M <- cbind(M, NA) colnames(M)[ncol(M)] <- name } } return(M) }
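# Minimal usage sketch: quantiles of a random-variable object. Assumes the rv
# package is installed; rvnorm(), setnsims() and rvsimapply() come from rv.
library(rv)
setnsims(4000)
y <- rvnorm(mean = 0, sd = 1)
rvquantile(y, probs = c(0.05, 0.5, 0.95))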
expect_equal <- function(x, y) {
  stopifnot(isTRUE(all.equal(x, y)))
}
expect_equal_pairs <- function(x, y) {
  # Compare the two pairs tables through the function arguments rather than
  # relying on objects from the global environment.
  setkey(x, .x, .y)
  setkey(y, .x, .y)
  expect_equal(x$.x, y$.x)
  expect_equal(x$.y, y$.y)
}
library(reclin2)
library(parallel)
x <- data.table(a = c(1,1,2,2), b = c(1,2,1,2))
y <- data.table(a = c(3,3,2,2), b = c(1,2,1,2))
cl <- makeCluster(2)
pairs <- cluster_pair_minsim(cl, x, y, on = c("a", "b"), minsim = 2)
pairs_ref <- pair_minsim(x, y, on = c("a", "b"), minsim = 2)
pairs_loc <- cluster_collect(pairs)
expect_equal_pairs(pairs_ref, pairs_loc)
pairs <- cluster_pair_minsim(cl, x, on = c("a", "b"), minsim = 1, deduplication = TRUE)
pairs_ref <- pair_minsim(x, on = c("a", "b"), minsim = 1, deduplication = TRUE)
pairs_loc <- cluster_collect(pairs)
expect_equal_pairs(pairs_ref, pairs_loc)
stopCluster(cl)
marginal.g.oneWay = Vectorize(function(g,F,N,J,rscale,log=FALSE, log.const=0) { dfs = (J-1)/(N*J-J) omega = (1+(N*g/(dfs*F+1)))/(N*g+1) m = log(rscale) - 0.5*log(2*pi) - 1.5*log(g) - rscale^2/(2*g) - (J-1)/2*log(N*g+1) - (N*J-1)/2*log(omega) - log.const ifelse(log,m,exp(m)) },"g")
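# Minimal usage sketch (arbitrary values): the marginal density is vectorized
# over g, so a whole grid of g values can be evaluated in one call.
g_grid <- seq(0.05, 2, length.out = 5)
marginal.g.oneWay(g = g_grid, F = 3.2, N = 20, J = 3, rscale = 0.5)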
df_build_site <- function() { df_build_readme() df_setup_vignette() devtools::document() pkgdown::build_site() } df_build_readme <- function() { if(fs::dir_exists("README_cache")) { fs::dir_delete("README_cache") } rmarkdown::render("README.rmd", output_file = "README.md") if(fs::file_exists("README.html")) { fs::file_delete("README.html") } if(fs::dir_exists("README_cache")) { fs::dir_delete("README_cache") } } df_setup_vignette <- function(excl = "", strip_number = FALSE) { purrr::walk(list.dirs("vignettes/",recursive = FALSE), ~{ fs::dir_delete(.x) }) purrr::walk(list.files("vignettes/", pattern = "*.Rmd", full.names = TRUE), fs::file_delete) lf = list.files("book", pattern="*.Rmd") lf = lf[!is.na(as.integer(sapply(lf, function(x) substr(x, 1, 2))))] lf = lf[sapply(lf, function(x) !(x %in% excl))] purrr::walk(lf, function(file) { fs::file_copy( file.path("book", file), ifelse(strip_number,file.path("vignettes", substr(file, 4, nchar(file))),file.path("vignettes", file)) , overwrite = TRUE) }) NULL } df_test <- function() { devtools::test() } df_build_vignettes_for_cran <- function() { rmd_files = list.files("vignettes/", pattern = "*.Rmd", full.names = TRUE) purrr::map(rmd_files, fs::file_delete) rmd_files = list.files("vignettes/", pattern = "*.tex", full.names = TRUE) purrr::map(rmd_files, fs::file_delete) rmd_files = list.files("vignettes/", pattern = "*.pdf", full.names = TRUE) purrr::map(rmd_files, fs::file_delete) df_ready_for_cran() lapply( list.files("vignettes/", pattern = "*.Rmd", full.names = TRUE), rmarkdown::render ) rmd_files = list.files("vignettes/", pattern = "*.pdf", full.names = TRUE) rmd_files = paste0(substr(rmd_files, 1, nchar(rmd_files)-3),"Rmd") purrr::map(rmd_files, function(x) { if (file.exists(x)) { fs::file_delete(x) } }) rmd_files = list.files("vignettes/", pattern = "*.tex", full.names = TRUE) purrr::map(rmd_files, fs::file_delete) rmd_files = list.dirs("vignettes/", recursive = FALSE) purrr::map(rmd_files, fs::file_delete) mergedf = data.frame( pdfs = c("concepts", "convenience-features", "custom-group-by", "data-table-syntax", "glm", "group-by", "ingesting-data", "intro-disk-frame"), asis = c("concepts", "convenience-features", "custom-group-by", "data-table-syntax", "glm", "group-by", "ingesting-data", "intro-disk-frame"), index_entry = c( "Key disk.frame concepts", "Convenience Features", "Custom Group-by", "Using data.table syntax", "Generalized Linear Models (GLMs)", "Group-by", "Ingesting Data", "Quick-start" ) ) mergedf$rmd_files = paste0("vignettes/", mergedf$pdfs, ".pdf") abc = data.frame(rmd_files = list.files("vignettes/", pattern = "*.pdf", full.names = TRUE)) mergedf = mergedf %>% left_join(abc, by = "rmd_files") purrr::walk2(paste0(mergedf$rmd_files, ".asis"), mergedf$index_entry, function(x, y) { xf = file(x) writeLines(c( glue::glue("%\\VignetteIndexEntry{|y|}", .open="|", .close="|"), glue::glue("%\\VignetteEngine{R.rsp::asis}", .open="|", .close="|")), xf) close(xf) }) } df_ready_for_cran <- function() { df_build_readme() devtools::clean_vignettes() df_setup_vignette(excl = c("08-more-epic.Rmd", "06-vs-dask-juliadb.Rmd", "01-intro.Rmd"), strip_number = TRUE) devtools::document() if(fs::dir_exists("tests")) { fs::dir_copy("tests", "tests_manual") fs::dir_delete("tests") } if(fs::dir_exists("README_cache")) { fs::dir_delete("README_cache") } } df_check <- function() { df_ready_for_cran() rmd_files = list.files("vignettes/", pattern = "*.Rmd", full.names = TRUE) purrr::map(rmd_files, fs::file_delete) devtools::check(args = c('--as-cran')) } 
df_release <- function() { df_ready_for_cran() rmd_files = list.files("vignettes/", pattern = "*.Rmd", full.names = TRUE) purrr::map(rmd_files, fs::file_delete) devtools::release() } if(F) { df_check() }
library(tidymodels) library(RWeka) library(janitor) dry_beans <- read.arff(url("https://www.muratkoklu.com/datasets/vtdhnd02.php")) %>% dplyr::rename(AspectRatio = AspectRation) %>% clean_names() %>% as_tibble() %>% mutate(class = tolower(as.character(class)), class = factor(class)) names(dry_beans) <- gsub("([1-4]$)", "_\\1", names(dry_beans), perl = TRUE) save(dry_beans, file = "RData/dry_beans.RData", compress = "xz", version = 2)
ystand <- function(fixed,clustername,data,method,y,N,p,nlevel,stand,affequiv){ sv=diag(p) if(method=="ls"){method="identity"} if(method=="identity" & p>1){affequiv=TRUE} if(method=="mixed") { rando="~1|" for(i in 1:(nlevel-1)) { if(i==(nlevel-1)) { rando=paste0(rando,clustername[i]) } else if(i<(nlevel-1)) rando=paste0(rando,clustername[i],"/") } rando=formula(rando) } if(p==1) {if(stand=="location") { if(method=="identity") { yc=y-mean(y) } else if(method=="sign") { yc=sign(y-median(y)) } else if(method=="rank") { ray=rank(y) yc=ray-mean(ray) } } else if(stand=="reg") { if(method=="identity") { yc=lm(fixed,data)$residuals } if(method=="mixed") { yc=residuals(lme(fixed,data,rando),level=0) } else if(method=="sign") { yc=sign(rq(fixed,data,tau=.5)$residuals) } else if(method=="rank") { resr=rank(rfit(fixed,data)$residuals) yc=resr-mean(resr) } } } else if(p>1) { if(stand=="location") { estim=mv.1sample.est(y,score=method,stand="inner",maxiter=10000) u=estim$location yres=y-matrix(u,nrow=N,ncol=p,byrow=T) } else if(stand=="reg") { if(method=="identity") { yres=lm(fixed,data=data)$residuals } else if(method=="sign" | method=="rank") { yres=mv.l1lm(fixed,scores=method,stand="inner",maxiter=10000,data=data)$residuals } } if(affequiv==TRUE) { if(method=="identity" | method=="sign") { sha=mv.shape.est(yres,score=method,location=rep(0,p),estimate="inner") } else if(method=="rank") { sha=mv.shape.est(yres,score=method,estimate="inner") sha=sha*p/sum(diag(sha)) } ev=eigen(sha) svi=ev$vectors %*% sqrt(solve(diag(ev$values))) %*% t(ev$vectors) sv=ev$vectors %*% sqrt(diag(ev$values)) %*% t(ev$vectors) } if(affequiv==FALSE) { sv=diag(p) svi=diag(p) } if(method=="identity") { yc=yres %*% svi } else if(method=="sign") { yc=spatial.signs(yres %*% svi,center=FALSE,shape=FALSE) } else if(method=="rank") { yc=spatial.rank(yres %*% svi,center=FALSE,shape=FALSE) } } yc=as.matrix(yc) list(yc,sv) }
weight_func <- function(values = values_weight_func) { new_qual_param( type = "character", values = values, label = c(weight_func = "Distance Weighting Function"), finalize = NULL ) } values_weight_func <- c("rectangular", "triangular", "epanechnikov", "biweight", "triweight", "cos", "inv", "gaussian", "rank", "optimal")
bi_legend <- function(pal, dim = 3, xlab, ylab, size = 10){ bi_class = bi_fill = x = y = NULL if (missing(pal) == TRUE){ stop("A palette must be specified for the 'pal' argument.") } if ("bi_pal_custom" %in% class(pal) == TRUE) { if (dim == 2 & length(pal) != 4){ stop("There is a mismatch between the length of your custom palette object and the given dimensions.") } else if (dim == 3 & length(pal) != 9){ stop("There is a mismatch between the length of your custom palette object and the given dimensions.") } } else if ("bi_pal_custom" %in% class(pal) == FALSE){ if (pal %in% c("Brown", "DkBlue", "DkCyan", "DkViolet", "GrPink") == FALSE){ stop("The given palette is not one of the allowed options for bivariate mapping. Please choose one of: 'Brown', 'DkBlue', 'DkCyan', 'DkViolet', or 'GrPink'.") } } if (is.numeric(dim) == FALSE){ stop("The 'dim' argument only accepts the numeric values '2' or '3'.") } if (dim != 2 & dim != 3){ stop("The 'dim' argument only accepts the numeric values '2' or '3'.") } if (missing(xlab) == TRUE){ xlab <- "x var " } if (is.character(xlab) == FALSE){ stop("The 'xlab' argument must be a character string.") } if (missing(ylab) == TRUE){ ylab <- "y var " } if (is.character(ylab) == FALSE){ stop("The 'ylab' argument must be a character string.") } if (is.numeric(size) == FALSE){ stop("The 'size' argument must be a numeric value.") } xQN <- rlang::quo_name(rlang::enquo(xlab)) yQN <- rlang::quo_name(rlang::enquo(ylab)) if ("bi_pal_custom" %in% class(pal) == TRUE) { x <- pal } else if ("bi_pal_custom" %in% class(pal) == FALSE){ if (pal == "DkViolet"){ x <- pal_dkviolet(n = dim) } else if (pal == "GrPink"){ x <- pal_grpink(n = dim) } else if (pal == "DkBlue"){ x <- pal_dkblue(n = dim) } else if (pal == "DkCyan"){ x <- pal_dkcyan(n = dim) } else if (pal == "Brown"){ x <- pal_brown(n = dim) } } x <- dplyr::tibble( bi_class = names(x), bi_fill = x ) leg <- tidyr::separate(x, bi_class, into = c("x", "y"), sep = "-") leg <- dplyr::mutate(leg, x = as.integer(x), y = as.integer(y)) legend <- ggplot2::ggplot() + ggplot2::geom_tile(data = leg, mapping = ggplot2::aes(x = x, y = y, fill = bi_fill)) + ggplot2::scale_fill_identity() + ggplot2::labs(x = substitute(paste(xQN, ""%->%"")), y = substitute(paste(yQN, ""%->%""))) + bi_theme() + ggplot2::theme(axis.title = ggplot2::element_text(size = size)) + ggplot2::coord_fixed() return(legend) }
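# Minimal usage sketch: a 2 x 2 legend built from a custom palette so that no
# internal palette helpers are needed. Assumes biscale is attached (for
# bi_theme()) and that ggplot2, dplyr, tidyr and rlang are installed; the hex
# colours and class names are arbitrary.
library(biscale)
custom_pal <- structure(
  c("1-1" = "#e8e8e8", "2-1" = "#b5c0da", "1-2" = "#dfb0d6", "2-2" = "#be64ac"),
  class = "bi_pal_custom"
)
bi_legend(pal = custom_pal, dim = 2, xlab = "Higher X", ylab = "Higher Y", size = 10)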
inspectDifferences <- function(varName, GADSdat1, GADSdat2, id) { check_GADSdat(GADSdat1) check_GADSdat(GADSdat2) if(!is.character(varName) || length(varName) != 1) stop("'varName' must be a character of length 1.") if(!is.character(id) || length(id) != 1) stop("'id' must be a character of length 1.") if(!varName %in% namesGADS(GADSdat1)) stop("'varName' is not a variable in 'GADSdat1'.") if(!varName %in% namesGADS(GADSdat2)) stop("'varName' is not a variable in 'GADSdat2'.") if(!id %in% namesGADS(GADSdat1)) stop("'id' is not a variable in 'GADSdat1'.") if(!id %in% namesGADS(GADSdat2)) stop("'id' is not a variable in 'GADSdat2'.") if(nrow(GADSdat1$dat) != nrow(GADSdat2$dat)) stop("'GADSdat1' and 'GADSdat2' have different row numbers.") if(any(is.na(GADSdat1$dat[, id]))) stop("Missing values in 'id' column of 'GADSdat1'.") if(any(is.na(GADSdat2$dat[, id]))) stop("Missing values in 'id' column of 'GADSdat2'.") if(any(GADSdat1$dat[, id] != GADSdat2$dat[, id])) stop("'id' column is not equal for 'GADSdat1' and 'GADSdat2'.") if(is.numeric(GADSdat1$dat[, varName]) && !is.numeric(GADSdat2$dat[, varName])) stop("'varName' column is numeric in 'GADSdat1' but not in 'GADSdat2'.") if(!is.numeric(GADSdat1$dat[, varName]) && is.numeric(GADSdat2$dat[, varName])) stop("'varName' column is numeric in 'GADSdat2' but not in 'GADSdat1'.") if(isTRUE(all.equal(GADSdat2$dat[, varName], GADSdat1$dat[, varName], scale = 1))) return("all.equal") unequal_rows <- c(which(GADSdat2$dat[, varName] != GADSdat1$dat[, varName]), which(is.na(GADSdat2$dat[, varName]) & !is.na(GADSdat1$dat[, varName])), which(!is.na(GADSdat2$dat[, varName]) & is.na(GADSdat1$dat[, varName]))) unequal_case_dat2 <- GADSdat2$dat[unequal_rows, ] unequal_case_dat1 <- GADSdat1$dat[unequal_rows, ] ncol1 <- ifelse(ncol(GADSdat1$dat) > 8, yes = 8, no = ncol(GADSdat1$dat)) ncol2 <- ifelse(ncol(GADSdat2$dat) > 8, yes = 8, no = ncol(GADSdat2$dat)) nrow1 <- ifelse(nrow(unequal_case_dat1) > 5, yes = 5, no = nrow(unequal_case_dat1)) nrow2 <- ifelse(nrow(unequal_case_dat2) > 5, yes = 5, no = nrow(unequal_case_dat2)) list(cross_table = table(GADSdat1$dat[, varName], GADSdat2$dat[, varName], useNA = "if", dnn = c("GADSdat1", "GADSdat2")), some_unequals_GADSdat1 = unequal_case_dat1[1:nrow1, unique(c(namesGADS(GADSdat1)[1:ncol1], varName))], some_unequals_GADSdat2 = unequal_case_dat2[1:nrow2, unique(c(namesGADS(GADSdat2)[1:ncol2], varName))], unequal_IDs = unequal_case_dat2[, id] ) }
runHook = function(obj, hook, ...) { UseMethod("runHook") } runHook.Registry = function(obj, hook, ...) { f = obj$cluster.functions$hooks[[hook]] if (is.null(f)) return(NULL) "!DEBUG [runHook]: Running hook '`hook`'" f(obj, ...) } runHook.JobCollection = function(obj, hook, ...) { f = obj$hooks[[hook]] if (is.null(f)) return(NULL) "!DEBUG [runHook]: Running hook '`hook`'" f(obj, ...) }
# `%nin%` and prettyPath() are helper functions assumed to be defined elsewhere
# in the package.
setClass("disk.matrix", slots=list(
  file="character",
  read.func="character",
  func.args="list"
), validity=function(object) {
  errors <- character()
  if (length(object@file) != 1) {
    msg <- "slot 'file' must be a single value"
    errors <- c(errors, msg)
  }
  if (!file.exists(object@file)) {
    msg <- "slot 'file' must be a file path to an existing file"
    errors <- c(errors, msg)
  }
  if (length(object@read.func) != 1 || object@read.func %nin% c("read.table", "readRDS")) {
    msg <- "slot 'read.func' must be either \"read.table\" or \"readRDS\""
    errors <- c(errors, msg)
  }
  if (length(errors) > 0) {
    return(errors)
  } else {
    return(TRUE)
  }
})
attach.disk.matrix <- function(file, serialized=TRUE, ...) {
  if (is.na(serialized) || length(serialized) != 1) {
    stop("'serialized' must be 'TRUE' or 'FALSE'")
  }
  if (length(file) != 1 || !is.character(file) || !file.exists(file)) {
    stop("'file' must be the name of a file and that file must already exist")
  }
  read.func <- ifelse(serialized, "readRDS", "read.table")
  new("disk.matrix", file=normalizePath(file), read.func=read.func, func.args=list(...))
}
serialize.table <- function(file, ...) {
  if (length(file) != 1 || !is.character(file) || !file.exists(file)) {
    stop("'file' must be the name of a file and that file must already exist")
  }
  ext <- gsub(".*\\.", "", file)
  serialized.file <- gsub(paste0(ext, "$"), "rds", file)
  m <- as.matrix(read.table(file, ...))
  saveRDS(m, serialized.file)
  serialized.file
}
is.disk.matrix <- function(x) {
  "disk.matrix" %in% class(x)
}
setGeneric("as.disk.matrix", function(x, file, serialize=TRUE) {
  standardGeneric("as.disk.matrix")
})
setMethod("as.disk.matrix", signature(x="disk.matrix"), function(x, file, serialize=TRUE) {
  warning("already a 'disk.matrix'")
  return(x)
})
setMethod("as.disk.matrix", signature(x="matrix"), function(x, file, serialize=TRUE) {
  if (is.na(serialize) || length(serialize) != 1) {
    stop("'serialize' must be 'TRUE' or 'FALSE'")
  }
  if (length(file) != 1 || !is.character(file)) {
    stop("'file' must be the name of a file to save the matrix to")
  }
  if (serialize) {
    saveRDS(x, file)
    attach.disk.matrix(file)
  } else {
    write.table(x, file, col.names=!is.null(colnames(x)),
                row.names=!is.null(rownames(x)), sep="\t", quote=FALSE)
    attach.disk.matrix(file, FALSE, header=!is.null(colnames(x)),
                       row.names=ifelse(is.null(rownames(x)), FALSE, 1), sep="\t")
  }
})
setMethod("as.disk.matrix", signature(x="ANY"), function(x, file, serialize=TRUE) {
  x <- as.matrix(x)
  as.disk.matrix(x, file, serialize)
})
setMethod("as.matrix", signature(x="disk.matrix"), function(x) {
  if (!file.exists(x@file)) {
    stop("file ", prettyPath(x@file), " does not exist")
  }
  as.matrix(do.call(x@read.func, c(file=x@file, x@func.args)))
})
setMethod("show", signature(object="disk.matrix"), function(object) {
  cat("Pointer to matrix stored at", prettyPath(object@file), "\n")
})
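# Minimal usage sketch (hypothetical temporary file): round-trip a matrix
# through disk using the class and methods defined above.
m <- matrix(rnorm(20), nrow = 5)
f <- tempfile(fileext = ".rds")
dm <- as.disk.matrix(m, f, serialize = TRUE)
is.disk.matrix(dm)          # TRUE
all.equal(as.matrix(dm), m) # the data come back unchanged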
test_that("The combined transition probabilities file is correctly generated", { wp$rf <- c("n1.RDS", "n2.RDS", "n3.RDS", "n4.RDS") options("wordpredictor" = wp) source("./inc.R") fns <- c("words", "model-4", "tp2", "tp3", "tp4") tp <- TPGenerator$new(opts = list(n = 4, dir = ed), ve = wp$ve) tp$generate_tp() for (fn in fns) { fn <- paste0(ed, "/", fn, ".RDS") expect_true(file.exists(fn), label = fn) } source("./cu.R") })
sirt_display_function <- function(length=66) { disp <- paste0( rep("-",length), collapse="") cat(disp, "\n") }
ll <- generateTestDensityMultiNormal(sigma = "no correlation")
bayesianSetup = createBayesianSetup(likelihood = ll, lower = rep(-10, 3), upper = rep(10, 3))
settings = list(iterations = 1000)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)
newPrior = createPriorDensity(out, method = "multivariate", eps = 1e-10,
                              lower = rep(-10, 3), upper = rep(10, 3), best = NULL)
bayesianSetup <- createBayesianSetup(likelihood = ll, prior = newPrior)
# The following block was wrapped in \dontrun{} in the original help example:
settings = list(iterations = 1000)
out <- runMCMC(bayesianSetup = bayesianSetup, settings = settings)
knitr::opts_chunk$set( collapse = TRUE, comment = " ) library(hydraulics) nu = kvisc(T = 55, units = 'Eng') cat(sprintf("Kinematic viscosity: %.3e ft2/s\n", nu)) rho = dens(T = 25, units = 'SI') cat(sprintf("Water density: %.3f kg/m3\n", rho)) Ts <- seq(0, 100, 10) nus <- kvisc(T = Ts, units = 'SI') xlbl <- expression("Temperature, " (degree*C)) ylbl <- expression("Kinematic viscosity," ~nu~ (m^{2}/s)) par(cex=0.8, mgp = c(2,0.7,0)) plot(Ts, nus, xlab = xlbl, ylab = ylbl, type="l") T <- 25 Dens <- dens(T = T, units = 'SI', ret_units = TRUE) Dvisc <- dvisc(T = T, units = 'SI', ret_units = TRUE) Kvisc <- Dvisc / Dens Kvisc units::set_units(Kvisc, m^2/s) units::set_units(Kvisc, ft^2/s) vps <- svp(T = 10, units = "SI", ret_units = T) vps units::set_units(vps,lbf/`in`^2) Temperature <- units::set_units(seq(0, 100, 10), degree_Celsius) Kinematic_Viscosity <- kvisc(T = Temperature, units = 'SI', ret_units = TRUE) par(cex=0.8, mar = par("mar") + c(0, .2, 0, 0)) plot(Temperature, Kinematic_Viscosity, type="l") knitr::kable(data.frame(Fr=c("<1.0","=1.0",">1.0"), Condition=c("subcritical","critical","supercritical")), format="pipe", padding=0) D <- 20/12 L <- 10560 Q <- 4 T <- 60 ks <- 0.0005 ans <- darcyweisbach(Q = Q,D = D, L = L, ks = ks, nu = kvisc(T=T, units="Eng"), units = c("Eng")) cat(sprintf("Reynolds no: %.0f\nFriction Fact: %.4f\nHead Loss: %.2f ft\n", ans$Re, ans$f, ans$hf)) ans <- darcyweisbach(Q = 4.0,D = 20/12, L = 10560, ks = 0.0005, nu = kvisc(T=T, units="Eng"), units = "Eng", ret_units = TRUE) knitr::kable(format(as.data.frame(ans), digits = 2), format = "pipe") Q <- 37.5 L <- 8000 hf <- 215 T <- 68 ks <- 0.0008 ans <- darcyweisbach(Q = Q, hf = hf, L = L, ks = ks, nu = kvisc(T=T, units='Eng'), units = c('Eng')) cat(sprintf("Reynolds no: %.0f\nFriction Fact: %.4f\nDiameter: %.2f ft\n", ans$Re, ans$f, ans$D)) Qs <- seq(30, 45, 1.0) L <- 8000 hf <- 215 T <- 68 ks <- 0.0008 ans <- mapply(darcyweisbach, Q=Qs, MoreArgs = list(hf = hf, L = L, ks = ks, nu = kvisc(T=T, units='Eng'), units = 'Eng')) ans <- as.data.frame(t(ans)) plot(ans$Q, ans$D, xlab = "Q, ft^3/s", ylab = "D, ft", type="l") grid() knitr::kable(data.frame(Q_liter_s=c("0.20","0.24","0.30"), Headloss_m=c("0.052","0.073","0.110")), format="pipe", padding=0) Qs = c(0.00020, 0.00024, 0.00030) hfs <- c(0.052,0.073,0.110) ans <- mapply(darcyweisbach, Q=Qs, hf=hfs, MoreArgs = list(L = 3.0, D = 0.025, nu = kvisc(T=20, units='SI'), units = 'SI')) ks_values = unlist((as.data.frame(t(ans)))$ks) cat(round(ks_values,6)) cat(paste0("\nMean Roughness, ks = ",round(mean(ks_values),6), " m")) Re_values <- unlist((as.data.frame(t(ans)))$Re) f_values <- unlist((as.data.frame(t(ans)))$f) moody(Re = Re_values, f = f_values) ans <- manningc(Q=0.01, n=0.013, Sf=0.001, d = 0.2, units="SI", ret_units = TRUE) knitr::kable(format(as.data.frame(ans), digits = 2), format = "pipe", padding=0) ans <- manningc(Q=0.01, n=0.013, Sf=0.001, d = 0.2, units="SI", ret_units = TRUE) xc_circle(y = ans$y, d=ans$d, units = "SI") ans <- manningt(Q = 360., n = 0.015, m = 1, b = 20.0, y = 3.0, units = "Eng") cat(sprintf("Slope: %.5f ft\n", ans$Sf)) knitr::kable(format(as.data.frame(ans), digits = 2), format = "pipe", padding=0) ans <- manningt(Q = 360., n = 0.015, m = 1, b = 20.0, y = 3.0, units = "Eng", ret_units = TRUE) knitr::kable(format(as.data.frame(ans), digits = 2), format = "pipe", padding=0) xc_trap( y = 3.0, b = 20.0, m = 1.0, units = "Eng") ans <- manningt(Q = 360., n = 0.015, m = 1, y = 3.0, Sf = 0.00088, units = "Eng") knitr::kable(format(as.data.frame(ans), 
digits = 2), format = "pipe", padding=0) cat(sprintf("Optimal bottom width: %.5f ft\n", ans$bopt)) ans <- manningt(Q = 360., n = 0.015, m = 1, b = 4.767534, Sf = 0.00088, units = "Eng") cat(sprintf("Optimal depth: %.5f ft\n", ans$yopt)) spec_energy_trap( Q = 360, b = 20, m = 1, scale = 4, units = "Eng" ) spec_energy_trap( Q = 360, b = 20, m = 1, scale = 4, y=3.0, units = "Eng" ) ns <- seq(0.011, 0.021, 0.002) ys <- seq(1.5, 2.1, 0.1) ny <- expand.grid(n=ns, y=ys) ans <- mapply(manningt, n = ny$n, y = ny$y, MoreArgs = list(m = 2, Sf = 0.0005, b = 3, units = "SI")) x <- as.data.frame(t(ans)) x2 <- data.frame(Q=unlist(x$Q),y=unlist(x$y),n=unlist(x$n)) ggplot2::ggplot(data=x2,ggplot2::aes(x=y,y=Q, group=n, colour=n)) + ggplot2::geom_line() + ggplot2::labs(x = "y, m", y = expression(paste("Q, ", ~m^3/s))) knitr::include_graphics('./TDH_Example_2_Diagram.png') ans <- darcyweisbach(Q = 1,D = 20/12, L = 3884, ks = 0.0005, nu = 1.23e-5, units = "Eng") cat(sprintf("Coefficient K: %.3f\n", ans$hf)) scurve <- systemcurve(hs = 30, K = ans$hf, units = "Eng") knitr::include_graphics('./goulds_pump_3409.png') knitr::kable(data.frame(type=c("poly1","poly2","poly3"), Equation=c("$h=a+{b}{Q}+{c}{Q}^2$","$h=a+{c}{Q}^2$","$h_{shutoff}+{c}{Q}^2$")), format="pipe", padding=0) qgpm <- units::set_units(c(0, 5000, 7850), gallons/minute) qcfs <- units::set_units(qgpm, ft^3/s) hft <- c(81, 60, 20) pcurve <- pumpcurve(Q = qcfs, h = hft, eq = "poly2", units = "Eng") pcurve$p oppt <- operpoint(pcurve = pcurve, scurve = scurve) cat(sprintf("Operating Point: Q = %.3f, h = %.3f\n", oppt$Qop, oppt$hop)) oppt$p knitr::include_graphics('./hardycross_system.png') dfpipes <- data.frame( ID = c(1,2,3,4,5,6,7,8,9,10), D = c(0.3,0.2,0.2,0.2,0.2,0.15,0.25,0.15,0.15,0.25), L = c(250,100,125,125,100,100,125,100,100,125), f = c(.01879,.02075,.02075,.02075,.02075,.02233,.01964,.02233,.02233,.01964) ) loops <- list(c(1,2,3,4,5),c(4,6,7,8),c(3,9,10,6)) Qs <- list(c(.040,.040,.02,-.02,-.04),c(.02,0,0,-.02),c(-.02,.02,0,0)) ans <- hardycross(dfpipes = dfpipes, loops = loops, Qs = Qs, n_iter = 3, units = "SI") knitr::kable(ans$dfloops, digits = 4, format = "pipe", padding=0) knitr::kable(ans$dfpipes, digits = 4, format = "pipe", padding=0) dfpipes <- data.frame( ID = c(1,2,3,4,5,6,7,8,9,10), D = c(0.3,0.2,0.2,0.2,0.2,0.15,0.25,0.15,0.15,0.25), L = c(250,100,125,125,100,100,125,100,100,125), ks = rep(0.00025,10) ) loops <- list(c(1,2,3,4,5),c(4,6,7,8),c(3,9,10,6)) Qs <- list(c(.040,.040,.02,-.02,-.04),c(.02,0,0,-.02),c(-.02,.02,0,0)) ans <- hardycross(dfpipes = dfpipes, loops = loops, Qs = Qs, n_iter = 3, units = "SI") knitr::kable(ans$dfpipes, digits = 4, format = "pipe", padding=0)
dfs_idx <- function(.x, .f) { .f <- purrr::as_mapper(.f) res <- list() num <- 0L walk <- function(x, idx) { for (i in seq_along(x)) { if (isTRUE(tryCatch(.f(x[[i]]), error = function(e) FALSE))) { res[[num <<- num + 1L]] <<- append(idx, i) } if (is.list(x[[i]])) { walk(x[[i]], append(idx, i)) } } } walk(.x, integer()) res }
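# Usage sketch for dfs_idx() (not part of the original source): it walks a nested
# list depth-first and returns the index path of every element for which the
# predicate .f returns TRUE.
if (requireNamespace("purrr", quietly = TRUE)) {
  x <- list(a = 1, b = list(c = "hit", d = list(e = 2, f = "hit")))
  dfs_idx(x, is.character)
  # expected: list(c(2, 1), c(2, 2, 2)) -- the positions of "hit" at x$b$c and x$b$d$f
}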
context("traceback") test_that("tb()", { mockery::stub(tb, "format_trace", function(x) "ok") expect_identical(tb(), "ok") }) test_that("tb(frame)", { expect_message(ret <- tb(1000000), "Invalid frame") expect_null(ret) mockery::stub(tb, "trace_code", function(a,b,c) list(a,b,c)) res <- tb(1, 10) expect_equal(res[[2]], 1) expect_equal(res[[3]], 10) })
enmtools.maxent <- function(species, env, test.prop = 0, nback = 1000, env.nback = 10000, report = NULL, overwrite = FALSE, rts.reps = 0, bg.source = "default", verbose = FALSE, clamp = TRUE, corner = NA, bias = NA, ...){ check.packages("rJava") notes <- NULL species <- check.bg(species, env, nback = nback, bg.source = bg.source, verbose = verbose, bias = bias) maxent.precheck(f, species, env) test.data <- NA test.evaluation <- NA env.test.evaluation <- NA rts.test <- NA if(is.numeric(test.prop)){ if(test.prop > 0 & test.prop < 1){ test.inds <- sample(1:nrow(species$presence.points), ceiling(nrow(species$presence.points) * test.prop)) test.data <- species$presence.points[test.inds,] species$presence.points <- species$presence.points[-test.inds,] } } if(is.character(test.prop)){ if(test.prop == "block"){ if(is.na(corner)){ corner <- ceiling(runif(1, 0, 4)) } else if(corner < 1 | corner > 4){ stop("corner should be an integer from 1 to 4!") } test.inds <- get.block(species$presence.points, species$background.points) test.bg.inds <- which(test.inds$bg.grp == corner) test.inds <- which(test.inds$occ.grp == corner) test.data <- species$presence.points[test.inds,] test.bg <- species$background.points[test.bg.inds,] species$presence.points <- species$presence.points[-test.inds,] species$background.points <- species$background.points[-test.bg.inds,] } } analysis.df <- rbind(species$presence.points, species$background.points) analysis.df$presence <- c(rep(1, nrow(species$presence.points)), rep(0, nrow(species$background.points))) if(length(names(env)) == 1){ oldname <- names(env) env <- stack(env, env) env[[2]][!is.na(env[[2]])] <- 0 names(env) <- c(oldname, "dummyvar") notes <- c(notes, "Only one predictor was provided, so a dummy variable was created in order to be compatible with dismo's prediction function.") } if(verbose){ this.mx <- dismo::maxent(env, p = analysis.df[analysis.df$presence == 1,1:2], a = analysis.df[analysis.df$presence == 0,1:2], ...) 
suitability <- predict(env, this.mx, type = "response") } else { invisible(capture.output(this.mx <- dismo::maxent(env, p = analysis.df[analysis.df$presence == 1,1:2], a = analysis.df[analysis.df$presence == 0,1:2], ...))) invisible(capture.output(suitability <- predict(env, this.mx, type = "response"))) } clamping.strength <- NA if(clamp == TRUE){ this.df <- as.data.frame(extract(env, species$presence.points)) env <- clamp.env(this.df, env) if(verbose){ clamped.suitability <- predict(env, this.mx, type = "response") } else { invisible(capture.output(clamped.suitability <- predict(env, this.mx, type = "response"))) } clamping.strength <- clamped.suitability - suitability suitability <- clamped.suitability } if(verbose){ model.evaluation <-dismo::evaluate(species$presence.points[,1:2], species$background.points[,1:2], this.mx, env) env.model.evaluation <- env.evaluate(species, this.mx, env, n.background = env.nback) } else { invisible(capture.output(model.evaluation <-dismo::evaluate(species$presence.points[,1:2], species$background.points[,1:2], this.mx, env))) invisible(capture.output(env.model.evaluation <- env.evaluate(species, this.mx, env, n.background = env.nback))) } if(is.numeric(test.prop)){ if(test.prop > 0 & test.prop < 1){ test.check <- raster::extract(env, test.data) test.data <- test.data[complete.cases(test.check),] temp.sp <- species temp.sp$presence.points <- test.data if(verbose){ test.evaluation <-dismo::evaluate(test.data, species$background.points[,1:2], this.mx, env) env.test.evaluation <- env.evaluate(temp.sp, this.mx, env, n.background = env.nback) } else { invisible(capture.output(test.evaluation <-dismo::evaluate(test.data, species$background.points[,1:2], this.mx, env))) invisible(capture.output(env.test.evaluation <- env.evaluate(temp.sp, this.mx, env, n.background = env.nback))) } } } if(is.character(test.prop)){ if(test.prop == "block"){ test.check <- raster::extract(env, test.data) test.data <- test.data[complete.cases(test.check),] temp.sp <- species temp.sp$presence.points <- test.data temp.sp$background.points <- test.bg if(verbose){ test.evaluation <-dismo::evaluate(test.data, test.bg, this.mx, env) env.test.evaluation <- env.evaluate(temp.sp, this.mx, env, n.background = env.nback) } else { invisible(capture.output(test.evaluation <-dismo::evaluate(test.data, test.bg, this.mx, env))) invisible(capture.output(env.test.evaluation <- env.evaluate(temp.sp, this.mx, env, n.background = env.nback))) } } } if(rts.reps > 0){ message("\nBuilding RTS replicate models...\n") if(!is.numeric(test.prop)){ stop(paste("RTS test can only be conducted with randomly withheld data, and test.prop is set to", test.prop)) } rts.models <- list() rts.geog.training <- c() rts.geog.test <- c() rts.env.training <- c() rts.env.test <- c() if (requireNamespace("progress", quietly = TRUE)) { pb <- progress::progress_bar$new( format = " [:bar] :percent eta: :eta", total = rts.reps, clear = FALSE, width= 60) } for(i in 1:rts.reps){ if (requireNamespace("progress", quietly = TRUE)) { pb$tick() } if(verbose == TRUE){message(paste("Replicate", i, "of", rts.reps))} rep.species <- species allpoints <- rbind(test.data, species$background.points[,1:2], species$presence.points[,1:2]) rep.rows <- sample(nrow(allpoints), nrow(species$presence.points)) rep.species$presence.points <- allpoints[rep.rows,] allpoints <- allpoints[-rep.rows,] if(test.prop > 0){ test.rows <- sample(nrow(allpoints), nrow(test.data)) rep.test.data <- allpoints[test.rows,] allpoints <- allpoints[-test.rows,] } 
rep.species$background.points <- allpoints rep.species <- add.env(rep.species, env, verbose = verbose) rts.df <- rbind(rep.species$presence.points, rep.species$background.points) rts.df$presence <- c(rep(1, nrow(rep.species$presence.points)), rep(0, nrow(rep.species$background.points))) if(verbose){ thisrep.mx <- dismo::maxent(env, p = rts.df[rts.df$presence == 1,1:2], a = rts.df[rts.df$presence == 0,1:2], ...) thisrep.model.evaluation <-dismo::evaluate(rep.species$presence.points[,1:2], species$background.points[,1:2], thisrep.mx, env) thisrep.env.model.evaluation <- env.evaluate(rep.species, thisrep.mx, env, n.background = env.nback) } else { invisible(capture.output(thisrep.mx <- dismo::maxent(env, p = rts.df[rts.df$presence == 1,1:2], a = rts.df[rts.df$presence == 0,1:2], ...))) invisible(capture.output(thisrep.model.evaluation <-dismo::evaluate(rep.species$presence.points[,1:2], species$background.points[,1:2], thisrep.mx, env))) invisible(capture.output(thisrep.env.model.evaluation <- env.evaluate(rep.species, thisrep.mx, env, n.background = env.nback))) } rts.geog.training[i] <- thisrep.model.evaluation@auc rts.env.training[i] <- thisrep.env.model.evaluation@auc if(test.prop > 0 & test.prop < 1){ temp.sp <- rep.species temp.sp$presence.points <- rep.test.data if(verbose){ thisrep.test.evaluation <-dismo::evaluate(rep.test.data, rep.species$background.points[,1:2], thisrep.mx, env) thisrep.env.test.evaluation <- env.evaluate(temp.sp, thisrep.mx, env, n.background = env.nback) } else { invisible(capture.output(thisrep.test.evaluation <-dismo::evaluate(rep.test.data, rep.species$background.points[,1:2], thisrep.mx, env))) invisible(capture.output(thisrep.env.test.evaluation <- env.evaluate(temp.sp, thisrep.mx, env, n.background = env.nback))) } rts.geog.test[i] <- thisrep.test.evaluation@auc rts.env.test[i] <- thisrep.env.test.evaluation@auc } rts.models[[paste0("rep.",i)]] <- list(model = thisrep.mx, training.evaluation = thisrep.model.evaluation, env.training.evaluation = thisrep.env.model.evaluation, test.evaluation = thisrep.test.evaluation, env.test.evaluation = thisrep.env.test.evaluation) } rts.geog.training.pvalue = mean(rts.geog.training > model.evaluation@auc) rts.env.training.pvalue = mean(rts.env.training > env.model.evaluation@auc) if(test.prop > 0){ rts.geog.test.pvalue <- mean(rts.geog.test > test.evaluation@auc) rts.env.test.pvalue <- mean(rts.env.test > env.test.evaluation@auc) } else { rts.geog.test.pvalue <- NA rts.env.test.pvalue <- NA } training.plot <- qplot(rts.geog.training, geom = "histogram", fill = "density", alpha = 0.5) + geom_vline(xintercept = model.evaluation@auc, linetype = "longdash") + xlim(0,1) + guides(fill = FALSE, alpha = FALSE) + xlab("AUC") + ggtitle(paste("Model performance in geographic space on training data")) + theme(plot.title = element_text(hjust = 0.5)) env.training.plot <- qplot(rts.env.training, geom = "histogram", fill = "density", alpha = 0.5) + geom_vline(xintercept = env.model.evaluation@auc, linetype = "longdash") + xlim(0,1) + guides(fill = FALSE, alpha = FALSE) + xlab("AUC") + ggtitle(paste("Model performance in environmental space on training data")) + theme(plot.title = element_text(hjust = 0.5)) if(test.prop > 0){ test.plot <- qplot(rts.geog.test, geom = "histogram", fill = "density", alpha = 0.5) + geom_vline(xintercept = test.evaluation@auc, linetype = "longdash") + xlim(0,1) + guides(fill = FALSE, alpha = FALSE) + xlab("AUC") + ggtitle(paste("Model performance in geographic space on test data")) + theme(plot.title = 
element_text(hjust = 0.5)) env.test.plot <- qplot(rts.env.test, geom = "histogram", fill = "density", alpha = 0.5) + geom_vline(xintercept = env.test.evaluation@auc, linetype = "longdash") + xlim(0,1) + guides(fill = FALSE, alpha = FALSE) + xlab("AUC") + ggtitle(paste("Model performance in environmental space on test data")) + theme(plot.title = element_text(hjust = 0.5)) } else { test.plot <- NA env.test.plot <- NA } rts.pvalues = list(rts.geog.training.pvalue = rts.geog.training.pvalue, rts.env.training.pvalue = rts.env.training.pvalue, rts.geog.test.pvalue = rts.geog.test.pvalue, rts.env.test.pvalue = rts.env.test.pvalue) rts.distributions = list(rts.geog.training = rts.geog.training, rts.env.training = rts.env.training, rts.geog.test = rts.geog.test, rts.env.test = rts.env.test) rts.plots = list(geog.training.plot = training.plot, env.training.plot = env.training.plot, geog.test.plot = test.plot, env.test.plot = env.test.plot) rts.test <- list(rts.models = rts.models, rts.pvalues = rts.pvalues, rts.distributions = rts.distributions, rts.plots = rts.plots, rts.nreps = rts.reps) } output <- list(species.name = species$species.name, analysis.df = analysis.df, test.data = test.data, test.prop = test.prop, model = this.mx, training.evaluation = model.evaluation, test.evaluation = test.evaluation, env.training.evaluation = env.model.evaluation, env.test.evaluation = env.test.evaluation, rts.test = rts.test, suitability = suitability, clamping.strength = clamping.strength, call = sys.call(), notes = notes) class(output) <- c("enmtools.maxent", "enmtools.model") response.plots <- list() for(i in names(env)){ response.plots[[i]] <- marginal.plots(output, env, i) } output[["response.plots"]] <- response.plots if(!is.null(report)){ if(file.exists(report) & overwrite == FALSE){ stop("Report file exists, and overwrite is set to FALSE!") } else { message("This function not enabled yet. 
Check back soon!") } } return(output) } summary.enmtools.maxent <- function(object, ...){ cat("\n\nData table (top ten lines): ") print(kable(head(object$analysis.df, 10))) cat("\n\nModel: ") print(summary(object$model)) cat("\n\nModel fit (training data): ") print(object$training.evaluation) cat("\n\nEnvironment space model fit (training data): ") print(object$env.training.evaluation) cat("\n\nProportion of data wittheld for model fitting: ") cat(object$test.prop) cat("\n\nModel fit (test data): ") print(object$test.evaluation) cat("\n\nEnvironment space model fit (test data): ") print(object$env.test.evaluation) cat("\n\nSuitability: \n") print(object$suitability) cat("\n\nNotes: \n") print(object$notes) plot(object) } print.enmtools.maxent <- function(x, ...){ summary(x) } plot.enmtools.maxent <- function(x, ...){ suit.points <- data.frame(rasterToPoints(x$suitability)) colnames(suit.points) <- c("Longitude", "Latitude", "Suitability") suit.plot <- ggplot(data = suit.points, aes_string(y = "Latitude", x = "Longitude")) + geom_raster(aes_string(fill = "Suitability")) + scale_fill_viridis_c(option = "B", guide = guide_colourbar(title = "Suitability")) + coord_fixed() + theme_classic() + geom_point(data = x$analysis.df[x$analysis.df$presence == 1 & x$analysis.df$Longitude > extent(x$suitability)[1] & x$analysis.df$Longitude < extent(x$suitability)[2] & x$analysis.df$Latitude > extent(x$suitability)[3] & x$analysis.df$Latitude < extent(x$suitability)[4],], aes_string(y = "Latitude", x = "Longitude"), pch = 21, fill = "white", color = "black", size = 2) if(!(all(is.na(x$test.data)))){ suit.plot <- suit.plot + geom_point(data = x$test.data, aes_string(y = "Latitude", x = "Longitude"), pch = 21, fill = "green", color = "black", size = 2) } if(!is.na(x$species.name)){ title <- paste("Maxent model for", x$species.name) suit.plot <- suit.plot + ggtitle(title) + theme(plot.title = element_text(hjust = 0.5)) } return(suit.plot) } predict.enmtools.maxent <- function(object, env, maxpts = 1000, clamp = TRUE, ...){ suitability <- invisible(capture.output(raster::predict(env, object$model))) if(clamp == TRUE){ this.df <- as.data.frame(rbind(object$model@presence, object$model@absence)) env <- clamp.env(this.df, env) clamped.suitability <- invisible(capture.output(raster::predict(env, object$model))) clamping.strength <- clamped.suitability - suitability suitability <- clamped.suitability } suit.points <- data.frame(rasterToPoints(suitability)) colnames(suit.points) <- c("Longitude", "Latitude", "Suitability") suit.plot <- ggplot(data = suit.points, aes_string(y = "Latitude", x = "Longitude")) + geom_raster(aes_string(fill = "Suitability")) + scale_fill_viridis_c(option = "B", guide = guide_colourbar(title = "Suitability")) + coord_fixed() + theme_classic() if(!is.na(object$species.name)){ title <- paste("Maxent model projection for", object$species.name) suit.plot <- suit.plot + ggtitle(title) + theme(plot.title = element_text(hjust = 0.5)) } this.threespace = threespace.plot(object, env, maxpts) output <- list(suitability.plot = suit.plot, suitability = suitability, clamping.strength = clamping.strength, threespace.plot = this.threespace) return(output) } maxent.precheck <- function(f, species, env){ jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='') if (!file.exists(jar)) { stop('file missing:\n', jar, '.\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/') } if(!inherits(species, "enmtools.species")){ stop("Argument \'species\' must contain an 
enmtools.species object!") } check.species(species) if(!inherits(species$presence.points, "data.frame")){ stop("Species presence.points do not appear to be an object of class data.frame") } if(!inherits(species$background.points, "data.frame")){ stop("Species background.points do not appear to be an object of class data.frame") } if(!inherits(env, c("raster", "RasterLayer", "RasterStack", "RasterBrick"))){ stop("No environmental rasters were supplied!") } if(ncol(species$presence.points) != 2){ stop("Species presence points do not contain longitude and latitude data!") } if(ncol(species$background.points) != 2){ stop("Species background points do not contain longitude and latitude data!") } }
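# Minimal usage sketch for enmtools.maxent() (hypothetical objects: 'my.species' is
# assumed to be an enmtools.species object with presence points, 'env' a RasterStack
# of predictors, and a working rJava/maxent.jar installation is assumed):
# my.mx <- enmtools.maxent(my.species, env, test.prop = 0.3, nback = 1000)
# summary(my.mx)               # fit statistics via summary.enmtools.maxent()
# plot(my.mx)                  # suitability map via plot.enmtools.maxent()
# pred <- predict(my.mx, env)  # projection via predict.enmtools.maxent()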
add_point_cloud_layer <- function(deckgl, id = "point-cloud-layer", data = NULL, properties = list(), ...) { add_layer(deckgl, "PointCloudLayer", id, data, properties, ...) }
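# Usage sketch for add_point_cloud_layer() (assumes the deckgl htmlwidget package;
# 'pc_data' and the property names passed through to deck.gl's PointCloudLayer are
# illustrative, not a documented API):
# library(deckgl)
# deckgl() %>%
#   add_point_cloud_layer(
#     id = "my-point-cloud",
#     data = pc_data,
#     properties = list(pointSize = 2)
#   )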
testthat::context("Test R6Class transpilation") testthat::test_that("Test component functions", { expr <- parse_expr('list(x = 1, y = 2, z = function(x) {x})') expect_equal( deparse_public_list(expr, default_deparsers()), "self.x = 1\n self.y = 2\n self.z = function(x) {\n x\n }" ) expr <- parse_expr('list(x = 1, y = 2, z = function(x) {x})') expect_equal( deparse_private_list(expr, default_deparsers()), "private.x = 1\n private.y = 2\n private.z = function(x) {\n x\n }" ) expr <- parse_expr('list(x = 1, y = 2, z = function(x) {this$x})') expect_equal( deparse_private_list(expr, default_deparsers()), "private.x = 1\n private.y = 2\n private.z = function(x) {\n $(this, x)\n }" ) expr <- parse_expr('list(x = 1, y = 2, z = function(x) {x})') expect_equal( get_constructor_arg(expr, default_deparsers()), "" ) expr <- parse_expr('list(initialize = function(x) {x})') expect_equal( get_constructor_arg(expr, default_deparsers()), "x" ) expr <- parse_expr('list(initialize = function(x = 1) {x})') expect_equal( get_constructor_arg(expr, default_deparsers()), "x = 1" ) expr <- parse_expr('list(initialize = function(x = 1, y) {x})') expect_equal( get_constructor_arg(expr, default_deparsers()), "x = 1, y" ) expr <- parse_expr('list(initialize = function(x = 1, y = 3) {x})') expect_equal( get_constructor_arg(expr, default_deparsers()), "x = 1, y = 3" ) })
BuildNIHR <- function(HRVData, verbose=NULL) { HRVData = HandleVerboseArgument(HRVData, verbose) VerboseMessage(HRVData$Verbose, "Calculating non-interpolated heart rate") CheckBeats(HRVData) NBeats = length(HRVData$Beat$Time) VerboseMessage(HRVData$Verbose, paste("Number of beats:",NBeats)) hr = c(0) hr[2:NBeats] = 60.0 / diff(HRVData$Beat$Time) hr[1] = hr[2] HRVData$Beat$niHR = hr rr = c(0) rr[2:NBeats] = 1000.0 * diff(HRVData$Beat$Time) rr[1] = rr[2] HRVData$Beat$RR = rr return(HRVData) }
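# Usage sketch for BuildNIHR() (assumes the usual RHRV pipeline; the beat file name
# is hypothetical). niHR is the instantaneous heart rate in beats/min and RR the
# interbeat interval in ms, both derived from successive beat times, with the first
# beat copied from the second.
# hrv.data <- CreateHRVData()
# hrv.data <- LoadBeatAscii(hrv.data, "beats.txt")  # beat occurrence times in seconds
# hrv.data <- BuildNIHR(hrv.data)
# head(hrv.data$Beat)                               # columns Time, niHR, RR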
Becker83 <- matrix( c(1,-0.33,0.03,25,2, 2,0.07,0.03,25,2, 3,-0.3,0.02,50,2, 4,0.35,0.02,100,38, 5,0.69,0.07,100,30, 6,0.81,0.22,100,45, 7,0.4,0.05,100,45, 8,0.47,0.07,100,45, 9,0.37,0.05,100,5, 10,-0.06,0.03,100,5), ncol=5, byrow=TRUE) dimnames(Becker83) <- list(NULL, c("study","di","vi","percentage","items")) Becker83 <- data.frame(Becker83)
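# Quick illustration using the Becker83 data defined above: an inverse-variance
# weighted (fixed-effect) pooled effect size computed in base R.
with(Becker83, {
  w <- 1 / vi
  c(pooled_d = sum(w * di) / sum(w), se = sqrt(1 / sum(w)))
})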
expected <- eval(parse(text="c(137.806684813768, 121.8139365262, 65.7401124541654, 58.1108376312736, 45.405715266083, 27.7215361928801, 24.5043950862888, 19.146851627154, 11.6897209360412, 10.3331048565334, 8.07391591749891, 4.92936519144333, 4.35730225537612, 3.85162839581514, 1.83740349181702, 1.43568090186075, 0.87652578203274, 0.774803168078583, 0.326721894204603, 0.28880514857566, 0.15586133644587, 0.13777330882289, 0.0580967635187637, 0.0453947182615431, 0.0244984602619516)")); test(id=0, code={ argv <- eval(parse(text="list(c(4.92585186838819, 4.80249477012754, 4.18570927882429, 4.06235218056364, 3.81563798404234, 3.32220959099974, 3.19885249273909, 2.95213829621779, 2.45870990317518, 2.33535280491453, 2.08863860839323, 1.59521021535063, 1.47185311708998, 1.34849601882933, 0.608353429265429, 0.361639232744128, -0.131789160298473, -0.255146258559123, -1.11864594638368, -1.24200304464433, -1.85878853594758, -1.98214563420823, -2.84564532203278, -3.09235951855408, -3.70914500985733))")); do.call(`exp`, argv); }, o=expected);
clusOpt3fixedPSU <- function(unit.cost, m, delta1, delta2, unit.rv, k1=1, k2=1, CV0=NULL, tot.cost=NULL, cal.sw){ if (m<0) stop("m must be positive.\n") delta1.chk <- any(delta1 < 0 | delta1 > 1) if (delta1.chk) stop("delta1 must be in [0,1].\n") delta2.chk <- any(delta2 < 0 | delta2 > 1) if (delta2.chk) stop("delta2 must be in [0,1].\n") if (!is.null(CV0) & !is.null(tot.cost)) stop("CV0 and tot.cost cannot both be non-null.\n") if (is.null(CV0) & is.null(tot.cost)) stop("CV0 and tot.cost cannot both be null.\n") if (sum(length(m)>1, length(delta1)>1, length(delta2)>1, length(unit.rv)>1, length(CV0)>1, length(tot.cost)>1) > 1) stop("Only one argument to function can be vector.\n") C1 <- unit.cost[1] C2 <- unit.cost[2] C3 <- unit.cost[3] C.prime <- tot.cost/m - C1 q.opt <- sqrt((1-delta2)/delta2 * C2 / C3) if (cal.sw == 1){ n <- C.prime/(C2 + C3*q.opt) n.chk <- any(n < 0) if (n.chk) stop(paste("n is negative. Check inputs. n=",n,"\n")) tot.cost <- C1*m + C2*m*n + C3*m*n*q.opt CV <- sqrt(unit.rv/m/n/q.opt * (k1*delta1*n*q.opt + k2*(1 + delta2*(q.opt-1)))) output <- structure(list(C1 = C1, C2 = C2, C3 = C3, m = m, delta1 = delta1, delta2 = delta2, "unit relvar" = unit.rv, k1 = k1, k2 = k2, "variable budget" = tot.cost-C1*m, "total cost" = round(tot.cost,0), n = round(n,1), q = round(q.opt,1), CV = round(CV,4)), class = "power.htest") } if (cal.sw == 2) { n <- k2*(1 + delta2*(q.opt-1)) / q.opt / (CV0^2*m/unit.rv - k1*delta1) n.chk <- any(n < 0) if (n.chk) stop(paste("n is negative. Check inputs. n=",n,"\n")) tot.cost <- C1*m + C2*m*n + C3*m*n*q.opt CV.chk <- sqrt(unit.rv/m/n/q.opt * (k1*delta1*n*q.opt + k2*(1+ delta2*(q.opt-1)))) output <- structure(list(C1 = C1, C2 = C2, C3 = C3, m = m, delta1 = delta1, delta2 = delta2, "unit relvar" = unit.rv, k1 = k1, k2 = k2, "variable budget" = tot.cost-C1*m, "total cost" = round(tot.cost,0), n = round(n,1), q = round(q.opt,1), CV = CV0, "CV check" = round(CV.chk,4)), class = "power.htest") } output }
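# Example call with illustrative inputs: a three-stage design with m = 100 fixed
# PSUs, stage unit costs of 500/100/120, and a total budget of 100000 (cal.sw = 1
# computes the CV achievable for that budget).
clusOpt3fixedPSU(unit.cost = c(500, 100, 120), m = 100,
                 delta1 = 0.01, delta2 = 0.10, unit.rv = 1,
                 k1 = 1, k2 = 1, tot.cost = 100000, cal.sw = 1)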
knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(ggplot2) data(anscombe) computational_components <- list( Linear = ggplot(anscombe, aes(x = x1, y = y1)) + geom_point() + theme_bw(), `Non Linear` = ggplot(anscombe, aes(x = x2, y = y2)) + geom_point() + theme_bw(), `Outlier Vertical`= ggplot(anscombe, aes(x = x3, y = y3)) + geom_point() + theme_bw(), `Outlier Horizontal` = ggplot(anscombe, aes(x = x4, y = y4)) + geom_point() + theme_bw()) library(listdown) ld <- listdown(load_cc_expr = readRDS("comp-comp.rds"), package = "ggplot2") doc <- c( as.character(ld_rmarkdown_header("Anscombe's Quartet", author = "Francis Anscombe", date = "1973")), ld_make_chunks(ld)) cat("\n", paste(doc, collapse = "\n")) ld <- listdown(load_cc_expr = readRDS("comp-comp.rds"), package = "ggplot2", echo = FALSE) cat(paste(ld_make_chunks(ld), collapse = "\n")) computational_components$Data <- anscombe cat(paste(ld_make_chunks(ld), collapse = "\n")) library(DT) ld <- listdown(load_cc_expr = readRDS("comp-comp.rds"), package = c("ggplot2", "DT"), decorator = list(data.frame = datatable)) cat(paste(ld_make_chunks(ld), collapse = "\n"))
as.genind.DNAbin <- function(x, pops){ h <- haplotype(x) tab <- sapply(attr(h, 'index'), function(i) sapply(1:dim(x)[1], function(j) sum(i==j))) colnames(tab) <- paste("L1", 1:dim(tab)[2], sep=".") res <- genind(tab=tab, pop=pops, ploidy=1) return(res) }
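# Usage sketch for as.genind.DNAbin() (assumes ape for the woodmouse data, pegas for
# haplotype() and adegenet for genind(); the population labels are made up):
# sequences are collapsed to haplotypes, which become the alleles of a single
# pseudo-locus.
# library(ape); library(pegas); library(adegenet)
# data(woodmouse)                          # 15 aligned sequences
# gi <- as.genind.DNAbin(woodmouse, pops = rep(c("A", "B", "C"), each = 5))
# gi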
library(testthat) context("Cause-specific Cox regression") library(riskRegression) library(pec) library(rms) library(survival) library(prodlim) test_that("CSC vs prodlim",{ data(Melanoma) nd <- data.frame(sex=factor(levels(Melanoma$sex))) A <- prodlim(Hist(time,status)~1,data=Melanoma) B <- CSC(Hist(time,status)~1,data=Melanoma) pB <- predictRisk(B,times=sort(unique(Melanoma$time)),newdata=nd[1,,drop=FALSE],cause=1) pA <- predictRisk(A,times=sort(unique(Melanoma$time)),newdata=nd[1,,drop=FALSE],cause=1) expect_equal(c(pB),pA,tolerance=1e3) a <- prodlim(Hist(time,status)~sex,data=Melanoma) b <- CSC(Hist(time,status)~strat(sex),data=Melanoma,fitter="cph") c <- CSC(Hist(time,status)~strat(sex),data=Melanoma,surv.type="survival",fitter="cph") pa <- predictRisk(a,times=c(0,10,100,1000,2000),newdata=nd,cause=1) pb <- predictRisk(b,times=c(0,10,100,1000,2000),newdata=nd,cause=1) pc <- predictRisk(c,times=c(0,10,100,1000,2000),newdata=nd,cause=1) expect_equal(c(pb),c(pa),tolerance=0.1) expect_equal(c(pb),c(pc),tolerance=0.00000001) u <- CSC(Hist(time,status)~strat(sex)+age+invasion,data=Melanoma,fitter="cph") v <- CSC(Hist(time,status)~strat(sex)+age+invasion,data=Melanoma,surv.type="survival",fitter="cph") pu <- predictRisk(u,times=c(0,10,100,1000,2000),newdata=Melanoma[c(17,84),],cause=1) pv <- predictRisk(v,times=c(0,10,100,1000,2000),newdata=Melanoma[c(17,84),],cause=1) expect_equal(c(pu),c(pv),tolerance=0.1) }) test_that("predictSurv",{ set.seed(17) d <- prodlim::SimSurv(100) f <- coxph(Surv(time,status)~X1+X2,data=d,x=TRUE,y=TRUE) h <- cph(Surv(time,status)~X1+X2,data=d,surv=TRUE,x=TRUE,y=TRUE) af <- predictRisk(f,newdata=d[c(17,88,3),],times=c(0,1,8.423,100,1000)) bf <- 1-predictSurvProb(f,newdata=d[c(17,88,3),],times=c(0,1,8.423,100,1000)) expect_equal(unname(af),unname(bf),tolerance = 1e-8) ah <- predictRisk(h,newdata=d[c(17,88,3),],times=c(0,1,8.423,100,1000)) bh <- 1-predictSurvProb(h,newdata=d[c(17,88,3),],times=c(0,1,8.423,100,1000)) colnames(bh) <- NULL expect_equal(unname(ah),unname(bh),tolerance = 1e-8) }) test_that("Cox models",{ set.seed(17) d <- prodlim::SimCompRisk(100) a <- CSC(Hist(time,event)~X1+X2,data=d) A <- CSC(Hist(time,event)~X1+X2,data=d,surv.type="surv") a1 <- coxph(Surv(time,event==1)~X1+X2,data=d) a2 <- coxph(Surv(time,event==2)~X1+X2,data=d) A2 <- coxph(Surv(time,event!=0)~X1+X2,data=d) expect_equal(coef(a$models[[1]]),coef(a1),tolerance = 1e-8) expect_equal(coef(a$models[[2]]),coef(a2),tolerance = 1e-8) expect_equal(coef(A$models[[2]]),coef(A2),tolerance = 1e-8) }) test_that("strata",{ set.seed(17) d <- prodlim::SimCompRisk(100) a <- CSC(Hist(time,event)~strata(X1)+X2,data=d) A <- CSC(Hist(time,event)~strata(X1)+X2,data=d,surv.type="surv") a1 <- coxph(Surv(time,event==1)~strata(X1)+X2,data=d) a2 <- coxph(Surv(time,event==2)~strata(X1)+X2,data=d) A2 <- coxph(Surv(time,event!=0)~strata(X1)+X2,data=d) expect_equal(coef(a$models[[1]]),coef(a1),tolerance = 1e-8) expect_equal(coef(a$models[[2]]),coef(a2),tolerance = 1e-8) expect_equal(coef(A$models[[2]]),coef(A2),tolerance = 1e-8) }) test_that("strat and strata",{ data(Melanoma) a <- CSC(Hist(time,status)~strat(sex)+age+invasion+logthick+strat(epicel)+strat(ulcer),data=Melanoma,fitter="cph") predictRisk(a,times=c(0,100,1000,4000),newdata=Melanoma[c(17,77,188),],cause=2) b <- CSC(Hist(time,status)~strata(sex)+age+invasion+logthick+strata(epicel)+strata(ulcer),data=Melanoma,fitter="coxph") pa <- predictRisk(a,times=c(0,100,1000,4000),newdata=Melanoma[c(17,77,188),],cause=2) pb <- 
predictRisk(b,times=c(0,100,1000,4000),newdata=Melanoma[c(17,77,188),],cause=2) expect_equal(pa,pb,tolerance=1e-6) })
con_iachan_holland<-function(a,b){ a <- toupper(unlist(strsplit(a,split = "",fixed = TRUE))) b <- toupper(unlist(strsplit(b,split = "",fixed = TRUE))) if(length(a) != length(b)) stop("a and b must have the same number of characters in the Holland code") if(length(a) > 3 ) stop("the Iachan index is only defined for three-letter Holland codes") IM<-matrix(c(22,10,4,10,5,2,4,2,1),ncol=3) bM<-t(matrix(rbind(b,b,b),ncol=3)) aM<-matrix(rbind(a,a,a),ncol=3) ergM<-((aM==bM)*IM) erg <- sum(ergM) return (erg) }
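# Examples: Iachan congruence between two three-letter Holland codes.
# Identical codes score 22 + 5 + 1 = 28; codes with no letters in common score 0.
con_iachan_holland("RIA", "RIA")   # 28
con_iachan_holland("ria", "RIA")   # 28 -- inputs are upper-cased internally
con_iachan_holland("RIA", "SEC")   # 0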
library(testthat) library(workflows) test_check("workflows")
compute_s2_paths <- function(pm, s2_list_l1c, s2_list_l2a, tmpdir, list_prods, force_tiles = FALSE, check_tmp = TRUE, ignorelist) { . <- type <- mission <- level <- id_orbit <- extent_name <- file_ext <- mission <- level <- sensing_date <- id_orbit <- prod_type <- res <- sensing_datetime <- id_tile <- NULL list_prods <- list_prods[!is.na(nn(list_prods))] list_rgb <- pm$list_rgb[!is.na(nn(pm$list_rgb))] list_indices <- pm$list_indices[!is.na(nn(pm$list_indices))] list_prods <- list_prods[!duplicated(list_prods)] list_rgb <- list_rgb[!duplicated(list_rgb)] list_indices <- list_indices[!duplicated(list_indices)] s2_list_l1c <- s2_list_l1c[!duplicated(names(s2_list_l1c))] s2_list_l2a <- s2_list_l2a[!duplicated(names(s2_list_l2a))] nomsk <- c("SCL", "CLD", "SNW", "AOT") steps_todo <- c( "tiles" = TRUE, "merged" = TRUE, "warped" = pm$clip_on_extent, "warped_nomsk" = pm$clip_on_extent & any(nomsk %in% list_prods), "rgb" = length(list_rgb) > 0, "masked" = !is.na(pm$mask_type), "indices" = length(list_indices) > 0 ) output_req <- c( "tiles" = !is.na(pm$path_tiles), "merged" = !is.na(pm$path_merged) | !is.na(pm$path_out) & !steps_todo[["warped"]] & !steps_todo[["masked"]], "warped" = length(pm$list_prods[!is.na(pm$list_prods) & pm$list_prods != "SCL"]) > 0 & !steps_todo[["masked"]] & pm$clip_on_extent, "warped_nomsk" = any(nomsk %in% pm$list_prods), "rgb" = steps_todo[["rgb"]], "masked" = length(pm$list_prods[!is.na(pm$list_prods) & !pm$list_prods %in% nomsk]) > 0 & steps_todo[["masked"]], "indices" = steps_todo[["indices"]] ) gdal_formats <- fromJSON(system.file("extdata/settings/gdal_formats.json",package="sen2r"))$drivers sel_driver <- gdal_formats[gdal_formats$name==pm$outformat,] sel_rgb_driver <- gdal_formats[gdal_formats$name==pm$rgb_outformat,] if (nrow(sel_driver)==0) { print_message( type="error", "Format \"",pm$outformat,"\" is not recognised; ", "please use one of the formats supported by your GDAL installation." ) } if (nrow(sel_rgb_driver)==0) { print_message( type="error", "Format \"",pm$rgb_outformat,"\" is not recognised; ", "please use one of the formats supported by your GDAL installation." 
) } main_format <- sel_driver[1,"name"] rgb_format <- sel_rgb_driver[1,"name"] out_format <- c( "tiles" = if (output_req["tiles"]) main_format else "VRT", "merged" = if (output_req["merged"]) main_format else "VRT", "warped" = if (output_req["warped"]) main_format else "VRT", "warped_nomsk" = main_format, "rgb" = rgb_format, "masked" = main_format, "indices" = main_format ) main_ext <- sel_driver[1,"ext"] rgb_ext <- sel_rgb_driver[1,"ext"] out_ext <- c( "tiles" = if (output_req["tiles"]) main_ext else "vrt", "merged" = if (output_req["merged"]) main_ext else "vrt", "warped" = if (output_req["warped"]) main_ext else "vrt", "warped_nomsk" = main_ext, "rgb" = rgb_ext, "masked" = main_ext, "indices" = main_ext ) output_dep <- c( "tiles" = "SAFE", "merged" = "tiles", "warped" = "merged", "warped_nomsk" = "merged", "rgb" = if (steps_todo["warped"]) {"warped"} else {"merged"}, "masked.nonnomsk" = if (steps_todo["warped"]) {"warped"} else {"merged"}, "masked.nomsk" = if (steps_todo["warped"]) {"warped_nomsk"} else {"merged"}, "indices" = if (steps_todo["masked"]) {"masked"} else if (steps_todo["warped"]) {"warped"} else {"merged"} ) cloudcovered_steps <- c( "tiles" = FALSE, "merged" = FALSE, "warped" = FALSE, "warped_nomsk" = FALSE, "rgb" = FALSE, "masked" = TRUE, "indices" = TRUE ) paths_istemp <- c( "L1C" = is.na(pm$path_l1c), "L2A" = is.na(pm$path_l2a), "tiles" = !output_req[["tiles"]], "merged" = is.na(pm$path_merged) & (is.na(pm$path_out) | steps_todo[["warped"]] | steps_todo[["masked"]]), "warped" = !output_req[["warped"]], "warped_nomsk" = !output_req[["warped_nomsk"]], "rgb" = !output_req[["rgb"]], "masked" = !output_req[["masked"]], "indices" = !output_req[["indices"]] ) paths <- c( "L1C" = if (!paths_istemp[["L1C"]]) {pm$path_l1c} else {file.path(tmpdir,"SAFE")}, "L2A" = if (!paths_istemp[["L2A"]]) {pm$path_l2a} else {file.path(tmpdir,"SAFE")}, "tiles" = if (!paths_istemp[["tiles"]]) {pm$path_tiles} else {file.path(tmpdir,"tiles")}, "merged" = if (!is.na(pm$path_merged)) { pm$path_merged } else if (!is.na(pm$path_out) & !steps_todo[["warped"]] & !steps_todo[["masked"]]) { pm$path_out } else { file.path(tmpdir,"merged") }, "warped" = if (!paths_istemp[["warped"]]) {pm$path_out} else {file.path(tmpdir,"warped")}, "warped_nomsk" = if (!paths_istemp[["warped_nomsk"]]) {pm$path_out} else {file.path(tmpdir,"warped")}, "rgb" = if (!paths_istemp[["rgb"]]) {pm$path_rgb} else {file.path(tmpdir,"rgb")}, "masked" = if (!paths_istemp[["masked"]]) {pm$path_out} else {file.path(tmpdir,"masked")}, "indices" = if (!paths_istemp[["indices"]]) {pm$path_indices} else {file.path(tmpdir,"indices")} ) paths <- sapply(paths, normalize_path, mustWork = FALSE) ExtentName <- if (steps_todo["warped"]) {pm$extent_name} else {""} level_for_indices <- switch(pm$index_source, TOA = "1C", BOA = "2A") l1c_prods <- c("TOA") l2a_prods <- c("BOA","SCL","TCI","AOT","WVP","CLD","SNW") remove_duplicates <- function(x) {x[!duplicated(x)]} merge_exp_req <- function(exp_paths, req_paths, step) { sapply(names(exp_paths[[step]]), function(prod) { remove_duplicates(nn(unlist( as.vector(c( if (output_req[step]) {exp_paths[[step]][[prod]]}, unlist(sapply( req_paths[gsub("\\..*$","",names(which(output_dep==step)))], function(sellist) {sellist[[prod]]} )) )) ))) }, simplify = FALSE, USE.NAMES = TRUE) } nonex_paths <- function(list_paths, overwrite = FALSE) { if (overwrite) { list_paths } else { sapply(names(list_paths), function(prod) { list_paths[[prod]][!file.exists(list_paths[[prod]])] }, simplify = FALSE, USE.NAMES = TRUE) } } 
exi_paths <- list( "tiles" = sapply(list_prods, function(prod) { list.files( file.path(paths["tiles"], if (pm$path_subdirs) {prod}), paste0("^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_([0-9]{2}[A-Z]{3})\\_(",prod,")\\_([126]0)\\.?(",out_ext["tiles"],")$"), full.names=TRUE ) }, simplify = FALSE, USE.NAMES = TRUE), "merged" = sapply(list_prods, function(prod) { list.files( file.path(paths["merged"], if (pm$path_subdirs) {prod}), paste0("^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_\\_(",prod,")\\_([126]0)\\.?(",out_ext["merged"],")$"), full.names=TRUE ) }, simplify = FALSE, USE.NAMES = TRUE), "warped" = sapply(list_prods[!list_prods %in% nomsk], function(prod) { list.files( file.path(paths["warped"], if (pm$path_subdirs) {prod}), paste0("^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_([^\\_\\.]*)\\_(",prod,")\\_([126]0)\\.?(",out_ext["warped"],")$"), full.names=TRUE ) }, simplify = FALSE, USE.NAMES = TRUE), "warped_nomsk" = sapply(list_prods[list_prods %in% nomsk], function(prod) { list.files( file.path(paths["warped_nomsk"], if (pm$path_subdirs) {prod}), paste0("^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_([^\\_\\.]*)\\_(",prod,")\\_([126]0)\\.?(",out_ext["warped_nomsk"],")$"), full.names=TRUE ) }, simplify = FALSE, USE.NAMES = TRUE), "rgb" = sapply(list_rgb, function(prod) { list.files( file.path(paths["rgb"], if (pm$path_subdirs) {prod}), paste0("^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_([^\\_\\.]*)\\_(",prod,")\\_([126]0)\\.?(",out_ext["rgb"],")$"), full.names=TRUE ) }, simplify = FALSE, USE.NAMES = TRUE), "masked" = sapply(list_prods[!list_prods %in% nomsk], function(prod) { list.files( file.path(paths["masked"], if (pm$path_subdirs) {prod}), paste0("^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_([^\\_\\.]*)\\_(",prod,")\\_([126]0)\\.?(",out_ext["masked"],")$"), full.names=TRUE ) }, simplify = FALSE, USE.NAMES = TRUE), "indices" = sapply(list_indices, function(prod) { list.files( file.path(paths["indices"], if (pm$path_subdirs) {prod}), paste0("^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_([^\\_\\.]*)\\_(",prod,")\\_([126]0)\\.?(",out_ext["indices"],")$"), full.names=TRUE ) }, simplify = FALSE, USE.NAMES = TRUE) ) exi_meta <- sapply(names(exi_paths), function(step) { sapply(names(exi_paths[[step]]), function(prod) { table <- suppressWarnings(sen2r_getElements(exi_paths[[step]][[prod]], abort = FALSE)) table$names <- exi_paths[[step]][[prod]] if (is.null(table$prod_type)) {table$prod_type <- character()} if (is.null(table$mission)) {table$mission <- character()} if (is.null(table$level)) {table$level <- character()} if (is.null(table$file_ext)) {table$file_ext <- character()} table <- table[ type != "unrecognised" & mission %in% toupper(substr(pm$sel_sensor,3,3)) & level %in% toupper(substr(pm$s2_levels,2,3)) ,] if (length(pm$timewindow)>0 & !anyNA(pm$timewindow) & length(table$sensing_date)>0) { table <- table[ table$sensing_date>=pm$timewindow[1] & table$sensing_date<=pm$timewindow[2] ,] } if (length(pm$s2orbits_selected)>0 & !anyNA(pm$s2orbits_selected) & length(table$id_orbit)>0) { table <- table[id_orbit %in% pm$s2orbits_selected,] } table }, simplify = FALSE, USE.NAMES = TRUE) }, simplify = FALSE, USE.NAMES = TRUE) rm(exi_paths) if (length(pm$s2tiles_selected)>0 & !anyNA(pm$s2tiles_selected)) { for (step in c("tiles")) { exi_meta[[step]] <- sapply(names(exi_meta[[step]]), function(prod) { table <- exi_meta[[step]][[prod]] table <- table[extent_name %in% pm$s2tiles_selected,] table }, simplify = FALSE, USE.NAMES = TRUE) } } if (length(pm$extent_name)>0 & 
!anyNA(pm$extent_name)) { for (step in c("warped", "warped_nomsk", "masked", "rgb", "indices")) { exi_meta[[step]] <- sapply(names(exi_meta[[step]]), function(prod) { table <- exi_meta[[step]][[prod]] table <- table[extent_name %in% pm$extent_name,] table }, simplify = FALSE, USE.NAMES = TRUE) } } for (step in c("warped", "warped_nomsk", "masked")) { exi_meta[[step]] <- sapply(names(exi_meta[[step]]), function(prod) { table <- exi_meta[[step]][[prod]] table <- table[file_ext %in% out_ext[step],] table }, simplify = FALSE, USE.NAMES = TRUE) } exi_paths <- sapply(exi_meta, function(x) { sapply(x, function(y) {y$names}, simplify = FALSE, USE.NAMES = FALSE) }, simplify = FALSE, USE.NAMES = TRUE) exi_paths[cloudcovered_steps] <- sapply(exi_paths[cloudcovered_steps], function(x) { sapply(x, function(y) { y[!sen2r_getElements(y)$sensing_date %in% ignorelist$dates_cloudcovered] }, simplify = FALSE, USE.NAMES = FALSE) }, simplify = FALSE, USE.NAMES = TRUE) exi_paths <- sapply(exi_paths, function(x) { sapply(x, function(y) { y[!basename(y) %in% ignorelist$names_missing] }, simplify = FALSE, USE.NAMES = FALSE) }, simplify = FALSE, USE.NAMES = TRUE) exp_paths <- list() if (steps_todo["tiles"]) { exp_paths[["tiles"]] <- sapply(list_prods, function(prod){ nn( unlist(c( sapply( if (prod %in% l1c_prods) {file.path(pm$path_l1c,names(s2_list_l1c))} else if (prod %in% l2a_prods) {file.path(pm$path_l2a,names(s2_list_l2a))}, function(safe){ sel_av_tiles <- tryCatch( safe_getMetadata( safe, info = "tiles", format = "vector", abort = TRUE, simplify = TRUE ), error = function(e){ safe_getMetadata( safe, info = "id_tile", format = "vector", simplify = TRUE ) } ) file.path( paths["tiles"], if (pm$path_subdirs) {prod} else {""}, basename(safe_shortname( safe, prod_type=prod, ext=out_ext["tiles"], res=pm$res_s2, tiles=pm$s2tiles_selected, force_tiles=force_tiles, multiple_names=TRUE )) ) }, simplify = FALSE, USE.NAMES = FALSE ), exi_paths$tiles[[prod]] )) ) }, simplify = FALSE, USE.NAMES = TRUE) if (any(unlist(lapply(exp_paths[["tiles"]], duplicated)))) { exp_paths[["tiles"]] <- sapply(exp_paths[["tiles"]], function(p) { add_tile_suffix(p) }, simplify = FALSE, USE.NAMES = TRUE) } } if (steps_todo["merged"]) { exp_paths[["merged"]] <- sapply(list_prods, function(prod) { expaths <- if (length(exp_paths[[output_dep["merged"]]][[prod]]) == 0) { character(0) } else { file.path( paths["merged"], if (pm$path_subdirs) {prod} else {""}, sen2r_getElements(exp_paths[[output_dep["merged"]]][[prod]])[,paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"__", prod_type,"_", substr(res,1,2),".", out_ext["merged"] )] ) } remove_duplicates(nn( unlist(c(expaths, exi_paths[["merged"]][[prod]])) )) }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["warped"]) { exp_paths[["warped"]] <- sapply(list_prods[!list_prods %in% nomsk], function(prod) { expaths <- if (length(exp_paths[[output_dep["warped"]]][[prod]]) == 0) { character(0) } else { file.path( paths["warped"], if (pm$path_subdirs) {prod} else {""}, sen2r_getElements(exp_paths[[output_dep["warped"]]][[prod]])[,paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", prod_type,"_", substr(res,1,2),".", out_ext["warped"] )] ) } remove_duplicates(nn( unlist(c(expaths, exi_paths[["warped"]][[prod]])) )) }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["warped_nomsk"]) { exp_paths[["warped_nomsk"]] <- sapply(list_prods[list_prods %in% nomsk], function(prod) { expaths <- if 
(length(exp_paths[[output_dep["warped_nomsk"]]][[prod]]) == 0) { character(0) } else { file.path( paths["warped_nomsk"], if (pm$path_subdirs) {prod} else {""}, sen2r_getElements(exp_paths[[output_dep["warped_nomsk"]]][[prod]])[,paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", prod_type,"_", substr(res,1,2),".", out_ext["warped_nomsk"] )] ) } remove_duplicates(nn( unlist(c(expaths, exi_paths[["warped_nomsk"]][[prod]])) )) }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["rgb"]) { exp_paths[["rgb"]] <- sapply(list_rgb, function(prod) { expaths <- if (length(unlist(exp_paths[[output_dep["rgb"]]])) == 0) { character(0) } else { gsub( "<rgbname>", prod, file.path( paths["rgb"], if (pm$path_subdirs) {prod} else {""}, unique( sen2r_getElements(unlist(exp_paths[[output_dep["rgb"]]]))[ level == switch(substr(prod,7,7), T = "1C", B = "2A"), paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", "<rgbname>_", substr(res,1,2),".", out_ext["rgb"] )] ) ) ) } remove_duplicates(nn( unlist(c(expaths, exi_paths[["rgb"]][[prod]])) )) }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["masked"]) { exp_paths[["masked"]] <- sapply(list_prods[!list_prods %in% nomsk], function(prod) { expaths <- if (length(exp_paths[[output_dep["masked.nonnomsk"]]][[prod]]) == 0) { character(0) } else { canbemasked <- sen2r_getElements( exp_paths[[output_dep["masked.nonnomsk"]]][[prod]] )[,paste(sensing_date,id_orbit,ExtentName)] %in% sen2r_getElements( exp_paths[[output_dep["masked.nomsk"]]][["SCL"]] )[,paste(sensing_date,id_orbit,ExtentName)] file.path( paths["masked"], if (pm$path_subdirs) {prod} else {""}, sen2r_getElements(exp_paths[[output_dep["masked.nonnomsk"]]][[prod]])[canbemasked, paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", prod_type,"_", substr(res,1,2),".", out_ext["masked"] )] ) } remove_duplicates(nn( unlist(c(expaths, exi_paths$masked[[prod]])) )) }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["indices"]) { exp_paths[["indices"]] <- sapply(list_indices, function(prod) { expaths <- if (length(unlist(exp_paths[[output_dep["indices"]]])) == 0) { character(0) } else { gsub( "<index>", prod , file.path( paths["indices"], if (pm$path_subdirs) {prod} else {""}, sen2r_getElements(unlist(exp_paths[[output_dep["indices"]]]))[ level %in% level_for_indices, paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", "<index>_", substr(res,1,2),".", out_ext["indices"] )] ) ) } remove_duplicates(nn( unlist(c(expaths, exi_paths$indices[[prod]])) )) }, simplify = FALSE, USE.NAMES = TRUE) } exp_paths[cloudcovered_steps] <- sapply(exp_paths[cloudcovered_steps], function(x) { sapply(x, function(y) { y[!sen2r_getElements(y)$sensing_date %in% ignorelist$dates_cloudcovered] }, simplify = FALSE, USE.NAMES = FALSE) }, simplify = FALSE, USE.NAMES = TRUE) exp_paths <- sapply(exp_paths, function(x) { sapply(x, function(y) { y[!basename(y) %in% ignorelist$names_missing] }, simplify = FALSE, USE.NAMES = FALSE) }, simplify = FALSE, USE.NAMES = TRUE) new_paths <- req_paths <- list() if (steps_todo["indices"]) { new_paths[["indices"]] <- nonex_paths(exp_paths[["indices"]], pm$overwrite) req_paths[["indices"]] <- list() req_paths[["indices"]][[pm$index_source]] <- if (length(unlist(new_paths[["indices"]])) == 0) { character(0) } else { file.path( paths[output_dep["indices"]], if (pm$path_subdirs) {pm$index_source} else {""}, 
remove_duplicates( sen2r_getElements( unlist(new_paths[["indices"]]) )[level %in% level_for_indices, paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", pm$index_source,"_", substr(res,1,2),".", out_ext[output_dep["indices"]] )] ) ) } } if (steps_todo["masked"]) { exp_paths[["masked"]] <- merge_exp_req(exp_paths, req_paths, "masked") new_paths[["masked"]] <- nonex_paths(exp_paths[["masked"]], pm$overwrite) req_paths[["masked"]] <- sapply(list_prods[!list_prods %in% nomsk], function(prod) { if (length(new_paths[["masked"]][[prod]]) == 0) { character(0) } else { file.path( paths[output_dep["masked.nonnomsk"]], if (pm$path_subdirs) {prod} else {""}, remove_duplicates( sen2r_getElements(new_paths[["masked"]][[prod]])[ ,paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", prod,"_", substr(res,1,2),".", out_ext[output_dep["masked.nonnomsk"]] )] ) ) } }, simplify = FALSE, USE.NAMES = TRUE) req_paths[["masked"]][["SCL"]] <- if (length(unlist(new_paths[["masked"]])) == 0) { character(0) } else { file.path( paths[output_dep["masked.nomsk"]], if (pm$path_subdirs) {"SCL"} else {""}, remove_duplicates( sen2r_getElements( unlist(new_paths[["masked"]]) )[,paste0( "S2", mission, "2A","_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", "SCL","_", substr(res,1,2),".", out_ext[output_dep["masked.nomsk"]] )] ) ) } } if (steps_todo["rgb"]) { new_paths[["rgb"]] <- nonex_paths(exp_paths[["rgb"]], pm$overwrite) req_paths[["rgb"]] <- sapply(list_prods[list_prods %in% c("TOA", "BOA")], function(prod) { if (length(unlist(new_paths[["rgb"]][substr(names(new_paths[["rgb"]]),7,7) == substr(prod,1,1)])) == 0) { character(0) } else { file.path( paths[output_dep["rgb"]], if (pm$path_subdirs) {prod} else {""}, remove_duplicates( sen2r_getElements(unlist( new_paths[["rgb"]][substr(names(new_paths[["rgb"]]),7,7) == substr(prod,1,1)] ))[ level == switch(prod, TOA = "1C", BOA = "2A"), paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", ExtentName,"_", prod,"_", substr(res,1,2),".", out_ext[output_dep["rgb"]] )] ) ) } }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["warped_nomsk"]) { exp_paths[["warped_nomsk"]] <- merge_exp_req(exp_paths, req_paths, "warped_nomsk") new_paths[["warped_nomsk"]] <- nonex_paths(exp_paths[["warped_nomsk"]], pm$overwrite) req_paths[["warped_nomsk"]] <- sapply(list_prods[list_prods %in% nomsk], function(prod) { if (length(new_paths[["warped_nomsk"]][[prod]]) == 0) { character(0) } else { file.path( paths[output_dep["warped_nomsk"]], if (pm$path_subdirs) {prod} else {""}, remove_duplicates( sen2r_getElements(new_paths[["warped_nomsk"]][[prod]])[ ,paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", "_", prod,"_", substr(res,1,2),".", out_ext[output_dep["warped_nomsk"]] )] ) ) } }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["warped"]) { exp_paths[["warped"]] <- merge_exp_req(exp_paths, req_paths, "warped") new_paths[["warped"]] <- nonex_paths(exp_paths[["warped"]], pm$overwrite) req_paths[["warped"]] <- sapply(list_prods[!list_prods %in% nomsk], function(prod) { if (length(new_paths[["warped"]][[prod]]) == 0) { character(0) } else { file.path( paths[output_dep["warped"]], if (pm$path_subdirs) {prod} else {""}, remove_duplicates( sen2r_getElements(new_paths[["warped"]][[prod]])[ ,paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", "_", prod,"_", substr(res,1,2),".", 
out_ext[output_dep["warped"]] )] ) ) } }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["merged"]) { exp_paths[["merged"]] <- merge_exp_req(exp_paths, req_paths, "merged") new_paths[["merged"]] <- nonex_paths(exp_paths[["merged"]], pm$overwrite) req_paths[["merged"]] <- sapply(list_prods, function(prod) { if (length(new_paths[["merged"]][[prod]]) == 0) { character(0) } else { reqpaths <- sen2r_getElements(new_paths[["merged"]][[prod]])[ ,paste0( "S2", mission, level,"_", strftime(sensing_date,"%Y%m%d"),"_", id_orbit,"_", "[0-9]{2}[A-Z]{3}[a-z]?_", prod,"_", substr(res,1,2),".", out_ext[output_dep["merged"]] )] remove_duplicates(nn(unlist( lapply(reqpaths, function(x){ exp_paths[[output_dep["merged"]]][[prod]][grep(x, exp_paths[[output_dep["merged"]]][[prod]])] }) ))) } }, simplify = FALSE, USE.NAMES = TRUE) } if (steps_todo["tiles"]) { exp_paths[["tiles"]] <- merge_exp_req(exp_paths, req_paths, "tiles") new_paths[["tiles"]] <- nonex_paths(exp_paths[["tiles"]], pm$overwrite) req_paths[["tiles"]] <- if (sum(sapply(new_paths[["tiles"]], length)) == 0) { list("L1C" = character(0), "L2A" = character(0)) } else { safe_dt_av <- safe_getMetadata( c(names(s2_list_l1c),names(s2_list_l2a)), info = c("nameinfo"), format = "data.table", simplify = FALSE ) tiles_basenames_av <- safe_dt_av[,paste0( "S",mission,level,"_", strftime(sensing_datetime,"%Y%m%d"),"_", id_orbit,"_", ifelse(id_tile!="",id_tile,"[A-Z0-9]{5}"),"_", "[A-Z0-9]{3}_", "[126]0\\.", out_ext["tiles"] )] if (any(duplicated(tiles_basenames_av))) { for (sel_basename_av in names(table(tiles_basenames_av))[table(tiles_basenames_av)>1]) { tiles_basenames_av[tiles_basenames_av==sel_basename_av] <- sapply( letters[seq_len(sum(tiles_basenames_av==sel_basename_av))], function(l) { gsub( "_([0-9]{2}[A-Z]{3})_", paste0("_\\1",l,"_"), sel_basename_av ) } ) } } list( "L1C" = file.path( pm$path_l1c, basename(names(s2_list_l1c))[unlist( lapply( tiles_basenames_av[safe_dt_av$level=="1C"], function(x){length(grep(x,unlist(new_paths[["tiles"]]))) > 0} ) )] ), "L2A" = file.path( pm$path_l2a, basename(names(s2_list_l2a))[unlist( lapply( tiles_basenames_av[safe_dt_av$level=="2A"], function(x){length(grep(x,unlist(new_paths[["tiles"]]))) > 0} ) )] ) ) } } outnames <- list("exi" = exi_paths, "exp" = exp_paths, "new" = new_paths, "req" = req_paths) attr(outnames, "is_todo") <- steps_todo attr(outnames, "is_req") <- output_req attr(outnames, "out_ext") <- out_ext attr(outnames, "out_format") <- out_format attr(outnames, "which_dep") <- output_dep attr(outnames, "paths") <- paths attr(outnames, "paths_istemp") <- paths_istemp outnames }
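# Return-value sketch for compute_s2_paths() (internal sen2r helper; the call below
# is illustrative and assumes 'pm', the SAFE lists, 'tmpdir', 'list_prods' and
# 'ignorelist' have been prepared by the caller):
# pth <- compute_s2_paths(pm, s2_list_l1c, s2_list_l2a, tmpdir, list_prods,
#                         ignorelist = ignorelist)
# pth$exi   # existing products, by step ("tiles", "merged", ...) and product type
# pth$exp   # expected products (existing plus those to be generated)
# pth$new   # products still to be generated (existing ones are skipped unless overwrite)
# pth$req   # inputs required to build the new products, by step
# attr(pth, "is_todo"); attr(pth, "out_ext"); attr(pth, "paths")  # per-step metadata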
server = function(input, output, session) { shapeGamma <- 2 scaleGamma <- 50 xSkew <- seq(0,shapeGamma*scaleGamma+7.5*sqrt(shapeGamma)*scaleGamma, length.out=600) ySkew <- dgamma(xSkew,shape=shapeGamma,scale=scaleGamma) popDen <- list(x=xSkew,y=ySkew) popMean <- shapeGamma*scaleGamma yMax <- 1.5*max(popDen$y) set.seed(as.numeric(Sys.time())) rv <- reactiveValues(sample = NULL, mean = NULL, lower = NULL, upper = NULL, sims = 0, good = 0) observeEvent(input$takeSample, { samp <- rgamma(input$n,shape=shapeGamma,scale=scaleGamma) xbar <- mean(samp) conf = isolate(input$confLevel/100) t.input = conf + ((1 - conf)/2) tMultiplier = qt(t.input, df = input$n - 1) se = sd(samp)/sqrt(input$n) margin = tMultiplier * se rv$sample <- rgamma(input$n,shape=shapeGamma,scale=scaleGamma) rv$mean <- xbar rv$lower <- xbar - margin rv$upper <- xbar + margin goodInterval <- popMean >= rv$lower & popMean <= rv$upper rv$sims <- rv$sims + 1 rv$good <- rv$good + goodInterval }) observeEvent(input$reset, { rv$sims <- 0 rv$good <- 0 }) output$ciplot <- renderPlot({ par(family = "serif", font = 2, bg = NA) plot(popDen$x,popDen$y,type="l",lwd=3,col="red",cex.axis = 1.25, yaxt = "n", main="Density Curve of Population", xlab="", ylab="", ylim = c(0,yMax)) text(c(400,500,600),rep(0.0108,3),c("Simulations","Good","Percent Good"), cex = 1.4) text(c(400,500,600), rep(0.0100,3),c(rv$sims, rv$good, round(rv$good/rv$sims, digits = 4)), cex = 1.4) box(lwd = 2.5) axis(side = 2, las = 1, cex.axis = 1.25, tck = 0.025, hadj = 0.65) mtext(side = 2, "density f(t)", line = 3, cex.lab = 1.25) abline(v=popMean,lwd=2) if (input$takeSample) { sampDen <- density(rv$sample, from = 0) xdens <- sampDen$x ydens <- sampDen$y firstx <- xdens[1] lastx <- xdens[length(xdens)] polygon(x = c(firstx,xdens,lastx), y = c(0,ydens,0), col = alpha("lightblue",0.5)) intLevel <- 0.95*yMax segments(x0 = rv$lower, y0 = intLevel, x1 = rv$upper, y1 = intLevel, col = "green", lwd = 3) text(x=rv$lower,y=intLevel,labels="(") text(x=rv$upper,y=intLevel,labels=")") points(rv$mean, intLevel, col = "blue", pch = 20,cex=2) rug(rv$sample) } }) }
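# A minimal ui counterpart for the server function above (a sketch: widget labels and
# ranges are illustrative). It provides the inputs the server reads (n, confLevel,
# takeSample, reset) and the plot it renders (ciplot); the server also uses alpha()
# from the scales package, assumed to be attached elsewhere.
library(shiny)
ui <- fluidPage(
  titlePanel("Confidence intervals from a skewed population"),
  sidebarLayout(
    sidebarPanel(
      numericInput("n", "Sample size n", value = 30, min = 2),
      sliderInput("confLevel", "Confidence level (%)", min = 80, max = 99, value = 95),
      actionButton("takeSample", "Take sample"),
      actionButton("reset", "Reset counts")
    ),
    mainPanel(plotOutput("ciplot"))
  )
)
# shinyApp(ui = ui, server = server)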
layoutClones <- function(fish,separate.independent.clones=FALSE){ fish@inner.space=lapply(rownames(fish@frac.table),getInnerSpace,fish) fish@outer.space=getOuterSpace(fish) ytop.vec = c() ybtm.vec = c() xpos.vec = c() for(timepos in 1:length(fish@timepoints)){ timepoint=fish@timepoints[timepos] ytop = rep(NA,length(fish@parents)) ybtm = rep(NA,length(fish@parents)) xpos = rep(timepoint, length(fish@parents)) parentsList = 0 while(length(parentsList) > 0){ parent = parentsList[[1]] children = which(fish@parents == parent) parentsList = parentsList[-1] parentsList = c(parentsList, children) numChildren = length(children) spacing = 0 y = fish@outer.space[timepos]/2; if(separate.independent.clones){ y=0 if(parent == 0){ numZeros = length(which(fish@parents==0)) if(numZeros > 1 & fish@outer.space[timepos] > 0){ spacing = fish@outer.space[timepos]/(numZeros+1) } } } if(parent!=0){ y = ybtm[parent] spacing = fish@inner.space[[parent]][timepos]/(numChildren(fish,parent,timepos)+1) } for(clone in children){ if(fish@frac.table[clone,timepos] == 0){ xpos[clone] = NA if(timepos > 1){ if(fish@frac.table[clone,timepos-1] > 0){ ybtm[clone] = y+spacing/2 ytop[clone] = y+spacing/2 xpos[clone] = timepoint-0.25 } } } else { ybtm[clone] = y+spacing y = y + fish@frac.table[clone,timepos] ytop[clone] = y+spacing y = y+spacing } } } ybtm.vec = c(ybtm.vec,ybtm) ytop.vec = c(ytop.vec,ytop) xpos.vec = c(xpos.vec,xpos) } ybtm = matrix(ybtm.vec,ncol=ncol(fish@frac.table)) ytop = matrix(ytop.vec,ncol=ncol(fish@frac.table)) xpos = matrix(xpos.vec,ncol=ncol(fish@frac.table)) ybtm.list = list() ytop.list = list() xpos.list = list() for(i in 1:nrow(fish@frac.table)){ ybtm.list[[i]] = ybtm[i,!is.na(ybtm[i,])] ytop.list[[i]] = ytop[i,!is.na(ytop[i,])] xpos.list[[i]] = xpos[i,!is.na(xpos[i,])] } fish@ybtm = ybtm.list fish@ytop = ytop.list fish@xpos = xpos.list return(fish) } numChildren <- function(fish,clone,timepoint){ if(clone==0){ return(0) } return(length(which(fish@frac.table[which(fish@parents==clone), timepoint]>0))) } getInnerSpace <- function(clone,fish){ total = fish@frac.table[as.numeric(clone),] for(i in which(fish@parents==clone)){ total = total - fish@frac.table[i,] } return(total) } getOuterSpace <- function(fish){ z = fish@frac.table[which(fish@parents==0),] if(is.vector(z)){ return(100-z) } return(100-colSums(z)) }
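# Usage sketch for layoutClones() (assumes the fishplot package, which provides the
# fish object class plus createFishObject() and fishPlot(); the fractions are toy
# data in which each clone is nested within the previous one).
# library(fishplot)
# timepoints <- c(0, 30, 75)
# frac.table <- matrix(c(100, 20,  0,
#                        100, 60, 10,
#                        100, 95, 50), ncol = 3)  # columns = timepoints, rows = clones
# parents <- c(0, 1, 2)
# fish <- createFishObject(frac.table, parents, timepoints = timepoints)
# fish <- layoutClones(fish)
# fishPlot(fish, shape = "spline")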
"dialog.s.class" <- function(show, history) { tt <- tktoplevel() tkwm.title(tt,"s.class") frame1 <- tkframe(tt, relief="groove", borderwidth=2) frame3 <- tkframe(tt, relief="groove", borderwidth=2) frame4 <- tkframe(tt, relief="groove", borderwidth=2) frame2 <- tkframe(tt, relief="groove", borderwidth=2) xyframe <- tkframe(frame1, relief="groove", borderwidth=2) labframe <- tkframe(frame1, relief="groove", borderwidth=2) facframe <- tkframe(frame2, relief="groove", borderwidth=2) classframe <- tkframe(frame2, relief="groove", borderwidth=2) miscframe <- tkframe(frame2, relief="groove", borderwidth=2) limframe <- tkframe(frame3, relief="groove", borderwidth=2) posframe <- tkframe(frame3, relief="groove", borderwidth=2) legframe <- tkframe(frame3, relief="groove", borderwidth=2) optframe <- tkframe(frame4, relief="groove", borderwidth=2) origframe <- tkframe(frame4, relief="groove", borderwidth=2) gridframe <- tkframe(frame4, relief="groove", borderwidth=2) xyvar <- tclVar() facvar <- tclVar() wtvar <- tclVar() colvar <- tclVar() cstarvar <- tclVar() nxvar <- tclVar(1) nyvar <- tclVar(2) labvar <- tclVar() clabvar <- tclVar(1) pchvar <- tclVar(20) cpvar <- tclVar(1) xl1var <- tclVar() xl2var <- tclVar() yl1var <- tclVar() yl2var <- tclVar() blengthvar <- tclVar(1) ellsizevar <- tclVar(1.5) cgrvar <- tclVar(1) orxvar <- tclVar(0) oryvar <- tclVar(0) subvar <- tclVar() csubvar <- tclVar(1) spvar <- tclVar() gridvar <- tclVar(1) axesvar <- tclVar(0) origvar <- tclVar(1) posvar <- tclVar(3) addvar <- tclVar(0) axellvar <- tclVar(1) TFrame <- tkframe(tt, relief="groove") labh <- tklabel(TFrame, bitmap="questhead") tkgrid(tklabel(TFrame,text="Classes", font="Times 18", foreground="red"), labh) tkbind(labh, "<Button-1>", function() print(help("s.class"))) tkpack(TFrame) xy.entry <- tkentry(xyframe, textvariable=xyvar, width=10) nx.entry <- tkentry(xyframe, textvariable=nxvar, width=3) ny.entry <- tkentry(xyframe, textvariable=nyvar, width=3) dfnr.label <- tklabel(xyframe, width=4) dfnc.label <- tklabel(xyframe, width=4) choosexy.but <- tkbutton(xyframe, text="Set", command=function() choosedf(xy.entry, dfnr.label, dfnc.label)) tkgrid(tklabel(xyframe, text="- Coordinates -", foreground="blue"), columnspan=5) tkgrid(tklabel(xyframe,text="XY Coordinates"), xy.entry, choosexy.but, dfnr.label, dfnc.label) tkgrid(tklabel(xyframe,text="X axis col. tkgrid(tklabel(xyframe,text="Y axis col. lab.entry <- tkentry(labframe, textvariable=labvar, width=10) clab.entry <- tkentry(labframe, textvariable=clabvar, width=10) pch.entry <- tkentry(labframe, textvariable=pchvar, width=10) cp.entry <- tkentry(labframe, textvariable=cpvar, width=10) chooselab.but <- tkbutton(labframe, text="Set", command=function() chooselab(tt, dfnr.label, lab.entry)) tkgrid(tklabel(labframe, text="- Labels & symbols -", foreground="blue"), columnspan=3) tkgrid(tklabel(labframe,text="Labels"), lab.entry, chooselab.but) tkgrid(tklabel(labframe,text="Label size"), clab.entry) tkgrid(tklabel(labframe,text="Character tkgrid(tklabel(labframe,text="Char. 
size"), cp.entry) tkpack(xyframe, labframe, side="left") tkpack(frame1) fac.entry <- tkentry(facframe, textvariable=facvar, width=10) wt.entry <- tkentry(facframe, textvariable=wtvar, width=10) col.entry <- tkentry(facframe, textvariable=colvar, width=10) choosefac.but <- tkbutton(facframe, text="Set", command=function() choosefac(fac.entry, dfnr.label)) choosewt.but <- tkbutton(facframe, text="Set", command=function() choosewt(wt.entry, dfnr.label)) choosecol.but <- tkbutton(facframe, text="Set", command=function() choosecol(col.entry, facvar)) tkgrid(tklabel(facframe, text="- Grouping factor -", foreground="blue"), columnspan=3) tkgrid(tklabel(facframe,text="Factor"), fac.entry, choosefac.but) tkgrid(tklabel(facframe,text="Weights"), wt.entry, choosewt.but) tkgrid(tklabel(facframe,text="Colors"), col.entry, choosecol.but) blength.entry <- tkentry(classframe, textvariable=blengthvar, width=5) ellsize.entry <- tkentry(classframe, textvariable=ellsizevar, width=5) axell.cbut <- tkcheckbutton(classframe,text="Draw ellipse axes", variable=axellvar) tkgrid(tklabel(classframe, text="- Ellipses & stars -", foreground="blue"), columnspan=3) tkgrid(axell.cbut) tkgrid(tklabel(classframe,text="Star branch length"), blength.entry) tkgrid(tklabel(classframe,text="Ellipse size factor"), ellsize.entry) sp.entry <- tkentry(miscframe, textvariable=spvar, width=10) choosesp.but <- tkbutton(miscframe, text="Set", command=function() choosesp(sp.entry)) tkgrid(tklabel(miscframe, text="- Misc. options -", foreground="blue"), columnspan=3) tkgrid(tklabel(miscframe,text="Spatial object"), sp.entry, choosesp.but) tkpack(facframe, classframe, miscframe, side="left", expand=1) tkpack(frame2, fill="x") xl1.entry <- tkentry(limframe, textvariable=xl1var, width=10) xl2.entry <- tkentry(limframe, textvariable=xl2var, width=10) yl1.entry <- tkentry(limframe, textvariable=yl1var, width=10) yl2.entry <- tkentry(limframe, textvariable=yl2var, width=10) tkgrid(tklabel(limframe, text="- Limits -", foreground="blue"), columnspan=2) tkgrid(tklabel(limframe,text="X min"), xl1.entry) tkgrid(tklabel(limframe,text="X max"), xl2.entry) tkgrid(tklabel(limframe,text="Y min"), yl1.entry) tkgrid(tklabel(limframe,text="Y max"), yl2.entry) tkpack(tklabel(posframe, text="- Sub-title position -", foreground="blue"), anchor="w") tkpack(tkradiobutton(posframe, text="Top left", value=1, variable=posvar), anchor="w") tkpack(tkradiobutton(posframe, text="Top right", value=2, variable=posvar), anchor="w") tkpack(tkradiobutton(posframe, text="Bottom left", value=3, variable=posvar), anchor="w") tkpack(tkradiobutton(posframe, text="Bottom right", value=4, variable=posvar), anchor="w") sub.entry <- tkentry(legframe, textvariable=subvar) csub.entry <- tkentry(legframe, textvariable=csubvar, width=10) tkgrid(tklabel(legframe, text="- Sub-title -", foreground="blue"), columnspan=2) tkgrid(tklabel(legframe,text="Sub-title string"), sub.entry) tkgrid(tklabel(legframe,text="Sub-title size"), csub.entry) tkpack(limframe, legframe, posframe, side="left", expand=1) tkpack(frame3, fill="x") axes.cbut <- tkcheckbutton(optframe,text="Draw axes", variable=axesvar) add.cbut <- tkcheckbutton(optframe,text="Add to plot", variable=addvar) tkgrid(tklabel(optframe, text="- Draw options -", foreground="blue")) tkgrid(axes.cbut) tkgrid(add.cbut) orig.cbut <- tkcheckbutton(origframe,text="Include origin", variable=origvar) orx.entry <- tkentry(origframe, textvariable=orxvar, width=10) ory.entry <- tkentry(origframe, textvariable=oryvar, width=10) 
tkgrid(tklabel(origframe, text="- Origin -", foreground="blue"), columnspan=2) tkgrid(orig.cbut) tkgrid(tklabel(origframe,text="X Origin"), orx.entry) tkgrid(tklabel(origframe,text="Y Origin"), ory.entry) grid.cbut <- tkcheckbutton(gridframe,text="Draw grid", variable=gridvar) cgr.entry <- tkentry(gridframe, textvariable=cgrvar, width=10) tkgrid(tklabel(gridframe, text="- Grid -", foreground="blue"), columnspan=2) tkgrid(grid.cbut) tkgrid(tklabel(gridframe,text="Grid legend size"), cgr.entry) tkpack(optframe, gridframe, origframe, side="left", expand=1) tkpack(frame4, fill="x") vnr=NULL vnc=NULL numi=1 done <- tclVar(0) "build" <- function() { l <- list(dfxy = .test1value(tclvalue(xyvar), ""), xax = .test1value(tclvalue(nxvar), ""), yax = .test1value(tclvalue(nyvar), ""), labels = .test1value(tclvalue(labvar), ""), plabels.cex = .test1value(tclvalue(clabvar), ""), ppoints.pch = .test1value(tclvalue(pchvar), ""), ppoints.cex = .test1value(tclvalue(cpvar), ""), xlim = .test2values(tclvalue(xl1var), tclvalue(xl2var), ""), ylim = .test2values(tclvalue(yl1var), tclvalue(yl2var), ""), psub.text = tclvalue(subvar), psub.cex = .test1value(tclvalue(csubvar), ""), paxes.draw = as.logical(tclObj(axesvar)), add = as.logical(tclObj(addvar)), pgrid.draw = as.logical(tclObj(gridvar)), pgrid.text.cex = .test1value(tclvalue(cgrvar), ""), porigin.include = as.logical(tclObj(origvar)), porigin.origin = .test2values(tclvalue(orxvar), tclvalue(oryvar), ""), Sp = .test1value(tclvalue(spvar), ""), pellipses.axes.draw = as.logical(tclObj(axellvar)), starSize = .test1value(tclvalue(blengthvar), ""), ellipseSize = .test1value(tclvalue(ellsizevar), ""), col = .test1value(tclvalue(colvar), ""), wt = .test1value(tclvalue(wtvar), ""), fac = .test1value(tclvalue(facvar), as.factor(rep(1, tclvalue(tkcget(dfnr.label, "-text"))))), plot = FALSE ) if (tclvalue(posvar) == 1) l$psub.position <- "topleft" if (tclvalue(posvar) == 2) l$psub.position <- "topright" if (tclvalue(posvar) == 3) l$psub.position <- "bottomleft" if (tclvalue(posvar) == 4) l$psub.position <- "bottomright" l <- l[which(l != "")] return(do.call("s.class", l)) } "reset" <- function() { tclvalue(xyvar) <- "" tclvalue(facvar) <- "" tclvalue(wtvar) <- "" tclvalue(colvar) <- "" tclvalue(cstarvar) <- "" tclvalue(nxvar) <- "1" tclvalue(nyvar) <- "2" tclvalue(labvar) <- "" tclvalue(clabvar) <- "1" tclvalue(pchvar) <- "20" tclvalue(cpvar) <- "1" tclvalue(xl1var) <- "" tclvalue(xl2var) <- "" tclvalue(yl1var) <- "" tclvalue(yl2var) <- "" tclvalue(blengthvar) <- "1" tclvalue(ellsizevar) <- "1.5" tclvalue(cgrvar) <- "1" tclvalue(orxvar) <- "0" tclvalue(oryvar) <- "0" tclvalue(subvar) <- "" tclvalue(csubvar) <- "1" tclvalue(pmvar) <- "" tclvalue(spvar) <- "" tkconfigure(dfnr.label, text="") tkconfigure(dfnc.label, text="") tclvalue(gridvar) <- "1" tclvalue(axesvar) <- "0" tclvalue(origvar) <- "1" tclvalue(posvar) <- "3" tclvalue(addvar) <- "0" tclvalue(axellvar) <- "1" } "drawgraph" <- function() { cmd <- print(build()) if (show) { pr1 <- substr(options("prompt")$prompt, 1, 2) cat(deparse(cmd@Call, width.cutoff = 500), "\n", pr1, sep="") } eval.parent(cmd) assign("cmdlist", c(get("cmdlist", envir=env_ade4tkgui), cmd@Call), envir=env_ade4tkgui) if (history) rewriteHistory(deparse(cmd@Call, width.cutoff = 500)) } RCSFrame <- tkframe(tt, relief="groove") reset.but <- tkbutton(RCSFrame, text="Reset", command=reset) cancel.but <- tkbutton(RCSFrame, text="Dismiss", command=function() tkdestroy(tt)) submit.but <- tkbutton(RCSFrame, text="Submit", default="active", 
command=function() drawgraph()) tkgrid(cancel.but, submit.but, reset.but, ipadx=20) tkpack(RCSFrame) tkbind(tt, "<Destroy>", function() tclvalue(done)<-2) tkbind(tt, "<KeyPress-Return>", function() drawgraph()) tkbind(tt, "<KeyPress-Escape>", function() tkdestroy(tt)) if(tclvalue(done)=="2") return() }
boot_probtrans<-function(coxrfx_fits_boot,patient_data,tmat,initial_state,max_time){ msfit_objects_boot<-vector("list",length(coxrfx_fits_boot)) probtrans_objects_boot<-vector("list",length(coxrfx_fits_boot)) for(i in 1:length(coxrfx_fits_boot)){ print(i) covariate_df<-as.data.frame(coxrfx_fits_boot[[i]]$Z) covariate_df$strata<-coxrfx_fits_boot[[i]]$strata mstate_data_expanded.boot<-list() mstate_data_expanded.boot$time<-coxrfx_fits_boot[[i]]$surv[,1] mstate_data_expanded.boot$status<-coxrfx_fits_boot[[i]]$surv[,2] patient_data2<-patient_data[names(patient_data)%in%names(covariate_df)] patient_data2$strata<-patient_data$strata msfit_objects_boot[[i]]<-msfit_generic(coxrfx_fits_boot[[i]],patient_data2,trans=tmat) probtrans_objects_boot[[i]]<-probtrans_ebmstate(initial_state,msfit_objects_boot[[i]],"clockreset")[[1]] probtrans_objects_boot[[i]]<-probtrans_objects_boot[[i]][sapply(seq(from=0,to=max_time,length.out = 400),function(x) which.min(abs(probtrans_objects_boot[[i]]$time-x))),] } probtrans_CIs<-lapply(colnames(tmat),CIs_for_target_state,probtrans_objects_boot=probtrans_objects_boot) names(probtrans_CIs)<-colnames(tmat) return(list(probtrans_CIs=probtrans_CIs,probtrans_objects_boot=probtrans_objects_boot, msfit_objects_boot=msfit_objects_boot)) } extract_function<-function(list_object,tstate){ as.vector(list_object[tstate]) } CIs_for_target_state<-function(target_state,probtrans_objects_boot){ target_state_boot_samples<-as.data.frame(sapply(probtrans_objects_boot, extract_function,tstate=target_state)) apply(target_state_boot_samples,1,hdi,credMass=0.95) } cumhazCIs_for_target_transition<-function(transition,msfit_objects_boot){ unique_time_points<-sort(unique(unlist(sapply(msfit_objects_boot,function(x) unique(x[[1]][,"time"]))))) cumhaz_fun<-function(msfit_object_boot,unique_time_point,transition){ msfit_for_target_trans<-msfit_object_boot[[1]][msfit_object_boot[[1]][,"trans"]==transition,] msfit_for_target_trans[which.max(msfit_for_target_trans[,"time"]>=unique_time_point),"Haz"] } obj<-sapply(msfit_objects_boot,function(x) sapply(unique_time_points,cumhaz_fun,msfit_object_boot=x,transition=transition)) output<-apply(obj,1,hdi,credMass=0.95) colnames(output)<-unique_time_points output } boot_coxrfx<-function(mstate_data_expanded,which_group,min_nr_samples=100,output="CIs",...){ coxrfx_fits_boot<-vector("list") rownames(mstate_data_expanded)<-1:nrow(mstate_data_expanded) boot_matrix<-matrix(nrow=0,ncol = sum(!names(mstate_data_expanded)%in%c("id","from","to","trans","Tstart","Tstop","time","status","strata","type")),dimnames = list(NULL,names(mstate_data_expanded)[!names(mstate_data_expanded)%in%c("id","from","to","trans","Tstart","Tstop","time","status","strata","type")])) j<-1 repeat{ boot_samples_trans_1<-sample(rownames(mstate_data_expanded[mstate_data_expanded$trans==1,]),replace = TRUE) boot_samples_trans_2<-sample(rownames(mstate_data_expanded[mstate_data_expanded$trans==2,]),replace = TRUE) boot_samples_trans_3<-sample(rownames(mstate_data_expanded[mstate_data_expanded$trans==3,]),replace = TRUE) boot_samples<-c(boot_samples_trans_1,boot_samples_trans_2,boot_samples_trans_3) mstate_data_expanded.boot<-mstate_data_expanded[boot_samples,] covariate_df<-mstate_data_expanded.boot[!names(mstate_data_expanded.boot)%in%c("id","from","to","trans","Tstart","Tstop","time","status","type")] groups2<-which_group[names(covariate_df)[names(covariate_df)!="strata"]] coxrfx_fits_boot[[j]]<-CoxRFX(covariate_df,Surv(mstate_data_expanded.boot$time,mstate_data_expanded.boot$status),groups 
=groups2,... ) if(coxrfx_fits_boot[[j]]$iter[1]!=as.list(coxrfx_fits_boot[[j]]$call)$max.iter & sum(is.na(coxrfx_fits_boot[[j]]$coefficients))==0){ boot_matrix<-rbind(boot_matrix,rep(NA,ncol(boot_matrix))) boot_matrix[j,names(coxrfx_fits_boot[[j]]$coefficients)]<-coxrfx_fits_boot[[j]]$coefficients print(min(apply(boot_matrix, 2, function(x) sum(!is.na(x))))) j<-j+1 } if(min(apply(boot_matrix, 2, function(x) sum(!is.na(x))))>=min_nr_samples) break } CIs<-apply(boot_matrix,2,hdi,credMass=0.95) CIs<-rbind(CIs,apply(boot_matrix, 2, function(x) sum(!is.na(x)))) dimnames(CIs)[[1]][3]<-"n_samples" if(output=="CIs_and_coxrfx_fits"){ return(list(CIs=CIs,coxrfx_fits_boot=coxrfx_fits_boot)) }else if(output=="CIs"){ return(CIs) } } boot_ebmstate<-function(mstate_data_expanded=NULL,which_group=NULL,min_nr_samples=NULL, patient_data=NULL,initial_state=NULL,tmat=NULL,time_model=NULL, backup_file=NULL,input_file=NULL,coxrfx_args=NULL, msfit_args=NULL,probtrans_args=NULL){ list2env(coxrfx_args,envir = environment()) if(!is.null(input_file)){ load(input_file) }else{ coxrfx_fits_boot<-vector("list") msfit_objects_boot<-vector("list") probtrans_objects_boot<-vector("list") rownames(mstate_data_expanded)<-1:nrow(mstate_data_expanded) boot_matrix<-matrix(nrow=0, ncol = sum(!names(mstate_data_expanded)%in%c("id","from","to","trans", "Tstart","Tstop","strata","time", "status","type")), dimnames = list(NULL,names(mstate_data_expanded)[!names(mstate_data_expanded)%in%c("id","from","to","trans", "Tstart","Tstop","strata","time", "status","type")])) j<-1 } tol<-unlist(mget("tol",ifnotfound = list(function(tol) 0.001))) max.iter<- unlist(mget("max.iter",ifnotfound = list(function(max.iter) 50))) sigma0<- unlist(mget("sigma0",ifnotfound = list(function(sigma0) 0.1))) sigma.hat<- unlist(mget("sigma.hat",ifnotfound = list(function(sigma.hat) "df"))) verbose<- unlist(mget("verbose",ifnotfound = list(function(verbose) FALSE))) repeat{ boot_samples_trans_1<-sample(rownames(mstate_data_expanded[mstate_data_expanded$trans==1,]),replace = TRUE) boot_samples_trans_2<-sample(rownames(mstate_data_expanded[mstate_data_expanded$trans==2,]),replace = TRUE) boot_samples_trans_3<-sample(rownames(mstate_data_expanded[mstate_data_expanded$trans==3,]),replace = TRUE) boot_samples<-c(boot_samples_trans_1,boot_samples_trans_2,boot_samples_trans_3) mstate_data_expanded.boot<-mstate_data_expanded[boot_samples,] covariate_df<-mstate_data_expanded.boot[!names(mstate_data_expanded.boot)%in%c("id","from","to", "Tstart","Tstop","time","status","type")] groups2<-which_group[names(covariate_df)[!names(covariate_df)%in%c("strata","trans")]] if(time_model=="clockreset"){ surv_object<-Surv(mstate_data_expanded.boot$time,mstate_data_expanded.boot$status) }else if(time_model=="clockforward"){ surv_object<-Surv(mstate_data_expanded.boot$Tstart,mstate_data_expanded.boot$Tstop,mstate_data_expanded.boot$status) } which.mu<-unlist(mget("which.mu",ifnotfound = list(function(which.mu) unique(groups2)))) coxrfx_fits_boot[[j]]<-CoxRFX(covariate_df,surv_object,groups2,which.mu =which.mu, tol = tol, max.iter = max.iter, sigma0 = sigma0, sigma.hat = sigma.hat, verbose = verbose,coxrfx_args) if(sum(is.na(coxrfx_fits_boot[[j]]$coefficients))==0){ boot_matrix<-rbind(boot_matrix,rep(NA,ncol(boot_matrix))) boot_matrix[j,names(coxrfx_fits_boot[[j]]$coefficients)]<-coxrfx_fits_boot[[j]]$coefficients msfit_objects_boot[[j]]<-do.call("msfit_generic",c(list(object=coxrfx_fits_boot[[j]],newdata=patient_data,trans=tmat),msfit_args)) 
probtrans_objects_boot[[j]]<-do.call("probtrans_ebmstate",c(list(initial_state=initial_state,cumhaz=msfit_objects_boot[[j]],model=time_model),probtrans_args))[[1]] print(min(apply(boot_matrix, 2, function(x) sum(!is.na(x))))) if(j %%5==0 & !is.null(backup_file)){ save(coxrfx_fits_boot,probtrans_objects_boot, msfit_objects_boot,boot_matrix,j, file =backup_file) } j<-j+1 } if(min(apply(boot_matrix, 2, function(x) sum(!is.na(x))))>=min_nr_samples) break } CIs<-apply(boot_matrix,2,hdi,credMass=0.95) CIs<-rbind(CIs,apply(boot_matrix, 2, function(x) sum(!is.na(x)))) dimnames(CIs)[[1]][3]<-"n_samples" probtrans_CIs<-lapply(colnames(tmat),CIs_for_target_state, probtrans_objects_boot=probtrans_objects_boot) names(probtrans_CIs)<-colnames(tmat) cumhaz_CIs<-lapply(sort(unique(mstate_data_expanded$trans)),cumhazCIs_for_target_transition, msfit_objects_boot=msfit_objects_boot) return(list(coefficients_CIs=CIs,coxrfx_fits_boot=coxrfx_fits_boot, probtrans_CIs=probtrans_CIs, probtrans_objects_boot=probtrans_objects_boot, msfit_objects_boot=msfit_objects_boot, patient_data=patient_data,cumhaz_CIs=cumhaz_CIs)) } loo_ebmstate<-function(mstate_data,mstate_data_expanded,which_group, patient_IDs,initial_state,tmat,time_model, backup_file=NULL,input_file=NULL,coxrfx_args=list(), msfit_args=NULL,probtrans_args=NULL){ list2env(coxrfx_args,envir = environment()) if(!is.null(input_file)){ load(input_file) indices<-j:length(patient_IDs) }else{ coxrfx_fits_loo<-vector("list") msfit_objects_loo<-vector("list") probtrans_objects_loo<-vector("list") indices<-1:length(patient_IDs) } tol<-unlist(mget("tol",ifnotfound = list(function(tol) 0.001))) max.iter<- unlist(mget("max.iter",ifnotfound = list(function(max.iter) 50))) sigma0<- unlist(mget("sigma0",ifnotfound = list(function(sigma0) 0.1))) sigma.hat<- unlist(mget("sigma.hat",ifnotfound = list(function(sigma.hat) "df"))) verbose<- unlist(mget("verbose",ifnotfound = list(function(verbose) FALSE))) for(j in indices){ mstate_data_expanded_loo<-mstate_data_expanded[mstate_data_expanded$id!=patient_IDs[j],] covariate_df<-mstate_data_expanded_loo[!names(mstate_data_expanded_loo)%in%c("id","from","to","Tstart","Tstop","time","status","type")] groups2<-which_group[names(covariate_df)[!names(covariate_df)%in%c("strata","trans")]] if(time_model=="clockreset"){ surv_object<-Surv(mstate_data_expanded_loo$time,mstate_data_expanded_loo$status) }else if(time_model=="clockforward"){ surv_object<-Surv(mstate_data_expanded_loo$Tstart,mstate_data_expanded_loo$Tstop,mstate_data_expanded_loo$status) } which.mu<-unlist(mget("which.mu",ifnotfound = list(function(which.mu) unique(groups2)))) coxrfx_fits_loo[[j]]<-CoxRFX(covariate_df,surv_object,groups2,which.mu =which.mu, tol = tol, max.iter = max.iter, sigma0 = sigma0, sigma.hat = sigma.hat, verbose = verbose,coxrfx_args) if(sum(is.na(coxrfx_fits_loo[[j]]$coefficients))==0){ patient_data<-mstate_data[mstate_data$id==patient_IDs[j],,drop=FALSE][1,][rep(1,length(unique(mstate_data$trans))),] patient_data$trans<-1:length(unique(mstate_data$trans)) patient_data<-expand.covs(patient_data, covs = names(patient_data)[!names(patient_data)%in%c("id","from","to","trans","strata","Tstart","Tstop","time","status","type")]) patient_data<-patient_data[names(mstate_data_expanded)] patient_data$strata<-unique(mstate_data[c("trans","strata")])[,2] msfit_objects_loo[[j]]<-do.call("msfit_generic",c(list(object=coxrfx_fits_loo[[j]],newdata=patient_data,trans=tmat),msfit_args)) 
probtrans_objects_loo[[j]]<-do.call("probtrans_ebmstate",c(list(initial_state=initial_state,cumhaz=msfit_objects_loo[[j]],model=time_model),probtrans_args)) if(j %%5==0 & !is.null(backup_file)){ save(patient_IDs,coxrfx_fits_loo,msfit_objects_loo,probtrans_objects_loo,j,file=backup_file) } print(j) } } return(list(coxrfx_fits_loo=coxrfx_fits_loo, probtrans_objects_loo=probtrans_objects_loo, msfit_objects_loo=msfit_objects_loo, patient_IDs=patient_IDs)) }
NULL .apigatewaymanagementapi$delete_connection_input <- function(...) { args <- c(as.list(environment()), list(...)) shape <- structure(list(ConnectionId = structure(logical(0), tags = list(location = "uri", locationName = "connectionId", type = "string"))), tags = list(type = "structure")) return(populate(args, shape)) } .apigatewaymanagementapi$delete_connection_output <- function(...) { list() } .apigatewaymanagementapi$get_connection_input <- function(...) { args <- c(as.list(environment()), list(...)) shape <- structure(list(ConnectionId = structure(logical(0), tags = list(location = "uri", locationName = "connectionId", type = "string"))), tags = list(type = "structure")) return(populate(args, shape)) } .apigatewaymanagementapi$get_connection_output <- function(...) { args <- c(as.list(environment()), list(...)) shape <- structure(list(ConnectedAt = structure(logical(0), tags = list(locationName = "connectedAt", type = "timestamp", timestampFormat = "iso8601")), Identity = structure(list(SourceIp = structure(logical(0), tags = list(locationName = "sourceIp", type = "string")), UserAgent = structure(logical(0), tags = list(locationName = "userAgent", type = "string"))), tags = list(locationName = "identity", type = "structure")), LastActiveAt = structure(logical(0), tags = list(locationName = "lastActiveAt", type = "timestamp", timestampFormat = "iso8601"))), tags = list(type = "structure")) return(populate(args, shape)) } .apigatewaymanagementapi$post_to_connection_input <- function(...) { args <- c(as.list(environment()), list(...)) shape <- structure(list(Data = structure(logical(0), tags = list(type = "blob")), ConnectionId = structure(logical(0), tags = list(location = "uri", locationName = "connectionId", type = "string"))), tags = list(type = "structure", payload = "Data")) return(populate(args, shape)) } .apigatewaymanagementapi$post_to_connection_output <- function(...) { list() }
CST_Load <- function(...) { exp <- Load(...) result <- as.s2dv_cube(exp) result }
buildXmlEnvelope <- function(myMcc, userAgent, developerToken, adwordsService, ManagedCustomerService_Selector_fields = NULL, categoryProductsAndServices = NULL, competition = NULL, ideaTextFilter_included = NULL, ideaTextFilter_excluded = NULL, includeAdultContent = NULL, language = NULL, location = NULL, network_GoogleSearch = NULL, network_SearchNetwork = NULL, relatedToQuery = NULL, relatedToUrl = NULL, searchVolumeMinimum = NULL, searchVolumeMaximum = NULL, seedAdGroupId = NULL, requestType = NULL, attributeTypes = NULL, pagingStartIndex = NULL, pagingNumberResults = NULL, apiVersion = NULL) { if (is.null(apiVersion)) { apiVersion <- "v201806" } currentlySupportedServices <- c("ManagedCustomerService", "TargetingIdeaService") if (adwordsService %in% currentlySupportedServices) { if (adwordsService %in% c("ManagedCustomerService")) { wsdlNamespace <- "mcm" } else if (adwordsService %in% c("TargetingIdeaService")) { wsdlNamespace <- "o" } paste0( '<env:Envelope xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wsdl="https://adwords.google.com/api/adwords/', wsdlNamespace, '/', apiVersion, '" xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:cm="https://adwords.google.com/api/adwords/cm/', apiVersion, '" xmlns:o="https://adwords.google.com/api/adwords/o/', apiVersion, '">\n', buildXmlHeader(myMcc, userAgent, developerToken, apiVersion), buildXmlBody(adwordsService, ManagedCustomerService_Selector_fields, categoryProductsAndServices, competition, ideaTextFilter_included, ideaTextFilter_excluded, includeAdultContent, language, location, network_GoogleSearch, network_SearchNetwork, relatedToQuery, relatedToUrl, searchVolumeMinimum, searchVolumeMaximum, seedAdGroupId, requestType, attributeTypes, pagingStartIndex, pagingNumberResults, apiVersion), '\n</env:Envelope>' ) } else { stop(paste0(adwordsService, " is not currently supported.")) } } buildXmlHeader <- function(myMcc, userAgent, developerToken, apiVersion) { paste0( '<env:Header>\n <wsdl:RequestHeader xmlns="https://adwords.google.com/api/adwords/cm/', apiVersion, '">\n <clientCustomerId>', myMcc, '</clientCustomerId>\n <userAgent>', userAgent, '</userAgent>\n <developerToken>', developerToken, '</developerToken>\n </wsdl:RequestHeader>\n </env:Header>' ) } buildXmlBody <- function(adwordsService, ManagedCustomerService_Selector_fields = NULL, categoryProductsAndServices = NULL, competition = NULL, ideaTextFilter_included = NULL, ideaTextFilter_excluded = NULL, includeAdultContent = NULL, language = NULL, location = NULL, network_GoogleSearch = NULL, network_SearchNetwork = NULL, relatedToQuery = NULL, relatedToUrl = NULL, searchVolumeMinimum = NULL, searchVolumeMaximum = NULL, seedAdGroupId = NULL, requestType = NULL, attributeTypes = NULL, pagingStartIndex = NULL, pagingNumberResults = NULL, apiVersion) { if (adwordsService == "ManagedCustomerService") { xmlBody <- paste0('<env:Body>\n', ManagedCustomerService_get(ManagedCustomerService_Selector_fields, apiVersion), '\n</env:Body>' ) } else if (adwordsService == "TargetingIdeaService") { xmlBody <- paste0('<env:Body>\n', TargetingIdeaService_get(categoryProductsAndServices, competition, ideaTextFilter_included, ideaTextFilter_excluded, includeAdultContent, language, location, network_GoogleSearch, network_SearchNetwork, relatedToQuery, relatedToUrl, searchVolumeMinimum, searchVolumeMaximum, seedAdGroupId, requestType, attributeTypes, pagingStartIndex, pagingNumberResults, apiVersion), '\n</env:Body>' ) } xmlBody }
desparsePrec <- function( dat, rho, type = 'glasso' ){ empCv = cov(dat); switch( type, glasso = desparsePrecGlasso( empCv, rho ), ridge = desparsePrecRidge( dat,empCv, rho ) ) } desparsePrecGlasso <- function( emp, rho ){ glEst = glasso(emp,rho); return( 2*glEst$wi - glEst$wi%*%emp%*%glEst$wi ); } desparsePrecRidge <- function( dat, emp, rho ){ k = ncol(dat); sv = svd(dat); glEst = glasso(emp,1)$wi; prj= sv$v%*%t(sv$v)%*%glEst; diag(prj) <- 0; rdEst = solve(emp+diag(rho,k)); return( rdEst + prj ); } sparseMat <- function( shat, k, alf=0.5, iter = 10, pnrm=Inf, THRSH='hard' ){ thrLen = length(THRSH); if(thrLen==1){ if(THRSH=='all'){ THRSH =c("hard","soft","scad","adpt"); thrLen = length(THRSH); } } toRet = vector("list",thrLen); names(toRet) <- THRSH tmp = array(0,dim=c(k,k,iter+1)); for( i in 1:thrLen ){ toRet[[i]] = tmp; toRet[[i]][,,1]=shat; } origDiag= abs(diag(shat)); diag(shat) = abs(diag(shat)); eDiag = sqrt(abs(diag(diag(shat)))); invDiag = sqrt(abs(diag(1/diag(shat)))); shat = invDiag %*% shat %*% invDiag; sigmas = shat[lower.tri(diag(k))]; if(iter==0){ return(toRet); } if( is.na(sum(abs(sigmas))) ) warning("NA Error in sigmas"); sigMedi = c(0,quantile( abs(sigmas),1-alf,na.rm=T)); fp00 = diag(diag(shat)); for( j in 1:thrLen ) toRet[[j]][,,2] = eDiag%*%thresh( shat,sigMedi[2],THRSH[j] )%*%eDiag; if(iter==1) return(toRet); for( i in 2:iter ){ fp50 = thresh(shat,sigMedi[i],type='hard'); if(sum(fp50==diag(k))==k^2){ for( j in 1:thrLen) toRet[[j]][,,i] = origDiag; sigMedi = c(sigMedi,sigMedi[i]) next; } ra50a = phi( fp50-fp00,pnrm ); ra = ra50a*(sqrt(alf)); sigMedi = c(sigMedi, spc_CoMZeroBinSearchRev( shat, k, ra, pnrm ) ); for( j in 1:thrLen ) toRet[[j]][,,i+1] = eDiag%*%thresh( shat,sigMedi[i+1],THRSH[j] )%*%eDiag; } return(toRet); } spc_CoMZeroBinSearchRev <- function( shat, k, ra, pnrm ){ maxThr = 1; lmbPrev = 0; quant = 0.5; delta = 0.25; spar = shat; for( i in 1:10 ){ lmb = quant; if( lmb == lmbPrev ) break; if(lmb > maxThr){ quant=quant-delta; delta=delta/2; next; } snew= thresh( shat,lmb,'hard' ); if( phi(snew-diag(k),pnrm)<ra ){ spar = snew; quant= quant - delta; } else { quant= quant + delta; } lmbPrev = lmb; delta = delta/2; } return(lmb); } phi <- function( mat,p ){ return(pschnorm(mat,p)) } pschnorm <- function( mat, p ){ if(p==2) return( hsnorm(mat) ); ev = abs(eigen(mat,only.values=TRUE,symmetric=TRUE)$values); if( is.infinite(p) ) return(max(ev)); return( sum(ev^p)^(1/p) ); } hsnorm <- function( mat ){ return( sqrt(sum(mat^2)) ); } sqrtMat <-function(A) { svdA= svd( A ); D = diag( sqrt(svdA$d) ); return( svdA$u %*% D %*% t(svdA$v) ) } thresh <- function( s, lmb, type ){ switch( type, soft = thr_soft(s,lmb), hard = thr_hard(s,lmb), scad = thr_scad(s,lmb), adpt = thr_adpt(s,lmb) ) } thr_hard <- function(s,lmb){ return( s*(abs(s)>lmb) ); } thr_soft <- function(s,lmb){ res = abs(s)-lmb; return( sign(s)*res*( res>0 ) ); } thr_scad <- function(s,lmb,aa=3.7){ msk1= s<(2*lmb); msk2= (s<(aa*lmb))-msk1; msk3= rep(1,length(s))-msk1-msk2; scd = (aa-1)/(aa-2)*(s-2*lmb)+lmb; res = thr_soft(s*msk1,lmb) + (scd)*msk2 + thr_hard(s*msk3,lmb); return(res); } thr_adpt <- function(s,lmb,eta=1){ res = abs(s)-lmb^(eta+1)*abs(s)^(-eta); res[which(is.na(res))]=0; res[which(is.infinite(res))]=0; return( sign(s)*res*( res>0 ) ); }
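## Illustrative sketch, not part of the original source: a quick check of the
## thresholding helpers defined above. Hard thresholding zeroes entries whose
## absolute value is at most the threshold; soft thresholding also shrinks the
## surviving entries towards zero by the threshold.
s_demo <- c(-2, -0.5, 0.3, 1.5)
thr_hard(s_demo, 1)  # -2.0  0.0  0.0  1.5
thr_soft(s_demo, 1)  # -1.0  0.0  0.0  0.5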
gen_freq <- function(af, n_seeds, result="initialSB", distribution = NA, max_vec_length=1e+07){ cat("gen_freq starts...") dfgenotype <- get0("dfgenotype", envir = parent.frame(n = 1)) if(length(result)>1){ if(distribution[1] == "equal") distribution <- rep(1/length(result), length(result)) if(length(distribution) != length(result)) stop("Result and distribution has to be of the same length.") if(any(distribution > 1 | distribution < 0)) stop("Distribution must be between 1 and 0.") if(sum(distribution) > 1) stop("Distribution must not sum up to more the 1.") n_seeds <- round(distribution*n_seeds, digits=0) } if(is.null(af)){ for(i in seq_along(result)){ dfgenotype[[result[i]]] <- n_seeds[i] } assign("dfgenotype", value=dfgenotype, pos = -1, envir=parent.frame(n = 1)) cat("finished!\n") return(dfgenotype) } gt_probab <- rep(1, nrow(dfgenotype)) n_loci <- length(af) locus_value <- data.frame(matrix(ncol=n_loci,as.numeric(unlist(strsplit(as.character(dfgenotype[[1]]), split=NULL))),byrow=TRUE), stringsAsFactors = TRUE) for (i in seq(along=af)){ locus <- locus_value[,i] gt_probab <- gt_probab * af[i]^locus * (1-af[i])^abs(2-locus) *(-(locus-1)^2+2) } gtFreq <- data.frame(genotype = dfgenotype$genotype, stringsAsFactors = TRUE) for(i in seq_along(result)){ gtFreq[[result[i]]] <- 0 i1 <- n_seeds[i] %/% max_vec_length i2 <- n_seeds[i] %% max_vec_length if(i1 > 0){ for(i in 1:i1){ tmp1 <- data.frame(table(sample(gtFreq$genotype, size = max_vec_length, replace = TRUE, prob = gt_probab)), stringsAsFactors = TRUE) tmp2 <- merge(gtFreq, tmp1, by.x = "genotype", by.y = "Var1", all.x = TRUE) if(anyNA(tmp2$Freq)){ warning("Frequencies of the genotype in the initial seed bank could not be calculated correctly") tmp2[is.na(tmp2$Freq)] <- 0 } gtFreq[,2] <- gtFreq[,2] + tmp2$Freq } } tmp1 <- data.frame(table(sample(gtFreq$genotype, size = i2, replace = TRUE, prob = gt_probab))) tmp2 <- merge(gtFreq, tmp1, by.x = "genotype", by.y = "Var1", all.x = TRUE) if(anyNA(tmp2$Freq)){ warning("Frequencies of the genotype in the initial seed bank could not be calculated correctly") tmp2$Freq[is.na(tmp2)] <- 0 } gtFreq[,2] <- gtFreq[,2] + tmp2$Freq dfgenotype[[result[i]]] <- gtFreq[,2] } cat("finished!\n") assign("dfgenotype", value=dfgenotype, pos = -1, envir=parent.frame(n = 1)) invisible(dfgenotype) }
fastSumID <- function (x, group) { as.vector(x = rowsum.default(x, group, reorder = FALSE), mode = "numeric") }
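## Illustrative sketch, not part of the original source: fastSumID() sums x
## within each group, keeping groups in order of first appearance (reorder = FALSE).
fastSumID(c(1, 2, 3, 4), group = c("a", "b", "a", "b"))  # 4 6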
download_img_data = function(lib.loc = NULL){ stubs = c("T1Strip.nii.gz", "T2Strip.nii.gz") if (!is.null(lib.loc)) { desc = system.file("DESCRIPTION", package = "WhiteStripe") lib.dir = file.path(lib.loc, "WhiteStripe") if (!dir.exists(lib.dir)) { dir.create(lib.dir) } out_desc = file.path(lib.dir, "DESCRIPTION") if (!file.exists(out_desc)) { if (file.exists(desc)) { file.copy(desc, out_desc) } } } img_files = ws_img_data(lib.loc = lib.loc, warn = FALSE) if (!all(file.exists(img_files))) { for (istub in stubs) { url = paste0("https://raw.githubusercontent.com/muschellij2/", "WhiteStripe", "/gh-pages/", istub) urlfile <- file.path(system.file(package = "WhiteStripe", lib.loc = lib.loc), istub) download.file(url, urlfile, quiet = TRUE) } } img_files = ws_img_data(lib.loc = lib.loc, warn = FALSE) return(all(file.exists(img_files))) } ws_img_data = function(lib.loc = NULL, warn = TRUE){ stubs = c(T1 = "T1Strip.nii.gz", T2 = "T2Strip.nii.gz") img_files = system.file(stubs, package = "WhiteStripe", lib.loc = lib.loc) if (all(img_files == "")) { if (warn) { warning("Files are not downloaded, use download_img_data first!") } } else { bn = basename(img_files) bn = gsub("(.*)Strip.*", "\\1", bn) names(img_files) = bn } return(img_files) }
setwd("/home/rstudio2/m4") library("forecast") library("MAPA") library("nnfor") library("thief") library("pbmcapply") ncore = detectCores()-4 set.seed(12345) perd = "HOURLY" h.fc = 48 freq = 168 MyData <- read.csv(file="Hourly-train.csv", header=TRUE, sep=",") mydata.list <- setNames(split(MyData[,-1],seq(nrow(MyData))),MyData[,1]) mydata.list <- mclapply(mydata.list, function(x) x[!is.na(x)],mc.cores = ncore) mydata.full.ts <- mclapply(mydata.list,function(x) ts(as.vector(t(x)),frequency = freq),mc.cores = ncore) mydata.full.ts_207 <- mydata.full.ts[1:207] mydata.full.ts_414 <- mydata.full.ts[208:414] forx = function(x, h.fc, frequency = freq, transform = FALSE) { if(transform & min(x, na.rm = TRUE) >= 0) { lambda = BoxCox.lambda(na.contiguous(x), method = "guerrero", lower = 0, upper = 1) x.bc = BoxCox(x, lambda) } else { lambda = NULL x.bc = x transform = FALSE } k <- ges(x.bc) kf <- (forecast(k,h=h.fc)) m <- thief(x.bc,h = h.fc,usemodel = "arima") n <- thief(as.ts(x.bc,frequency=168),h = 48,usemodel = "snaive") nn <- elm(x.bc) f.nn <- forecast(nn,h=h.fc) mlpm <- mlp(x.bc) f.mlp <- forecast(mlpm,h=h.fc) iu <- msts(x.bc,c(24,168)) decomp <- dshw(iu) ui <- forecast(decomp,h=h.fc)$mean ty <- tbats(x.bc,use.box.cox = T,use.parallel = F) ty.f <- (forecast(ty,h=h.fc))$mean agg <- cbind( ges=kf$forecast[1:h.fc],arima=m$mean[1:h.fc], naive=n$mean[1:h.fc],tbats = ty.f[1:h.fc], dshw = ui[1:h.fc],elm = f.nn$mean,mlp=f.mlp$mean ) med <- apply(agg,1, median, na.rm = TRUE) men <- rowMeans(agg) med.noelm <- apply(agg[,c("ges","arima","naive","tbats","dshw")],1, median, na.rm = TRUE) men.noelm <- rowMeans(agg[,c("ges","arima","naive","tbats","dshw")]) return(cbind (ges=kf$forecast[1:h.fc], arima=m$mean[1:h.fc],naive=n$mean[1:h.fc],tbats = ty.f[1:h.fc], dshw = ui[1:h.fc],elm = f.nn$mean,mlp=f.mlp$mean, med = med, men=men, med.noelm = med.noelm, men.noelm = men.noelm ) ) } system.time(for.hm4_207 <- pbmclapply(mydata.full.ts_207, forx, h.fc = h.fc, frequency = freq, transform = FALSE, mc.cores = ncore)) save(for.hm4_207, file = paste0("M4_", perd, "207_srihari.rda"))
module_log_server <- function(input, output, session, rv, input_re, ...) { arguments <- list(...) observe({ file <- reactiveFileReader(500, session, arguments$logfilename, readLines) rv$logfile <- file() output$download_logfile <- downloadHandler( filename = function() { paste0("BC_logfile.txt") }, content = function(file) { write(rv$logfile, file) }, contentType = "text/csv" ) }) output$log_out <- reactive({ paste(paste0(rv$logfile, collapse = "\n")) }) } module_log_ui <- function(id) { ns <- NS(id) tagList(fluidRow( box( title = "Log", verbatimTextOutput(ns("log_out")), tags$head(tags$style( paste0( "#", ns("log_out"), " {", "max-height: 70vh; background: ghostwhite;}" ) )), width = 9 ), box( title = "Download Log File", div( class = "row", style = "text-align: center;", shinyjs::disabled(downloadButton( ns("download_logfile"), "Download Log File", style = paste0( "white-space: normal; ", "text-align:center; ", "padding: 9.5px 9.5px 9.5px 9.5px; ", "margin: 6px 10px 6px 10px;" ) )) ), tags$hr(), width = 3 ) )) }
group_2d_graphics <- function(zargs, glabs = NULL, sep = "\n", loc = c(0.5, 0.5), add = FALSE, plot... = NULL, ...) { check_zargs(zargs, "turns", "vars", "num") turns <- zargs$turns vars <- zargs$vars num <- zargs$num ii <- range(vars[num,]) ii <- if(turns[num-1] == "u" || turns[num] == "u") rev(ii) else ii if(is.null(glabs)) { glabs <- extract_2d(zargs)$glabs } else { len.groups <- length(unlist(zargs$x, recursive = FALSE)) if(length(glabs) != len.groups) stop("length(glabs) has to equal the number ",len.groups," of variables in all groups together; consider rep()") } labs <- paste0(glabs[ii], collapse = sep) opar <- par(usr = c(0, 1, 0, 1)) on.exit(par(opar)) if(!add) plot_region(xlim = 0:1, ylim = 0:1, plot... = plot...) text(x = loc[1], y = loc[2], labels = labs, ...) } points_2d_graphics <- function(zargs, cex = 0.4, box = FALSE, add = FALSE, group... = NULL, plot... = NULL, ...) { r <- extract_2d(zargs) xlim <- r$xlim ylim <- r$ylim x <- as.matrix(r$x) y <- as.matrix(r$y) same.group <- r$same.group if(same.group) { if(!add) plot_region(xlim = xlim, ylim = ylim, plot... = plot...) points(x = x, y = y, cex = cex, ...) if(box) box(...) } else { args <- c(list(zargs = zargs, add = add), group...) do.call(group_2d_graphics, args) } } qq_2d_graphics <- function(zargs, do.line = TRUE, lines... = NULL, cex = 0.4, box = FALSE, add = FALSE, group... = NULL, plot... = NULL, ...) { r <- extract_2d(zargs) xlim <- r$xlim ylim <- r$ylim x <- as.matrix(r$x) y <- as.matrix(r$y) same.group <- r$same.group if(same.group) { if(!add) plot_region(xlim = xlim, ylim = ylim, plot... = plot...) sx <- sort(x) sy <- sort(y) lenx <- length(sx) leny <- length(sy) if (leny < lenx) sx <- approx(1L:lenx, sx, n = leny)$y if (leny > lenx) sy <- approx(1L:leny, sy, n = lenx)$y points(x = sx, y = sy, cex = cex, ...) if(do.line) { qx <- quantile(x, probs = c(0.25, 0.75), na.rm = TRUE, names = FALSE) qy <- quantile(y, probs = c(0.25, 0.75), na.rm = TRUE, names = FALSE) slope <- diff(qy) / diff(qx) intercept <- qy[1] - qx[1] * slope do.call(abline, c(list(a = intercept, b = slope), lines...)) } if(box) box(...) } else { args <- c(list(zargs = zargs, add = add), group...) do.call(group_2d_graphics, args) } } density_2d_graphics <- function(zargs, ngrids = 25, drawlabels = FALSE, axes = FALSE, box = FALSE, add = FALSE, group... = NULL, ...) { r <- extract_2d(zargs) xlim <- r$xlim ylim <- r$ylim x <- r$x y <- r$y same.group <- r$same.group if(same.group) { data <- na.omit(cbind(x, y)) dens <- kde2d(data[,1], data[,2], n = ngrids, lims = c(xlim, ylim)) contour(dens$x, dens$y, dens$z, drawlabels = drawlabels, axes = axes, add = add, ...) if(box) box(...) } else { args <- c(list(zargs = zargs, add = add), group...) do.call(group_2d_graphics, args) } } axes_2d_graphics <- function(zargs, length = 0.1, eps = 0.04, code = 2, xpd = NA, add = FALSE, group... = NULL, plot... = NULL, ...) { r <- extract_2d(zargs) xlim <- r$xlim ylim <- r$ylim same.group <- r$same.group if(same.group) { if(!add) plot_region(xlim = xlim, ylim = ylim, plot... = plot...) epsx <- eps * diff(xlim) epsy <- eps * diff(ylim) arrows(xlim[1]-epsx, ylim[1]-epsy, xlim[2]+epsx, ylim[1]-epsy, length = length, code = code, xpd = xpd, ...) arrows(xlim[1]-epsx, ylim[1]-epsy, xlim[1]-epsx, ylim[2]+epsy, length = length, code = code, xpd = xpd, ...) } else { args <- c(list(zargs = zargs, add = add), group...) do.call(group_2d_graphics, args) } } arrow_2d_graphics <- function(zargs, loc = c(0.5, 0.5), angle = 60, length = 0.2, add = FALSE, group... = NULL, plot... 
= NULL, ...) { r <- extract_2d(zargs) same.group <- r$same.group check_zargs(zargs, "num", "turns") turn.out <- zargs$turns[zargs$num] if(same.group) { arrow <- zenarrow(turn.out, angle = angle, length = length, coord.scale = 1) arr <- loc + arrow opar <- par(usr = c(0, 1, 0, 1)) on.exit(par(opar)) if(!add) plot_region(xlim = 0:1, ylim = 0:1, plot... = plot...) segments(x0 = rep(arr[1,2], 2), y0 = rep(arr[2,2], 2), x1 = c(arr[1,1], arr[1,3]), y1 = c(arr[2,1], arr[2,3]), ...) } else { args <- c(list(zargs = zargs, add = add), group...) do.call(group_2d_graphics, args) } } rect_2d_graphics <- function(zargs, loc = c(0.5, 0.5), width = 1, height = 1, add = FALSE, group... = NULL, plot... = NULL, ...) { r <- extract_2d(zargs) same.group <- r$same.group if(same.group) { x <- c(loc[1] - width/2, loc[1] + width/2) y <- c(loc[2] - height/2, loc[2] + height/2) opar <- par(usr = c(0, 1, 0, 1)) on.exit(par(opar)) if(!add) plot_region(xlim = 0:1, ylim = 0:1, plot... = plot...) rect(xleft = x[1], ybottom = y[1], xright = x[2], ytop = y[2], ...) } else { args <- c(list(zargs = zargs, add = add), group...) do.call(group_2d_graphics, args) } } label_2d_graphics <- function(zargs, loc = c(0.98, 0.05), label = NULL, adj = 1:0, box = FALSE, add = FALSE, group... = NULL, plot... = NULL, ...) { r <- extract_2d(zargs) same.group <- r$same.group vlabs <- r$vlabs check_zargs(zargs, "vars", "num") vars <- zargs$vars num <- zargs$num if(same.group) { xlab <- vlabs[vars[num, 1]] ylab <- vlabs[vars[num, 2]] if(is.null(label)) label <- paste0("(",xlab,", ",ylab,")") opar <- par(usr = c(0, 1, 0, 1)) on.exit(par(opar)) if(!add) plot_region(xlim = 0:1, ylim = 0:1, plot... = plot...) text(x = loc[1], y = loc[2], labels = label, adj = adj, ...) if(box) box(...) } else { args <- c(list(zargs = zargs, add = add), group...) do.call(group_2d_graphics, args) } } layout_2d_graphics <- function(zargs, ...) label_2d_graphics(zargs, loc = c(0.5, 0.5), adj = rep(0.5, 2), box = TRUE, group... = list(...), ...)
hilbert.wge<- function( input) { n <- length( input ) if( (n%%2)==1 ) { input <- input[-1] n <- length( input ) } x1 <- fft( input ) x2 <- rep(0,n) x2[1] <- x2[n/2+1] <- 1 x2[2:(n/2)] <- 2 x3 <- x1*x2 ans <- fft( x3, inverse=T )/n return( ans ) }
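## Illustrative sketch, not part of the original source: hilbert.wge() returns the
## complex analytic signal of a series (the first value is dropped if the length is
## odd), so Mod() of the result gives the instantaneous amplitude; for a
## unit-amplitude cosine on an exact Fourier frequency the envelope is essentially 1.
x_demo <- cos(2 * pi * 5 * (0:255) / 256)
envelope <- Mod(hilbert.wge(x_demo))
round(range(envelope), 2)  # approximately c(1, 1)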
checkComplex = function(x, any.missing = TRUE, all.missing = TRUE, len = NULL, min.len = NULL, max.len = NULL, unique = FALSE, names = NULL, typed.missing = FALSE, null.ok = FALSE) { .Call(c_check_complex, x, any.missing, all.missing, len, min.len, max.len, unique, names, typed.missing, null.ok) } check_complex = checkComplex assertComplex = makeAssertionFunction(checkComplex, c.fun = "c_check_complex", use.namespace = FALSE) assert_complex = assertComplex testComplex = makeTestFunction(checkComplex, c.fun = "c_check_complex") test_complex = testComplex expect_complex = makeExpectationFunction(checkComplex, c.fun = "c_check_complex", use.namespace = FALSE)
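## Illustrative sketch, not part of the original source: like the other
## checkmate-style helpers, check_complex() returns TRUE on success and a message
## string on failure, while assert_complex() would signal an error instead.
check_complex(c(1 + 1i, 2 + 0i), len = 2)   # TRUE
is.character(check_complex(1:3))            # TRUE: a failure message, not an error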
amTimeSeries <- function(data, col_date, col_series, main = "", ylab = "", color = c("#2E2EFE", "#31B404", "#FF4000", "#AEB404"), type = c("line"), bullet = NULL, bulletSize = 2, linetype = c(0, 5, 10, 15, 20), linewidth = c(1, 1, 1, 1, 1, 1), fillAlphas = 0, precision = 1, connect = FALSE, export = FALSE, legend = TRUE, legendPosition = "bottom", legendHidden = FALSE, aggregation = c("Average", "Low", "High", "Sum"), maxSeries = 300, groupToPeriods = c('ss', 'mm', 'hh', 'DD', 'MM', 'YYYY'), checkGroupToPeriods = TRUE, ZoomButton = data.frame(Unit = "MAX", multiple = 1, label ="All"), ZoomButtonPosition = "bottom", periodFieldsSelection = FALSE, scrollbar = TRUE, scrollbarPosition = "bottom", scrollbarHeight = 40, scrollbarGraph = NULL, cursor = TRUE, cursorValueBalloonsEnabled = TRUE, creditsPosition = "top-right", group = NULL, is_ts_module = FALSE, dataDateFormat = 'YYYY-MM-DD JJ:NN:SS', categoryBalloonDateFormats = list(list(period = 'YYYY', format = 'YYYY'), list(period='MM', format = 'YYYY-MM'), list(period = 'WW', format = 'YYYY-MM-DD'), list(period='DD', format = 'YYYY-MM-DD'), list(period = 'hh', format = 'YYYY-MM-DD JJ:NN'), list(period='mm', format = 'YYYY-MM-DD JJ:NN'), list(period = 'ss', format = 'YYYY-MM-DD JJ:NN:SS'), list(period='fff', format = 'YYYY-MM-DD JJ:NN:SS')), dateFormats = list(list(period = 'YYYY', format = 'YYYY'), list(period='MM', format = 'MMM'), list(period = 'WW', format = 'MMM DD'), list(period='DD', format = 'MMM DD'), list(period = 'hh', format = 'JJ:NN'), list(period='mm', format = 'JJ:NN'), list(period = 'ss', format = 'JJ:NN:SS'), list(period='fff', format = 'JJ:NN:SS')), ...) { .testFormatData(data) data <- data.frame(data, check.names = FALSE, stringsAsFactors = FALSE) .testIn(vect = col_date, control = names(data)) init_col_series <- col_series if(is.list(col_series)){ n_col_series <- sapply(col_series, length) col_series <- do.call("c", col_series) .testIn(vect = col_series, control = names(data)) if(any(!n_col_series%in%c(3, 1))){ stop("col_series list element must be a vector of length 1 (one curve) or 3 (upper/lower curve).") } } else if(is.vector(col_series)) { .testIn(vect = col_series, control = names(data)) n_col_series <- rep(1, length(col_series)) } else { stop("col_series must be a vector or a list") } data <- data[, c(col_date, col_series)] .testCharacter(char = color) aggregation <- match.arg(aggregation) .testCharacterLength1(ZoomButtonPosition) .testIn(ZoomButtonPosition, c("left", "right", "bottom", "top")) .testLogicalLength1(logi = legend) .testCharacterLength1(legendPosition) .testIn(legendPosition, c("left", "right", "bottom", "top")) .testNumericLength1(scrollbarHeight) .testLogicalLength1(logi = scrollbar) .testCharacterLength1(scrollbarPosition) .testIn(scrollbarPosition, c("left", "right", "bottom", "top")) if(!is.null(scrollbarGraph)){ stopifnot(scrollbarGraph%in%colnames(data)) } .testCharacterLength1(creditsPosition) .testIn(creditsPosition, c("top-right", "top-left", "bottom-right", "bottom-left")) if (!is.null(bullet)) .testIn(bullet, c("diamond", "square", "bubble", "yError", "xError", "round", "triangleLeft", "triangleRight", "triangleUp")) .testNumeric(fillAlphas) .testNumeric(linewidth) .testNumericLength1(num = maxSeries) if (!is.null(ZoomButton)) { .testIn(vect = names(ZoomButton), control = c("Unit", "multiple", "label", "selected")) .testNumeric(num = ZoomButton$multiple) } .testNumericLength1(num = precision) .testCharacterLength1(char = ylab) .testCharacterLength1(char = main) .testLogicalLength1(logi = export) data[,col_date] <- data[,col_date] +
(as.POSIXlt(as.character(data[,col_date]), tz = "UTC") - data[,col_date]) if(nrow(data) >= 5){ difft <- min(c(as.numeric(difftime(data[4,col_date], data[3,col_date], units = "secs")), as.numeric(difftime(data[5,col_date], data[4,col_date], units = "secs")))) } else if(nrow(data) >= 2){ difft <- as.numeric(difftime(data[2,col_date], data[1,col_date], units = "secs")) } else { difft <- 1 } if(checkGroupToPeriods){ if(length(groupToPeriods) == 1){ minPeriod = groupToPeriods groupToPeriods <- list(groupToPeriods) } else { groupToPeriods <- controlgroupToPeriods(groupToPeriods, difft) minPeriod = groupToPeriods[1] if(length(groupToPeriods) == 1){ groupToPeriods <- list(groupToPeriods) } } } if(isTRUE(all.equal('YYYY', groupToPeriods))){ groupToPeriods <- c('DD', 'YYYY') } fieldMapping <- lapply(col_series, function(x) { list(fromField=x, toField=x, title = x) }) graph_maker <- data.frame(column = col_series, stringsAsFactors = F) if (length(color) > 1) { graph_maker$color <- rep(color[1:length(n_col_series)], n_col_series) } else { graph_maker$color <- color } if (length(type) > 1) { graph_maker$type <- rep(type[1:length(n_col_series)], n_col_series) } else { graph_maker$type <- type } if (length(linewidth) > 1) { graph_maker$linewidth <- rep(linewidth[1:length(n_col_series)], n_col_series) } else { graph_maker$linewidth <- linewidth } if(!is.null(bullet)){ if (length(bullet) > 1) { graph_maker$bullet <- rep(bullet[1:length(n_col_series)], n_col_series) } else { graph_maker$bullet <- bullet graph_maker$bulletAlpha <- 1 } } else { graph_maker$bullet <- "round" graph_maker$bulletSize <- 5 graph_maker$bulletAlpha <- 0 } if(!is.null(bullet)){ if (length(bulletSize) > 1) { graph_maker$bulletSize <- rep(bulletSize[1:length(n_col_series)], n_col_series) } else { graph_maker$bulletSize <- bulletSize } } if (length(linetype) > 1) { graph_maker$dashLength <- rep(linetype[1:length(n_col_series)], n_col_series) } else { graph_maker$dashLength <- linetype } if (length(fillAlphas) > 1) { graph_maker$fillAlphas <- rep(fillAlphas[1:length(n_col_series)], n_col_series) } else { graph_maker$fillAlphas <- fillAlphas } graph_maker$aggregation <- aggregation graph_maker$am_type <- do.call("c", lapply(n_col_series, function(x){ if(x == 3){ c("low", "curve-uplow", "up") } else { "curve" } })) event_hide <- "" event_show <- "" if(is.list(init_col_series)){ for(i in 1:length(init_col_series)){ if(length(init_col_series[[i]]) == 3){ event_hide <- paste0(event_hide, 'if(id === "', init_col_series[[i]][2], '"){\n', 'event.chart.hideGraph(event.chart.getGraphById("', init_col_series[[i]][1], '"));\n', 'event.chart.hideGraph(event.chart.getGraphById("', init_col_series[[i]][3], '"));}') event_show <- paste0(event_show, 'if(id === "', init_col_series[[i]][2], '"){\n', 'event.chart.showGraph(event.chart.getGraphById("', init_col_series[[i]][1], '"));\n', 'event.chart.showGraph(event.chart.getGraphById("', init_col_series[[i]][3], '"));}') } } } if(event_hide != ""){ st_legend <- stockLegend(enabled = legend, labelText = "[[title]]", useGraphSettings = TRUE) %>>% addListener("hideItem", paste0('function(event) {console.info(event);var id = event.dataItem.id;\n', event_hide, '}')) %>>% addListener("showItem", paste0('function(event) {var id = event.dataItem.id;\n',event_show, '}')) } else { st_legend <- stockLegend(enabled = legend, labelText = "[[title]]", useGraphSettings = TRUE) } if (length(legendHidden) > 1) { graph_maker$hidden <- rep(legendHidden[1:length(n_col_series)], n_col_series) } else { graph_maker$hidden <- 
legendHidden } stockgraph <- lapply(1:nrow(graph_maker), function(x) { if(graph_maker[x, "am_type"] == "curve"){ stockGraph(title = graph_maker[x, "column"], id = graph_maker[x, "column"] , connect = connect, valueField = graph_maker[x, "column"], comparable = TRUE, periodValue = graph_maker[x, "aggregation"], compareField = graph_maker[x, "column"], balloonText = paste0(graph_maker[x, "column"], ' : <b>[[value]]</b>'), lineColor = graph_maker[x, "color"], fillAlphas = graph_maker[x, "fillAlphas"], bulletSize = graph_maker[x, "bulletSize"], type = graph_maker[x, "type"], minBulletSize = 0, dashLength = graph_maker[x, "dashLength"], useDataSetColors = FALSE, bullet = ifelse(is.null(graph_maker[x, "bullet"]), "none", graph_maker[x, "bullet"]), bulletAlpha = graph_maker[x, "bulletAlpha"], precision = precision, hidden = graph_maker[x, "hidden"], lineThickness = graph_maker[x, "linewidth"] ) } else if(graph_maker[x, "am_type"] == "low"){ stockGraph(title = graph_maker[x, "column"], id = graph_maker[x, "column"] , connect = connect, valueField = graph_maker[x, "column"], comparable = TRUE, periodValue = graph_maker[x, "aggregation"], compareField = graph_maker[x, "column"], showBalloon = FALSE, lineAlpha = 0, lineColor = graph_maker[x, "color"], type = graph_maker[x, "type"], fillAlphas = 0, useDataSetColors = FALSE, visibleInLegend = FALSE, hidden = graph_maker[x, "hidden"], precision = precision ) } else if(graph_maker[x, "am_type"] == "curve-uplow"){ stockGraph(title = graph_maker[x, "column"], id = graph_maker[x, "column"] , connect = connect, valueField = graph_maker[x, "column"], comparable = TRUE, periodValue = graph_maker[x, "aggregation"], compareField = graph_maker[x, "column"], balloonText = paste0(graph_maker[x+1, "column"],' : <b> [[', graph_maker[x+1, "column"], ']] </b><br>', graph_maker[x, "column"], ' : <b> [[value]] </b><br>', graph_maker[x-1, "column"],' : <b> [[', graph_maker[x-1, "column"], ']] </b>'), lineColor = graph_maker[x, "color"], type = graph_maker[x, "type"], fillAlphas = graph_maker[x, "fillAlphas"], bulletSize = graph_maker[x, "bulletSize"], minBulletSize = 0, dashLength = graph_maker[x, "dashLength"], useDataSetColors = FALSE, bullet = ifelse(is.null(graph_maker[x, "bullet"]), "none", graph_maker[x, "bullet"]), bulletAlpha = graph_maker[x, "bulletAlpha"], precision = precision, hidden = graph_maker[x, "hidden"], lineThickness = graph_maker[x, "linewidth"] ) } else if(graph_maker[x, "am_type"] == "up"){ stockGraph(title = graph_maker[x, "column"], id = graph_maker[x, "column"] , connect = connect, valueField = graph_maker[x, "column"], comparable = TRUE, periodValue = graph_maker[x, "aggregation"], compareField = graph_maker[x, "column"], type = graph_maker[x, "type"], showBalloon = FALSE, lineAlpha = 0, lineColor = graph_maker[x, "color"], fillAlphas = 0.2, useDataSetColors = FALSE, fillToGraph = graph_maker[x-2, "column"], visibleInLegend = FALSE, hidden = graph_maker[x, "hidden"], precision = precision ) } }) periodZoom <- periodSelector(position = ZoomButtonPosition, inputFieldsEnabled = periodFieldsSelection) if (!is.null(ZoomButton)) { if(!"selected" %in% colnames(ZoomButton)){ ZoomButton$selected <- FALSE ZoomButton$selected[1] <- TRUE } for (i in 1:nrow(ZoomButton)) { periodZoom <- pipeR::pipeline(periodZoom, addPeriod(period = ZoomButton$Unit[i], selected = ZoomButton$selected[i], count = ZoomButton$multiple[i], label = ZoomButton$label[i]) ) } } dataset_obj <- pipeR::pipeline(dataSet(categoryField = col_date) , setDataProvider(data, keepNA = FALSE), 
setFieldMappings(fieldMapping)) panel_obj <- pipeR::pipeline(panel(title = ylab, stockGraphs = stockgraph), setStockLegend(st_legend), addTitle(text = main)) am_output <- pipeR::pipeline( amStockChart(dataDateFormat = dataDateFormat, useUTC = TRUE, group = group, is_ts_module = is_ts_module, ...), setExport(enabled = export), addDataSet(dataset_obj), addPanel(panel_obj), setChartCursorSettings(enabled = cursor, valueBalloonsEnabled = cursorValueBalloonsEnabled, fullWidth = TRUE, cursorAlpha = 0.1, valueLineBalloonEnabled = TRUE, valueLineEnabled = TRUE, valueLineAlpha = 0.5, categoryBalloonDateFormats = categoryBalloonDateFormats), setPeriodSelector(periodZoom), setCategoryAxesSettings(parseDates = TRUE, minPeriod = minPeriod, groupToPeriods = groupToPeriods, maxSeries = maxSeries, dateFormats = dateFormats), setPanelsSettings(marginTop = 30, creditsPosition = creditsPosition, thousandsSeparator = " "), setLegendSettings(position = legendPosition) ) if(is.null(scrollbarGraph)){ am_output <- setChartScrollbarSettings(am_output, enabled = scrollbar,position = scrollbarPosition, height = scrollbarHeight) } else { am_output <- setChartScrollbarSettings(am_output, enabled = scrollbar, graph = scrollbarGraph, graphType = "line", position = scrollbarPosition, height = scrollbarHeight) } am_output } controlgroupToPeriods <- function(groupToPeriods = c('30ss', 'mm', 'hh', 'DD', 'MM', 'YYYY'), diffTime = 30){ ref_period <- data.frame(periode = c('ss', 'mm', 'hh', 'DD', 'MM', 'YYYY'), seconds = c(1, 60, 3600, 24*3600, 31*24*3600, 365*24*3600)) rownames(ref_period) <- ref_period$periode if(!is.null(groupToPeriods)){ number <- as.numeric(gsub("ss|mm|hh|DD|MM|YYYY", "", groupToPeriods)) number[is.na(number)] <- 1 period <- gsub("^[[:digit:]]*", "", groupToPeriods) period <- ref_period[period, "seconds"] select <- groupToPeriods[period*number >= diffTime] if(length(select) == 0){ select <- groupToPeriods[1] } } else { select <- c(as.character(ref_period$periode)[ref_period$seconds == diffTime]) } if(diffTime > 1){ minperiod <- max(which(ref_period$seconds/diffTime<1)) } else { minperiod <- 1 } if(length(minperiod)>0){ if(ref_period$seconds[minperiod+1] != diffTime){ select <- c(paste0(diffTime/ref_period[minperiod,]$seconds, ref_period[minperiod,]$periode), select) } } gsub("^0", "", unique(select[grepl("^[[:digit:]]*((ss)|(mm)|(hh)|(DD)|(MM)|(YYYY))$", select)])) }
EvolutionStrategy.int <- function(genomeLen, codonMin, codonMax, genomeMin=rep.int(codonMin, genomeLen), genomeMax=rep.int(codonMax, genomeLen), suggestion=NULL, popSize=4, newPerGen = 4, iterations=500, terminationCost=NA, mutationChance=1/(genomeLen+1), monitorFunc=NULL, evalFunc, allowrepeat = TRUE, showSettings=FALSE, verbose=FALSE, plapply = lapply) { is.verbose = verbose verbose = function(...) { if (is.verbose) cat(...)} if (is.null(evalFunc)) { stop("A evaluation function must be provided. See the evalFunc parameter."); } stopifnot(genomeLen > 1) verbose("Testing the sanity of parameters...\n"); if (length(genomeMin) != length(genomeMax)) { stop("The vectors genomeMin and genomeMax must be of equal length."); } if (iterations < 1) { stop("The number of iterations must be at least 1.") } if ((mutationChance < 0) | (mutationChance > 1)) { stop("mutationChance must be between 0 and 1.") } if ((popSize + newPerGen) < 1) { stop("Total new generation (popSize + newPerGen) must be at least 1") } if (showSettings) { verbose("The start conditions:\n"); result = list(genomeMin=genomeMin, genomeMax=genomeMax, suggestions=suggestion, popSize=popSize, iterations=iterations, mutationChance=mutationChance); class(result) = "rbga"; cat(summary(result)); } else { verbose("Not showing GA settings...\n"); } if (!is.null(suggestion)) { verbose("Adding suggestions to first population...\n"); suggestionCount = 1 parent = suggestion } else { verbose("Starting with random values in the given domains...\n"); suggestionCount = 0 parent = ga.new.chromosome(genomeLen, genomeMin, genomeMax, allowrepeat) } parentEval = NA bestEvals = rep(NA, iterations); meanEvals = rep(NA, iterations); totalPopulation = 1 + popSize + newPerGen for (iter in 1:iterations) { verbose(paste("Starting iteration", iter, "\n")); population = matrix(rep(parent, totalPopulation), nrow=totalPopulation, byrow = TRUE) evalVals = rep(NA, totalPopulation); evalVals[1] = parentEval if (mutationChance > 0 & popSize > 0) { verbose(" applying mutations... "); mutationCount = 0; for (object in 2:(1 + popSize)) { dampeningFactor = 1 mutResult <- ga.mutation(population[object,], mutationChance, genomeLen, genomeMin, genomeMax, allowrepeat, dampeningFactor) population[object,] = mutResult$newGenome evalVals[object] = NA; mutationCount = mutationCount + 1; } verbose(paste(mutationCount, "mutations applied\n")); } verbose("Adding New Chromosomes ... "); if (newPerGen > 0) { for (i in (popSize+1+1):totalPopulation) { population[i,] = ga.new.chromosome(genomeLen, genomeMin, genomeMax, allowrepeat) } } verbose("Calucating evaluation values... 
"); to.eval.Ids = which(is.na(evalVals)) evalVals[to.eval.Ids] = unlist(plapply(to.eval.Ids, function(i, population, evalFunc) evalFunc(population[i, ]), population, evalFunc)) if ((!all(is.numeric(evalVals))) | any(is.na(evalVals)) | any(is.nan(evalVals))) { stop("Invalid cost function return value (NA or NaN).") } verbose(" sorting results...\n"); bestInd = which.min(evalVals) parent = population[bestInd,] parentEval = evalVals[bestInd] bestEvals[iter] = min(evalVals); meanEvals[iter] = mean(evalVals); verbose(" done.\n"); collect.results <- function() { settings = list(genomeMin=genomeMin, genomeMax=genomeMax, popSize=popSize, newPerGen = newPerGen, totalPopulation = popSize + newPerGen, iterations=iterations, suggestions=suggestion, mutationChance=mutationChance) pop.info = list(population=population, evaluations=evalVals, best=bestEvals, mean=meanEvals, currentIteration=iter) best = list(genome=population[bestInd,], cost = evalVals[bestInd]); ret = list(settings = settings, population = pop.info, best = best) class(ret) = "EvolutionStrategy.int"; return (ret) } if (!is.null(monitorFunc)) { verbose("Sending current state to the monitor()...\n"); monitorFunc(collect.results()); } if (iter == iterations) { verbose("End of generations iteration reached.\n"); break } if (!is.na(terminationCost)) { if (parentEval <= terminationCost) { verbose("Cost better than termination cost reached.\n"); break } } } return(collect.results()); }
setMethodS3("fitLoessKD", "matrix", function(X, Y, ...) { X <- as.matrix(X); Y <- as.matrix(Y); if (!all(dim(Y) == dim(X))) { dimXStr <- paste(dim(X), collapse="x"); dimYStr <- paste(dim(Y), collapse="x"); throw("The dimensions of argument 'Y' and 'X' do not match: ", dimYStr, " != ", dimXStr); } data <- list(X=X, Y=Y); ok <- rowAlls(is.finite(X) & is.finite(Y)); X <- X[ok,,drop=FALSE]; Y <- Y[ok,,drop=FALSE]; fitList <- list(); for (cc in seq_len(ncol(X))) { fitList[[cc]] <- loess(Y[,cc] ~ X); } predictY <- function(X, ...) { if (ncol(X) != length(fitList)) { throw("The number of columns in argument 'X' does not match the number of fitted dimensions: ", ncol(X), " != ", length(fitList)); } X <- as.matrix(X); naValue <- as.double(NA); Y <- array(naValue, dim=dim(X)); ok <- rowAlls(is.finite(X)); ok <- which(ok); X <- X[ok,,drop=FALSE]; for (cc in seq_len(ncol(X))) { fit <- fitList[[cc]]; yPred <- predict(fit, newdata=X); Y[ok,cc] <- yPred; } Y; } fit <- list(data=data, fitList=fitList, predictY=predictY); class(fit) <- c("LoessKDFit", class(fit)); fit; }); setMethodS3("normalizeLoessKD", "matrix", function(X, fit, ...) { fit <- Arguments$getInstanceOf(fit, "LoessKDFit"); fit$predictY(X, ...); }) setMethodS3("fitLoessKD", "data.frame", function(X, ...) { X <- as.matrix(X); fitLoessKD(X, ...); }) setMethodS3("normalizeLoessKD", "data.frame", function(X, ...) { XN <- X; X <- as.matrix(X); XN <- normalizeLoessKD(X, ...); XN <- as.data.frame(XN); XN; })
underlined = function(x, y, label, col){ text(x, y, label, col = col, font = 2) sw = strwidth(label) sh = strheight(label) lines(x + c(-sw/2, sw/2), rep(y - 1.5*sh/2, 2), col = col) } largest.cex = function(node, height, width, hfrac = 0.7, wfrac = 0.9) { guess = hfrac * height / strheight(node, cex = 1) best = optimize(f = function(x) abs(strheight(node, cex = x) - hfrac * height), interval = guess * c(0.5, 2), tol = 0.025)$minimum best = best * min(wfrac * width / strwidth(node, cex = best), 1) return(best) } lighter.colour = function(col, offset = 0.25) { rgb = col2rgb(col)[, 1] do.call("rgb", as.list((rgb + (255 - rgb) * offset) / 255)) }
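## Illustrative sketch, not part of the original source: lighter.colour() blends a
## colour towards white by the given offset and returns a hex string.
lighter.colour("red", offset = 0.25)  # "#FF4040"
lighter.colour("red", offset = 0.5)   # "#FF8080"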
histCount <- function(C1, C2 = NULL, x0 = FALSE, nbmax = 30, color = c('skyblue', adjustcolor('red', 0.5)), border='white', bty='n', ylab="Frequency", xlab="Count", main="", ...) { rng <- range(C1, C2) if(x0) rng[1] <- 0 range <- diff(rng) + 1 if(range <= nbmax) { bwidth=1 } else { bwidth <- range %/% nbmax + 1 } nb <- ceiling(range/bwidth) br <- seq(min(C1,C2), max(C1,C2)+bwidth, by=bwidth) - 0.5 H1 <- hist(C1, breaks = br, plot=FALSE) ymax <- max(H1$counts) if(!is.null(C2)) { H2 <- hist(C2, breaks = br, plot=FALSE) ymax <- max(ymax, H2$counts) } plot(H1, col=color[1], border=border,yaxs='i', ylim=c(0, ymax), xlim=rng, bty=bty, ylab=ylab, xlab=xlab, main=main) if(!is.null(C2)) { plot(H2, col=color[2], border=border,add=TRUE) plot(H1, breaks = br, col=NA, border=border,add=TRUE) } axis(2) segments(rng[1], 0, rng[2], 0) }
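## Illustrative usage sketch, not part of the original source: overlay a second set
## of counts (C2, semi-transparent red) on a first set (C1, sky blue).
set.seed(1)
histCount(rpois(200, 3), rpois(200, 5), xlab = "Count", main = "Two sets of counts")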
pad_sequences <- function(sequences, maxlen = NULL, dtype = "int32", padding = "pre", truncating = "pre", value = 0.0) { if (is.list(sequences)) { sequences <- lapply(sequences, function(seq) { if (length(seq) == 1) as.list(seq) else seq }) } keras$preprocessing$sequence$pad_sequences( sequences = sequences, maxlen = as_nullable_integer(maxlen), dtype = dtype, padding = padding, truncating = truncating, value = value ) } skipgrams <- function(sequence, vocabulary_size, window_size = 4, negative_samples = 1.0, shuffle = TRUE, categorical = FALSE, sampling_table = NULL, seed = NULL) { args <- list( sequence = as.integer(sequence), vocabulary_size = as.integer(vocabulary_size), window_size = as.integer(window_size), negative_samples = negative_samples, shuffle = shuffle, categorical = categorical, sampling_table = sampling_table ) if (keras_version() >= "2.0.7") args$seed <- as_nullable_integer(seed) sg <- do.call(keras$preprocessing$sequence$skipgrams, args) sg <- list( couples = sg[[1]], labels = sg[[2]] ) } make_sampling_table <- function(size, sampling_factor = 1e-05) { keras$preprocessing$sequence$make_sampling_table( size = as.integer(size), sampling_factor = sampling_factor ) } text_to_word_sequence <- function(text, filters = '!" lower = TRUE, split=' ') { keras$preprocessing$text$text_to_word_sequence( text = text, filters = filters, lower = lower, split = split ) } text_one_hot <- function(input_text, n, filters = '!" lower = TRUE, split = ' ', text = NULL) { if (tensorflow::tf_version() >= "2.3" && !is.null(text)) { warning("text is deprecated as of TF 2.3. use input_text instead") if (!missing(input_text)) stop("input_text and text must not be bopth specified") input_text <- text } keras$preprocessing$text$one_hot( input_text, n = as.integer(n), filters = filters, lower = lower, split = split ) } text_hashing_trick <- function(text, n, hash_function = NULL, filters = '!" lower = TRUE, split = ' ') { if (length(text) != 1) { stop("`text` should be length 1.") } if (is.na(text)) { return(NA_integer_) } keras$preprocessing$text$hashing_trick( text = text, n = as.integer(n), hash_function = hash_function, filters = filters, lower = lower, split = split ) } text_tokenizer <- function(num_words = NULL, filters = '!" 
                           lower = TRUE, split = ' ', char_level = FALSE, oov_token = NULL) {
  args <- list(
    num_words = as_nullable_integer(num_words),
    filters = filters,
    lower = lower,
    split = split,
    char_level = char_level
  )
  if (keras_version() >= "2.1.3")
    args$oov_token <- oov_token
  do.call(keras$preprocessing$text$Tokenizer, args)
}

fit_text_tokenizer <- function(object, x) {
  tokenizer <- object
  if (is.list(x))
    tokenizer$fit_on_sequences(x)
  else {
    tokenizer$fit_on_texts(if (is.function(x)) reticulate::py_iterator(x) else as_texts(x))
  }
  invisible(tokenizer)
}

save_text_tokenizer <- function(object, filename) {
  py_save_object(object, filename)
  invisible(object)
}

load_text_tokenizer <- function(filename) {
  py_load_object(filename)
}

texts_to_sequences <- function(tokenizer, texts) {
  tokenizer$texts_to_sequences(as_texts(texts))
}

texts_to_sequences_generator <- function(tokenizer, texts) {
  tokenizer$texts_to_sequences_generator(as_texts(texts))
}

texts_to_matrix <- function(tokenizer, texts, mode = c("binary", "count", "tfidf", "freq")) {
  tokenizer$texts_to_matrix(
    texts = as_texts(texts),
    mode = mode
  )
}

as_texts <- function(texts) {
  if (is.character(texts) && length(texts) == 1) as.array(texts) else texts
}

sequences_to_matrix <- function(tokenizer, sequences, mode = c("binary", "count", "tfidf", "freq")) {
  if (is.list(sequences)) {
    sequences <- lapply(sequences, function(seq) {
      if (length(seq) == 1) as.list(seq) else seq
    })
  }
  tokenizer$sequences_to_matrix(
    sequences = sequences,
    mode = mode
  )
}

image_load <- function(path, grayscale = FALSE, color_mode = 'rgb', target_size = NULL,
                       interpolation = "nearest") {
  if (!have_pillow())
    stop("The Pillow Python package is required to load images")
  if (!is.null(target_size)) {
    if (length(target_size) != 2)
      stop("target_size must be 2 element integer vector")
    target_size <- as.integer(target_size)
    target_size <- tuple(target_size[[1]], target_size[[2]])
  }
  args <- list(
    path = normalize_path(path),
    color_mode = color_mode,
    grayscale = grayscale,
    target_size = target_size
  )
  if (keras_version() >= "2.0.9")
    args$interpolation <- interpolation
  do.call(keras$preprocessing$image$load_img, args)
}

image_to_array <- function(img, data_format = c("channels_last", "channels_first")) {
  keras$preprocessing$image$img_to_array(
    img = img,
    data_format = match.arg(data_format)
  )
}

image_array_resize <- function(img, height, width,
                               data_format = c("channels_last", "channels_first")) {
  np <- import("numpy")
  scipy <- import("scipy")
  img <- np$copy(img)
  dims <- dim(img)
  is_4d_array <- FALSE
  if (length(dims) == 4 && dims[[1]] == 1) {
    is_4d_array <- TRUE
    img <- array_reshape(img, dims[-1])
  }
  data_format <- match.arg(data_format)
  if (data_format == "channels_last") {
    factors <- tuple(
      height / dim(img)[[1]],
      width / dim(img)[[2]],
      1
    )
  } else {
    factors <- tuple(
      1,
      height / dim(img)[[1]],
      width / dim(img)[[2]]
    )
  }
  img <- scipy$ndimage$zoom(img, factors, order = 1L)
  if (is_4d_array)
    img <- array_reshape(img, dim = c(1, dim(img)))
  img
}

image_array_save <- function(img, path, data_format = NULL, file_format = NULL, scale = TRUE) {
  if (keras_version() >= "2.2.0") {
    keras$preprocessing$image$save_img(
      path, img,
      data_format = data_format,
      file_format = file_format,
      scale = scale
    )
  } else {
    pil <- import("PIL")
    pil$Image$fromarray(reticulate::r_to_py(img)$astype("uint8"))$save(path)
  }
}

image_data_generator <- function(featurewise_center = FALSE, samplewise_center = FALSE,
                                 featurewise_std_normalization = FALSE,
                                 samplewise_std_normalization = FALSE,
                                 zca_whitening = FALSE, zca_epsilon = 1e-6,
rotation_range = 0.0, width_shift_range = 0.0, height_shift_range = 0.0, brightness_range = NULL, shear_range = 0.0, zoom_range = 0.0, channel_shift_range = 0.0, fill_mode = "nearest", cval = 0.0, horizontal_flip = FALSE, vertical_flip = FALSE, rescale = NULL, preprocessing_function = NULL, data_format = NULL, validation_split=0.0) { args <- list( featurewise_center = featurewise_center, samplewise_center = samplewise_center, featurewise_std_normalization = featurewise_std_normalization, samplewise_std_normalization = samplewise_std_normalization, zca_whitening = zca_whitening, rotation_range = rotation_range, width_shift_range = width_shift_range, height_shift_range = height_shift_range, shear_range = shear_range, zoom_range = zoom_range, channel_shift_range = channel_shift_range, fill_mode = fill_mode, cval = cval, horizontal_flip = horizontal_flip, vertical_flip = vertical_flip, rescale = rescale, preprocessing_function = preprocessing_function, data_format = data_format ) if (keras_version() >= "2.0.4") args$zca_epsilon <- zca_epsilon if (keras_version() >= "2.1.5") { args$brightness_range <- brightness_range args$validation_split <- validation_split } do.call(keras$preprocessing$image$ImageDataGenerator, args) } generator_next <- function(generator, completed = NULL) { reticulate::iter_next(generator, completed = completed) } fit_image_data_generator <- function(object, x, augment = FALSE, rounds = 1, seed = NULL) { generator <- object history <- generator$fit( x = keras_array(x), augment = augment, rounds = as.integer(rounds), seed = seed ) invisible(history) } flow_images_from_data <- function( x, y = NULL, generator = image_data_generator(), batch_size = 32, shuffle = TRUE, sample_weight = NULL, seed = NULL, save_to_dir = NULL, save_prefix = "", save_format = 'png', subset = NULL) { args <- list( x = keras_array(x), y = keras_array(y), batch_size = as.integer(batch_size), shuffle = shuffle, seed = as_nullable_integer(seed), save_to_dir = normalize_path(save_to_dir), save_prefix = save_prefix, save_format = save_format ) stopifnot(args$batch_size > 0) if (keras_version() >= "2.1.5") args$subset <- subset if (keras_version() >= "2.2.0") args$sample_weight <- sample_weight do.call(generator$flow, args) } flow_images_from_directory <- function( directory, generator = image_data_generator(), target_size = c(256, 256), color_mode = "rgb", classes = NULL, class_mode = "categorical", batch_size = 32, shuffle = TRUE, seed = NULL, save_to_dir = NULL, save_prefix = "", save_format = "png", follow_links = FALSE, subset = NULL, interpolation = "nearest") { args <- list( directory = normalize_path(directory), target_size = as.integer(target_size), color_mode = color_mode, classes = classes, class_mode = class_mode, batch_size = as.integer(batch_size), shuffle = shuffle, seed = as_nullable_integer(seed), save_to_dir = normalize_path(save_to_dir), save_prefix = save_prefix, save_format = save_format, follow_links = follow_links ) stopifnot(args$batch_size > 0) if (keras_version() >= "2.1.2") args$interpolation <- interpolation if (keras_version() >= "2.1.5") args$subset <- subset do.call(generator$flow_from_directory, args) } flow_images_from_dataframe <- function( dataframe, directory = NULL, x_col = "filename", y_col = "class", generator = image_data_generator(), target_size = c(256,256), color_mode = "rgb", classes = NULL, class_mode = "categorical", batch_size = 32, shuffle = TRUE, seed = NULL, save_to_dir = NULL, save_prefix = "", save_format = "png", subset = NULL, interpolation = "nearest", 
drop_duplicates = NULL) { if (!reticulate::py_module_available("pandas")) stop("Pandas (Python module) must be installed in the same environment as Keras.", 'Install it using reticulate::virtualenv_install("pandas", envname = "r-tensorflow") ', 'or reticulate::conda_install("pandas", envname = "r-tensorflow") depending on ', 'the kind of environment you are using.') args <- list( dataframe = as.data.frame(dataframe), directory = normalize_path(directory), x_col = x_col, y_col = y_col, target_size = as.integer(target_size), color_mode = color_mode, classes = classes, class_mode = class_mode, batch_size = as.integer(batch_size), shuffle = shuffle, seed = as_nullable_integer(seed), save_to_dir = normalize_path(save_to_dir), save_prefix = save_prefix, save_format = save_format, drop_duplicates = drop_duplicates ) stopifnot(args$batch_size > 0) if (keras_version() >= "2.1.2") args$interpolation <- interpolation if (keras_version() >= "2.1.5") args$subset <- subset if(!is.null(drop_duplicates) && tensorflow::tf_version() >= "2.3") { warning("\'drop_duplicates\' is deprecated as of tensorflow 2.3 and will be ignored. Make sure the supplied dataframe does not contain duplicates.") args$drop_duplicates <- NULL } if (is.null(drop_duplicates) && tensorflow::tf_version() < "2.3") args$drop_duplicates <- TRUE do.call(generator$flow_from_dataframe, args) } image_dataset_from_directory <- function( directory, labels="inferred", label_mode="int", class_names=NULL, color_mode="rgb", batch_size=32, image_size=c(256, 256), shuffle=TRUE, seed=NULL, validation_split=NULL, subset=NULL, interpolation="bilinear", follow_links=FALSE ) { if (!is.character(labels)) labels <- as.integer(labels) args <- list( directory=normalizePath(directory, mustWork = FALSE), labels=labels, label_mode=label_mode, class_names=class_names, color_mode=color_mode, batch_size=as.integer(batch_size), image_size=as_integer_tuple(image_size), shuffle=shuffle, seed=as_nullable_integer(seed), validation_split=validation_split, subset=subset, interpolation=interpolation, follow_links=follow_links ) out <- do.call(keras$preprocessing$image_dataset_from_directory, args) class(out) <- c("tf_dataset", class(out)) out } text_dataset_from_directory <- function(directory, labels = "inferred", label_mode = "int", class_names = NULL, batch_size = 32L, max_length = NULL, shuffle = TRUE, seed = NULL, validation_split = NULL, subset = NULL, follow_links = FALSE, ... ) { args <- capture_args(match.call(), list(batch_size = as.integer, max_length = as_nullable_integer, seed = as_nullable_integer)) do.call(keras$preprocessing$text_dataset_from_directory, args) } timeseries_dataset_from_array <- function(data, targets, sequence_length, sequence_stride = 1L, sampling_rate = 1L, batch_size = 128L, shuffle = FALSE, ..., seed = NULL, start_index = NULL, end_index = NULL) { require_tf_version("2.6", "timeseries_dataset_from_array") args <- capture_args(match.call(), list( sequence_length = as.integer, sequence_stride = as.integer, sampling_rate = as.integer, batch_size = as.integer, seed = as_nullable_integer, start_index = as_nullable_integer, end_index = as_nullable_integer )) do.call(keras$preprocessing$timeseries_dataset_from_array, args) }
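# Minimal text-preprocessing sketch tying the helpers above together; assumes
# the keras R package is attached with a working TensorFlow backend.
library(keras)
texts <- c("the cat sat on the mat", "the dog ate my homework")
tok <- text_tokenizer(num_words = 100)
fit_text_tokenizer(tok, texts)                       # tokenizer is updated in place
seqs <- texts_to_sequences(tok, texts)               # list of integer sequences
x <- pad_sequences(seqs, maxlen = 8, padding = "post")
dim(x)                                               # 2 x 8 integer matrix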
objah <- ooplah$new() test_that("object_class", { expect_equal(object_class(objah), "ooplah") expect_equal(object_class(objah, 1), "OoplahParent") expect_equal(object_class(objah, 2), "R6") }) test_that("get_object_class", { expect_equal(get_object_class(objah), ooplah) expect_equal(get_object_class(objah, 1)$classname, OoplahParent$classname) }) test_that("object_classes", { expect_equal(object_classes(objah, objah), rep("ooplah", 2)) expect_equal(object_classes(objects = list(objah, objah)), rep("ooplah", 2)) })
library(ggplot2) library(scales) this_base <- "fig04-05_state-areas-top-axis-labeled-with-original-scale" my_data <- data.frame( state_name = state.name, state_area = state.area, state_area_scaled = state.area / 1000) p <- ggplot(my_data, aes(x = state_area_scaled, y = reorder(state_name, state_area_scaled))) + geom_point() + scale_x_continuous(trans = log2_trans(), breaks = trans_breaks("log2", function(x) 2^x)) + labs(x = "Area (thousand square miles)", y = NULL) + ggtitle("Fig. 4.5 State Areas: BOTTOM Axis Labeled \nwith Original Scale") + theme_bw() + theme(panel.grid.major.x = element_blank(), panel.grid.major.y = element_line(colour = "grey70"), plot.title = element_text(face = "bold", vjust = 1.5), axis.ticks.y = element_blank(), axis.text.y = element_text(size = rel(0.9))) p ggsave(paste0(this_base, ".png"), p, width = 6, height = 6)
source("ESEUR_config.r") library("lubridate") library("plyr") plot_layout(6, 4, max_height=30) par(mar=MAR_default-c(0.8, 1.7, 0.7, 0.7)) scm=read.csv(paste0(ESEUR_dir, "time-series/smr1615/scmlog.csv.xz"), as.is=TRUE, quote="\'") scm$rev=NULL scm$message=NULL scm$date=as.Date(scm$date, format="%Y-%m-%d") start_date=as.Date("1991-01-01", format="%Y-%m-%d") end_date=as.Date("2012-01-01", format="%Y-%m-%d") cfl=read.csv(paste0(ESEUR_dir, "time-series/smr1615/commits_files_lines.csv.xz"), as.is=TRUE, quote="\'") cfl$date=scm$date[cfl$commit] cfl=subset(cfl, (date >= start_date) & (date <= end_date)) cfl$year=year(cfl$date) plot(table(trunc(as.numeric(3+cfl$date) %% 7)), col=point_col, cex=1.3, cex.lab=1.4, xlab="Total", ylab="") d_ply(cfl, .(year), function(df) plot(table(trunc(as.numeric(3+df$date) %% 7)), col=point_col, cex=1.3, cex.lab=1.4, xlab=df$year[1], ylab=""))
test_that("initialize parameters are checked", { s1 <- MarkovState$new("s1") s2 <- MarkovState$new("s2") expect_silent(Transition$new(s1,s2)) expect_error(Transition$new(s1,s2,label=42), class="non-string_label") n1 <- Node$new() expect_error(Transition$new(n1,s2), class="invalid_source") expect_error(Transition$new(s1,n1), class="invalid_target") expect_silent(Transition$new(s1,s2)) expect_error(Transition$new(s1,s2,cost="200"), class="invalid_cost") expect_silent(Transition$new(s1,s2,label="")) expect_silent(Transition$new(s1,s2,label="mychance")) MT <- Transition$new(s1,s2,cost=20) expect_equal(MT$cost(),20) }) test_that("costs can be modified", { s1 <- MarkovState$new("s1") s2 <- MarkovState$new("s2") t <- Transition$new(s1, s2) expect_equal(t$cost(), 0) t <- Transition$new(s1, s2, cost=42) expect_equal(t$cost(), 42) t$set_cost(1000) expect_equal(t$cost(), 1000) c <- ConstModVar$new("c1", "GBP", 42) t <- Transition$new(s1, s2, cost=c) expect_equal(t$cost(), 42) t <- Transition$new(s1, s2) t$set_cost(c) expect_equal(t$cost(), 42) }) test_that("ModVars are identified and their values are returned", { s1 <- MarkovState$new("s1") s2 <- MarkovState$new("s2") fortytwo <- ConstModVar$new("fortytwo", "GBP", 42) e <- Transition$new(s1,s2,cost=fortytwo,label="label") expect_equal(e$cost(), 42) mv <- e$modvars() expect_equal(length(mv),1) discount <- ConstModVar$new("discount", "rate", 0.1) dcost <- ExprModVar$new( "true cost", "GPB", rlang::quo(fortytwo*(1-discount)) ) e <- Transition$new(s1,s2,cost=dcost,label="label") expect_equal(e$cost(), 42*0.9) mv <- e$modvars() expect_equal(length(mv),3) })
removeAlgorithm = function(reg, id, force = FALSE) {
  checkExperimentRegistry(reg, strict = TRUE, writeable = TRUE)
  syncRegistry(reg)
  assertString(id)
  if (id %nin% dbGetAllAlgorithmIds(reg))
    stop("Algorithm not present in registry: ", id)
  info("Removing Experiments from database")
  ids = dbFindExperiments(reg, algo.pattern = id, like = FALSE)
  removeExperiments(reg, ids = ids, force = force)
  info("Removing Algorithm from database")
  dbRemoveAlgorithm(reg, id)
  fn = getAlgorithmFilePath(reg$file.dir, id)
  info("Deleting algorithm file: %s", fn)
  ok = file.remove(fn)
  if (!ok)
    warningf("Could not remove algorithm file: %s", fn)
  invisible(NULL)
}
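# Usage sketch, assuming a BatchExperiments-style registry; the registry,
# problem and algorithm ids below are placeholders for illustration only.
reg <- makeExperimentRegistry(id = "demo", file.dir = tempfile())
addProblem(reg, id = "p1", static = iris)
addAlgorithm(reg, id = "rf", fun = function(static, ...) nrow(static))
removeAlgorithm(reg, id = "rf", force = TRUE)   # drops its experiments and file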
transform_prediction <- function(pred, loss_fun_type){ link_type = "" if(loss_fun_type %in% c("mse")){ link_type = "identity" }else if(loss_fun_type %in% c("logloss")){ link_type = "logit" }else if(loss_fun_type %in% c("poisson", "gamma::log", "negbinom")){ link_type = "log" }else if(loss_fun_type %in% c("gamma::neginv")){ link_type = "neginv" }else{ warning(paste0("No link-function match for loss: ", loss_fun_type, " Using identity")) link_type = "identity" } if(link_type == "identity"){ res <- pred }else if(link_type == "logit"){ res <- 1/(1+exp(-pred)) }else if(link_type == "log"){ res <- exp(pred) }else if(link_type == "neginv"){ res <- -1/pred } return(res) } loss_to_xgbloss <- function(loss_function){ if(loss_function == "mse"){ xgbloss = "reg:squarederror" }else if(loss_function == "logloss"){ xgbloss = "binary:logistic" }else if(loss_function == "gamma::log"){ xgbloss = "reg:gamma" }else if(loss_function == "poisson"){ xgbloss = "count:poisson" } return(xgbloss) } loss_to_lgbloss <- function(loss_function){ if(loss_function == "mse"){ lgbloss = "regression" }else if(loss_function == "logloss"){ lgbloss = "binary" }else if(loss_function == "gamma::log"){ lgbloss = "gamma" }else if(loss_function == "poisson"){ lgbloss = "poisson" } return(lgbloss) } gbt.model.complexity <- function(model){ loss_function = model$get_loss_function() initial_raw_prediction = model$initialPred res <- list( "loss_function" = loss_function, "nrounds" = model$get_num_trees(), "learning_rate" = model$get_learning_rate(), "initial_raw_prediction" = initial_raw_prediction, "initial_prediction" = transform_prediction(initial_raw_prediction, loss_function), "max_depth" = max(model$get_tree_depths()), "min_loss_reductions" = model$get_max_node_optimism(), "sum_hessian_weights" = model$get_min_hessian_weights(), "number_of_leaves" = max(model$get_num_leaves()), "l1_regularization" = 0.0, "l2_regularization" = 0.0, "row_subsampling" = 1.0, "column_subsampling" = 1.0 ) return(res) } gbt.complexity <- function(model, type){ message("Experimental feature") message("Best results are obtained using agtboost algorithm global-subset") model_complexity <- gbt.model.complexity(model) parameters = with(model_complexity, if(type=="xgboost"){ list( "base_score" = initial_prediction, "nrounds" = nrounds, "learning_rate" = learning_rate, "max_depth" = max_depth, "gamma" = min_loss_reductions, "min_child_weight" = sum_hessian_weights, "max_leaves" = number_of_leaves, "grow_policy" = "lossguide", "objective" = loss_to_xgbloss(loss_function), "alpha" = 0.0, "lambda" = 0.0, "subsample" = 1.0, "colsample_bytree" = 1.0 ) }else if(type=="lightgbm"){ list( "init_score" = initial_prediction, "nrounds" = nrounds, "learning_rate" = learning_rate, "max_depth" = max_depth, "min_gain_to_split" = min_loss_reductions, "min_sum_hessian_in_leaf" = sum_hessian_weights, "num_leaves" = number_of_leaves, "objective" = loss_to_lgbloss(loss_function), "lambda_l1" = 0.0, "lambda_l2" = 0.0, "bagging_fraction" = 1.0, "feature_fraction" = 1.0 ) } ) return(parameters) }
autointensity <- function(times, window=NULL, maxlag=max(times), ylab="Conditional probability", xlab="Lag", main="Autointensity function", xlim=c(0,max(times)), ylim=c(0,if(plotse)max(se1)else max(z$density)), lty=1, plot=TRUE, plotse=TRUE, add=FALSE, ...){ if(any(times<=0))stop("Times must be strictly positive") n <- length(times) total <- sum(times) if(is.null(window))window <- total/n/2 tmp <- cumsum(times) times <- tmp[2:n]-tmp[1] for(i in 3:n)times <- c(times,tmp[i:n]-tmp[i-1]) times <- times[times<maxlag] z <- hist(times,plot=FALSE,breaks=seq(0,max(times+window),by=window)) z$density <- z$counts*total/(total-z$mids)/window/n se1 <- ifelse(z$density>0,(sqrt(z$density)+2/sqrt(window*n))^2,0) se2 <- sqrt(z$density)-2/sqrt(window*n) se2 <- ifelse(z$density>0,ifelse(se2>0,se2,0)^2,0) if(plot){ if(add)plot(z$mids,z$density,type="l",lty=lty) else plot(z$mids,z$density,type="l",main=main,ylab=ylab,xlab=xlab, xlim=xlim,ylim=ylim,lty=lty,...) lines(xlim,rep(n/total,2)) if(plotse){ lines(z$mids,se1,lty=3) lines(z$mids,se2,lty=3)}} invisible(list(mids=z$mids,density=z$density,se=rbind(se1,se2)))}
library(lmForc) vector_lag <- function(vector, n) { vector <- c(rep(NA, n), vector[1:(length(vector) - n)]) return(vector) } date <- as.Date(c("2010-03-31", "2010-06-30", "2010-09-30", "2010-12-31", "2011-03-31", "2011-06-30", "2011-09-30", "2011-12-31", "2012-03-31", "2012-06-30", "2012-09-30", "2012-12-31")) y <- c(1.09, 1.71, 1.09, 2.46, 1.78, 1.35, 2.89, 2.11, 2.97, 0.99, 1.31, 2.33) x1 <- c(4.22, 3.86, 4.27, 5.60, 5.11, 4.31, 4.92, 5.80, 6.30, 4.17, 4.18, 5.89) x2 <- c(10.03, 10.49, 10.85, 10.47, 9.09, 10.91, 8.68, 9.91, 7.87, 6.63, 6.67, 7.77) data <- data.frame(date, y, x1, x2) realized_vec = data$y h_ahead = 4L ar_lags = 3L estimation_end = 4L time_vec = 1:length(realized_vec) estimation_window = NULL output_start <- which(time_vec == estimation_end) output_end <- length(realized_vec) - h_ahead output_length <- length(output_start:output_end) full_train_data <- realized_vec[1:8] full_train_data_lag1 <- vector_lag(full_train_data, n = 1) full_train_data_lag2 <- vector_lag(full_train_data, n = 2) full_train_data_lag3 <- vector_lag(full_train_data, n = 3) train_data <- full_train_data train_data_lag1 <- full_train_data_lag1 train_data_lag2 <- full_train_data_lag2 train_data_lag3 <- full_train_data_lag3 coefs <- lm(train_data ~ train_data_lag1 + train_data_lag2 + train_data_lag3)$coefficients forc_1ahead <- coefs[[1]] + coefs[[2]] * realized_vec[[8]] + coefs[[3]] * realized_vec[[7]] + coefs[[4]] * realized_vec[[6]] forc_2ahead <- coefs[[1]] + coefs[[2]] * forc_1ahead + coefs[[3]] * realized_vec[[8]] + coefs[[4]] * realized_vec[[7]] forc_3ahead <- coefs[[1]] + coefs[[2]] * forc_2ahead + coefs[[3]] * forc_1ahead + coefs[[4]] * realized_vec[[8]] forc_4ahead <- coefs[[1]] + coefs[[2]] * forc_3ahead + coefs[[3]] * forc_2ahead + coefs[[4]] * forc_1ahead pos5_forc <- forc_4ahead full_train_data <- realized_vec full_train_data_lag1 <- vector_lag(full_train_data, n = 1) full_train_data_lag2 <- vector_lag(full_train_data, n = 2) train_data <- full_train_data[5:9] train_data_lag1 <- full_train_data_lag1[5:9] train_data_lag2 <- full_train_data_lag2[5:9] coefs <- lm(train_data ~ train_data_lag1 + train_data_lag2)$coefficients pos4_forc2 <- coefs[[1]] + coefs[[2]] * realized_vec[[9]] + coefs[[3]] * realized_vec[[8]] forc <- autoreg_forc( realized_vec = data$y, h_ahead = 4L, ar_lags = 3L, estimation_end = 4L, time_vec = NULL, estimation_window = NULL ) forc2 <- autoreg_forc( realized_vec = data$y, h_ahead = 1L, ar_lags = 2L, estimation_end = as.Date("2011-06-30"), time_vec = data$date, estimation_window = 4L ) test_that("Origin and future output are of the correct class.", { expect_equal(class(time_vec), class(origin(forc))) expect_equal(class(time_vec), class(future(forc))) }) test_that("Output values are correct.", { expect_equal(origin(forc), time_vec[output_start:output_end]) expect_equal(future(forc), time_vec[(nrow(data) - output_length + 1):nrow(data)]) expect_equal(realized(forc), data$y[(nrow(data) - output_length + 1):nrow(data)]) expect_equal(forc(forc)[5], pos5_forc) expect_equal(forc(forc2)[4], pos4_forc2) }) test_that("Output Forecast is the correct length.", { expect_equal(output_length, length(output_start:output_end)) })
update.compareGroups<- function (object, formula., ..., evaluate = TRUE) { if(!inherits(object, "compareGroups")) stop("argument 'object' must be of class 'compareGroups'") if(inherits(object, "compareGroups.subset")) stop("Update process might not work properly (different variables selected) since 'obj' has been subset previously") if(inherits(object, "rbind.compareGroups")) stop("Update process does not work for rbind.compareGroups objects") call <- attr(object,"call")$call if (is.null(call)) stop("need an object with call component") extras <- match.call(expand.dots = FALSE)$... if (!missing(formula.)){ if (inherits(formula., "formula")){ if (inherits(eval(call$formula), "formula")){ call$formula <- update.formula2(call$formula, formula.) }else { call$data <- call$formula call$formula <- formula. } } else { call$formula <- formula. } } if (length(extras)) { existing <- !is.na(match(names(extras), names(call))) for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if (any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } } if (evaluate) eval(call, parent.frame()) else call }