paleopop_simulator <- function(inputs) {
if (is.null(inputs$time_steps) || is.null(inputs$populations) || is.null(inputs$initial_abundance) ||
is.null(inputs$transition_rate) || is.null(inputs$carrying_capacity)) {
incomplete_inputs <- if (is.null(inputs$time_steps)) "time_steps"
incomplete_inputs <- c(incomplete_inputs, if (is.null(inputs$populations)) "populations")
incomplete_inputs <- c(incomplete_inputs, if (is.null(inputs$initial_abundance)) "initial_abundance")
incomplete_inputs <- c(incomplete_inputs, if (is.null(inputs$transition_rate)) "transition_rate")
incomplete_inputs <- c(incomplete_inputs, if (is.null(inputs$carrying_capacity)) "carrying_capacity")
stop(paste("Minimal inputs required to run simulation should include:",
paste(incomplete_inputs, collapse = ", ")), call. = FALSE)
}
time_steps <- inputs$time_steps
years_per_step <- ifelse(is.null(inputs$years_per_step), 1, inputs$years_per_step)
random_seed <- inputs$random_seed
if (!is.null(random_seed)) {
set.seed(random_seed)
}
transition_rate <- inputs$transition_rate
standard_deviation <- ifelse(is.null(inputs$standard_deviation), 0, inputs$standard_deviation)
environmental_stochasticity <- (standard_deviation*transition_rate > 0)
populations <- inputs$populations
population_abundances <- array(inputs$initial_abundance, c(1, populations))
density_dependence <- inputs$density_dependence
if (is.null(density_dependence)) {
density_dependence <- "none"
}
growth_rate_max <- ifelse(is.null(inputs$growth_rate_max), log(transition_rate), inputs$growth_rate_max)
abundance_threshold <- inputs$abundance_threshold
occupancy_threshold <- inputs$occupancy_threshold
dispersal_target_k <- inputs$dispersal_target_k
carrying_capacities <- matrix(inputs$carrying_capacity, nrow = populations)
if (any(is.na(carrying_capacities))) {
carrying_capacities[which(is.na(carrying_capacities))] <- 0
}
dispersal_present <- (!is.null(inputs$dispersal_data) && nrow(inputs$dispersal_data[[1]]))
if (dispersal_present) {
dispersal_data <- inputs$dispersal_data[[1]]
dispersal_compact_rows <- max(dispersal_data[, c("emigrant_row", "immigrant_row")])
dispersal_zero_array <- array(0, c(dispersal_compact_rows, populations))
dispersal_compact_matrix <- dispersal_zero_array
dispersal_compact_matrix[as.matrix(dispersal_data[, c("emigrant_row", "source_pop")])] <- dispersal_data$dispersal_rate
dispersals_change_over_time <- (length(inputs$dispersal_data) > 1)
if (dispersals_change_over_time) {
dispersal_data_changes <- inputs$dispersal_data
dispersal_data_changes[[1]] <- dispersal_data_changes[[1]][NULL,]
}
dispersal_depends_on_target_pop_k <- (!is.null(dispersal_target_k) && dispersal_target_k > 0)
if (dispersal_depends_on_target_pop_k) {
dispersal_target_pop_map <- dispersal_zero_array
dispersal_target_pop_map[as.matrix(dispersal_data[, c("emigrant_row", "source_pop")])] <- dispersal_data$target_pop
}
dispersal_compact_indices <- array(1:(dispersal_compact_rows*populations), c(dispersal_compact_rows, populations))
dispersal_immigrant_map <- dispersal_zero_array
dispersal_immigrant_map[as.matrix(dispersal_data[, c("emigrant_row", "source_pop")])] <- dispersal_compact_indices[as.matrix(dispersal_data[, c("immigrant_row", "target_pop")])]
dispersal_abundance_rep_indices <- rep(1, dispersal_compact_rows)
dispersal_data <- NULL; dispersal_compact_indices <- NULL
}
if (environmental_stochasticity) {
use_env_correlation <- FALSE
if (!is.null(inputs$compact_decomposition)) {
use_env_correlation <- TRUE
t_decomposition_compact_matrix <- inputs$compact_decomposition$matrix
t_decomposition_compact_map <- inputs$compact_decomposition$map
t_decomposition_compact_rows <- nrow(t_decomposition_compact_matrix)
}
}
harvest <- ifelse(is.null(inputs$harvest), FALSE, inputs$harvest)
if (harvest) {
harvest_max <- inputs$harvest_max
harvest_g <- inputs$harvest_g
harvest_z <- inputs$harvest_z
harvest_max_n <- inputs$harvest_max_n
human_densities <- inputs$human_density
}
results_selection <- inputs$results_selection
if (is.null(results_selection)) results_selection <- "abundance"
carrying_capacity_t_max <- ncol(carrying_capacities)
if (carrying_capacity_t_max == 1) {
carrying_capacity <- carrying_capacities[, 1]
}
results <- list()
if ("abundance" %in% results_selection) {
results$abundance <- array(0, c(populations, time_steps))
}
if ("ema" %in% results_selection) {
results$ema <- array(0, c(populations, time_steps))
min_abundances <- population_abundances[1,]
}
if ("extirpation" %in% results_selection) {
results$extirpation <- array(NA, populations)
results$extirpation[which(population_abundances == 0)] <- 0
}
if ("harvested" %in% results_selection) {
results$harvested <- array(0, c(populations, time_steps))
}
if ("occupancy" %in% results_selection) {
results$occupancy <- array(0, time_steps)
}
if ("human_density" %in% results_selection) {
results$human_density <- inputs$human_density
}
if ("dispersal_tracking" %in% names(inputs) ||
("attached" %in% names(inputs) && "dispersal_tracking" %in% names(inputs$attached))) {
results$emigrants <- array(0, c(populations, time_steps))
results$immigrants <- array(0, c(populations, time_steps))
dispersal_tracking <- TRUE
} else {
dispersal_tracking <- FALSE
}
for (tm in 1:time_steps) {
transitions <- array(transition_rate, populations)
if (!is.null(occupancy_threshold)) {
if (length(which(as.logical(population_abundances))) <= occupancy_threshold) {
population_abundances[] <- 0
}
}
if (carrying_capacity_t_max > 1) {
carrying_capacity <- carrying_capacities[, min(tm, carrying_capacity_t_max)]
}
occupied_indices <- which(as.logical(carrying_capacity*population_abundances[1,]))
occupied_populations <- length(occupied_indices)
zero_indices <- which(carrying_capacity <= 0 & as.logical(population_abundances))
if (occupied_populations && !is.null(density_dependence) && density_dependence %in% c("competition", "logistic")) {
selected_carrying_capacity <- carrying_capacity[occupied_indices]
selected_population_abundances <- population_abundances[occupied_indices]
if (density_dependence == "competition") {
growth_rate <- growth_rate_max - log(exp(growth_rate_max)*selected_population_abundances/selected_carrying_capacity -
selected_population_abundances/selected_carrying_capacity + 1)
} else if (density_dependence == "logistic") {
growth_rate <- growth_rate_max*(1 - selected_population_abundances/selected_carrying_capacity)
}
density_dependence_multipliers <- exp(growth_rate)/transition_rate
transitions[occupied_indices] <- transitions[occupied_indices]*density_dependence_multipliers
negative_indices <- occupied_indices[which(transitions[occupied_indices] < 0)]
if (length(negative_indices)) {
transitions[negative_indices] <- 0
}
}
if (occupied_populations && environmental_stochasticity) {
if (use_env_correlation) {
occupied_correlated_deviates <- .colSums(t_decomposition_compact_matrix[, occupied_indices]*stats::rnorm(populations)[t_decomposition_compact_map[, occupied_indices]],
m = t_decomposition_compact_rows, n = occupied_populations, na.rm = TRUE)
} else {
occupied_correlated_deviates <- stats::rnorm(occupied_populations)
}
if (length(occupied_indices)) {
log_common <- log((standard_deviation/transitions[occupied_indices])^2 + 1)
log_common[which(transitions[occupied_indices] == 0)] <- 0
log_common[which(is.infinite(log_common))] <- 0
transitions[occupied_indices] <- transitions[occupied_indices]*exp(sqrt(log_common)*occupied_correlated_deviates - 0.5*log_common)
}
negative_indices <- occupied_indices[which(transitions[occupied_indices] < 0)]
if (length(negative_indices)) {
transitions[negative_indices] <- 0
}
}
population_abundances[zero_indices] <- 0
if (occupied_populations) {
population_abundances[occupied_indices] <- stats::rpois(occupied_populations, transitions[occupied_indices]*population_abundances[occupied_indices])
}
if (occupied_populations && density_dependence == "ceiling") {
above_capacity_indices <- occupied_indices[which(population_abundances[occupied_indices] > carrying_capacity[occupied_indices])]
if (length(above_capacity_indices)) {
population_abundances[above_capacity_indices] <- carrying_capacity[above_capacity_indices]
}
}
if (harvest || "harvested" %in% results_selection) {
occupied_indices <- occupied_indices[which(as.logical(population_abundances[occupied_indices]))]
occupied_populations <- length(occupied_indices)
harvested <- array(0, populations)
}
if (occupied_populations && harvest) {
harvest_rate <- array(0, occupied_populations)
human_presence_indices <- which(as.logical(human_densities[occupied_indices, tm]))
human_presence_occupied_indices <- occupied_indices[human_presence_indices]
if (length(human_presence_indices)) {
prey_density <- population_abundances[human_presence_occupied_indices]/harvest_max_n
prey_z <- prey_density^harvest_z
max_functional_response <- (harvest_max*prey_z)/(harvest_g + prey_z)
functional_response <- max_functional_response*human_densities[human_presence_occupied_indices, tm]
harvest_rate[human_presence_indices] <- functional_response/prey_density
}
harvest_rate <- 1 - (1 - harvest_rate)^years_per_step
harvested[occupied_indices] <- stats::rbinom(occupied_populations, population_abundances[occupied_indices], harvest_rate)
population_abundances[occupied_indices] <- population_abundances[occupied_indices] - harvested[occupied_indices]
}
if (occupied_populations && dispersal_present) {
if (tm == 1) {
dispersal_compact_matrix_tm <- dispersal_compact_matrix
} else if (dispersals_change_over_time && nrow(dispersal_data_changes[[tm]])) {
dispersal_compact_matrix_tm[as.matrix(dispersal_data_changes[[tm]][,c("emigrant_row","source_pop")])] <- dispersal_data_changes[[tm]]$dispersal_rate
}
occupied_dispersals <- dispersal_compact_matrix_tm[, occupied_indices]
occupied_dispersal_indices <- which(as.logical(occupied_dispersals))
if (dispersal_depends_on_target_pop_k) {
dd_multipliers <- array(1, populations)
modify_pop_indices <- which(carrying_capacity < dispersal_target_k)
dd_multipliers[modify_pop_indices] <- carrying_capacity[modify_pop_indices]/dispersal_target_k
selected_dd_multipliers <- dd_multipliers[dispersal_target_pop_map[, occupied_indices][occupied_dispersal_indices]]
modify_indices <- which(selected_dd_multipliers < 1)
if (length(modify_indices)) {
modify_dipersal_indices <- occupied_dispersal_indices[modify_indices]
occupied_dispersals[modify_dipersal_indices] <- occupied_dispersals[modify_dipersal_indices]*selected_dd_multipliers[modify_indices]
occupied_dispersal_indices <- which(as.logical(occupied_dispersals))
}
modify_pop_indices <- NULL; dd_multipliers <- NULL; selected_dd_multipliers <- NULL; modify_indices <- NULL; modify_dipersal_indices <- NULL
}
occupied_abundances <- population_abundances[occupied_indices]
occupied_abundances_rep <- population_abundances[dispersal_abundance_rep_indices, occupied_indices]
dispersers <- array(0, c(dispersal_compact_rows, occupied_populations))
dispersers[occupied_dispersal_indices] <- stats::rbinom(length(occupied_dispersal_indices), occupied_abundances_rep[occupied_dispersal_indices], occupied_dispersals[occupied_dispersal_indices])
occupied_dispersals <- NULL; occupied_abundances_rep <- NULL; occupied_dispersal_indices <- NULL
emigrants <- array(0, occupied_populations)
emigrants[] <- .colSums(dispersers, m = dispersal_compact_rows, n = occupied_populations)
excessive_indices <- which(emigrants > occupied_abundances)
if (length(excessive_indices) > 0) {
for (excessive_index in excessive_indices) {
excessive_rows <- which(as.logical(dispersers[, excessive_index]))
excessive_dispersers <- dispersers[excessive_rows, excessive_index]
disperser_reduction <- emigrants[excessive_index] - occupied_abundances[excessive_index]
for (remove_row_index in rep(excessive_rows,
times = excessive_dispersers)[sample(sum(excessive_dispersers),
size = disperser_reduction)]) {
dispersers[remove_row_index, excessive_index] <- dispersers[remove_row_index, excessive_index] - 1
}
}
emigrants[excessive_indices] <- occupied_abundances[excessive_indices]
}
population_abundances[occupied_indices] <- population_abundances[occupied_indices] - emigrants
if (dispersal_tracking) {
results$emigrants[occupied_indices, tm] <- emigrants
}
occupied_abundances <- NULL; emigrants <- NULL; excessive_indices <- NULL
disperser_indices <- which(as.logical(dispersers))
immigrant_array <- dispersal_zero_array
immigrant_array[dispersal_immigrant_map[, occupied_indices][disperser_indices]] <- dispersers[disperser_indices]
immigrants <- .colSums(immigrant_array, m = dispersal_compact_rows, n = populations)
population_abundances[1,] <- population_abundances[1,] + immigrants
if (dispersal_tracking) {
results$immigrants[, tm] <- immigrants
}
dispersers <- NULL; immigrant_array <- NULL; immigrants <- NULL
}
if (!is.null(abundance_threshold)) {
below_threshold_indices <- which(as.logical(population_abundances) & population_abundances <= abundance_threshold)
if (length(below_threshold_indices)) {
population_abundances[below_threshold_indices] <- 0
}
}
if ("abundance" %in% results_selection) {
results$abundance[, tm] <- population_abundances[1,]
}
if ("ema" %in% results_selection) {
min_abundances <- pmin(min_abundances, population_abundances[1,])
results$ema[,tm] <- min_abundances
}
if ("extirpation" %in% results_selection) {
results$extirpation <- pmin(results$extirpation, rep(tm, populations), na.rm = TRUE)
results$extirpation[which(as.logical(population_abundances))] <- NA
}
if ("harvested" %in% results_selection) {
results$harvested[, tm] <- harvested
}
if ("occupancy" %in% results_selection) {
results$occupancy[tm] <- sum(as.logical(population_abundances))
}
}
return(results)
}
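## Hypothetical usage sketch (not part of the original source): a minimal
## input list exercising the required-field checks at the top of
## paleopop_simulator(). All values below are illustrative assumptions.
inputs <- list(
  time_steps = 10,
  populations = 5,
  initial_abundance = rep(100, 5),
  transition_rate = 1.0,
  carrying_capacity = rep(500, 5),
  density_dependence = "logistic",
  standard_deviation = 0.1,
  random_seed = 42,
  results_selection = c("abundance", "occupancy")
)
sim <- paleopop_simulator(inputs)
dim(sim$abundance)  # populations x time_steps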
|
pbsjkREtest<-function(formula, data, w, index=NULL, ...) {
gindex <- data[,1]
tindex <- data[,2]
data <- data[order(tindex, gindex),]
mymod <- spreml(formula=formula, data=data, w=w,
index=index, lag=FALSE, errors="semsr", ...)
tr<-function(x) sum(diag(x))
msq<-function(x) x%*%x
if("listw" %in% class(w)) w<-listw2mat(w)
X <- model.matrix(formula, data)
y <- model.response(model.frame(formula,data))
beta0 <- mymod$coefficients
u.hat <- as.numeric(y-X%*%beta0)
nt.<- length(y)
n.<- dim(w)[[1]]
t.<-nt./n.
sigma2e<-as.numeric(crossprod(u.hat)/nt.)
rho <- mymod$errcomp["psi"]
lambda <- mymod$errcomp["rho"]
Jt<-matrix(1,ncol=t.,nrow=t.)
V1<-matrix(ncol=t.,nrow=t.)
for(i in 1:t.) V1[i,] <- rho^abs((1:t.) - i)
Vrho <- (1/(1-rho^2)) * V1
iVrho<-solve(Vrho)
VrhoJt <- solve(Vrho,Jt)
g. <- (1-rho)/sigma2e^2 * ( 2 + (t.-2)*(1-rho) )
B<-diag(1,n.)-lambda*w
BB<-crossprod(B)
BB.1 <- solve(BB)
wBBw<-crossprod(w,B)+crossprod(B,w)
blackspade <- kronecker(VrhoJt %*% iVrho, msq(BB))
Dhat <- -g./2 * tr(BB) + 1/(2*sigma2e^2) *
crossprod(u.hat, blackspade) %*% u.hat
d3<-tr( wBBw%*%BB.1 )
d6<-tr( msq( wBBw %*% BB.1 ) )
j11<-nt./(2*sigma2e^2)
j12<-g.*tr(BB)/(2*sigma2e)
j13<-(n.*rho)/(sigma2e*(1-rho^2))
j14<-t.*d3/(2*sigma2e)
j22<-g.^2*tr(msq(BB))/2
j23<-tr(BB)/(sigma2e*(1+rho)) * ( (2-t.)*rho^2 + (t.-1) + rho )
j24<-g./2*tr(wBBw)
j33<-n./(1-rho^2)^2 * (3*rho^2 - t.*rho^2 +t.-1)
j34<-(rho*d3)/(1-rho^2)
j44<-t.*d6/2
Jtheta<-matrix(ncol=4,nrow=4)
Jtheta[1,]<-c(j11,j12,j13,j14)
Jtheta[2,]<-c(j12,j22,j23,j24)
Jtheta[3,]<-c(j13,j23,j33,j34)
Jtheta[4,]<-c(j14,j24,j34,j44)
J22.1<-solve(Jtheta)[2,2]
LMm.rl <- (Dhat^2) * J22.1
df.<-1
pval <- pchisq(LMm.rl, df = df., lower.tail = FALSE)
names(LMm.rl)="LM"
names(df.)<-"df"
dname <- paste(deparse(substitute(formula)))
RVAL <- list(statistic = LMm.rl, parameter = df.,
method = "Baltagi, Song, Jung and Koh C.3 conditional test \n \n H_0: no random effects, sub serial corr. and spatial dependence in error terms",
p.value = pval,
data.name = dname)
class(RVAL) <- "htest"
return(RVAL)
}
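## Hypothetical usage sketch (assumes the splm and plm packages; the Produc
## panel and the usaww spatial weights matrix ship with them). Note that
## pbsjkREtest() reads the group and time indices from the first two
## columns of `data`, so a plm-style panel data frame is expected.
# library(splm)
# data(Produc, package = "plm")
# data(usaww)
# fm <- log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp
# pbsjkREtest(fm, data = Produc, w = usaww)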
|
mice_ml_lmer_include_cluster_means <- function(y, ry, type, x, levels_id, aggregate_automatically,
clus, groupcenter.slope, variables_levels )
{
types_sel <- names(type)[ type==1 ]
types_sel <- intersect(types_sel, colnames(x))
x_sel <- x[, types_sel, drop=FALSE ]
NL <- length(levels_id)
if (aggregate_automatically){
for (ll in 1:NL){
id_ll <- levels_id[ll]
clus_ll <- clus[[ll]]
clus_name_ll <- levels_id[ll]
vars_aggr <- mice_ml_lmer_choice_aggregated_variables( x_sel=x_sel,
clus=clus_ll, eps=1e-5)
LV <- length(vars_aggr)
if (LV > 0){
ind_aggr <- which( substring( names(vars_aggr), 1, 2 )=="M." )
if ( length(ind_aggr) > 0 ){
vars_aggr <- vars_aggr[ - ind_aggr ]
}
}
x_sel1 <- cbind( clus_ll, x_sel )
colnames(x_sel1)[1] <- clus_name_ll
type1 <- c( -2, rep( 1, ncol(x_sel) ) )
names(type1) <- c( clus_name_ll, colnames(x_sel) )
if ( LV > 0 ){
type1[ names(vars_aggr) ] <- 3
}
res <- mice_multilevel_add_groupmeans( y=y, ry=ry, x=x_sel1, type=type1,
groupcenter.slope=groupcenter.slope,
aggr_label=paste0( "M.", clus_name_ll, "_" ) )
x <- res$x
type <- res$type
x_sel <- x[,-1,drop=FALSE]
type1 <- type[-1]
}
}
type_sel <- mice_imputation_create_type_vector( variables=colnames(x_sel), value=1)
res <- list( x=x_sel, type=type_sel)
return(res)
}
|
Sindex <- function(x,pervar,vvar,base){
colNameCheck <- checkNames(x, c(pervar,vvar))
if (colNameCheck$result == FALSE) {
stop(colNameCheck$message)
}
if(!all(sapply(list(pervar,vvar,base),isstring))){
stop('Arguments pervar, vvar, and base must be character strings')
}
base <- as.character(base)
values <- x[[vvar]]
if(anyDuplicated(x[[pervar]])!=0){
stop('The period variable cannot have repeated values')
}
names(values) <- x[[pervar]]
if(!base %in% names(values)){
stop(paste(base, ' is not a value in the variable "',pervar,'"',sep=''))
}
return(eval(parse(text=paste('data.frame(index_',make.names(base),'=values/values[base])',sep=""))))
}
Deflat <- function(x,pervar,cvar,defl,base){
colNameCheck <- checkNames(x, c(pervar,cvar,defl))
if (colNameCheck$result == FALSE) {
stop(colNameCheck$message)
}
if(!all(sapply(list(pervar,cvar, defl, base),isstring))){
stop('Arguments pervar, cvar, defl, and base must be character strings')
}
base <- as.character(base)
current <- x[[cvar]]
deflator <- x[[defl]]
if(anyDuplicated(x[[pervar]])!=0){
stop('The period variable cannot have repeated values')
}
names(current) <- names(deflator) <-x[[pervar]]
if(!base %in% names(current)){
stop(paste(base, ' is not a value in the variable "',pervar,'"',sep=''))
}
return(eval(parse(text=paste('data.frame(const_',make.names(base),'=current/deflator*deflator[base])',sep=""))))
}
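## Hypothetical usage sketch (assumes the package's checkNames() and
## isstring() helpers are on the search path; values are illustrative).
# df <- data.frame(year = 2018:2021,
#                  gdp  = c(100, 104, 103, 110),
#                  cpi  = c(98, 100, 102, 105))
# Sindex(df, pervar = "year", vvar = "gdp", base = "2019")   # 2019 = 1
# Deflat(df, pervar = "year", cvar = "gdp", defl = "cpi", base = "2019")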
priceIndexNum <- function (x, prodID, pervar, pvar, qvar, base, indexMethod = "laspeyres", output = "fixedBase", ...){
colNameCheck <- checkNames(x, c(prodID, pervar, pvar, qvar))
if (colNameCheck$result == FALSE) {
stop(colNameCheck$message)
}
if(!all(sapply(list(prodID, pervar, pvar, qvar, base),isstring))){
stop('Arguments prodID, pervar, pvar, qvar, and base must be character strings')
}
x[[pervar]] <- as.factor(x[[pervar]])
isord <- FALSE
if(is.ordered(x[[pervar]])) isord <- TRUE
if(!base %in% levels(x[[pervar]])){
stop(paste(base, ' is not a value in the variable "',pervar,'"',sep=''))
}
pos<-which(levels(x[[pervar]])==base)
oldlevels <- levels(x[[pervar]])
if(isord) class(x[[pervar]]) <- "factor"
x[[pervar]] <- relevel(x[[pervar]],base)
x[[pervar]] <- as.numeric(x[[pervar]])
meth <- function(y,...) {
numInd <- IndexNumR::priceIndex(x,pvar = pvar,qvar = qvar, pervar = pervar,prodID = prodID, indexMethod = y, output = output, ...)
numInd <- append(numInd[-1],numInd[1],after=(pos-1))
return(numInd)
}
numInd <- lapply(indexMethod, meth,...)
priceind <- data.frame(oldlevels,numInd)
names(priceind) <- c("period",indexMethod)
if(isord) priceind[["period"]] <- as.ordered(priceind[["period"]])
return(priceind)
}
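## Hypothetical usage sketch (assumes the IndexNumR package; CES_sigma_2 is
## one of its example datasets with time/prices/quantities/prodID columns).
# library(IndexNumR)
# priceIndexNum(CES_sigma_2, prodID = "prodID", pervar = "time",
#               pvar = "prices", qvar = "quantities", base = "1",
#               indexMethod = c("laspeyres", "paasche"))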
ComplexIN <- function (data, means = c("arithmetic", "geometric", "harmonic"), zero.rm=TRUE, na.rm=TRUE,...)
{
meanG <- function(x,zero.rm=FALSE,...){
if (zero.rm){
x <- x[x>0]
}
return(exp(mean(log(x),...)))
}
meanH <- function(x,zero.rm=FALSE,...){
if (zero.rm){
x <- x[x!=0]
}
return(1/mean(1/x,...))
}
data <- as.data.frame(data)
variables <- names(data)
if (missing(means)) means <- c("arithmetic", "geometric", "harmonic")
means <- match.arg(means, c("arithmetic", "geometric", "harmonic"), several.ok=TRUE)
avge <- c("mean", "meanG", "meanH")[c("arithmetic", "geometric", "harmonic") %in% means]
navge <- length(avge)
nvars <- length(variables)
table <- matrix(0, nvars, navge)
rownames(table) <- variables
colnames(table) <- means
if ("mean" %in% avge) table[,"arithmetic"] <- colMeans(data, na.rm=na.rm)
if ("meanG" %in% avge) table[,"geometric"] <- sapply(data,meanG,zero.rm=zero.rm,na.rm=na.rm,...)
if ("meanH" %in% avge) table[, "harmonic"] <- sapply(data,meanH,zero.rm=zero.rm,na.rm=na.rm,...)
NAs <- colSums(is.na(data[, variables, drop=FALSE]))
n <- nrow(data) - NAs
table
}
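## Minimal self-contained example: column-wise arithmetic and geometric
## means of a small data frame.
ComplexIN(data.frame(a = c(2, 8, 4), b = c(1, 10, 100)),
          means = c("arithmetic", "geometric"))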
|
boot_fit_loess <- function(bullet, groove, B=1000, alpha=0.95) {
value <- NULL
y <- NULL
bullet_filter <- subset(bullet, !is.na(value) & y > groove$groove[1] & y < groove$groove[2])
my.loess <- loess(value ~ y, data = bullet_filter)
bullet_filter$fitted <- fitted(my.loess)
bullet_filter$resid <- resid(my.loess)
N <- nrow(bullet_filter)
resids <- plyr::rdply(B, function(n) {
bf <- bullet_filter[sample(N,N, replace=TRUE),]
my.loess <- loess(value ~ y, data = bf)
dframe <- data.frame(y=bullet_filter$y, fitted=predict(my.loess, newdata=bullet_filter))
dframe$resid <- bullet_filter$value-dframe$fitted
dframe
})
quantiles <- resids %>% group_by(y) %>% summarize(
nas = sum(is.na(resid)),
low = quantile(resid, probs=(1-alpha)/2, na.rm=TRUE),
high = quantile(resid, probs=1 - (1-alpha)/2, na.rm=TRUE)
)
quantiles
}
fit_loess <- function(bullet, groove, span = 0.75) {
value <- NULL
y <- NULL
chop <- NULL
bullet_filter <- subset(bullet, !is.na(value) & y > groove$groove[1] & y < groove$groove[2])
my.loess <- loess(value ~ y, data = bullet_filter, span = span)
bullet_filter$fitted <- fitted(my.loess)
bullet_filter$resid <- resid(my.loess)
bullet_filter$se <- predict(my.loess, se=TRUE)$se.fit
bullet_filter$abs_resid <- abs(bullet_filter$resid)
cutoff <- quantile(bullet_filter$abs_resid, probs = c(0.9975))
bullet_filter$chop <- bullet_filter$abs_resid > cutoff
bullet_filter <- subset(bullet_filter, !chop)
poly <- with(bullet_filter,
data.frame(x=c(y, rev(y)),
y=c(resid-1.96*se, rev(resid+1.96*se))))
p2 <- ggplot(aes(x=y, y=resid), data=bullet_filter) +
geom_line()+
theme_bw()
p1 <- qplot(data = bullet_filter, y, value) +
theme_bw() +
geom_smooth()
return(list(data = bullet_filter, fitted = p1, resid = p2))
}
predSmooth <- function(x, y) {
dframe <- data.frame(x, y)
if (sum(is.na(y)) > 2*length(y)/3) {
dframe$smPred <- NA
dframe$smResid <- NA
return(dframe)
}
data.lo <- loess(y~x)
dframe$smPred <- predict(data.lo, newdata=dframe)
dframe$smResid <- with(dframe, y - smPred)
dframe
}
smoothloess <- function(x, y, span, sub = 2) {
dat <- data.frame(x, y)
indx <- sub *(1: (nrow(dat) %/% sub))
subdat <- dat[indx, ]
lwp <- with(subdat, loess(y~x,span=span))
predict(lwp, newdata = dat)
}
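## Minimal self-contained example: smoothloess() fits a loess curve on every
## `sub`-th point and predicts back onto the full grid, which is useful for
## long, high-resolution profiles.
x <- seq(0, 10, by = 0.01)
y <- sin(x) + rnorm(length(x), sd = 0.2)
sm <- smoothloess(x, y, span = 0.3, sub = 5)
# plot(x, y, pch = "."); lines(x, sm, col = "red")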
bulletSmooth <- function(data, span = 0.03, limits = c(-5,5)) {
bullet <- NULL
y <- NULL
myspan <- NULL
lof <- data %>% group_by(bullet) %>% mutate(
myspan = ifelse(span > 1, span / diff(range(y)), span),
l30 = smoothloess(y, resid, span = myspan[1])
) %>% select(-myspan)
lof$l30 <- pmin(max(limits), lof$l30)
lof$l30 <- pmax(min(limits), lof$l30)
lof
}
|
.Fixed_point_method_two_constraints_K_SPOR_DynProg <- function(datX,datY,deg,sigma2,constraint,begin_point,end_point,FP_nbIter=20){
X = datX[datX<=end_point[1] & datX>begin_point[1]]
Y = datY[datX<=end_point[1] & datX>begin_point[1]]
for(p in 1:FP_nbIter){
M_mat <- .Jacobian_Matrix_two_constraints_K_SPOR_DynProg(X,Y,deg,sigma2,constraint,begin_point,end_point)
gammaV <- .Vector_solution_two_constraints_K_SPOR_DynProg(X,Y,deg,constraint,begin_point,end_point)
mat_param <- .Parameters_estimation_K_SPOR_DynProg(M_mat,gammaV,deg)
sigma2 <- .Variance_estimation_K_SPOR_DynProg(X,Y,deg,mat_param)
}
s <- 0
for(i in 1:(deg+1)){
s <- s + mat_param[1,i] * X^(deg+1-i)
}
pZ = rep(1,length(X))
wp <- pZ*((1/sqrt(2*pi*sigma2)) * exp( - ((Y - s)^2)/(2*sigma2)))
MLL <- -sum(log(wp))
list(mat_param,sigma2,MLL)
}
|
setConstructorS3("RspSourceCodeFactory", function(language=NA, ...) {
language <- Arguments$getCharacter(language)
extend(language, "RspSourceCodeFactory")
})
setMethodS3("getLanguage", "RspSourceCodeFactory", function(this, ...) {
as.character(this)
})
setMethodS3("makeSourceCode", "RspSourceCodeFactory", function(this, code, ...) {
lang <- getLanguage(this)
className <- sprintf("Rsp%sSourceCode", capitalize(lang))
ns <- getNamespace("R.rsp")
clazz <- Class$forName(className, envir=ns)
code <- clazz(code, ...)
code <- getCompleteCode(this, code, ...)
code <- c(code$header, code$body, code$footer)
code <- clazz(code, ...)
code
}, protected=TRUE)
setMethodS3("exprToCode", "RspSourceCodeFactory", abstract=TRUE)
setMethodS3("getCompleteCode", "RspSourceCodeFactory", function(this, object, ...) {
object <- Arguments$getInstanceOf(object, "RspSourceCode")
lang <- getLanguage(this)
className <- sprintf("Rsp%sSourceCode", capitalize(lang))
object <- Arguments$getInstanceOf(object, className)
header <- ''
footer <- ''
list(header=header, body=object, footer=footer)
}, protected=TRUE)
setMethodS3("toSourceCode", "RspSourceCodeFactory", function(object, doc, ...) {
doc <- Arguments$getInstanceOf(doc, "RspDocument")
if (length(doc) == 0L) {
code <- makeSourceCode(object, "", ..., type=getType(doc), metadata=getMetadata(doc, local=TRUE))
return(code)
}
if (any(sapply(doc, FUN=inherits, "RspDocument"))) {
throw(sprintf("%s argument 'doc' contains other RspDocuments, which indicates that it has not been flattened.", class(doc)[1L]))
}
if (any(sapply(doc, FUN=inherits, "RspDirective"))) {
throw(sprintf("%s argument 'doc' contains RSP preprocessing directives, which indicates that it has not been preprocessed.", class(doc)[1L]))
}
nok <- sapply(doc, FUN=function(expr) {
if (inherits(expr, "RspText") || inherits(expr, "RspExpression")) {
NA
} else {
class(expr)
}
})
nok <- nok[!is.na(nok)]
nok <- unique(nok)
if (length(nok) > 0L) {
throw(sprintf("%s argument 'doc' contains RSP preprocessing directives, which indicates that it has not been preprocessed: %s", class(doc)[1L], hpaste(nok)))
}
isText <- sapply(doc, FUN=inherits, "RspText")
doc[isText] <- lapply(doc[isText], FUN=function(expr) {
RspText(getContent(expr, unescape=TRUE))
})
code <- vector("list", length=length(doc))
for (kk in seq_along(doc)) {
code[[kk]] <- exprToCode(object, doc[[kk]], index=kk)
}
code <- unlist(code, use.names=FALSE)
code <- makeSourceCode(object, code, ..., type=getType(doc), metadata=getMetadata(doc, local=TRUE))
code
})
|
yac_cli <- function(enable_history = TRUE) {
if (enable_history == TRUE) {
tmphistory <- tempfile()
try(utils::savehistory(tmphistory), silent = TRUE)
on.exit(unlink(tmphistory))
}
update_history <- function(x) {
if (enable_history == TRUE) {
histcon <- file(tmphistory, open = "a")
writeLines(x, histcon)
close(histcon)
try(utils::loadhistory(tmphistory), silent = TRUE)
}
invisible(x)
}
cat("Enter Yacas commands here. Type quit to return to R\n")
x <- readline("Yacas->")
while (length(which(c("stop;", "stop", "end;", "end", "quit;",
"quit", "exit;", "exit", "e;", "e", "q;", "q", "q()", "\n") == x)) ==
0) {
update_history(x)
o <- yac_str(x)
print(o, quote=FALSE)
x <- readline("Yacas->")
}
}
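## Hypothetical usage sketch (assumes the Ryacas package, which provides
## yac_str()):
# yac_cli()            # interactive loop; type `quit` to return to R
# yac_str("D(x) x^2")  # non-interactive equivalent of one command: "2*x"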
|
generate_gold_standard <- function(model) {
model$gold_standard <- list()
if (model$verbose) cat("Generating gold standard mod changes\n")
model <- .add_timing(model, "5_gold_standard", "generate mod changes")
model$gold_standard$mod_changes <- .generate_gold_standard_mod_changes(model$backbone$expression_patterns)
if (model$verbose) cat("Precompiling reactions for gold standard\n")
model <- .add_timing(model, "5_gold_standard", "precompiling reactions for gold standard")
prep_data <- .generate_gold_precompile_reactions(model)
if (model$verbose) cat("Running gold simulations\n")
model <- .add_timing(model, "5_gold_standard", "running gold simulations")
simulations <- .generate_gold_standard_simulations(model, prep_data)
model$gold_standard$meta <- simulations$meta
model$gold_standard$counts <- simulations$counts
model <- .add_timing(model, "5_gold_standard", "compute dimred")
model <- model %>% calculate_dimred()
model <- .add_timing(model, "5_gold_standard", "generate simulation network from dimred")
model$gold_standard$network <- .generate_gold_standard_generate_network(model)
.add_timing(model, "5_gold_standard", "end")
}
gold_standard_default <- function(
tau = 30 / 3600,
census_interval = 10 / 60,
simulate_targets = FALSE
) {
lst(
tau,
census_interval,
simulate_targets
)
}
.generate_gold_standard_mod_changes <- function(expression_patterns) {
module_progression <- mod_diff <- substate <- mod_diff2 <- `.` <- from <- to <- from_ <- NULL
expression_patterns %>%
mutate(
mod_diff = module_progression %>% strsplit("\\|"),
substate = map(mod_diff, seq_along)
) %>%
select(-module_progression) %>%
unnest(c(mod_diff, substate)) %>%
mutate(
mod_diff2 = strsplit(mod_diff, ","),
mod_on = map(mod_diff2, function(x) x %>% keep(grepl("\\+", .)) %>% gsub("\\+", "", .)),
mod_off = map(mod_diff2, function(x) x %>% keep(grepl("-", .)) %>% gsub("-", "", .))
) %>%
select(-mod_diff2) %>%
group_by(from, to) %>%
mutate(
from_ = ifelse(row_number() == 1, from, c("", paste0(from, to, "p", (row_number() - 1)))),
to_ = ifelse(row_number() == n(), to, from_[row_number() + 1])
) %>%
ungroup()
}
.generate_gold_precompile_reactions <- function(model) {
is_tf <- mol_premrna <- mol_mrna <- mol_protein <- val <- NULL
sim_system <- model$simulation_system
tf_info <- model$feature_info
if (!model$gold_standard_params$simulate_targets) {
tf_info <- tf_info %>% filter(is_tf)
}
tf_molecules <- tf_info %>% select(mol_premrna, mol_mrna, mol_protein) %>% gather(col, val) %>% pull(val)
reactions <- sim_system$reactions %>%
keep(~ all(names(.$effect) %in% tf_molecules))
buffer_ids <- unique(unlist(map(reactions, "buffer_ids")))
comp_funs <- GillespieSSA2::compile_reactions(
reactions = reactions,
buffer_ids = buffer_ids,
state_ids = tf_molecules,
params = sim_system$parameters,
hardcode_params = FALSE,
fun_by = 1000L
)
lst(
tf_molecules,
reactions = comp_funs
)
}
.generate_gold_standard_simulations <- function(model, prep_data) {
is_tf <- module_id <- mol_premrna <- mol_mrna <- mol_protein <- val <- from <- to <-
substate <- burn <- time_per_edge <- simulation_i <- sim_time <- NULL
mod_changes <- model$gold_standard$mod_changes
gold_params <- model$gold_standard_params
sim_system <- model$simulation_system
tf_info <- model$feature_info
sim_targets <- model$gold_standard_params$simulate_targets
if (!sim_targets) {
tf_info <- tf_info %>% filter(is_tf)
}
algo <- GillespieSSA2::ode_em(tau = gold_params$tau, noise_strength = 0)
tf_molecules <- prep_data$tf_molecules
reactions <- prep_data$reactions
gold_sim_outputs <- list()
gold_sim_vectors <- list()
gold_sim_modules <- list()
start_state <- mod_changes$from[[1]]
gold_sim_vectors[[start_state]] <- model$simulation_system$initial_state[tf_molecules] %>% as.matrix
gold_sim_modules[[start_state]] <- c()
if (model$verbose) {
timer <- pbapply::timerProgressBar(
min = 0,
max = nrow(mod_changes),
width = 50
)
}
for (i in seq_len(nrow(mod_changes))) {
from_ <- mod_changes$from_[[i]]
to_ <- mod_changes$to_[[i]]
time <- mod_changes$time[[i]]
mods <-
gold_sim_modules[[from_]] %>%
union(mod_changes$mod_on[[i]]) %>%
setdiff(mod_changes$mod_off[[i]])
gold_sim_modules[[to_]] <- mods
tfs_on <- tf_info %>% filter((is.na(module_id) & sim_targets) | module_id %in% mods)
molecules_on <- tfs_on %>% select(mol_premrna, mol_mrna, mol_protein) %>% gather(col, val) %>% pull(val)
new_initial_state <- rowMeans(gold_sim_vectors[[from_]])
new_reactions <- reactions
rem <- setdiff(tf_molecules, molecules_on)
if (length(rem) > 0) {
new_reactions$state_change[match(rem, tf_molecules), ] <- 0
}
out <- GillespieSSA2::ssa(
initial_state = new_initial_state,
reactions = new_reactions,
final_time = time,
params = sim_system$parameters,
method = algo,
census_interval = gold_params$census_interval,
stop_on_neg_state = TRUE,
verbose = FALSE
)
time_out <- out$time
state_out <- out$state
meta <- tibble(from_, to_, time = time_out)
counts <- state_out %>% Matrix::Matrix(sparse = TRUE)
if (model$verbose) pbapply::setTimerProgressBar(timer, value = i)
gold_sim_outputs[[i]] <- lst(meta, counts)
end_state <- counts[nrow(counts), ] %>% as.matrix()
if (!to_ %in% names(gold_sim_vectors)) {
gold_sim_vectors[[to_]] <- end_state
} else {
gold_sim_vectors[[to_]] <- cbind(gold_sim_vectors[[to_]], end_state)
}
}
cat("\n")
meta <- map_df(gold_sim_outputs, "meta")
counts <- do.call(rbind, map(gold_sim_outputs, "counts")) %>% Matrix::Matrix(sparse = TRUE)
meta <- meta %>%
left_join(mod_changes %>% select(from, to, from_, to_, substate, burn, time_per_edge = time), by = c("from_", "to_")) %>%
group_by(from, to) %>%
mutate(
simulation_i = 0,
sim_time = time,
time = ((substate - 1) * time_per_edge + time) / time_per_edge / max(substate)
) %>%
ungroup() %>%
select(-substate, -time_per_edge) %>%
select(simulation_i, sim_time, burn, from, to, from_, to_, time)
lst(meta, counts)
}
.generate_gold_standard_generate_network <- function(model) {
burn <- from <- to <- time <- NULL
model$backbone$expression_patterns %>%
filter(!burn) %>%
transmute(
from,
to,
length = time / sum(time) * length(time),
directed = TRUE
)
}
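## Hypothetical usage sketch (assumes the dyngen package; the pipeline below
## follows its documented API, with illustrative parameters):
# library(dyngen)
# model <- initialise_model(
#   backbone = backbone_linear(),
#   gold_standard_params = gold_standard_default(census_interval = 1)
# )
# model <- model %>%
#   generate_tf_network() %>%
#   generate_feature_network() %>%
#   generate_kinetics() %>%
#   generate_gold_standard()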
|
"ifa.bic" <-
function(output)
{
k<-output$L
p<-nrow(output$H)
numobs<-output$numobs
ni<-output$ni
h<-p*k+p+(3*sum(ni)-3*k )
pen<-h*log(numobs)
lik<-output$l[length(output$l)]
bic<--2*lik+pen
return(bic)
}
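## Minimal self-contained example: ifa.bic() computes
## BIC = -2*loglik + h*log(numobs) from an independent factor analysis fit;
## the mock list below mimics the fields it reads.
fit <- list(L = 2,                 # number of factors
            H = matrix(0, 5, 2),   # p x k loading matrix (p = 5)
            numobs = 200,          # sample size
            ni = c(3, 3),          # mixture components per factor
            l = c(-1250, -1210))   # log-likelihood trace; last value is used
ifa.bic(fit)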
|
txt <- citation("ordinal")
stopifnot(length(grep("year", txt)) > 0)
|
library(ic.infer)
mat <- as.matrix(swiss)
colnames(mat) <- NULL
covmat <- cov(swiss)
linmod <- lm(swiss)
linmodwt <- lm(swiss,weights=abs(-23:23))
linmodfac <- lm(1/time~poison+treat+poison:treat,boot::poisons)
orlm1 <- orlm(covmat,ui=diag(c(-1,1,-1,1,1)),df.error=41)
summary(orlm1)
ic.test(orlm1)
covmat2 <- cov(mat)
orlm1 <- orlm(covmat2,ui=diag(c(-1,1,-1,1,1)),df.error=41)
summary(orlm1)
ic.test(orlm1)
orlm1 <- orlm(linmod,ui=diag(c(-1,1,-1,1,1)))
summary(orlm1)
ic.test(orlm1)
orlm1b <- orlm(linmod,ui=diag(c(-1,1,-1,1,1)),boot=TRUE,B=100)
summary(orlm1b)
ic.test(orlm1b)
orlm1bf <- orlm(linmod,ui=diag(c(-1,1,-1,1,1)),boot=TRUE,fixed=TRUE,B=100)
summary(orlm1bf)
ic.test(orlm1bf)
linmod2 <- lm(as.data.frame(mat))
orlm1 <- orlm(linmod2,ui=diag(c(-1,1,-1,1,1)))
summary(orlm1)
ic.test(orlm1)
orlm1b <- orlm(linmod2,ui=diag(c(-1,1,-1,1,1)),boot=TRUE,B=100)
summary(orlm1b)
ic.test(orlm1b)
orlm1bf <- orlm(linmod2,ui=diag(c(-1,1,-1,1,1)),boot=TRUE,fixed=TRUE,B=100)
summary(orlm1bf)
ic.test(orlm1bf)
orlm1 <- orlm(linmodwt,ui=diag(c(-1,1,-1,1,1)))
summary(orlm1)
ic.test(orlm1)
orlm1b <- orlm(linmodwt,ui=diag(c(-1,1,-1,1,1)),boot=TRUE,B=100)
summary(orlm1b)
ic.test(orlm1b)
orlm1bf <- orlm(linmodwt,ui=diag(c(-1,1,-1,1,1)),boot=TRUE,fixed=TRUE,B=100)
summary(orlm1bf)
ic.test(orlm1bf)
orlm1 <- orlm(linmodfac,ui=diag(c(1,1,1,-1,-1)),index=2:6)
orlm1
summary(orlm1)
ic.test(orlm1)
|
library("testthat")
test_check("tibble")
|
bujar <- function(y, cens, x, valdata = NULL, degree = 1, learner = "linear.regression", center=TRUE, mimpu = NULL, iter.bj = 20, max.cycle = 5, nu = 0.1, mstop = 50, twin = FALSE, mstop2 = 100, tuning = TRUE, cv = FALSE, nfold = 5, method = "corrected", vimpint = TRUE, gamma=3, lambda=NULL, whichlambda=NULL, lamb = 0, s = 0.5, nk = 4, wt.pow = 1, theta = NULL, rel.inf = FALSE, tol = .Machine$double.eps, n.cores=2, rng=123, trace = FALSE){
call <- match.call()
if(learner == "acosso")
stop("learner = 'acosso' is no longer supported, see NEWS\n")
if(!learner%in%c("linear.regression","mars","pspline","tree","acosso","enet", "enet2", "mnet","snet")) stop(sQuote("weak learner"), learner, " is not implemented")
if(!is.null(valdata))
if((dim(x)[2]+2) !=dim(valdata)[2])
stop("check the dimension of x and valdata\n")
if(learner=="acosso" && wt.pow < 0) stop(Quote("wt.pow should be > 0"))
if(learner=="acosso" && cv) stop(Quote("if wt.pow is chosen by cross-validation, then BJ estimator is not stable, thus stop"))
if(cv && nfold < 1)
stop(sQuote("Number of CV folds"), " are less than 1")
if(!all(unique(cens)%in%c(0,1)))
stop(sQuote("censoring indicator"), " are not 0 or 1")
if(!is.null(valdata) && !all(unique(valdata[,2])%in%c(0,1)))
stop(sQuote("censoring indicator"), " are not 0 or 1")
if(iter.bj < 2)
stop(sQuote("iter.bj"), " should be greater than 1")
if(max.cycle < 1)
stop(sQuote("max.cycle"), " should be greater than 1")
if(!learner %in% c("tree","mars","acosso") && degree !=1)
stop("Not implemented for this degree with learner ",learner, "\n")
if(learner=="tree" && degree > 1 && twin) stop(sQuote("learner"), learner, sQuote("degree"), degree, sQuote("twin"), twin, "Not implemented\n")
if(learner=="pspline") l2 <- "sm"
else if(learner=="linear.regression") l2 <- "ls"
else if(learner=="enet2") l2 <- "enet"
else l2 <- learner
xbar <- colMeans(x)
f <- 0
mse.bj.val <- nz.bj <- NA
ynew <- y; ynew.m <- vim <- NULL; Fboost <- NA
p <- ncol(x)
sse <- rep(NA, p)
res <- ystar <- matrix(NA, length(y), p)
coef <- matrix(NA, 2, p)
b <- matrix(NA,iter.bj+max.cycle,p)
ybst <- matrix(NA, iter.bj+max.cycle, length(y))
ybstdiff <- rep(NA, iter.bj+max.cycle)
fnorm2 <- mseun <- ybstcon <- rep(NA, iter.bj+max.cycle)
mselect <- rep(NA,iter.bj+max.cycle)
if(!tuning && learner%in%c("linear.regression","pspline","tree")){
if(length(mstop) > 1){
if(length(mstop) !=length(mselect))
stop(sQuote("mstop must be one number or have the length iter.bj+max.cycle for a boosting learner"))
else mstopRep <- mstop
}
else mstopRep <- rep(mstop, iter.bj+max.cycle)
}
else if(tuning && learner%in%c("linear.regression","pspline","tree")){
if(length(mstop) > 1)
stop(sQuote("mstop must be one number if tuning=TRUE for a boosting learner"))
}
tuningSwitch <- TRUE
ydiff <- 100
k <- 1; kt <- 1
mse.bj <- pred.bj <- NA
if(trace) cat("\nBJ with",learner,"\n")
nm <- dim(x)
n <- nm[1]
if(trace){
cat("\nNumber of observations:",n)
cat("\nNumber of covariates:",nm[2],"\n")
}
one <- rep(1, n)
normx <- rep(1,dim(x)[2])
cycleperiod <- 0
nonconv <- FALSE
mse <- rep(NA,iter.bj+max.cycle)
nz.bj.iter <- rep(NA,iter.bj+max.cycle)
while (ydiff > tol && k <= iter.bj+max.cycle){
oldydiff <- ydiff
if(is.null(mimpu) && k==1) ynew <- y
else if(mimpu==FALSE && k==1){
for (i in 1:p){
res.des <- try(bj(Surv(ynew,cens) ~ x[,i], link="identity",control=list(trace=FALSE)))
ystar[,i] <- res.des$y.imputed
res[,i] <- y - predict(res.des)
sse[i] <- sum(res[,i][cens==1]^2)
}
minid <- which.min(sse)
ynew <- ystar[,minid]
cat("\nBJ step k=",k,"\n","\nInitial MSE for uncensored observations=", sse[minid]/sum(cens==1), "\n\n")
}
else{
if(mimpu==TRUE && k==1){
ynew <- bjboost.fit(cbind(y,cens),rep(0,length(y)))$y.imputed
}
else {
ynew <- bjboost.fit(cbind(y,cens),Fboost)$y.imputed}
}
dat1 <- as.data.frame(cbind(ynew,x))
x <- as.matrix(x)
if(learner%in%c("linear.regression","pspline","tree")){
if(!tuning) mselect.now <- mstopRep[k]
else if(k==1) mselect.now <- mstop
}
else mselect.now <- NULL
bstres <- bstfit(tuning, x, ynew, nu, mselect.now, mstop2, twin, center, interaction, degree, learner, l2, nfold, n.cores, cv, tuningSwitch, k, trace, gamma, lambda=lambda, lamb, whichlambda=whichlambda, method=method, rng)
dat1.glm <- bstres$dat1.glm
if(!is.null(bstres$mselect))
mselect.now <- mselect[k] <- bstres$mselect
predres <- predval(learner, twin, dat1.glm, b, k, x, s, mselect[k])
Fboost <- predres$Fboost
ybst[k,] <- Fboost
beta0bj <- predres$beta0bj
betabj <- predres$betabj
b <- predres$b
bdiff <- predres$bdiff
if(k>1){
ydiff <- ybstdiff[k] <- max(abs(Fboost - ybst[k-1,]))
ybstcon[k] <- sum((ybst[k,]-ybst[k-1,])^2)/sum(ybst[k-1,]^2)
}
mseun[k] <- mean((Fboost-ynew)^2)
if(k >1 && trace)
cat(" k=",k," ybstdiff", ybstdiff[k]," ybstcon", ybstcon[k],"\n")
if(!nonconv){
if(k > 1)
if((learner=="linear.regression" && bdiff <= tol)
|| (ybstcon[k] <= tol)){
contype <- 0
break
}
else if(k >= iter.bj) {
cycleydiff <- NULL
if(learner=="linear.regression") {
cycle.coef.bj <- NULL
firstb <- betabj
}
nonconv <- TRUE
firstydiff <- ydiff
first.ybstcon <- ybstcon[k]
first.dat1.glm <- dat1.glm
cycleb <- NULL
tuningSwitch <- FALSE
if(learner=="mars") ynew <- dat1.glm$ynew
}
}
else {
if(learner=="linear.regression"){
if(twin) coef.bj <- coef(dat1.glm)
else coef.bj <- coef(dat1.glm, which = 1:length(variable.names(dat1.glm)), off2int=TRUE)
cycle.coef.bj <- rbind(cycle.coef.bj,coef.bj)
}
cycleydiff <- c(cycleydiff,ydiff)
cycleperiod <- cycleperiod + 1
if(learner=="linear.regression"){
if(twin)
tmp <- (sum((firstb - coef.bj)^2) < tol || ydiff <= tol)
else tmp <- (sum((firstb - coef.bj[-1])^2) < tol || ydiff <= tol)
if(tmp){
contype <- 1
break
}
else if(cycleperiod >= max.cycle){
contype <- 2
break
}
}
else {
if(abs(ybstcon[k]-first.ybstcon) < tol){
contype <- 1
break
}
else if(cycleperiod >= max.cycle){
contype <- 2
break
}
}
}
k <- k + 1
}
if(trace)
cat("\ncycle period is",cycleperiod,"\n")
if(contype==2)
dat1.glm <- first.dat1.glm
if(all(!is.na(Fboost))){
tmpy <- y[cens==1]- Fboost[cens==1]
tmpx <- (y[cens==1] + Fboost[cens==1])/2
mse.bj <- mean(tmpy^2)
if(learner=="linear.regression"){
if(twin){
beta0bj <- attr(coef(dat1.glm), "offset2int")
names(beta0bj) <- "(Intercept)"
betabj <- coef(dat1.glm)
}
else{
beta0bj <- coef(dat1.glm)[1] + dat1.glm$offset
betabj <- coef(dat1.glm, which = 1:length(variable.names(dat1.glm)))[-1]
}
}
if(!is.null(valdata)){
if(learner=="linear.regression"){
pred.bj <- as.vector(beta0bj) + as.matrix(valdata)[,-(1:2)] %*% as.vector(betabj/normx)
mse.bj.val <- mean((valdata[,1][valdata[,2]==1] - pred.bj[valdata[,2]==1])^2)
} else if(learner %in% c("pspline", "mars"))
pred.bj <- predict(dat1.glm, newdata=valdata[,-(1:2)])
else if(learner=="tree"){
if(!twin) pred.bj <- predict(dat1.glm, newdata=as.data.frame(valdata[,-(1:2)]),n.trees=dat1.glm$n.trees)
else pred.bj <- predict(dat1.glm, newdata=valdata[,-(1:2)])
}
else if(learner=="enet")
pred.bj <- predict(dat1.glm, newx=valdata[,-(1:2)], s=s, type="fit", mode="fraction")$fit
else if(learner %in%c("enet2", "mnet", "snet"))
pred.bj <- predict(dat1.glm, newx=as.matrix(valdata[,-(1:2)]), type="response", which=mselect[k])
mse.bj.val <- mean((valdata[,1][valdata[,2]==1] - pred.bj[valdata[,2]==1])^2)
}
if(learner=="enet"){
tmp <- predict(dat1.glm, type="coef", s=s, mode="fraction")$coef
beta0.enet <- mean(ynew) - apply(x[,dat1.glm$allset], 2, mean) %*% tmp
beta.enet <- rep(0, p)
beta.enet[dat1.glm$allset] <- tmp
}
else if(learner %in% c("enet2", "mnet", "snet"))
coef.ncv <- predict(dat1.glm, newx=x, type="coefficients", which=mselect[k])
if(trace) {
cat("mse.bj=",mse.bj,"\n","correlation of predicted and observed times in noncensoring training data is",cor(y[cens==1],Fboost[cens==1]),"\n\n")
cat("mse of predicted times of validate data is\n")
cat("mse.bj.val",mse.bj.val,"\n")
}
coef.bj <- NA
if(learner=="linear.regression"){
coef.bj <- c(beta0bj, betabj)
coef.bj <- coef.bj/c(1,normx)
if(twin) nz.bj <- sum(abs(coef(dat1.glm))>0)
else nz.bj <- sum(abs(coef(dat1.glm)[-1])>0)
if(trace) {cat("Number of Non-zero coefficients with BJ boosting excluding but listing intercept is",nz.bj,"\n")
print(coef.bj[abs(coef.bj)>0])
}
}
}
cycle.coef.diff <- NA
if(exists("cycle.coef.bj") && !twin)
cycle.coef.diff <- max(abs(scale(cycle.coef.bj, coef.bj, FALSE)))
interactions <- NULL
d <- ncol(x)
ind <- matrix(NA,ncol=2,nrow=d+d*(d-1)/2)
kk <- 1
for(i in 1:d)
for(j in i:d){
ind[kk,1] <- i; ind[kk,2] <- j
kk <- kk + 1
}
if(rel.inf && learner=="tree" && degree > 1){
vim <- summary(dat1.glm,plotit=FALSE,order=FALSE)[,2]
interactions <- vim.interactions(dat1.glm,pred.data=x,x.pair=subset(ind,ind[,1]!=ind[,2]),learner="tree",verbose=FALSE)
}
if(learner=="tree"){
if(!twin){
xselect <- summary(dat1.glm,order=FALSE,plotit=FALSE)[,2]
xselect <- ifelse(xselect > 0, 1, 0)
}
else{
xselect <- rep(0,dim(x)[2])
xselect[dat1.glm$xselect] <- 1
}
}
else if(learner=="mars"){
vim <- evimp(update(dat1.glm),sqrt.=TRUE,trim=FALSE)[,c(1,4)]
vim <- vim[order(vim[,1]),]
vim <- vim[,2]
xselect <- ifelse(vim > 0, 1, 0)
}
else if(learner=="linear.regression")
xselect <- ifelse(abs(coef.bj[-1]) > 0, 1, 0)
else if(learner=="enet"){
tmp <- predict(dat1.glm, type="coef", s=s, mode="fraction")$coef
xselect <- ifelse(abs(tmp) > 0, 1, 0)
}
else if(learner %in% c("enet2", "mnet", "snet"))
xselect <- ifelse(abs(coef.ncv[-1]) > 0, 1, 0)
else if(learner=="pspline"){
if(!twin){
xselect <- rep(0,dim(x)[2])
tmp <- unique(dat1.glm$xselect())-1
xselect[tmp] <- 1
}
else{
xselect <- rep(0,dim(x)[2])
xselect[dat1.glm$xselect] <- 1
}
}
else if(learner=="acosso"){
if(dat1.glm$order==1)
xselect <- ifelse(dat1.glm$theta > 0, 1, 0)
else{
ind <- gen.ind(p,learner="acosso")
xselect <- rep(0,dim(x)[2])
tmp <- unique(as.vector(ind[dat1.glm$theta > 0,]))
xselect[tmp] <- 1
}}
if(learner=="mars" && degree > 1 && vimpint){
ind <- gen.ind(p)
interactions <- vim.interactions(dat1.glm,pred.data=x,x.pair=subset(ind,ind[,1]!=ind[,2]),learner="mars",verbose=FALSE)
}
if(learner=="tree" && !twin)
vim <- summary(dat1.glm,plotit=FALSE,order=FALSE)[,2]
mse.tr <- NULL
if(learner=="enet" && tuning){
mse.tr <- sum((ynew - predict(dat1.glm, x, s=s, mode="frac", type="fit")$fit)^2)
b <- predict(dat1.glm, type="coef", s=s, mode="frac")$coef
if(any(abs(b) > 0)){
b <- which(abs(b) > 0)
x0 <- as.matrix(x[,b])
if(lamb==0) q <- dim(x0)[2]
else {q <- sum(diag(x0 %*% solve(t(x0) %*% x0 + diag(lamb, nrow=dim(x0)[2])) %*% t(x0)))
}
}
else q <- 0
mse.tr <- mse.tr/(length(ynew) - q)^2
}
else if(learner=="enet")
coef.bj <- c(beta0.enet, beta.enet)
else if(learner %in%c("enet2", "mnet", "snet"))
coef.bj <- coef.ncv
if(!is.null(vim)) vim <- 100*vim/sum(vim)
RET <- list(x=x,y=y,cens=cens,ynew=ynew,res.fit=dat1.glm,learner=learner,degree=degree,mse=mse,nz.bj.iter=nz.bj.iter,mse.bj=mse.bj,mse.bj.val=mse.bj.val,nz.bj=nz.bj,mse.all=mseun[1:(k-1)],yhat=Fboost,ybstdiff=c(NA,ybstdiff[1:(k-1)]),ybstcon = ybstcon,coef.bj=coef.bj,pred.bj=pred.bj,cycleperiod=cycleperiod,cycle.coef.diff = cycle.coef.diff,nonconv=nonconv,fnorm2=fnorm2,vim=vim,interactions=interactions,mselect=mselect,contype=contype,xselect=xselect,lamb=lamb, s=s, mse.tr=mse.tr,valdata=valdata, twin=twin)
RET$call <- match.call()
class(RET) <- "bujar"
return(RET)
}
gen.ind <- function(d,learner="tree"){
ind <- matrix(NA,ncol=2,nrow=d+d*(d-1)/2)
if(learner=="mars"){
kk <- 1
for(i in 1:d)
for(j in i:d){
ind[kk,1] <- i; ind[kk,2] <- j
kk <- kk + 1
}
}
else if(learner=="tree" || learner=="acosso"){
ind[1:d,] <- cbind(1:d,1:d)
next.ind <- d+1
for(i in 1:(d-1))
for(j in ((i+1): d)){
ind[next.ind,] <- cbind(i,j)
next.ind <- next.ind + 1
}
}
ind
}
convbujar <- function(x){
ybstdiff <- x$ybstdiff
ybstcon <- x$ybstcon
mseun <- x$mse.all
mse <- x$mse
fnorm2 <- x$fnorm2
plot(ybstcon, type="b",xlab="Buckley-James estimator iteration",ylab="Convergence criterion",ylim=c(0,0.01))
}
nxselect <- function(obj, varpos) sum(obj$xselect[varpos] == 1)
print.bujar <- function(x, ...) {
cat("\n")
cat("\t Models Fitted with Buckley-James Regression\n")
cat("\n")
if (!is.null(x$call))
cat("Call:\n", deparse(x$call), "\n\n", sep = "")
cat("\n")
if(x$learner%in%c("linear.regression","mars","pspline","tree"))
cat("Base learner: ", x$learner, "\n")
else cat("Regression methods: ", x$learner, "\n")
cat("\n")
if(x$learner=="linear.regression"){
cat("Coefficients: \n")
cf <- x$coef.bj
print(cf)
cat("\n")
}
invisible(x)
}
coef.bujar <- function(object, ...) {
if(!object$learner %in% c("linear.regression","pspline","enet", "enet2", "mnet", "snet"))
stop("Coefficients Not implemented for learner ",object$learner,"\n")
object$coef.bj
}
plot.bujar <- function(x, ...){
if(!x$learner %in% c("mars", "pspline", "acosso"))
plot(x$res.fit)
else stop("Not implemented for learner ",x$learner,"\n")
}
predict.bujar <- function(object, newx=NULL, ...){
if(is.null(newx)) return(object$yhat)
if(dim(newx)[2]!=dim(object$x)[2]) stop("newx should have the same number of predictors as x\n")
learner <- object$learner
dat1.glm <- object$res.fit
if(learner=="linear.regression")
object$coef.bj[1] + as.matrix(newx) %*% as.vector(object$coef.bj[-1])
else if(learner %in% c("pspline", "mars"))
pred.bj <- predict(dat1.glm, newdata=newx)
else if(learner=="tree"){
twin <- object$twin
if(!twin) pred.bj <- predict(dat1.glm, newdata=as.data.frame(newx),n.trees=dat1.glm$n.trees)
else pred.bj <- predict(dat1.glm, newdata=newx)
}
else if(learner=="enet")
pred.bj <- predict(dat1.glm, newx=newx, s=object$s, type="fit", mode="fraction")$fit
else if(learner %in%c("enet2", "mnet", "snet")){
mselect <- object$mselect
k <- length(mselect)
pred.bj <- predict(dat1.glm, newx=as.matrix(newx), type="response", which=mselect[k])
}
}
summary.bujar <- function(object, ...)
summary(object$res.fit, ...)
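## Hypothetical usage sketch (simulated right-censored data; assumes the
## bujar package's boosting dependencies, e.g. mboost, are installed):
# set.seed(1)
# n <- 100; p <- 5
# x <- matrix(rnorm(n * p), n, p)
# logtime <- 1 + x[, 1] - 0.5 * x[, 2] + rnorm(n)
# censtime <- 1 + rnorm(n)
# y <- pmin(logtime, censtime)
# cens <- as.numeric(logtime <= censtime)
# fit <- bujar(y = y, cens = cens, x = x, learner = "linear.regression")
# coef(fit)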
|
qlnormMixAlt <-
function (p, mean1 = exp(1/2), cv1 = sqrt(exp(1) - 1), mean2 = exp(1/2),
cv2 = sqrt(exp(1) - 1), p.mix = 0.5)
{
names.p <- names(p)
arg.mat <- cbind.no.warn(p = as.vector(p), mean1 = as.vector(mean1),
cv1 = as.vector(cv1), mean2 = as.vector(mean2), cv2 = as.vector(cv2),
p.mix = as.vector(p.mix))
na.index <- is.na.matrix(arg.mat)
if (all(na.index))
q <- rep(NA, nrow(arg.mat))
else {
q <- numeric(nrow(arg.mat))
q[na.index] <- NA
q.no.na <- q[!na.index]
for (i in c("p", "mean1", "cv1", "mean2", "cv2", "p.mix")) assign(i,
arg.mat[!na.index, i])
if (any(p < 0 | p > 1))
stop("All non-missing values of 'p' must be between 0 and 1.")
if (any(c(mean1, mean2, cv1, cv2) < .Machine$double.eps))
stop("All non-missing values of 'mean1', 'mean2', 'cv1', and 'cv2' must be positive.")
if (any(p.mix < 0 | p.mix > 1))
stop("All non-missing values of 'p.mix' must be between 0 and 1.")
q.no.na[p == 0] <- 0
q.no.na[p == 1] <- Inf
index <- (1:length(q.no.na))[0 < p & p < 1]
if (any(index)) {
o.fcn <- function(q, mean1, cv1, mean2, cv2, p.mix,
p) {
(plnormMixAlt(q, mean1, cv1, mean2, cv2, p.mix) -
p)^2
}
for (i in index) {
q.no.na[i] <- nlminb(start = (1 - p.mix[i]) *
qlnormAlt(p[i], mean1[i], cv1[i]) + p.mix[i] *
qlnormAlt(p[i], mean2[i], cv2[i]), o.fcn, lower = 0,
mean1 = mean1[i], cv1 = cv1[i], mean2 = mean2[i],
cv2 = cv2[i], p.mix = p.mix[i], p = p[i])$par
}
}
q[!na.index] <- q.no.na
}
if (!is.null(names.p))
names(q) <- rep(names.p, length = length(q))
q
}
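## Hypothetical usage sketch (assumes the EnvStats package, which provides
## qlnormAlt() and plnormMixAlt()):
# q <- qlnormMixAlt(p = 0.9, mean1 = 1, cv1 = 0.5,
#                   mean2 = 5, cv2 = 1, p.mix = 0.3)
# plnormMixAlt(q, 1, 0.5, 5, 1, 0.3)  # should recover ~0.9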
|
par.fda.usc<-list()
par.fda.usc$verbose <- FALSE
par.fda.usc$trace <- FALSE
par.fda.usc$warning <- FALSE
par.fda.usc$ncores <- 1
par.fda.usc$int.method <- "TRAPZ"
par.fda.usc$eps <- as.double(.Machine$double.eps*10)
ops.fda.usc = function(verbose = FALSE,trace = FALSE,warning = FALSE,
ncores = NULL,
int.method = "TRAPZ",
eps = as.double(.Machine$double.eps*10)){
if (is.null(ncores)) ncores = max(parallel::detectCores() -1,1)
.par.fda.usc = list()
.par.fda.usc$verbose = verbose
.par.fda.usc$trace = trace
.par.fda.usc$warning = warning
.par.fda.usc$ncores = ncores
.par.fda.usc$int.method = int.method
.par.fda.usc$eps = eps
if (ncores==1) {
foreach::registerDoSEQ()
} else{
if (foreach::getDoParWorkers()!=ncores){
cl <- suppressWarnings(parallel::makePSOCKcluster(ncores ))
doParallel::registerDoParallel(cl)
}
}
e<-environment(ops.fda.usc)
par.unlock<-list("sym"="par.fda.usc","env"=e)
do.call("unlockBinding",par.unlock)
assign("par.fda.usc", .par.fda.usc, envir = e)
get("par.fda.usc", envir = e)
return(.par.fda.usc)
}
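## Hypothetical usage sketch (assumes foreach/doParallel are installed, as
## required by ops.fda.usc() itself):
# opts <- ops.fda.usc(ncores = 1, eps = .Machine$double.eps * 100)
# opts$int.method  # "TRAPZ"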
|
robomaker_batch_delete_worlds <- function(worlds) {
op <- new_operation(
name = "BatchDeleteWorlds",
http_method = "POST",
http_path = "/batchDeleteWorlds",
paginator = list()
)
input <- .robomaker$batch_delete_worlds_input(worlds = worlds)
output <- .robomaker$batch_delete_worlds_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$batch_delete_worlds <- robomaker_batch_delete_worlds
robomaker_batch_describe_simulation_job <- function(jobs) {
op <- new_operation(
name = "BatchDescribeSimulationJob",
http_method = "POST",
http_path = "/batchDescribeSimulationJob",
paginator = list()
)
input <- .robomaker$batch_describe_simulation_job_input(jobs = jobs)
output <- .robomaker$batch_describe_simulation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$batch_describe_simulation_job <- robomaker_batch_describe_simulation_job
robomaker_cancel_deployment_job <- function(job) {
op <- new_operation(
name = "CancelDeploymentJob",
http_method = "POST",
http_path = "/cancelDeploymentJob",
paginator = list()
)
input <- .robomaker$cancel_deployment_job_input(job = job)
output <- .robomaker$cancel_deployment_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$cancel_deployment_job <- robomaker_cancel_deployment_job
robomaker_cancel_simulation_job <- function(job) {
op <- new_operation(
name = "CancelSimulationJob",
http_method = "POST",
http_path = "/cancelSimulationJob",
paginator = list()
)
input <- .robomaker$cancel_simulation_job_input(job = job)
output <- .robomaker$cancel_simulation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$cancel_simulation_job <- robomaker_cancel_simulation_job
robomaker_cancel_simulation_job_batch <- function(batch) {
op <- new_operation(
name = "CancelSimulationJobBatch",
http_method = "POST",
http_path = "/cancelSimulationJobBatch",
paginator = list()
)
input <- .robomaker$cancel_simulation_job_batch_input(batch = batch)
output <- .robomaker$cancel_simulation_job_batch_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$cancel_simulation_job_batch <- robomaker_cancel_simulation_job_batch
robomaker_cancel_world_export_job <- function(job) {
op <- new_operation(
name = "CancelWorldExportJob",
http_method = "POST",
http_path = "/cancelWorldExportJob",
paginator = list()
)
input <- .robomaker$cancel_world_export_job_input(job = job)
output <- .robomaker$cancel_world_export_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$cancel_world_export_job <- robomaker_cancel_world_export_job
robomaker_cancel_world_generation_job <- function(job) {
op <- new_operation(
name = "CancelWorldGenerationJob",
http_method = "POST",
http_path = "/cancelWorldGenerationJob",
paginator = list()
)
input <- .robomaker$cancel_world_generation_job_input(job = job)
output <- .robomaker$cancel_world_generation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$cancel_world_generation_job <- robomaker_cancel_world_generation_job
robomaker_create_deployment_job <- function(deploymentConfig = NULL, clientRequestToken, fleet, deploymentApplicationConfigs, tags = NULL) {
op <- new_operation(
name = "CreateDeploymentJob",
http_method = "POST",
http_path = "/createDeploymentJob",
paginator = list()
)
input <- .robomaker$create_deployment_job_input(deploymentConfig = deploymentConfig, clientRequestToken = clientRequestToken, fleet = fleet, deploymentApplicationConfigs = deploymentApplicationConfigs, tags = tags)
output <- .robomaker$create_deployment_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_deployment_job <- robomaker_create_deployment_job
robomaker_create_fleet <- function(name, tags = NULL) {
op <- new_operation(
name = "CreateFleet",
http_method = "POST",
http_path = "/createFleet",
paginator = list()
)
input <- .robomaker$create_fleet_input(name = name, tags = tags)
output <- .robomaker$create_fleet_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_fleet <- robomaker_create_fleet
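## Hypothetical usage sketch (assumes the paws package with AWS credentials
## configured, e.g. via environment variables or a profile):
# svc <- paws::robomaker()
# svc$create_fleet(name = "my-fleet")
# svc$batch_describe_simulation_job(jobs = list("arn:aws:robomaker:..."))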
robomaker_create_robot <- function(name, architecture, greengrassGroupId, tags = NULL) {
op <- new_operation(
name = "CreateRobot",
http_method = "POST",
http_path = "/createRobot",
paginator = list()
)
input <- .robomaker$create_robot_input(name = name, architecture = architecture, greengrassGroupId = greengrassGroupId, tags = tags)
output <- .robomaker$create_robot_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_robot <- robomaker_create_robot
robomaker_create_robot_application <- function(name, sources, robotSoftwareSuite, tags = NULL) {
op <- new_operation(
name = "CreateRobotApplication",
http_method = "POST",
http_path = "/createRobotApplication",
paginator = list()
)
input <- .robomaker$create_robot_application_input(name = name, sources = sources, robotSoftwareSuite = robotSoftwareSuite, tags = tags)
output <- .robomaker$create_robot_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_robot_application <- robomaker_create_robot_application
robomaker_create_robot_application_version <- function(application, currentRevisionId = NULL) {
op <- new_operation(
name = "CreateRobotApplicationVersion",
http_method = "POST",
http_path = "/createRobotApplicationVersion",
paginator = list()
)
input <- .robomaker$create_robot_application_version_input(application = application, currentRevisionId = currentRevisionId)
output <- .robomaker$create_robot_application_version_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_robot_application_version <- robomaker_create_robot_application_version
robomaker_create_simulation_application <- function(name, sources, simulationSoftwareSuite, robotSoftwareSuite, renderingEngine = NULL, tags = NULL) {
op <- new_operation(
name = "CreateSimulationApplication",
http_method = "POST",
http_path = "/createSimulationApplication",
paginator = list()
)
input <- .robomaker$create_simulation_application_input(name = name, sources = sources, simulationSoftwareSuite = simulationSoftwareSuite, robotSoftwareSuite = robotSoftwareSuite, renderingEngine = renderingEngine, tags = tags)
output <- .robomaker$create_simulation_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_simulation_application <- robomaker_create_simulation_application
robomaker_create_simulation_application_version <- function(application, currentRevisionId = NULL) {
op <- new_operation(
name = "CreateSimulationApplicationVersion",
http_method = "POST",
http_path = "/createSimulationApplicationVersion",
paginator = list()
)
input <- .robomaker$create_simulation_application_version_input(application = application, currentRevisionId = currentRevisionId)
output <- .robomaker$create_simulation_application_version_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_simulation_application_version <- robomaker_create_simulation_application_version
robomaker_create_simulation_job <- function(clientRequestToken = NULL, outputLocation = NULL, loggingConfig = NULL, maxJobDurationInSeconds, iamRole, failureBehavior = NULL, robotApplications = NULL, simulationApplications = NULL, dataSources = NULL, tags = NULL, vpcConfig = NULL, compute = NULL) {
op <- new_operation(
name = "CreateSimulationJob",
http_method = "POST",
http_path = "/createSimulationJob",
paginator = list()
)
input <- .robomaker$create_simulation_job_input(clientRequestToken = clientRequestToken, outputLocation = outputLocation, loggingConfig = loggingConfig, maxJobDurationInSeconds = maxJobDurationInSeconds, iamRole = iamRole, failureBehavior = failureBehavior, robotApplications = robotApplications, simulationApplications = simulationApplications, dataSources = dataSources, tags = tags, vpcConfig = vpcConfig, compute = compute)
output <- .robomaker$create_simulation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_simulation_job <- robomaker_create_simulation_job
robomaker_create_world_export_job <- function(clientRequestToken = NULL, worlds, outputLocation, iamRole, tags = NULL) {
op <- new_operation(
name = "CreateWorldExportJob",
http_method = "POST",
http_path = "/createWorldExportJob",
paginator = list()
)
input <- .robomaker$create_world_export_job_input(clientRequestToken = clientRequestToken, worlds = worlds, outputLocation = outputLocation, iamRole = iamRole, tags = tags)
output <- .robomaker$create_world_export_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_world_export_job <- robomaker_create_world_export_job
robomaker_create_world_generation_job <- function(clientRequestToken = NULL, template, worldCount, tags = NULL, worldTags = NULL) {
op <- new_operation(
name = "CreateWorldGenerationJob",
http_method = "POST",
http_path = "/createWorldGenerationJob",
paginator = list()
)
input <- .robomaker$create_world_generation_job_input(clientRequestToken = clientRequestToken, template = template, worldCount = worldCount, tags = tags, worldTags = worldTags)
output <- .robomaker$create_world_generation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_world_generation_job <- robomaker_create_world_generation_job
robomaker_create_world_template <- function(clientRequestToken = NULL, name = NULL, templateBody = NULL, templateLocation = NULL, tags = NULL) {
op <- new_operation(
name = "CreateWorldTemplate",
http_method = "POST",
http_path = "/createWorldTemplate",
paginator = list()
)
input <- .robomaker$create_world_template_input(clientRequestToken = clientRequestToken, name = name, templateBody = templateBody, templateLocation = templateLocation, tags = tags)
output <- .robomaker$create_world_template_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$create_world_template <- robomaker_create_world_template
robomaker_delete_fleet <- function(fleet) {
op <- new_operation(
name = "DeleteFleet",
http_method = "POST",
http_path = "/deleteFleet",
paginator = list()
)
input <- .robomaker$delete_fleet_input(fleet = fleet)
output <- .robomaker$delete_fleet_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$delete_fleet <- robomaker_delete_fleet
robomaker_delete_robot <- function(robot) {
op <- new_operation(
name = "DeleteRobot",
http_method = "POST",
http_path = "/deleteRobot",
paginator = list()
)
input <- .robomaker$delete_robot_input(robot = robot)
output <- .robomaker$delete_robot_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$delete_robot <- robomaker_delete_robot
robomaker_delete_robot_application <- function(application, applicationVersion = NULL) {
op <- new_operation(
name = "DeleteRobotApplication",
http_method = "POST",
http_path = "/deleteRobotApplication",
paginator = list()
)
input <- .robomaker$delete_robot_application_input(application = application, applicationVersion = applicationVersion)
output <- .robomaker$delete_robot_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$delete_robot_application <- robomaker_delete_robot_application
robomaker_delete_simulation_application <- function(application, applicationVersion = NULL) {
op <- new_operation(
name = "DeleteSimulationApplication",
http_method = "POST",
http_path = "/deleteSimulationApplication",
paginator = list()
)
input <- .robomaker$delete_simulation_application_input(application = application, applicationVersion = applicationVersion)
output <- .robomaker$delete_simulation_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$delete_simulation_application <- robomaker_delete_simulation_application
robomaker_delete_world_template <- function(template) {
op <- new_operation(
name = "DeleteWorldTemplate",
http_method = "POST",
http_path = "/deleteWorldTemplate",
paginator = list()
)
input <- .robomaker$delete_world_template_input(template = template)
output <- .robomaker$delete_world_template_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$delete_world_template <- robomaker_delete_world_template
robomaker_deregister_robot <- function(fleet, robot) {
op <- new_operation(
name = "DeregisterRobot",
http_method = "POST",
http_path = "/deregisterRobot",
paginator = list()
)
input <- .robomaker$deregister_robot_input(fleet = fleet, robot = robot)
output <- .robomaker$deregister_robot_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$deregister_robot <- robomaker_deregister_robot
robomaker_describe_deployment_job <- function(job) {
op <- new_operation(
name = "DescribeDeploymentJob",
http_method = "POST",
http_path = "/describeDeploymentJob",
paginator = list()
)
input <- .robomaker$describe_deployment_job_input(job = job)
output <- .robomaker$describe_deployment_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_deployment_job <- robomaker_describe_deployment_job
robomaker_describe_fleet <- function(fleet) {
op <- new_operation(
name = "DescribeFleet",
http_method = "POST",
http_path = "/describeFleet",
paginator = list()
)
input <- .robomaker$describe_fleet_input(fleet = fleet)
output <- .robomaker$describe_fleet_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_fleet <- robomaker_describe_fleet
robomaker_describe_robot <- function(robot) {
op <- new_operation(
name = "DescribeRobot",
http_method = "POST",
http_path = "/describeRobot",
paginator = list()
)
input <- .robomaker$describe_robot_input(robot = robot)
output <- .robomaker$describe_robot_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_robot <- robomaker_describe_robot
robomaker_describe_robot_application <- function(application, applicationVersion = NULL) {
op <- new_operation(
name = "DescribeRobotApplication",
http_method = "POST",
http_path = "/describeRobotApplication",
paginator = list()
)
input <- .robomaker$describe_robot_application_input(application = application, applicationVersion = applicationVersion)
output <- .robomaker$describe_robot_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_robot_application <- robomaker_describe_robot_application
robomaker_describe_simulation_application <- function(application, applicationVersion = NULL) {
op <- new_operation(
name = "DescribeSimulationApplication",
http_method = "POST",
http_path = "/describeSimulationApplication",
paginator = list()
)
input <- .robomaker$describe_simulation_application_input(application = application, applicationVersion = applicationVersion)
output <- .robomaker$describe_simulation_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_simulation_application <- robomaker_describe_simulation_application
robomaker_describe_simulation_job <- function(job) {
op <- new_operation(
name = "DescribeSimulationJob",
http_method = "POST",
http_path = "/describeSimulationJob",
paginator = list()
)
input <- .robomaker$describe_simulation_job_input(job = job)
output <- .robomaker$describe_simulation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_simulation_job <- robomaker_describe_simulation_job
robomaker_describe_simulation_job_batch <- function(batch) {
op <- new_operation(
name = "DescribeSimulationJobBatch",
http_method = "POST",
http_path = "/describeSimulationJobBatch",
paginator = list()
)
input <- .robomaker$describe_simulation_job_batch_input(batch = batch)
output <- .robomaker$describe_simulation_job_batch_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_simulation_job_batch <- robomaker_describe_simulation_job_batch
robomaker_describe_world <- function(world) {
op <- new_operation(
name = "DescribeWorld",
http_method = "POST",
http_path = "/describeWorld",
paginator = list()
)
input <- .robomaker$describe_world_input(world = world)
output <- .robomaker$describe_world_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_world <- robomaker_describe_world
robomaker_describe_world_export_job <- function(job) {
op <- new_operation(
name = "DescribeWorldExportJob",
http_method = "POST",
http_path = "/describeWorldExportJob",
paginator = list()
)
input <- .robomaker$describe_world_export_job_input(job = job)
output <- .robomaker$describe_world_export_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_world_export_job <- robomaker_describe_world_export_job
robomaker_describe_world_generation_job <- function(job) {
op <- new_operation(
name = "DescribeWorldGenerationJob",
http_method = "POST",
http_path = "/describeWorldGenerationJob",
paginator = list()
)
input <- .robomaker$describe_world_generation_job_input(job = job)
output <- .robomaker$describe_world_generation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_world_generation_job <- robomaker_describe_world_generation_job
robomaker_describe_world_template <- function(template) {
op <- new_operation(
name = "DescribeWorldTemplate",
http_method = "POST",
http_path = "/describeWorldTemplate",
paginator = list()
)
input <- .robomaker$describe_world_template_input(template = template)
output <- .robomaker$describe_world_template_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$describe_world_template <- robomaker_describe_world_template
robomaker_get_world_template_body <- function(template = NULL, generationJob = NULL) {
op <- new_operation(
name = "GetWorldTemplateBody",
http_method = "POST",
http_path = "/getWorldTemplateBody",
paginator = list()
)
input <- .robomaker$get_world_template_body_input(template = template, generationJob = generationJob)
output <- .robomaker$get_world_template_body_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$get_world_template_body <- robomaker_get_world_template_body
robomaker_list_deployment_jobs <- function(filters = NULL, nextToken = NULL, maxResults = NULL) {
op <- new_operation(
name = "ListDeploymentJobs",
http_method = "POST",
http_path = "/listDeploymentJobs",
paginator = list()
)
input <- .robomaker$list_deployment_jobs_input(filters = filters, nextToken = nextToken, maxResults = maxResults)
output <- .robomaker$list_deployment_jobs_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_deployment_jobs <- robomaker_list_deployment_jobs
robomaker_list_fleets <- function(nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListFleets",
http_method = "POST",
http_path = "/listFleets",
paginator = list()
)
input <- .robomaker$list_fleets_input(nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_fleets_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_fleets <- robomaker_list_fleets
robomaker_list_robot_applications <- function(versionQualifier = NULL, nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListRobotApplications",
http_method = "POST",
http_path = "/listRobotApplications",
paginator = list()
)
input <- .robomaker$list_robot_applications_input(versionQualifier = versionQualifier, nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_robot_applications_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_robot_applications <- robomaker_list_robot_applications
robomaker_list_robots <- function(nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListRobots",
http_method = "POST",
http_path = "/listRobots",
paginator = list()
)
input <- .robomaker$list_robots_input(nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_robots_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_robots <- robomaker_list_robots
robomaker_list_simulation_applications <- function(versionQualifier = NULL, nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListSimulationApplications",
http_method = "POST",
http_path = "/listSimulationApplications",
paginator = list()
)
input <- .robomaker$list_simulation_applications_input(versionQualifier = versionQualifier, nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_simulation_applications_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_simulation_applications <- robomaker_list_simulation_applications
robomaker_list_simulation_job_batches <- function(nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListSimulationJobBatches",
http_method = "POST",
http_path = "/listSimulationJobBatches",
paginator = list()
)
input <- .robomaker$list_simulation_job_batches_input(nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_simulation_job_batches_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_simulation_job_batches <- robomaker_list_simulation_job_batches
robomaker_list_simulation_jobs <- function(nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListSimulationJobs",
http_method = "POST",
http_path = "/listSimulationJobs",
paginator = list()
)
input <- .robomaker$list_simulation_jobs_input(nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_simulation_jobs_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_simulation_jobs <- robomaker_list_simulation_jobs
robomaker_list_tags_for_resource <- function(resourceArn) {
op <- new_operation(
name = "ListTagsForResource",
http_method = "GET",
http_path = "/tags/{resourceArn}",
paginator = list()
)
input <- .robomaker$list_tags_for_resource_input(resourceArn = resourceArn)
output <- .robomaker$list_tags_for_resource_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_tags_for_resource <- robomaker_list_tags_for_resource
robomaker_list_world_export_jobs <- function(nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListWorldExportJobs",
http_method = "POST",
http_path = "/listWorldExportJobs",
paginator = list()
)
input <- .robomaker$list_world_export_jobs_input(nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_world_export_jobs_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_world_export_jobs <- robomaker_list_world_export_jobs
robomaker_list_world_generation_jobs <- function(nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListWorldGenerationJobs",
http_method = "POST",
http_path = "/listWorldGenerationJobs",
paginator = list()
)
input <- .robomaker$list_world_generation_jobs_input(nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_world_generation_jobs_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_world_generation_jobs <- robomaker_list_world_generation_jobs
robomaker_list_world_templates <- function(nextToken = NULL, maxResults = NULL) {
op <- new_operation(
name = "ListWorldTemplates",
http_method = "POST",
http_path = "/listWorldTemplates",
paginator = list()
)
input <- .robomaker$list_world_templates_input(nextToken = nextToken, maxResults = maxResults)
output <- .robomaker$list_world_templates_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_world_templates <- robomaker_list_world_templates
robomaker_list_worlds <- function(nextToken = NULL, maxResults = NULL, filters = NULL) {
op <- new_operation(
name = "ListWorlds",
http_method = "POST",
http_path = "/listWorlds",
paginator = list()
)
input <- .robomaker$list_worlds_input(nextToken = nextToken, maxResults = maxResults, filters = filters)
output <- .robomaker$list_worlds_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$list_worlds <- robomaker_list_worlds
robomaker_register_robot <- function(fleet, robot) {
op <- new_operation(
name = "RegisterRobot",
http_method = "POST",
http_path = "/registerRobot",
paginator = list()
)
input <- .robomaker$register_robot_input(fleet = fleet, robot = robot)
output <- .robomaker$register_robot_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$register_robot <- robomaker_register_robot
robomaker_restart_simulation_job <- function(job) {
op <- new_operation(
name = "RestartSimulationJob",
http_method = "POST",
http_path = "/restartSimulationJob",
paginator = list()
)
input <- .robomaker$restart_simulation_job_input(job = job)
output <- .robomaker$restart_simulation_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$restart_simulation_job <- robomaker_restart_simulation_job
robomaker_start_simulation_job_batch <- function(clientRequestToken = NULL, batchPolicy = NULL, createSimulationJobRequests, tags = NULL) {
op <- new_operation(
name = "StartSimulationJobBatch",
http_method = "POST",
http_path = "/startSimulationJobBatch",
paginator = list()
)
input <- .robomaker$start_simulation_job_batch_input(clientRequestToken = clientRequestToken, batchPolicy = batchPolicy, createSimulationJobRequests = createSimulationJobRequests, tags = tags)
output <- .robomaker$start_simulation_job_batch_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$start_simulation_job_batch <- robomaker_start_simulation_job_batch
robomaker_sync_deployment_job <- function(clientRequestToken, fleet) {
op <- new_operation(
name = "SyncDeploymentJob",
http_method = "POST",
http_path = "/syncDeploymentJob",
paginator = list()
)
input <- .robomaker$sync_deployment_job_input(clientRequestToken = clientRequestToken, fleet = fleet)
output <- .robomaker$sync_deployment_job_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$sync_deployment_job <- robomaker_sync_deployment_job
robomaker_tag_resource <- function(resourceArn, tags) {
op <- new_operation(
name = "TagResource",
http_method = "POST",
http_path = "/tags/{resourceArn}",
paginator = list()
)
input <- .robomaker$tag_resource_input(resourceArn = resourceArn, tags = tags)
output <- .robomaker$tag_resource_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$tag_resource <- robomaker_tag_resource
robomaker_untag_resource <- function(resourceArn, tagKeys) {
op <- new_operation(
name = "UntagResource",
http_method = "DELETE",
http_path = "/tags/{resourceArn}",
paginator = list()
)
input <- .robomaker$untag_resource_input(resourceArn = resourceArn, tagKeys = tagKeys)
output <- .robomaker$untag_resource_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$untag_resource <- robomaker_untag_resource
robomaker_update_robot_application <- function(application, sources, robotSoftwareSuite, currentRevisionId = NULL) {
op <- new_operation(
name = "UpdateRobotApplication",
http_method = "POST",
http_path = "/updateRobotApplication",
paginator = list()
)
input <- .robomaker$update_robot_application_input(application = application, sources = sources, robotSoftwareSuite = robotSoftwareSuite, currentRevisionId = currentRevisionId)
output <- .robomaker$update_robot_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$update_robot_application <- robomaker_update_robot_application
robomaker_update_simulation_application <- function(application, sources, simulationSoftwareSuite, robotSoftwareSuite, renderingEngine = NULL, currentRevisionId = NULL) {
op <- new_operation(
name = "UpdateSimulationApplication",
http_method = "POST",
http_path = "/updateSimulationApplication",
paginator = list()
)
input <- .robomaker$update_simulation_application_input(application = application, sources = sources, simulationSoftwareSuite = simulationSoftwareSuite, robotSoftwareSuite = robotSoftwareSuite, renderingEngine = renderingEngine, currentRevisionId = currentRevisionId)
output <- .robomaker$update_simulation_application_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$update_simulation_application <- robomaker_update_simulation_application
robomaker_update_world_template <- function(template, name = NULL, templateBody = NULL, templateLocation = NULL) {
op <- new_operation(
name = "UpdateWorldTemplate",
http_method = "POST",
http_path = "/updateWorldTemplate",
paginator = list()
)
input <- .robomaker$update_world_template_input(template = template, name = name, templateBody = templateBody, templateLocation = templateLocation)
output <- .robomaker$update_world_template_output()
config <- get_config()
svc <- .robomaker$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.robomaker$operations$update_world_template <- robomaker_update_world_template
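# Usage sketch (assumptions: valid AWS credentials and a default region are
# configured through the usual paws mechanisms; the ARN below is a placeholder):
#   robomaker_describe_robot(
#     robot = "arn:aws:robomaker:us-east-1:123456789012:robot/MyRobot/1"
#   )
# Each generated operation above builds a request from its op/input/output
# structures and returns the parsed service response.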
|
set.seed(6998768)
context("compute_posterior_interval() classes")
fit_bm <- compute_mallows(potato_visual)
fit_bm$burnin <- 1000
fit_bm_post_alpha <- compute_posterior_intervals(fit_bm, parameter = "alpha")
fit_bm_post_rho <- compute_posterior_intervals(fit_bm, parameter = "rho")
n_items <- ncol(sushi_rankings)
metric <- "footrule"
alpha_vector <- seq(from = 0, to = 15, by = 0.1)
iter <- 1e3
degree <- 10
logz_estimate <- estimate_partition_function(
method = "importance_sampling", alpha_vector = alpha_vector,
n_items = n_items, metric = metric, nmc = iter, degree = degree
)
data <- sushi_rankings[1:100, ]
leap_size <- floor(n_items / 5)
nmc <- N <- 1000
Time <- 20
fit_smc <- smc_mallows_new_users_complete(
R_obs = data, n_items = n_items, metric = metric, leap_size = leap_size,
N = N, Time = Time, logz_estimate = logz_estimate, mcmc_kernel_app = 5,
num_new_obs = 5, alpha_prop_sd = 0.5, lambda = 0.15, alpha_max = 1e6
)
fit_smc_alpha <- fit_smc$alpha_samples[, Time + 1]
fit_smc_post_alpha <- compute_posterior_intervals_alpha(
output = fit_smc_alpha, nmc = nmc, burnin = 0, verbose = FALSE
)
fit_smc_rho <- fit_smc$rho_samples[, , Time + 1]
fit_smc_post_rho <- compute_posterior_intervals_rho(
output = fit_smc_rho, nmc = nmc, burnin = 0,
verbose = FALSE
)
fit_bm_alpha <- fit_bm$alpha
fit_bm_alpha <- dplyr::group_by(fit_bm_alpha, .data$cluster)
class(fit_bm_alpha) <- c(
"posterior_BayesMallows", "grouped_df", "tbl_df", "tbl", "data.frame"
)
fit_bm_post_internal_alpha <- .compute_posterior_intervals(
fit_bm_alpha, "alpha", .95, 3L
)
fit_bm_rho <- fit_bm$rho
fit_bm_rho <- dplyr::group_by(fit_bm_rho, .data$cluster)
class(fit_bm_rho) <- c(
"posterior_BayesMallows", "grouped_df", "tbl_df", "tbl", "data.frame"
)
fit_bm_post_internal_rho <- .compute_posterior_intervals(
fit_bm_rho, "rho", .95, 3L
)
fit_smc_alpha <- data.frame(iteration = seq_len(nmc), value = fit_smc_alpha)
fit_smc_alpha$n_clusters <- 1
fit_smc_alpha$cluster <- "Cluster 1"
fit_smc_alpha <- dplyr::group_by(fit_smc_alpha, .data$cluster)
class(fit_smc_alpha) <- c(
"posterior_SMCMallows", "grouped_df", "tbl_df", "tbl", "data.frame"
)
fit_smc_post_internal_alpha <- .compute_posterior_intervals(
fit_smc_alpha, "alpha", .95, 3L
)
fit_smc_rho <- smc_processing(fit_smc_rho)
fit_smc_rho$n_clusters <- 1
fit_smc_rho$cluster <- "Cluster 1"
fit_smc_rho <- dplyr::group_by(fit_smc_rho, .data$cluster)
class(fit_smc_rho) <- c(
"posterior_SMCMallows", "grouped_df", "tbl_df", "tbl", "data.frame"
)
fit_smc_post_internal_rho <- .compute_posterior_intervals(
fit_smc_alpha, "rho", .95, 3L, discrete = TRUE
)
test_that("Classes are correctly attributed", {
expect_s3_class(fit_bm, "BayesMallows")
expect_s3_class(fit_smc, "SMCMallows")
expect_s3_class(fit_bm_post_alpha, "data.frame")
expect_s3_class(fit_bm_post_rho, "data.frame")
expect_s3_class(fit_smc_post_alpha, "data.frame")
expect_s3_class(fit_smc_post_rho, "data.frame")
expect_error(.compute_posterior_intervals(fit_bm_post_alpha))
expect_error(.compute_posterior_intervals(fit_bm_post_rho))
expect_error(.compute_posterior_intervals(fit_smc_post_alpha))
expect_error(.compute_posterior_intervals(fit_smc_post_rho))
expect_s3_class(fit_bm_post_internal_alpha, "data.frame")
expect_s3_class(fit_bm_post_internal_rho, "data.frame")
expect_s3_class(fit_smc_post_internal_alpha, "data.frame")
expect_s3_class(fit_smc_post_internal_rho, "data.frame")
})
context("compute_consensus() classes")
fit_bm_consensus_cp <- compute_consensus(fit_bm, type = "CP")
fit_bm_consensus_map <- compute_consensus(fit_bm, type = "MAP")
fit_smc_rho <- fit_smc$rho_samples[, , Time + 1]
fit_smc_consensus_cp <- compute_rho_consensus(
output = fit_smc_rho, nmc = nmc, burnin = 0, C = 1, type = "CP"
)
fit_smc_consensus_map <- compute_rho_consensus(
output = fit_smc_rho, nmc = nmc, burnin = 0, C = 1, type = "MAP"
)
test_that("Classes are correctly attributed", {
expect_s3_class(fit_bm_consensus_cp, "data.frame")
expect_s3_class(fit_bm_consensus_map, "data.frame")
expect_s3_class(fit_smc_consensus_cp, "data.frame")
expect_s3_class(fit_smc_consensus_map, "data.frame")
})
|
NULL
sw_tidy_decomp.decomposed.ts <- function(x, timetk_idx = FALSE, rename_index = "index", ...) {
if (timetk_idx) {
if (!has_timetk_idx(x$x)) {
warning("Object has no timetk index. Using default index.")
timetk_idx = FALSE
}
}
ret <- cbind(observed = x$x,
season = x$seasonal,
trend = x$trend,
random = x$random,
seasadj = forecast::seasadj(x))
ret <- tk_tbl(ret, preserve_index = TRUE, rename_index, silent = TRUE)
if (timetk_idx) {
idx <- tk_index(x$x, timetk_idx = TRUE)
if (nrow(ret) != length(idx)) ret <- ret[(nrow(ret) - length(idx) + 1):nrow(ret),]
ret[, rename_index] <- idx
}
ret <- sw_augment_columns(ret, data = NULL, rename_index = rename_index, timetk_idx = timetk_idx)
return(ret)
}
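# Usage sketch (a minimal example, assuming the surrounding package and its
# timetk/tk_tbl dependencies are loaded; USAccDeaths ships with base R):
#   fit <- decompose(USAccDeaths)
#   sw_tidy_decomp(fit)  # one row per time point: observed, season, trend, random, seasadj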
|
tribe <-
function(obj,
keep_obj = FALSE)
{
at <- attributes(obj)
if (is.null(at)) {
at <- nlist()
}
if (keep_obj) {
attr(at, ".obj_tribe") <- obj
}
at
}
"tribe<-" <-
function(obj, value)
{
attributes(obj) <- if (is_empty(value)) NULL else value
obj
}
untribe <-
function(x)
{
obj <- attr(x, ".obj_tribe")
attr(x, ".obj_tribe") <- NULL
attributes(obj) <- x
obj
}
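# Round-trip sketch (assumes nlist() and is_empty() are the package's internal
# helpers; 'extra' is a made-up attribute for illustration):
#   x <- structure(1:3, class = "myclass", extra = "a")
#   at <- tribe(x, keep_obj = TRUE)  # capture attributes and stash x itself
#   at$extra <- "b"                  # edit the attribute list freely
#   y <- untribe(at)                 # rebuild x with the modified attributes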
|
yth_filter <- function(x, h = 8, p = 4, output = c("x", "trend", "cycle", "random"), ...) {
output_args <- c("x","trend", "cycle", "random")
if(length(output) != sum(grepl(paste(output_args, collapse = "|"), output))) {
stop(paste0("Incorrect argument '",
output[!grepl(paste(output_args, collapse = "|"), output)],
"' present in 'output' argument. Must be a character vector ",
"containing 'x', 'trend', 'cycle', or 'random'."))
} else if( is.null(colnames(x)) ) {
warning("Your xts object doesn't have a dimnames attribute, aka names(your_xts) is NULL, which would've produced an error.
Thus it has been given the name 'y' within the scope, and for the output, of this function.")
colnames(x) <- ifelse( is.null(colnames(x)), "y", colnames(x) )
}
neverHP <- yth_glm(x = x , h = h, p = p, ...)
trend <- xts::as.xts(unname(neverHP$fitted.values),
order.by = get(paste0("as.",class(index(x))))(names(neverHP$fitted.values)))
names(trend) <- paste0(names(x),".trend")
if (any(length(output) == 1 & output == "trend")) {return(trend)}
cycle <- xts::as.xts(unname(neverHP$residuals),
order.by = get(paste0("as.",class(index(x))))(names(neverHP$residuals)))
names(cycle) <- paste0(names(x),".cycle")
if (any(length(output) == 1 & output == "cycle")) {return(cycle)}
random <- x-lag(x, k = h, na.pad = TRUE)
names(random) <- paste0(names(x),".random")
if (any(length(output) == 1 & output == "random")) {return(random)}
all <- merge(x, trend, cycle, random)
names(all) <- c(names(x), paste0(names(x),".",
c("trend", "cycle", "random"))
)
if (any(output == "x")) {
index <- grep(paste(output, collapse = "|"), names(all))
return(all[,c(1, index)])
} else {
index <- grep(paste(output, collapse = "|"), names(all))
return(all[,c(index)])
}
}
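# Usage sketch (assumption: GDPC1 is the quarterly real-GDP xts series shipped
# with the package; 100 * log() puts it on the scale used in Hamilton (2018)):
#   gdp <- 100 * log(GDPC1)
#   yth_filter(gdp, h = 8, p = 4, output = c("x", "trend", "cycle"))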
|
library(FSA)
df <- data.frame(y=c(10,-10,runif(28)),x=c(runif(30)),
f=sample(c("A","B","C"),30,replace=TRUE))
slrout <- lm(y~x,data=df)
ivrout <- lm(y~x*f,data=df)
aov1 <- lm(y~f,data=df)
dunnTest(y~f,data=df)
lrt(slrout,com=ivrout)
data(WhitefishLC)
ab1 <- ageBias(scaleC~otolithC,data=WhitefishLC,
ref.lab="Otolith Age",nref.lab="Scale Age")
plot(ab1)
plotAB(ab1)
data(WR79)
WR.age <- subset(WR79, !is.na(age))
WR.age$LCat <- lencat(WR.age$len,w=5)
WR.key <- prop.table(xtabs(~LCat+age,data=WR.age), margin=1)
alkPlot(WR.key,"area")
data(ChinookArg)
lm1 <- lm(w~tl*loc,data=ChinookArg)
lwCompPreds(lm1,xlab="Location")
library(plyr)
library(dplyr)
library(car)
library(dunn.test)
library(lmtest)
library(plotrix)
library(sciplot)
|
localMaxima <- function(x) {
# TRUE wherever the series steps up; the -.Machine$integer.max sentinel makes
# the first comparison TRUE so runs are counted from position 1
y <- diff(c(-.Machine$integer.max, x)) > 0L
# the ends of the increasing (odd-numbered) runs are the local maxima
y <- cumsum(rle(y)$lengths)
y <- y[seq.int(1L, length(y), 2L)]
# a flat start means position 1 is not a strict maximum, so drop it
if (x[[1]] == x[[2]]) {
y <- y[-1]
}
y
}
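# Example: positions 2 and 4 hold the strict local maxima 3 and 5.
#   localMaxima(c(1, 3, 2, 5, 4))  # returns c(2, 4)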
LMDC.select <- function(y, covar, data, tol = .06, pvalue = .05,
plot = FALSE, local.dc = TRUE,
smo = FALSE, verbose = FALSE){
yy <- data[,y]
if (missing(covar)) covar <- names(data)
covar <- setdiff(covar,y)
xx<-data[,covar,drop=F]
nn<-ncol(xx)
dc<-numeric(nn)
for (i in 1:nn){
a<-dcor.xy(xx[,i,drop=F],yy)
dc[i]<-a$estimate
if (verbose) cat(i,a$estimate,a$p.value,a$p.value<pvalue & a$estimate>tol,"\n")
}
if (smo) {
nbase<-ifelse(nn<50,floor(nn/2),floor(nn^(4/5)))
dc1<-fdata2fd(fdata(dc),nbasis=nbase)
dc2<-fdata(dc1,1:nn)$data[1,]
}
else dc2<-dc
regre<-TRUE
if (is.factor(yy)) regre<-FALSE
maxLocal<-max.pc1<-max.pc2<-max.pc3<-NULL
if (local.dc) maxLocal<-localMaxima(dc2)
maxLocal<-unique(c(maxLocal,max.pc1,max.pc3))
maxLocal2<-intersect(which(dc2>tol),maxLocal)
xorder<-order(dc2[maxLocal2],decreasing =TRUE)
dc2<-dc2[maxLocal2][xorder]
maxLocal2<-maxLocal2[xorder]
if (plot){
par(mfrow=c(1,1))
plot(dc2)
lines(dc,col=4)
abline(v=maxLocal,col=2)
abline(v=maxLocal2,col=3,lwd=2)
}
nvar<-length(maxLocal2)
names(dc)<-covar
return(list(dcor=dc,maxLocal=maxLocal2))
}
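# Usage sketch (assumptions: 'df' holds a scalar response 'y' plus discretized
# functional covariates in its remaining columns; dcor.xy() comes from the
# surrounding package):
#   sel <- LMDC.select("y", data = df, tol = 0.06, pvalue = 0.05, plot = TRUE)
#   sel$maxLocal  # indices of the selected local maxima of distance correlation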
LMDC.regre <- function(y,covar,data,newdata,pvalue=.05,
method="lm", par.method = NULL,
plot=FALSE,verbose=FALSE){
edf<-Inf
nvar <- length(covar)
if (missing(newdata)) pred <- FALSE
else pred <- TRUE
if (is.null(covar)){
return(list(model=lm(data[,y]~ 1,data=data), xvar=NULL, pred=rep(mean(data[,y]),length.out=nrow(newdata))))
}
pred0 <- NULL
nvar <- length(covar)
xnames <- covar
xvar<-NULL
ff<-paste(y,"~1")
if (method == "lm"){
par.method$formula <- formula(ff)
par.method$data <- data
model0<-do.call(method,par.method)
for (i in 1:nvar) {
if (verbose) print(i)
xentra<-xnames[i]
xvar2<- c(xvar,xentra)
ff<-paste(y,"~",paste(xvar2,collapse="+"),collapse="")
if (verbose) {print("lm"); print(1);print(ff)}
par.method$formula <- formula(ff)
model <- do.call(method,par.method)
if (rev(summary(model)$coefficients[,"Pr(>|t|)"])[1]<pvalue){
if (verbose) { print("entra");print(xentra);print(summary(model))}
model0 <- model
xvar <- xvar2
}
}
edf <- summary(model0)$df[1]
nvar<-edf-1
}
if (method == "gam"){
if (!is.null(par.method$k)) {
ik<-which(names(par.method)=="k")
print(par.method)
k<-par.method$k
par.method<-par.method[-ik]
}
else k <- 4
ff<-as.formula(ff)
model0 <- gam(ff,data=data)
par.method2 <- list("formula"=ff,"data"= data)
par.method<-c(par.method2,par.method)
for (i in 1:nvar) {
if (verbose) print(i)
xentra<-xnames[i]
xvar2<- c(xvar,xentra)
ff<-as.formula(paste(y,"~",paste("s(",xvar2,",k=",k,")",collapse="+"),collapse=""))
par.method$formula<-ff
if (verbose) {print("gam"); print(1);print(ff)}
model <- do.call(method,par.method)
if (rev(summary(model)$s.table[,"p-value"])[1]<pvalue){
if (verbose) { print("entra");print(xentra);print(summary(model))}
model0 <- model
xvar <- xvar2
}
}
edf<- sum(model0$edf)
nvar <- ncol(model0$model)-1
}
if (method == "svm"){
if (is.null(par.method)) par.method=list("cost"=100,"gamma"=1,"kernel"="linear")
par.method$x <- data[,covar,drop=F]
par.method$y <- data[,"y"]
model0 <- do.call(method,par.method)
}
if (method == "rpart"){
ff<-as.formula(paste(y,"~",paste(covar,collapse="+"),collapse=""))
par.method$formula <- ff
par.method$data <- data
model0 <- do.call(method,par.method)
}
if (method == "knn"){
par.method$train <- data[,covar,drop=F]
par.method$test <- newdata[,covar,drop=F]
par.method$y <- data[,"y"]
model0 <- do.call("knn.reg",par.method)
pred0<-model0$pred
}
if ( method =="lars") {
if (is.null(par.method))
par.method= list(type="lasso",normalize=FALSE,intercept = TRUE,use.Gram=FALSE)
x0<-as.matrix(data[,covar])
par.method$x <- x0
par.method$y <- data[,"y"]
model0 <- do.call(method, par.method)
templam <- range(model0$lambda)
lambda<-seq(templam[1], templam[2], length=200)
cv <- do.call("cvlars",
list(x=x0, y=data[,"y"],K=10,lambda=lambda, trace = FALSE,
intercept = TRUE,normalize=FALSE, type="lasso",use.Gram=F))
minl<-lambda[which.min(cv)]
pred0 <- do.call("predict.lars",
list("object" = model0, "newx" = as.matrix(newdata[,covar]),
"s" = minl,"type"= "fit", "mode"= "lambda"))$fit
cv <- do.call("lars::cv.lars",list("x" = x0, "y" = data[,"y"],"K" = 10))
ideal_l1_ratio <- cv$index[which.max(cv$cv - cv$cv.error <= min(cv$cv))]
obj <- do.call("lars::lars",list("x"=x0, "y"=data[,"y"]))
scaled_coefs <- scale(obj$beta, FALSE, 1 / obj$normx)
l1 <- apply(X = scaled_coefs, MARGIN = 1, FUN = function(x) sum(abs(x)))
coef(obj)[which.max(l1 / do.call("tail",list("x"=l1, "n"=1)) > ideal_l1_ratio),]
pred0 <- do.call("predict.lars",list("object"= model0,
"newx"=as.matrix(newdata[,covar]),
"s"=minl,"type"="fit","mode"="lambda"))$fit
nvar <- sum( coef(obj)[which.max(l1 / do.call("tail",list("x"=l1, "n"=1)) > ideal_l1_ratio),]>0)
}
if ( method =="glmnet") {
x0 <- as.matrix(data[,covar,drop=F])
newx0 <- as.matrix(newdata[,covar,drop=F])
if (ncol(x0)==1){
x0<-cbind(x0,1:nrow(x0))
newx0<-cbind(newx0,1:nrow(newx0))
}
if (is.null(par.method))
par.method= list(family="gaussian", standardize=TRUE, nfolds=10)
par.method$x <- x0
par.method$y <- data[,"y"]
model0 <- do.call("cv.glmnet",par.method)
pred0<-predict(model0,newx=newx0, s=model0$lambda.min)
cf <- coef(model0, s='lambda.min', exact=TRUE)
inds <- which(cf != 0)
variables <- row.names(cf)[inds]
variables<-setdiff(variables,'(Intercept)')
nvar<- length(variables)
edf <-nvar +1
}
if ( method =="nnet") {
if (is.null(par.method))
par.method<-list( size = 5, rang = .1,decay = 5e-6, maxit =1000,linout=T)
par.method$x <- data[,covar,drop=F]
par.method$y <- data[,"y"]
model0 <- do.call("nnet",par.method)
}
if ( method =="mars") {
par.method$x <- data[,covar,drop=F]
par.method$y <- data[,"y"]
model0 <- do.call("mars",par.method)
}
if (method == "npreg"){
par.method$txdat<-data[,covar,drop=F]
par.method$tydat<-data[,"y"]
model0 <- do.call(method,par.method)
}
if (method == "flam"){
par.method$x <- data[,covar,drop=F]
par.method$y <- data[,"y"]
model0 <- do.call("flamCV",par.method)
alpha <- model0$alpha
lambda <- model0$lambda.cv
pred0<- predict(model0$flam.out, new.x =newdata[,covar,drop=F],
lambda = lambda, alpha = alpha)
}
if (method == "novas"){
par.method$COVARIATES<- data[,covar,drop=F]
par.method$Responses <- data[,"y"]
model0 <- do.call("novas",par.method)
pred0<- predict(model0, newdata[,covar,drop=F])
nvar <- edf <- length(model0$model)
}
if ( method =="cosso") {
x0 <- as.matrix(data[,covar,drop=F])
newx0 <- as.matrix(newdata[,covar,drop=F])
if (ncol(x0)==1){
x0<-cbind(x0,1:nrow(x0))
newx0<-cbind(newx0,1:nrow(newx0))
}
model0<-do.call("cosso",list("x"=x0,"y"=data[,y],"family"="Gaussian"))
xvar<-do.call("predict.cosso",
list("object"=model0,"M"=2,"type"="nonzero"))
pred0<-do.call("predict.cosso",list("object"=model0,"xnew"=newx0,
"M"=2,"type"="fit"))
nvar <- length(xvar)
edf <-nvar+1
}
if (method != "knn" & method != "cosso" & method != "lars" & method != "novas" & method != "glmnet" & method != "flam"){
if (pred) pred0 <- predict(model0, newdata=newdata[,covar,drop=F])
else pred0 <- NULL
}
return(list(model=model0, xvar=xvar, pred=pred0,edf=edf,nvar=nvar))
}
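# Usage sketch, continuing from the LMDC.select() example above (assumptions:
# 'train' and 'test' are data frames with the same columns as 'df'):
#   covar <- setdiff(names(train), "y")[sel$maxLocal]
#   fit <- LMDC.regre("y", covar = covar, data = train, newdata = test,
#                     method = "gam", pvalue = 0.05)
#   fit$pred  # predictions for 'test'; fit$nvar counts retained covariates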
|
[
{
"title": "Prices of houses in the Netherlands",
"href": "http://wiekvoet.blogspot.com/2013/10/prices-of-houses-in-netherlands.html"
},
{
"title": "Using the booktabs package with Sweave and xtable",
"href": "http://cameron.bracken.bz/sweave-xtable-booktabs"
},
{
"title": "Modern Portfolio Optimization Theory: The idea",
"href": "http://programming-r-pro-bro.blogspot.com/2011/11/modern-portfolio-optimization-theory.html"
},
{
"title": "Tips and Tricks for HTML and R",
"href": "http://jaredknowles.com/journal/2012/8/1/tips-and-tricks-for-html-and-r.html"
},
{
"title": "An Intro to Ensemble Learning in R",
"href": "http://viksalgorithms.blogspot.com/2012/01/intro-to-ensemble-learning-in-r.html"
},
{
"title": "Sensitivity of risk parity to variance differences",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/aJvAvQ5fwrI/"
},
{
"title": "Joining R-bloggers",
"href": "https://xianblog.wordpress.com/2010/02/19/joining-r-bloggers/"
},
{
"title": "Raccoon | Ch. 1 – Introduction to Linear Models with R",
"href": "http://www.quantide.com/raccoon-ch-1-introduction-to-linear-models-with-r/"
},
{
"title": "Predicting Titanic deaths on Kaggle V: Ranger",
"href": "http://wiekvoet.blogspot.com/2015/09/predicting-titanic-deaths-on-kaggle-v.html"
},
{
"title": "Build your own Twitter Archive and Analyzing Infrastructure with MongoDB, Java and R [Part 2] [Update]",
"href": "http://thinktostart.com/build-your-own-twitter-archive-and-analyzing-infrastructure-with-mongodb-java-and-r-part-2/"
},
{
"title": "A new Oce coastline",
"href": "http://dankelley.github.io//r/2013/12/22/coastline.html"
},
{
"title": "BioC 2016 Conference Overview and Few Ways of Downloading TCGA Data",
"href": "http://r-addict.com/2016/07/22/BioC2016-RTCGA.html"
},
{
"title": "Project Euler — problem 2",
"href": "https://web.archive.org/web/http://ec2-184-73-106-109.compute-1.amazonaws.com/wordpress/?p=37"
},
{
"title": "Canonical Correlation Analysis for finding patterns in coupled fields",
"href": "http://menugget.blogspot.com/2012/03/canonical-correlation-analysis-for.html"
},
{
"title": "Day
"href": "https://web.archive.org/web/http://flyordie.sin.khk.be/2011/04/20/day-27-a-lot-of-graphics-in-one-place/"
},
{
"title": "Win Your Fantasy Football Snake Draft with this Shiny App in R",
"href": "http://fantasyfootballanalyticsr.blogspot.com/2013/08/win-your-fantasy-football-snake-draft.html"
},
{
"title": "Online course on forecasting using R",
"href": "http://robjhyndman.com/hyndsight/revolutionr2013/"
},
{
"title": "Big Business Backs Hillary: Small Bernie",
"href": "http://www.econometricsbysimulation.com/2016/02/big-business-backs-hillary-small-bernie.html"
},
{
"title": "Cubism Horizon Charts in R",
"href": "http://timelyportfolio.blogspot.com/2012/06/cubism-horizon-charts-in-r.html"
},
{
"title": "Development of a R code to solve a large system of equations with linear constrains",
"href": "https://www.r-users.com/jobs/development-of-a-r-code-to-solve-a-large-system-of-equations-with-linear-constrains/"
},
{
"title": "Rgdal Package: R crash during \"writeGDAL\" solved",
"href": "http://ssrebelious.blogspot.com/2012/08/rgdal-crash-solved.html"
},
{
"title": "Speeding up model bootstrapping in GNU R",
"href": "http://rsnippets.blogspot.com/2013/12/speeding-up-model-bootstrapping-in-gnu-r.html"
},
{
"title": "SPARQL with R in less than 5 minutes",
"href": "https://feedproxy.google.com/~r/ProgrammingR/~3/CDuxujjMT3U/"
},
{
"title": "The Art of R Programming – my two cents",
"href": "https://feedproxy.google.com/~r/OneRTipADay/~3/7v5JE8d8QBw/art-of-r-programming-my-two-cents.html"
},
{
"title": "Case Study: Network visualization with data from a 360° feedback – often wasted potential!",
"href": "http://holtmeier.de/network-360/"
},
{
"title": "Bandit Formulations for A/B Tests: Some Intuition",
"href": "http://www.win-vector.com/blog/2014/04/bandit-formulations-for-ab-tests-some-intuition/?utm_source=rss&utm_medium=rss&utm_campaign=bandit-formulations-for-ab-tests-some-intuition"
},
{
"title": "Modeling Permanent and Gradual Process Changes with CDFs",
"href": "http://anythingbutrbitrary.blogspot.com/2012/07/modeling-permanent-and-gradual-process.html"
},
{
"title": "Draw nicer Classification and Regression Trees with the rpart.plot package",
"href": "http://blog.revolutionanalytics.com/2013/06/plotting-classification-and-regression-trees-with-plotrpart.html"
},
{
"title": "Reading Arduino data directly into R",
"href": "http://www.magesblog.com/2015/02/reading-arduino-data-directly-into-r.html"
},
{
"title": "Data Science Live Book (open source)",
"href": "http://blog.datascienceheroes.com/data-science-live-book-open-source/"
},
{
"title": "R bracket is a bit irregular",
"href": "http://www.win-vector.com/blog/2015/01/r-bracket-is-a-bit-irregular/"
},
{
"title": "Project TIER",
"href": "http://citizen-statistician.org/2016/06/25/project-tier/"
},
{
"title": "New chapters for 50 shades of grey….",
"href": "https://longhowlam.wordpress.com/2016/07/27/new-chapters-for-50-shades-of-grey/"
},
{
"title": "Meielisalp, Mar 2012 – Abstract Submission",
"href": "https://www.rmetrics.org/PaperSubmission2012"
},
{
"title": "2016-10 A transformable markup document format",
"href": "http://stattech.wordpress.fos.auckland.ac.nz/2016-10-a-transformable-markup-document-format/"
},
{
"title": "How to make 3-D graphics from SAS data",
"href": "http://blog.revolutionanalytics.com/2011/04/how-to-make-3-d-graphics-from-sas-data.html"
},
{
"title": "Part 1 of 3: Building/Loading/Scoring Against Predictive Models in R",
"href": "http://scottmutchler.blogspot.com/2011/08/part-1-of-3-buildingloadingscoring.html"
},
{
"title": "The Fourier Transform, explained in one sentence",
"href": "http://blog.revolutionanalytics.com/2014/01/the-fourier-transform-explained-in-one-sentence.html"
},
{
"title": "Have you ever heard about the ‘animation package’?",
"href": "http://using-r-project.blogspot.com/2009/08/have-you-ever-heard-about-animation.html"
},
{
"title": "Benchmarking distance calculation in R",
"href": "http://things-about-r.tumblr.com/post/33851672597/benchmarking-distance-calculation-in-r"
},
{
"title": "R 3.3.0 now available",
"href": "http://blog.revolutionanalytics.com/2016/05/r-330-now-available.html"
},
{
"title": "R Tops Data Mining Software Poll",
"href": "http://blog.revolutionanalytics.com/2012/05/r-tops-data-mining-poll.html"
},
{
"title": "Lomb-Scargle periodogram for unevenly sampled time series",
"href": "http://menugget.blogspot.com/2013/01/lomb-scargle-periodogram-for-unevenly.html"
},
{
"title": "Bio7 Overview Video",
"href": "http://bio7.org/?p=2478"
},
{
"title": "Another crosshairs",
"href": "http://adistantobserver.blogspot.com/2012/11/another-crosshairs.html"
},
{
"title": "Suicide statistics and the Christchurch earthquake",
"href": "http://www.quantumforest.com/2012/09/suicide-statistics/"
},
{
"title": "R Coding Style Guide",
"href": "http://www.compbiome.com/2009/08/r-coding-style-guide.html"
},
{
"title": "The tenure of Doctor Who incarnations",
"href": "https://4dpiecharts.com/2013/08/03/the-tenure-of-doctor-who-incarnations/"
},
{
"title": "Why you should learn R first for data science",
"href": "http://sharpsightlabs.com/blog/2015/01/27/learn-r-data-science/"
},
{
"title": "How to plot three categorical variables and one continuous variable using ggplot2",
"href": "http://jeromyanglim.blogspot.com/2012/05/how-to-plot-three-categorical-variables.html"
}
]
|
library('TreeTools')
test_that("SplitwiseInfo() / ClusteringInfo() handle probabilities", {
Tree <- function (txt) ape::read.tree(text = txt)
tree <- Tree('((a, b)60, (c, d)60);')
treeP <- Tree('((a, b)0.60, (c, d)0.60);')
treeProfile <- list(Tree('((a, b), (c, d));'),
Tree('(a, b, c, d);'),
Tree('((a, d), (c, b));'))[c(1, 1, 1, 2, 3)]
Test <- function (Expect, tree, p = NULL, ...) {
Expect(..., SplitwiseInfo(tree, p))
Expect(..., ClusteringInfo(tree, p))
Expect(..., ClusteringEntropy(tree, p))
}
Clust <- function (tree, ...) {
expect_equal(ClusteringInfo(tree, ..., sum = TRUE),
sum(ClusteringInfo(tree, ..., sum = FALSE)))
expect_equal(ClusteringEntropy(tree, ..., sum = TRUE),
sum(ClusteringEntropy(tree, ..., sum = FALSE)))
expect_equal(ClusteringInfo(tree, ...) / NTip(tree),
ClusteringEntropy(tree, ...))
}
Test(expect_error, tree, TRUE)
Test(expect_null, tree = NULL)
expect_gt(SplitwiseInfo(tree), SplitwiseInfo(tree, 100))
expect_gt(ClusteringInfo(tree), ClusteringInfo(tree, 100))
expect_gt(ClusteringEntropy(tree), ClusteringEntropy(tree, 100))
expect_equal(SplitwiseInfo(tree), SplitwiseInfo(tree, p = FALSE))
expect_equal(ClusteringInfo(tree), ClusteringInfo(tree, p = FALSE))
expect_equal(ClusteringEntropy(tree), ClusteringEntropy(tree, p = FALSE))
expect_equal(0, SplitwiseInfo(tree, 60 * 3),
tolerance = sqrt(.Machine$double.eps))
expect_equal(1 / 3, Clust(tree, 60 * 3))
p <- 1.0 * c(1, 0, 0) + 0.0 * c(0, 1/2, 1/2)
expect_equal(log2(3), SplitwiseInfo(tree))
expect_equal(1, Clust(tree))
p <- 0.6 * c(1, 0, 0) + 0.4 * c(0, 1/2, 1/2)
expect_equal(sum(p), 1)
expectation <- log2(3) + sum(p * log2(p))
expect_equal(expectation, SplitwiseInfo(tree, 100))
expect_equal(0.6, Clust(tree, 100))
expect_equal(SplitwiseInfo(tree, 100), SplitwiseInfo(treeP, TRUE))
expect_equal(Clust(tree, 100), Clust(treeP, TRUE))
expect_equal(SplitwiseInfo(tree, 100), ConsensusInfo(treeProfile, 'p'))
expect_equal(ClusteringInfo(tree, 100), ConsensusInfo(treeProfile, 'c'))
expect_equal(SplitwiseInfo(Tree('(a, b, (c, (d, e)0.8)0.75);'), TRUE),
SplitwiseInfo(Tree('(a, b, (c, d, e)0.75);'), TRUE) +
SplitwiseInfo(Tree('(a, b, c, (d, e)0.8);'), TRUE))
expect_equal(Clust(Tree('(a, b, (c, (d, e)0.8)0.75);'), TRUE),
Clust(Tree('(a, b, (c, d, e)0.75);'), TRUE) +
Clust(Tree('(a, b, c, (d, e)0.8);'), TRUE))
expect_equal(SplitwiseInfo(Tree('(a, b, (c, (d, e)0.8)0.75);'), TRUE),
SplitwiseInfo(Tree('(a, b, (c, (d, e)));'), c(0.75, 0.8)))
expect_equal(Clust(Tree('(a, b, (c, (d, e)0.8)0.75);'), TRUE),
Clust(Tree('(a, b, (c, (d, e)));'), c(0.75, 0.8)))
expect_equal(SplitwiseInfo(Tree('(a, b, (c, (d, e)0.8));'), TRUE),
SplitwiseInfo(Tree('(a, b, (c, (d, e)));'), c(1, 0.8)))
expect_equal(Clust(Tree('(a, b, (c, (d, e)0.8));'), TRUE),
Clust(Tree('(a, b, (c, (d, e)));'), c(1, 0.8)))
expect_equal(SplitwiseInfo(Tree('(a, b, (c, (d, e)));')),
SplitwiseInfo(Tree('(a, b, (c, (d, e)));'), TRUE))
expect_equal(Clust(Tree('(a, b, (c, (d, e)));')),
Clust(Tree('(a, b, (c, (d, e)));'), TRUE))
expect_equal(SplitwiseInfo(Tree('(a, b, (c, (d, e)));'), TRUE),
SplitwiseInfo(Tree('(a, b, (c, (d, e)));'), c(1, 1)))
expect_equal(Clust(Tree('(a, b, (c, (d, e)));'), TRUE),
Clust(Tree('(a, b, (c, (d, e)));'), c(1, 1)))
})
test_that("SplitwiseInfo() / ClusteringInfo(sum = FALSE)", {
splits <- as.Splits(BalancedTree(8))
Test <- function (x) {
expect_equal(length(x), length(splits))
expect_equal(names(x), names(splits))
}
p <- c(1, 1, 0.5, 0.6, 0.7)
Test(SplitwiseInfo(BalancedTree(8), sum = FALSE))
Test(SplitwiseInfo(BalancedTree(8), p = p, sum = FALSE))
Test(ClusteringInfo(BalancedTree(8), sum = FALSE))
Test(ClusteringInfo(BalancedTree(8), p = p, sum = FALSE))
})
test_that("SplitwiseInfo() can't be improved by dropping resolved tip", {
b8With <- as.Splits(c(T, T, T, T, F, F, F, F))
b8Without <- as.Splits(c(T, T, T, F, F, F, F))
i8With <- as.Splits(c(T, T, T, T, T, T, F, F))
i8Without <- as.Splits(c(T, T, T, T, T, F, F))
expect_lt(SplitwiseInfo(b8With, p = 0.5), SplitwiseInfo(b8Without))
expect_lt(SplitwiseInfo(i8With, p = 0.5), SplitwiseInfo(i8Without))
balancedWithout <- BalancedTree(32)
balancedWith <- AddTip(balancedWithout, 32)
p <- double(30) + 1
p[c(58, 62, 64, 65) - 35] <- 0.5
expect_lt(SplitwiseInfo(balancedWith, p = p), SplitwiseInfo(balancedWithout))
})
test_that('ClusteringInfo() method works', {
trees <- list(BalancedTree(8), PectinateTree(8))
expect_equal(vapply(trees, ClusteringInfo, 0),
ClusteringInfo(structure(trees, class = 'multiPhylo')))
expect_equal(vapply(trees, ClusteringInfo, 0),
ClusteringInfo(trees))
trees <- list(RandomTree(8), BalancedTree(8), PectinateTree(8))
cons <- consensus(lapply(trees, RootTree, 1), p = 0.5)
p <- SplitFrequency(cons, trees) / length(trees)
expect_equal(SplitwiseInfo(cons, p), ConsensusInfo(trees, 'spic'))
expect_equal(ClusteringInfo(cons, p), ConsensusInfo(trees, 'scic'))
})
test_that("ConsensusInfo() is robust", {
trees <- list(ape::read.tree(text = '(a, (b, (c, (d, (e, X)))));'),
ape::read.tree(text = '((a, X), (b, (c, (d, e))));'))
expect_equal(0, ConsensusInfo(trees, 'cl'))
expect_error(ConsensusInfo(trees, 'ERROR'))
expect_equal(ConsensusInfo(trees[1]), ConsensusInfo(trees[[1]]))
})
test_that("ConsensusInfo() generates correct value", {
trees <- list(ape::read.tree(text = "((a, b), (c, d));"),
ape::read.tree(text = "((a, c), (b, d));"),
ape::read.tree(text = "((a, d), (c, b));"))
expect_equal(0, ConsensusInfo(trees))
expect_equal(0, ConsensusInfo(trees, 'cl'))
expect_equal(log2(3), ConsensusInfo(trees[1]))
expect_equal(4, ConsensusInfo(trees[1], 'cl'))
expect_equal(log2(3), ConsensusInfo(trees[c(1, 1)]))
expect_equal(4, ConsensusInfo(trees[c(1, 1)], 'cl'))
expect_equal(Entropy(c(1, 1, 1) / 3) - Entropy(c(1/2, 1/2, 9)/10),
ConsensusInfo(trees[c(rep(1, 9), 2)]))
})
|
context("get planes")
test_that("standard get_planes", {
skip_on_cran()
skip_if_offline()
skip_on_os("windows")
planes_ <- get_planes(2018)
})
test_that("get_planes joined to nycflights13", {
skip_on_cran()
skip_if_offline()
skip_on_os("windows")
planes_ <- get_planes(2013, flights_data = nycflights13::flights)
planes_orig <- nycflights13::planes
expect_equal(ncol(planes_), ncol(planes_orig))
expect_equal(colnames(planes_), colnames(planes_orig))
expect_equal(purrr::map(planes_, class) %>% unlist(),
purrr::map(planes_orig, class) %>% unlist())
})
|
add_igos <- function(data) {
if (length(attributes(data)$ps_data_type) > 0 && attributes(data)$ps_data_type == "dyad_year") {
if (!all(i <- c("ccode1", "ccode2") %in% colnames(data))) {
stop("add_igos() merges on two Correlates of War codes (ccode1, ccode2), which your data don't have right now. Make sure to run create_dyadyears() at the top of the pipe. You'll want the default option, which returns Correlates of War codes.")
} else {
cow_igo_ndy %>%
rename(ccode1 = .data$ccode2,
ccode2 = .data$ccode1) %>%
bind_rows(cow_igo_ndy, .) %>%
left_join(data, .) -> data
return(data)
}
} else if (length(attributes(data)$ps_data_type) > 0 && attributes(data)$ps_data_type == "state_year") {
if (!all(i <- c("ccode") %in% colnames(data))) {
stop("add_igos() merges on the Correlates of War code, which your data don't have right now. Make sure to run create_stateyears() at the top of the pipe. You'll want the default option, which returns Correlates of War codes.")
} else {
cow_igo_sy %>%
left_join(data, .) -> data
return(data)
}
} else {
stop("add_igos() requires a data/tibble with attributes$ps_data_type of state_year or dyad_year. Try running create_dyadyears() or create_stateyears() at the start of the pipe.")
}
}
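## Usage sketch, piggybacking on the constructors named in the error
## messages above:
## create_dyadyears() %>% add_igos()   # dyad-year panel with IGO counts
## create_stateyears() %>% add_igos()  # state-year panel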
|
.mult.test <- function(y1, y2, perm.num) {
y1.num <- nrow(y1)
y <- rbind(y1, y2)
y.num <- nrow(y)
t.obs <- .hote(y1, y2, FALSE)$t.obs
stat.perm <- vector("numeric", perm.num)
for (i in 1:perm.num) {
ind <- sample(y.num)
y1.perm <- y[ind[1:y1.num],]
y2.perm <- y[ind[(y1.num+1):y.num],]
stat.perm[i] <- .hote(y1.perm, y2.perm, FALSE)$t.obs
}
alpha.obs <- sum(stat.perm >= t.obs) / perm.num
list(alpha.obs=alpha.obs, t.obs=t.obs)
}
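## Usage sketch (hypothetical data; relies on the internal Hotelling helper
## .hote() defined elsewhere in this package):
## y1 <- matrix(rnorm(40), nrow = 20)              # group 1: 20 obs x 2 vars
## y2 <- matrix(rnorm(40, mean = 0.5), nrow = 20)  # group 2, shifted mean
## res <- .mult.test(y1, y2, perm.num = 999)
## res$alpha.obs  # permutation p-value for the observed statistic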
|
streamParserClose <- function(stream) stream$streamParserClose(stream)
|
XML4R2list <- function(file){
if(!file.exists(file)) stop(paste0("'", file, "' does not exist."))
read_lines <- readLines(file)
if(sum(grepl('(>)', read_lines)) == 0) return(read_lines)
read_lines <- paste(read_lines, collapse="\n")
read_lines <- gsub('(>)([[:print:]])', '\\1\n\\2', read_lines)
read_lines <- gsub('([[:print:]])(</)', '\\1\n\\2', read_lines)
lines <- strsplit(x=read_lines, split="\n")[[1]]
lines <- gsub("^[\t]*", "", lines)
lines <- lines[lines != ""]
object_type <- 'vector'
enclosing_tag_added <- FALSE
if(sum(grepl('(<)', lines[1:2])) < 2){
lines <- c('<enclose_all type=list >', lines)
lines <- c(lines, '</enclose_all>')
enclosing_tag_added <- TRUE
}
read_xml_lines <- XML4R2listLines(lines)
if(enclosing_tag_added){
rlist <- read_xml_lines$rlist
}else{
rlist <- list(read_xml_lines$rlist)
names(rlist)[1] = read_xml_lines$obj.name
}
rlist
}
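## Usage sketch ("results.xml" is a hypothetical file path):
## rlist <- XML4R2list("results.xml")
## str(rlist)  # nested list mirroring the XML tag structure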
|
core_advanced_search <- function(..., page = 1, limit = 10,
key = NULL, parse = TRUE, .list = list()) {
assert(page, c('numeric', 'integer'))
assert(limit, c('numeric', 'integer'))
must_be(limit)
qrs <- list(...)
if (length(qrs) == 0) qrs <- NULL
qrs <- c(qrs, .list)
queries <- create_batch_query_list(qrs, page, limit)
res <- core_POST(path = "search", key, NULL, queries)
core_parse(res, parse)
}
acceptable_advanced_filters <- c(
"title", "description", "fullText", "authors",
"publisher", "repositories.id", "repositories.name", "doi",
"oai", "identifiers", "language.name", "year",
"repositoryDocument.metadataUpdated"
)
core_query <- function(..., op = "AND") {
x <- list(...)
if (length(x) == 0) stop("no queries passed")
x <- x[which(names(x) %in% acceptable_advanced_filters)]
out <- c()
for (i in seq_along(x)) {
out[i] <- paste0(names(x)[i], ":", x[[i]])
}
paste(out, collapse = sprintf(" %s ", op))
}
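## Example: names outside acceptable_advanced_filters are dropped and the
## remaining clauses are joined with the chosen operator:
## core_query(title = "ecology", year = 2020)
## #> [1] "title:ecology AND year:2020"
## core_query(title = "ecology", year = 2020, op = "OR")
## #> [1] "title:ecology OR year:2020"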
|
context("is.keyvalue")
test_that("is.keyvalue11", {
expect_false(is.keyvalue11(snomed))
expect_true(is.keyvalue11(kon))
})
|
`plot.corres` <-
function(x, main="", addcol=TRUE, extreme=0,
rcex=1, rcol=1, rlabels="", stretch=1.4,
ccex = 1, ccol = 2, clabels="", ...) {
if (!is(x, "corres")) stop("argument should be a correspondence object")
dat = x@data$origOut
xlimit = range(dat$rproj[,1])*stretch
ylimit = range(dat$rproj[,2])*stretch
graphics::plot(dat$rproj[,1], dat$rproj[,2], type="n", xlim=xlimit, ylim=ylimit,
xlab=paste("Factor 1 (", round(x@data$eigenrates[1]/10, 1), " %)", sep=""),
ylab=paste("Factor 2 (", round(x@data$eigenrates[2]/10, 1), " %)", sep=""))
graphics::lines(c(max(dat$rproj[,1]), min(dat$rproj[,1])), c(0,0))
graphics::lines(c(0,0), c(max(dat$rproj[,2]), min(dat$rproj[,2])))
if (!(main == "")) graphics::mtext(main, 3, 1)
if (length(rcol) == 1 ) rcol = rep(1, nrow(dat$rproj))
if (length(rlabels)==1) rlabels = rownames(x@data$input)
graphics::text(dat$rproj[,1], dat$rproj[,2], rlabels, cex=rcex, col=rcol)
if (addcol) {
if (length(clabels)==1) clabels = colnames(x@data$input)
if (extreme > 0) {
x = data.frame(dat$cproj[,1:2])
extremes = apply(x, 2, stats::quantile, c(extreme, 1-extreme))
Accept = as.factor((x[,2] < extremes[1,2] | x[,2] > extremes[2,2])|
(x[,1] < extremes[1,1] | x[,1] > extremes[2,1]))
graphics::text(x[Accept==TRUE,1], x[Accept==TRUE,2], clabels[Accept==TRUE],
font=2, cex=ccex, col=ccol)
} else {
graphics::text(dat$cproj[,1], dat$cproj[,2], clabels, font=2, cex=ccex, col=ccol)
}
}
}
|
"data"
|
remove.spaces<-function (charvec)
{
charvec <- gsub("^([[:blank:]]*)([[:space:]]*)", "", charvec)
charvec <- gsub("([[:blank:]]*)([[:space:]]*)$", "", charvec)
return(charvec)
}
my.read.genepop<-function (file, ncode = 2L, quiet = FALSE)
{
if (!quiet)
cat("\nParsing Genepop file...\n\n")
prevcall <- match.call()
txt <- scan(file, sep = "\n", what = "character", quiet = TRUE)
if (!quiet)
cat("\nFile description: ", txt[1], "\n")
txt <- txt[-1]
txt <- gsub("\t", " ", txt)
locinfo.idx <- 1:(min(grep("POP", toupper(txt))) - 1)
locinfo <- txt[locinfo.idx]
locinfo <- paste(locinfo, collapse = ",")
loc.names <- unlist(strsplit(locinfo, "([,]|[\n])+"))
loc.names <- remove.spaces(loc.names)
nloc <- length(loc.names)
txt <- txt[-locinfo.idx]
pop.idx <- grep("POP", toupper(txt))
npop <- length(pop.idx)
nocomma <- which(!(1:length(txt)) %in% grep(",", txt))
splited <- nocomma[which(!nocomma %in% pop.idx)]
if (length(splited) > 0) {
for (i in sort(splited, decreasing = TRUE)) {
txt[i - 1] <- paste(txt[i - 1], txt[i], sep = " ")
}
txt <- txt[-splited]
}
pop.idx <- grep("POP", toupper(txt))
txt[length(txt) + 1] <- "POP"
pops<-txt[pop.idx]
nind<-diff(c(pop.idx,length(txt)))-1
if(pops[1] == "POP"){
for(i in 1:length(pops)){
pops[i]<-paste("POP ",i,sep="") }
}
popinfo<-as.vector(unlist(apply(cbind(
as.character(pops),as.numeric(nind)),
1,function(x){ rep(x[1],x[2])})))
txt <- txt[-c(pop.idx, length(txt))]
temp <- sapply(1:length(txt), function(i) strsplit(txt[i], ","))
ind.names <- sapply(temp, function(e) e[1])
ind.names <- remove.spaces(ind.names)
vec.genot <- sapply(temp, function(e) e[2])
vec.genot <- remove.spaces(vec.genot)
X <- matrix(unlist(strsplit(vec.genot, "[[:space:]]+")),
ncol = nloc, byrow = TRUE)
rownames(X) <- 1:nrow(X)
colnames(X) <- loc.names
res<-data.frame(popinfo,ind.names,X)
if (!quiet)
cat("\n...done.\n\n")
return(res)
}
allele.counts<-function(genotypes){
obs.gen<-summary(as.factor(genotypes))
obs.gen<-obs.gen[names(obs.gen) != "000000" & names(obs.gen)
!= "0000" & names(obs.gen) != "0"]
if(nchar(names(obs.gen[1])) %% 2 == 0){
splitnum<-nchar(names(obs.gen[1]))/2
allele.names<-levels(as.factor(c(substr(names(obs.gen),1,splitnum),
substr(names(obs.gen),splitnum+1,nchar(names(obs.gen[1]))))))
alleles<-cbind(substr(genotypes,1,splitnum),
substr(genotypes,splitnum+1,nchar(names(obs.gen[1]))))
alleles<-alleles[alleles != "0" & alleles != "00" &
alleles != "000" & alleles!= "0000" &
alleles != "00000" & alleles != "000000"]
} else {
allele.names<-levels(as.factor(names(obs.gen)))
alleles<-cbind(genotypes,genotypes)
alleles<-alleles[alleles != "0" & alleles != "00" &
alleles != "000" & alleles!= "0000" &
alleles != "00000" & alleles != "000000"]
}
obs<-summary(as.factor(alleles))
if(length(obs) < length(allele.names)){
num.missing<-length(allele.names[!(allele.names%in% names(obs))])
AlleleCounts<-c(obs,rep(0,num.missing))
names(AlleleCounts)<-c(names(obs)[names(obs) %in% allele.names],
allele.names[!(allele.names%in% names(obs))])
}else{
AlleleCounts<-obs
}
AlleleCounts<-AlleleCounts[order(names(AlleleCounts))]
return(AlleleCounts)
}
calc.allele.freq<-function(genotypes){
counts<-allele.counts(genotypes)
obs.af<-counts/sum(counts)
return(obs.af)
}
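## Worked example with four-digit diploid genotypes ("0102" = alleles 01/02);
## three individuals carry three copies of each allele, so:
## calc.allele.freq(c("0101", "0102", "0202"))
## #>  01  02
## #> 0.5 0.5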
calc.exp.het<-function(af){
ht<-2*af[1]*(1-af[1])
return(ht)
}
calc.fst<-function(df,i){
df.split<-split(df[,i],df[,1])
af<-do.call("rbind",lapply(df.split,calc.allele.freq))
hexp<-apply(af,1,calc.exp.het)
hs<-mean(hexp)
pbar<-mean(af[,1])
ht<-2*pbar*(1-pbar)
fst<-1-(hs/ht)
return(c(ht,fst))
}
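## calc.fst() implements Wright's Fst as 1 - Hs/Ht, where Hs is the mean
## within-population expected heterozygosity and Ht = 2*pbar*(1 - pbar) uses
## the mean allele frequency across populations. The data frame follows the
## my.read.genepop() layout: population in column 1, individual ID in
## column 2, and one locus per column from column 3 onward.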
var.fst<-function(df,i){
df.split<-split(df[,i],df[,1])
N<-length(df.split)
af<-do.call("rbind",lapply(df.split,calc.allele.freq))
pbar<-mean(af[,1])
vp<-(af[,1]-pbar)^2
varp<-(1/N)*sum(vp)
fst<-(varp/(pbar*(1-pbar)))-(1/(2*N))
ht<-2*pbar*(1-pbar)
return(c(ht,fst))
}
calc.theta<-function(df,i){
df.split<-split(df[,i],df[,1])
N<-length(df.split)
af<-do.call("rbind",lapply(df.split,calc.allele.freq))
pbar<-mean(af[,1])
vp<-(af[,1]-pbar)^2
ns<-unlist(lapply(df.split,length))
nbar<-mean(unlist(lapply(df.split,length)))
s2a<-(1/((N-1)*nbar))*sum(vp)
nc<-(1/(N-1))*(sum(ns)-(sum(ns^2)*(1/sum(ns))))
T1<-s2a-((1/(nbar-1))*((pbar*(1-pbar))-((N-1)/N)*s2a))
T2<-((nc-1)/(nbar-1))*(pbar*(1-pbar))+(s2a/N)*(1+(((N-1)*(nbar-nc))/(nbar-1)))
theta<-T1/T2
return(c(T2,theta))
}
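## calc.theta() is the Weir & Cockerham theta estimator named in
## fst.options.print(): s2a is the among-population variance in allele
## frequency, nbar and nc the mean and effective sample sizes, and
## theta = T1/T2, with T2 returned alongside as the heterozygosity term.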
calc.betahat<-function(df,i){
df.split<-split(df[,i],df[,1])
r<-length(df.split)
M<-mean(unlist(lapply(df.split,length)))
af<-do.call("rbind",lapply(df.split,calc.allele.freq))
Y<-sum(af[,1])*sum(af[,1])+sum(1-af[,1])*sum(1-af[,1])
X<-sum(af[,1]^2)+sum((1-af[,1])^2)
F0<-((2*M*X)-r)/(((2*M)-1)*r)
F1<-((Y-X)/(r*(r-1)))
HB<-1-F1
betahat<-(F0-F1)/HB
return(c(HB,betahat))
}
calc.weighted.fst<-function(df,i){
df.split<-split(df[,i],df[,1])
af<-do.call("rbind",lapply(df.split,calc.allele.freq))
hexp<-apply(af,1,calc.exp.het)
ns<-unlist(lapply(df.split,length))
hs<-sum(hexp*ns)/sum(ns)
ht<-calc.exp.het(calc.allele.freq(df[,i]))
fst<-(ht-hs)/ht
return(c(ht,fst))
}
fst.boot.onecol<-function(df,fst.choice){
fst.options<-c("FST","Fst","fst","var","VAR","Var","theta","Theta","THETA",
"Betahat","BETAHAT","betahat")
if(!(fst.choice %in% fst.options)) { stop("Fst choice not an option. Use fst.options.print() to see options.")}
col<-sample(3:ncol(df),1)
if(fst.choice %in% c("Fst","FST","fst")){
ht.fst<-calc.fst(df,col)
}
if(fst.choice %in% c("VAR","var","Var")){
ht.fst<-var.fst(df,col)
}
if(fst.choice %in% c("Theta","theta","THETA")){
ht.fst<-calc.theta(df,col)
}
if(fst.choice %in% c("betahat","BETAHAT","Betahat")){
ht.fst<-calc.betahat(df,col)
}
return(ht.fst)
}
fst.options.print<-function(){
print("For Wright's Fst: fst, FST, Fst",quote=F)
print("For a variance-based Fst (beta): var, VAR, Var",quote=F)
print("For Cockerham and Weir's Theta: theta, Theta, THETA", quote=F)
print("For Beta-hat (LOSITAN): Betahat, betahat, BETAHAT",quote=F)
}
make.bins<-function(fsts,num.breaks=25, Ht.name="Ht", Fst.name="Fst",min.per.bin=20)
{
breaks<-hist(fsts[,Ht.name],breaks=(num.breaks/2),plot=F)$breaks
low.breaks<-breaks[1:(length(breaks)-1)]
upp.breaks<-breaks[2:length(breaks)]
br.rate<-breaks[2]-breaks[1]
newbreaks<-breaks-(br.rate/2)
low.breaks<-c(low.breaks,newbreaks[1:(length(breaks)-1)])
upp.breaks<-c(upp.breaks,newbreaks[2:length(breaks)])
bins<-data.frame(low.breaks=sort(low.breaks),upp.breaks=sort(upp.breaks))
bins<-bins[bins[,1]>=0 & bins[,2]>=0,]
mids<-apply(bins,1,mean)
bin.fst<-apply(bins, 1, function(x){
out<-fsts[fsts[,Ht.name] > x[1] & fsts[,Ht.name] < x[2],Fst.name] })
names(bin.fst)<-bins$upp.breaks
rmvec<-NULL
for(i in 1:(length(bin.fst)-1)){
if(length(bin.fst[[i]]) < min.per.bin){
bin.fst[[(i+1)]]<-c(bin.fst[[i]],bin.fst[[(i+1)]])
rmvec<-c(rmvec,i)
}
if((i+1)==length(bin.fst)){
if(length(bin.fst[[i+1]]) < min.per.bin){
bin.fst[[i]]<-c(bin.fst[[i]],bin.fst[[(i+1)]])
rmvec<-c(rmvec,(i+1))
}
}
}
if(!is.null(rmvec)){ bin.fst<-bin.fst[-rmvec] }
bin.fst<-lapply(bin.fst,sort)
if(!is.null(rmvec)){ # only adjust bin boundaries when bins were merged
for(i in 1:length(rmvec)){
bins[(rmvec[i]+1),1]<-bins[rmvec[i],1]
}
bins<-bins[-rmvec,]
}
return(list(bins=bins,bin.fst=bin.fst))
}
find.quantiles<-function(bins, bin.fst, ci=0.05)
{
fst.CI<-list()
for(j in 1:length(ci)){
ci.min<-(ci[j]/2)
ci.max<-1-(ci[j]/2)
fstCI<-lapply(bin.fst, function(x){
keep.fst<-x[round(length(x)*ci.min):round(length(x)*ci.max)]
if(length(keep.fst)>0){
fst.thresh<-c(min(keep.fst),max(keep.fst))
names(fst.thresh)<-c("Low","Upp")
} else{
fst.thresh<-c("","")
names(fst.thresh)<-c("Low","Upp")
}
return(fst.thresh)
})
ci.name<-paste("CI",(1-ci[j]),sep="")
cis<-data.frame(do.call(rbind,fstCI))
cis$UppHet<-as.numeric(rownames(cis))
cis<-apply(cis,c(1,2),round,3)
bins<-apply(bins,c(1,2),round,3)
cis<-merge(cis,bins,by.x="UppHet",by.y="upp.breaks",keep=T)
fst.CI[[j]]<-data.frame(Low=cis$Low,Upp=cis$Upp,LowHet=cis$low.breaks,UppHet=cis$UppHet)
names(fst.CI)[j]<-ci.name
}
return(fst.CI)
}
fst.boot<-function(df, fst.choice="fst", ci=0.05,num.breaks=25,bootstrap=TRUE,min.per.bin=20){
fst.options<-c("FST","Fst","fst","var","VAR","Var","theta","Theta","THETA",
"Betahat","BETAHAT","betahat")
if(!(fst.choice %in% fst.options)) { stop("Fst choice not an option. Use fst.options.print() to see options.")}
nloci<-(ncol(df)-2)
if(bootstrap == TRUE)
{
boot.out<-as.data.frame(t(replicate(nloci, fst.boot.onecol(df,fst.choice))))
colnames(boot.out)<-c("Ht","Fst")
boot.out$Fst[is.nan(boot.out$Fst)]<-0
print("Bootstrapping done. Now Calculating CIs")
} else{
boot.out<-calc.actual.fst(df,fst.choice)
rownames(boot.out)<-boot.out$Locus
boot.out<-data.frame(cbind(as.numeric(boot.out$Ht),as.numeric(boot.out$Fst)))
colnames(boot.out)<-c("Ht","Fst")
boot.out$Fst[is.nan(boot.out$Fst)]<-0
print("Fsts calculated. Now Calculating CIs")
}
boot.out<-as.data.frame(boot.out[order(boot.out$Ht),])
bins<-make.bins(boot.out,num.breaks,min.per.bin=min.per.bin)
fst.CI<-find.quantiles(bins$bins,bins$bin.fst,ci)
return(list(Fsts=boot.out,Bins=bins$bins,fst.CI=fst.CI))
}
fst.boot.means<-function(boot.out){
all.boot<-data.frame(
Ht=unlist(lapply(boot.out$Fsts,
function(x) { out<-x$Ht; return(out) })),
Fst=unlist(lapply(boot.out$Fsts,
function(x) { out<-x$Fst; return(out) })))
breaks<-boot.out$Bins[[1]]
bmu<-t(apply(breaks,1,function(x){
x.ht<-all.boot[all.boot$Ht >= x[1] &
all.boot$Ht <= x[2],"Ht"]
x.fst<-all.boot[all.boot$Ht >= x[1] &
all.boot$Ht <= x[2],"Fst"]
x.fh<-c(mean(x.ht),mean(x.fst),length(x.ht))
return(x.fh)
}))
bmu<-data.frame(bmu[,1],bmu[,2],bmu[,3],breaks[,1],breaks[,2])
colnames(bmu)<-c("Ht","Fst","Num","LowBin","UppBin")
return(bmu)
}
p.boot<-function(actual.fsts, boot.out=NULL,boot.means=NULL){
if(is.null(boot.out) && is.null(boot.means)) {
stop("You must provide either bootstrapping output or bootstrap means") }
if(!is.null(boot.out) && inherits(boot.out[[2]], "data.frame")){
stop("Can only calculate p-values if bootstrapping was run more than once.") }
if(is.null(boot.means)){
boot.means<-fst.boot.means(boot.out)
}
boot.means$real.means<-apply(boot.means,1,function(x){
rmu<-mean(as.numeric(actual.fsts[
as.numeric(actual.fsts$Ht) >= as.numeric(x[4]) &
as.numeric(actual.fsts$Ht) <= as.numeric(x[5]),"Fst"]),na.rm=T)
return(rmu)
})
boot.means$unitsaway<-abs(boot.means$real.means - boot.means$Fst)
boot.means$low<-boot.means$Fst-boot.means$unitsaway
boot.means$upp<-boot.means$Fst+boot.means$unitsaway
pvals<-unlist(apply(actual.fsts,1, function(x){
bin<-boot.means[
as.numeric(boot.means$LowBin) <= as.numeric(x["Ht"])
& as.numeric(boot.means$UppBin) >= as.numeric(x["Ht"]),]
fsts.in.bin<-apply(bin,1,function(y){
actual.fsts[actual.fsts$Ht >= as.numeric(y["LowBin"] )
& actual.fsts$Ht <= as.numeric(y["UppBin"]),]
})
unitsaway<-apply(bin,1,function(y){
abs(as.numeric(x["Fst"]) - as.numeric(y["Fst"])) })
low<-apply(bin,1,function(y){
as.numeric(y["Fst"]) - as.numeric(unitsaway) })
upp<-apply(bin,1,function(y){
as.numeric(y["Fst"]) + as.numeric(unitsaway) })
n<-lapply(fsts.in.bin, nrow)
p<-NULL
if(length(fsts.in.bin)>0){
for(i in 1:length(fsts.in.bin)){
p[i]<-(nrow(fsts.in.bin[[i]][as.numeric(fsts.in.bin[[i]]$Fst) <
as.numeric(low[i]),]) +
nrow(fsts.in.bin[[i]][as.numeric(fsts.in.bin[[i]]$Fst) >
as.numeric(upp[i]),]))/as.numeric(n[i])
}
p<-max(p)
}else{
p<-NA
print("No bins found. Were actual.fsts and boot.out/boot.means calculated with the same Fst method?")
}
return(p)
}))
names(pvals)<-actual.fsts$Locus
return(pvals)
}
ci.means<-function(boot.out.list){
if(inherits(boot.out.list, "list") && length(boot.out.list) > 1) {
boot.ci<-as.data.frame(do.call(rbind,
lapply(boot.out.list,function(x){
y<-as.data.frame(x[[1]])
return(y)
})))
} else {
boot.ci<-as.data.frame(boot.out.list[[1]])
}
colnames(boot.ci)<-c("Low","Upp","LowHet","UppHet")
avg.cil<-tapply(boot.ci[,1],boot.ci$UppHet,mean)
avg.ciu<-tapply(boot.ci[,2],boot.ci$UppHet,mean)
low.het<-tapply(boot.ci$LowHet,boot.ci$UppHet,mean)
return(data.frame(low=avg.cil,upp=avg.ciu, LowHet=low.het,
UppHet=as.numeric(levels(as.factor(boot.ci$UppHet)))))
}
plotting.cis<-function(df,boot.out=NULL,ci.df=NULL,sig.list=NULL,Ht.name="Ht",Fst.name="Fst",
ci.col="red", pt.pch=1,file.name=NULL,sig.col=ci.col,make.file=TRUE) {
if(is.null(boot.out) & is.null(ci.df)){
stop("Must provide bootstrap output or a list of CI values")
} else if(is.null(ci.df)){
avg.ci<-ci.means(boot.out[[3]])
} else {
avg.ci<-ci.df
}
if(make.file==TRUE){
if(!is.null(file.name)) {
png(file.name,height=8,width=9,units="in",res=300) }
else {
png("OutlierLoci.png",height=8,width=9,units="in",res=300) }
}
x.max<-round(as.numeric(max(df[,Ht.name]))+0.1,1)
plot(df[,Ht.name],df[,Fst.name],xlab="",ylab="",las=1,pch=pt.pch,axes=F,
xlim=c(0,x.max))
axis(1,pos=0,at=seq(0,x.max,0.1))
axis(2,pos=0,las=1)
mtext(expression(italic(F)["ST"]),2,line=2.5)
mtext(expression(italic(H)["T"]),1,line=2.5)
if(!is.null(sig.list)){
points(df[df[,1] %in% sig.list,Ht.name],df[df[,1] %in% sig.list,Fst.name],col=sig.col,pch=pt.pch)
}
points(avg.ci$LowHet,avg.ci$low,type="l",col=ci.col)
points(avg.ci$UppHet,avg.ci$upp,type="l",col=ci.col)
if(make.file==TRUE) dev.off()
}
find.outliers<-function(df,boot.out,ci.df=NULL,file.name=NULL){
if(is.null(boot.out) & is.null(ci.df)){
stop("Must provide bootstrap output or a list of CI values")
} else if(is.null(ci.df)){
ci.df<-ci.means(boot.out[[3]])
}
if(inherits(boot.out[[2]], "list")){
bin<-boot.out[[2]][[1]] }
if(inherits(boot.out[[2]], "data.frame")){
bin<-boot.out[[2]] }
actual.bin<-apply(ci.df, 1, function(x){
out<-df[df$Ht >= x["LowHet"] & df$Ht < x["UppHet"],] })
out<-NULL
for(i in 1:nrow(ci.df)){
out<-rbind(out,actual.bin[[i]][
as.numeric(actual.bin[[i]]$Fst) < as.numeric(ci.df[i,"low"]) |
as.numeric(actual.bin[[i]]$Fst)> as.numeric(ci.df[i,"upp"]),])
}
out<-out[!duplicated(out$Locus),]
if(!is.null(file.name)){
write.csv(out,paste(file.name,".csv",sep=""))
}
return(out)
}
calc.actual.fst<-function(df, fst.choice="fst"){
fst.options<-c("FST","Fst","fst","var","VAR","Var","theta","Theta","THETA",
"Betahat","BETAHAT","betahat")
if(!(fst.choice %in% fst.options)) { stop("Fst choice not an option. Use fst.options.print() to see options.")}
fsts<-data.frame(Locus=character(),Ht=numeric(),Fst=numeric(),
stringsAsFactors=F)
if(fst.choice %in% c("Fst","FST","fst")){
for(i in 3:ncol(df)){
fsts[i-2,]<-c(colnames(df)[i],calc.fst(df,i))
}
}
if(fst.choice %in% c("VAR","var","Var")){
for(i in 3:ncol(df)){
fsts[i-2,]<-c(colnames(df)[i],var.fst(df,i))
}
}
if(fst.choice %in% c("Theta","theta","THETA")){
for(i in 3:ncol(df)){
fsts[i-2,]<-c(colnames(df)[i],calc.theta(df,i))
}
}
if(fst.choice %in% c("betahat","BETAHAT","Betahat")){
for(i in 3:ncol(df)){
fsts[i-2,]<-c(colnames(df)[i],calc.betahat(df,i))
}
}
fsts<-data.frame(Locus=as.character(fsts$Locus),Ht=as.numeric(fsts$Ht),Fst=as.numeric(fsts$Fst),
stringsAsFactors=F)
return(fsts)
}
fhetboot<-function(gpop, fst.choice="fst",alpha=0.05,nreps=10){
fsts<-calc.actual.fst(gpop,fst.choice)
boot.out<-as.data.frame(t(replicate(nreps, fst.boot(gpop,fst.choice))))
boot.pvals<-p.boot(fsts,boot.out=boot.out)
boot.cor.pvals<-p.adjust(boot.pvals,method="BH")
boot.sig<-boot.cor.pvals[boot.cor.pvals <= alpha]
plotting.cis(fsts,boot.out,make.file=F,sig.list=names(boot.sig),pt.pch = 19)
outliers<-find.outliers(fsts,boot.out)
fsts$P.value<-boot.pvals
fsts$Corr.P.value<-boot.cor.pvals
fsts$Outlier<-FALSE
fsts[fsts$Locus %in% outliers$Locus,"Outlier"]<-TRUE
return(fsts)
}
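## End-to-end sketch (hypothetical genepop file path):
## gpop <- my.read.genepop("loci.gen")
## out <- fhetboot(gpop, fst.choice = "fst", alpha = 0.05, nreps = 10)
## head(out)  # per-locus Ht, Fst, P.value, Corr.P.value and Outlier flag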
fsthet<-function(gpop, fst.choice="fst",alpha=0.05){
fsts<-calc.actual.fst(gpop,fst.choice)
quant.out<-as.data.frame(t(replicate(1, fst.boot(gpop,fst.choice=fst.choice,ci=alpha,bootstrap=FALSE))))
plotting.cis(fsts,quant.out,make.file=F,pt.pch = 19)
outliers<-find.outliers(fsts,quant.out)
fsts$Outlier<-FALSE
fsts[fsts$Locus %in% outliers$Locus,"Outlier"]<-TRUE
return(fsts)
}
|
RRglmGOF <- function(RRglmOutput, doPearson = TRUE, doDeviance = TRUE, doHlemeshow = TRUE, hlemeshowGroups = 10, rm.na = TRUE)
{
pearson <- list(do = doPearson, obs = NULL, exp = NULL, res = NULL, stat = NA, pvalue = NA, df = NA, nGroup = NA)
deviance <- list(do = doDeviance, obs = NULL, exp = NULL, res = NULL, stat = NA, pvalue = NA, df = NA, nGroup = NA)
hlemeshow <- list(do = doHlemeshow, stat = NA, pvalue = NA, df = NA, overview = NULL, nGroup = hlemeshowGroups)
vars <- all.vars(RRglmOutput$formula)
df.work <- data.frame(y.obs = RRglmOutput$model[, vars[1]], RRglmOutput$model[, vars[2:length(vars)]])
y.fitted.tmp <- RRglmOutput$fitted.values
if (rm.na)
{
df.work <- na.omit(df.work)
y.fitted.tmp <- na.omit(y.fitted.tmp)
}
df.work <- data.frame(y.fitted = y.fitted.tmp, df.work)
if (doPearson || doDeviance)
{
df.x <- data.frame(df.work[, 3:ncol(df.work)])
factor.groups <- getUniqueGroups(df.x)
nGroup <- length(levels(factor.groups))
pearson$nGroup <- nGroup
deviance$nGroup <- nGroup
nParam <- length(RRglmOutput$coeff)
if(nGroup <= nParam)
{
cat("The Pearson Fit statistic is defined for an unsaturated model","\n")
cat("The number of unique groups should be higher than the number of estimated parameters","\n")
}
else
{
vec.pihat <- getCellMeans(y = df.work$y.fitted, factor.groups = factor.groups)
vec.prophat <- getCellMeans(y = df.work$y.obs, factor.groups = factor.groups)
vec.groupN <- getCellSizes(n = length(df.work$y.obs), factor.groups = factor.groups)
if (doPearson)
{
pearson$obs <- vec.prophat
pearson$exp <- vec.pihat
pearson$res <- (vec.prophat - vec.pihat) / sqrt(vec.pihat * (1 - vec.pihat) / vec.groupN)
pearson$stat <- sum(vec.groupN * (((vec.prophat - vec.pihat)^2) / (vec.pihat * (1 - vec.pihat))))
pearson$df <- nGroup - nParam
pearson$pvalue = 1 - pchisq(pearson$stat, df = pearson$df)
}
if (doDeviance)
{
vec.prophat[which(vec.prophat == 0)] <- 1e-15
vec.prophat[which(vec.prophat == 1)] <- 1-1e-15
deviance$obs <- vec.prophat
deviance$exp <- vec.pihat
sign <- rep(1, length(vec.prophat))
sign[which(vec.prophat < vec.pihat)] <- -1
deviance$res <- sign * sqrt(2 * vec.groupN * (vec.prophat * log(vec.prophat / vec.pihat) + (1 - vec.prophat) * log((1 - vec.prophat) / (1 - vec.pihat))))
deviance$stat <- 2 * sum(vec.groupN * (vec.prophat * log(vec.prophat / vec.pihat) + (1 - vec.prophat) * log((1 - vec.prophat) / (1 - vec.pihat))))
deviance$df <- nGroup - nParam
deviance$pvalue = 1 - pchisq(deviance$stat, df = deviance$df)
}
}
}
if(doHlemeshow)
{
probmatrix = matrix(nrow = nrow(df.work), ncol = 2)
probmatrix[, 1] = df.work$y.fitted
probmatrix[, 2] = df.work$y.obs
probmatrix <- probmatrix[order(probmatrix[, 1], decreasing=FALSE), ]
probmatrix <- cbind(probmatrix, seq(1, nrow(df.work)))
breaks <- quantile(probmatrix[, 3], probs = seq(0, 1, by = 1 / hlemeshowGroups))
probmatrix[, 3] <- cut(probmatrix[, 3], breaks = breaks, include.lowest = TRUE)
problistSplitted <- split(as.data.frame(probmatrix), probmatrix[, 3])
hlmatrix = matrix(nrow = hlemeshowGroups, ncol = 10)
for(ii in 1:hlemeshowGroups)
{
predictedProbs <- problistSplitted[[ii]][[1]]
observedProbs <- problistSplitted[[ii]][[2]]
nObservations <- length(problistSplitted[[ii]][[2]])
nObservedSuccesses <- sum(problistSplitted[[ii]][[2]])
hlmatrix[ii, 1] <- mean(predictedProbs)
hlmatrix[ii, 3] <- mean(observedProbs)
hlmatrix[ii, 2] <- mean(1 - predictedProbs)
hlmatrix[ii, 4] <- mean(1 - observedProbs)
hlmatrix[ii, 5] <- hlmatrix[ii,1] * nObservations
hlmatrix[ii, 6] <- nObservations - hlmatrix[ii, 5]
hlmatrix[ii, 7] <- nObservedSuccesses
hlmatrix[ii, 8] <- nObservations - hlmatrix[ii, 7]
nObservationsInGroup <- nObservations
avgPredictedSuccesses <- hlmatrix[ii, 1]
avgObservedSuccesses <- hlmatrix[ii, 3]
hlmatrix[ii, 9] <- nObservationsInGroup * (((avgObservedSuccesses - avgPredictedSuccesses)^2) / (avgPredictedSuccesses * (1 - avgPredictedSuccesses)))
hlmatrix[ii, 10] <- (avgObservedSuccesses - avgPredictedSuccesses)/sqrt(avgPredictedSuccesses * (1 - avgPredictedSuccesses) / nObservationsInGroup)
}
df.hlemeshow <- data.frame(obs = hlmatrix[, 7] + hlmatrix[, 8], exp.suc = hlmatrix[, 5], obs.suc = hlmatrix[, 7],
stat = hlmatrix[, 9], res = hlmatrix[, 10])
colnames(df.hlemeshow) <- c("Observations", "Expected successes", "Observed successes", "H-L Statistic", "Pearson Residuals")
rownames(df.hlemeshow) <- paste("Group", seq(1, hlemeshowGroups))
hlemeshow$stat <- sum(hlmatrix[, 9])
hlemeshow$df <- hlemeshowGroups - 2
hlemeshow$pvalue <- 1 - pchisq(hlemeshow$stat, df = hlemeshow$df)
hlemeshow$overview <- df.hlemeshow
}
ls.return <- list(pearson = pearson, deviance = deviance, hlemeshow = hlemeshow, vars = vars, n = nrow(df.work), family = RRglmOutput$family)
class(ls.return) <- "RRglmGOF"
return(ls.return)
}
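## Usage sketch (hypothetical fit; RRglmGOF() expects a fitted randomized-
## response GLM exposing $formula, $model, $fitted.values and $coeff):
## fit <- RRglm(y ~ x1 + x2, ...)  # hypothetical fitting call
## gof <- RRglmGOF(fit, doHlemeshow = TRUE, hlemeshowGroups = 10)
## gof$hlemeshow$overview          # per-group Hosmer-Lemeshow diagnostics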
|
pgListGeom <- function(conn, geog = TRUE) {
dbConnCheck(conn)
if (!suppressMessages(pgPostGIS(conn))) {
stop("PostGIS is not enabled on this database.")
}
if (!geog) end <- ";" else
end <- paste("UNION"," SELECT", " f_table_schema AS schema_name,",
" f_table_name AS table_name,", " f_geography_column AS geom_column,",
" type AS geometry_type,", " 'GEOGRAPHY'::character(9) AS type", "FROM geography_columns",
" ORDER BY type desc;",
sep = "\n")
tmp.query <- paste("SELECT", " f_table_schema AS schema_name,",
" f_table_name AS table_name,", " f_geometry_column AS geom_column,",
" type AS geometry_type,", " 'GEOMETRY'::character(8) AS type", "FROM geometry_columns ", end ,
sep = "\n")
tab <- dbGetQuery(conn, tmp.query)
return(tab)
}
pgListRast <- function(conn) {
dbConnCheck(conn)
if (!suppressMessages(pgPostGIS(conn))) {
stop("PostGIS is not enabled on this database.")
}
tmp.query <- paste("SELECT", " r_table_schema AS schema_name,",
" r_table_name AS table_name,", " r_raster_column AS raster_column",
"FROM raster_columns;",
sep = "\n")
tab <- dbGetQuery(conn, tmp.query)
return(tab)
}
|
rkay <- function(n, df, ncp=0) {
chincp <- df * ncp^2
sqrt(rchisq(n, df, chincp) / df)}
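## rkay() draws from the K distribution: the square root of a noncentral
## chi-squared variate (noncentrality df * ncp^2) divided by its degrees of
## freedom. With ncp = 0 it reduces to sqrt(rchisq(n, df) / df), e.g.:
## set.seed(1); rkay(5, df = 10)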
|
multicut.default <-
function(Y, X, strata, dist="gaussian", sset=NULL, cl=NULL, ...)
{
if (missing(strata))
stop("It looks like that strata is missing")
Y <- data.matrix(Y)
if (is.null(colnames(Y)))
colnames(Y) <- paste("Sp", seq_len(ncol(Y)))
if (any(duplicated(colnames(Y)))) {
warning("Duplicate column names found and renamed in LHS")
colnames(Y) <- make.names(colnames(Y), unique = TRUE)
}
if (!all(colSums(abs(Y)) > 0)) {
warning("Empty columns in Y were dropped")
Y <- Y[,colSums(abs(Y)) > 0,drop=FALSE]
}
if (missing(X)) {
X <- matrix(1, nrow(Y), 1L)
rownames(X) <- rownames(Y)
colnames(X) <- "(Intercept)"
}
if (any(is.na(Y)))
stop("Y contains NA")
if (any(is.na(X)))
stop("X contains NA")
if (any(is.na(strata)))
stop("strata argument contains NA")
if (is.ordered(strata)) {
warning("ordering in strata ignored")
class(strata) <- "factor"
}
if (!is.factor(strata))
strata <- as.factor(strata)
strata <- droplevels(strata)
Z <- strata
if (!is.function(dist)) {
dist <- .opticut_dist(dist, make_dist=TRUE)
Dist <- strsplit(as.character(dist), ":", fixed=TRUE)[[1L]][1L]
if (Dist %in% c("rsf", "rspf") && ncol(Y) > 1L)
stop("'", Dist, "' is only available for single species in RHS")
}
if (ncol(Y) < 2L) {
pbo <- pbapply::pboptions(type = "none")
on.exit(pbapply::pboptions(pbo), add=TRUE)
}
if (inherits(cl, "cluster")) {
parallel::clusterEvalQ(cl, library(opticut))
e <- new.env()
assign("dist", dist, envir=e)
assign("X", X, envir=e)
assign("Z", X, envir=e)
assign("Y", Y, envir=e)
assign("sset", sset, envir=e)
parallel::clusterExport(cl, c("Y", "X","Z","dist"), envir=e)
on.exit(parallel::clusterEvalQ(cl, rm(list=c("Y", "X","Z","dist"))), add=TRUE)
on.exit(parallel::clusterEvalQ(cl, detach(package:opticut)), add=TRUE)
}
if (getOption("ocoptions")$try_error) {
res <- pbapply::pblapply(seq_len(ncol(Y)), function(i, ...)
try(multicut1(Y=Y[,i], X=X, Z=Z, dist=dist, sset=sset, ...)), cl=cl, ...)
names(res) <- colnames(Y)
Failed <- sapply(res, inherits, "try-error")
failed <- names(res)[Failed]
if (any(Failed)) {
if (length(failed) == length(res))
stop("Bad news: opticut failed for all species.")
warning("Bad news: opticut failed for ", length(failed),
" out of ", length(res), " species.")
}
} else {
res <- pbapply::pblapply(seq_len(ncol(Y)), function(i, ...)
multicut1(Y=Y[,i], X=X, Z=Z, dist=dist, sset=sset, ...), cl=cl, ...)
names(res) <- colnames(Y)
Failed <- logical(length(res))
failed <- character(0)
}
NOBS <- if (is.null(sset))
NROW(Y) else NROW(data.matrix(Y)[sset,,drop=FALSE])
out <- list(call=match.call(),
species=res[!Failed],
X=X,
Y=Y[,!Failed,drop=FALSE],
strata=Z,
nobs=NOBS,
sset=sset,
dist=dist,
scale=getOption("ocoptions")$scale,
failed=failed)
if (is.function(dist)) {
attr(out$dist, "dist") <- deparse(substitute(dist))
for (i in seq_len(length(out$species)))
attr(out$species[[i]], "dist") <- deparse(substitute(dist))
}
class(out) <- "multicut"
mu <- sapply(out$species, "[[", "mu")
if (any(mu < 0))
warning("Negative fitted values found for ",
sum(colSums(mu < 0) > 0), " species.")
out
}
|
longletters<-function()
{
LONGLETTERS = rep("0",27*26)
LONGLETTERS[1:26] = LETTERS
for (i in 1:26) {
for (j in 1:26) {
LONGLETTERS[26*i + j] = paste0(LETTERS[i],LETTERS[j])
}
}
return(LONGLETTERS)
}
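## Example of the extended letter sequence (deterministic):
## LL <- longletters()
## LL[c(1, 26, 27, 28, 702)]
## #> [1] "A"  "Z"  "AA" "AB" "ZZ"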
UniformDesignMatrix = function( n, p, c ) {
emptymatrix = matrix(0, n, p)
cholmat = chol(diag(p))
for ( i in 1:n ) {
emptymatrix[ i, ] = fastrmvnorm(cholmat, cholesky = T)
}
emptymatrix = pnorm(emptymatrix)
emptymatrix = ceiling(c * emptymatrix)
finalmatrix = matrix(0, n, p)
LL = longletters() # build the letter lookup once rather than n*p times
for ( i in 1:n ) {
for ( j in 1:p ) {
finalmatrix[ i, j ] = paste0(LL[ emptymatrix[ i, j ] ], j)
}
}
finalmatrix = as.data.frame(finalmatrix, stringsAsFactors = TRUE)
return(finalmatrix)
}
CorrelatedDesignMatrix = function( n, cov_mat, c ) {
if (( dim(cov_mat)[ 1 ] != dim(cov_mat)[ 2 ] )){
stop("Incorrect input arguments")
}
cov_mat = 2 * sin( cov_mat * pi / 6 )
cholmat = chol(cov_mat)
p = dim(cov_mat)[ 1 ]
emptymatrix = matrix(0, n, p)
for ( i in 1:n ) {
emptymatrix[ i, ] = fastrmvnorm(cholmat, cholesky = T)
}
emptymatrix = pnorm(emptymatrix)
emptymatrix = ceiling(c * emptymatrix)
finalmatrix = matrix(0, n, p)
LL = longletters() # build the letter lookup once rather than n*p times
for ( i in 1:n ) {
for ( j in 1:p ) {
finalmatrix[ i, j ] = paste0(LL[ emptymatrix[ i, j ] ], j)
}
}
finalmatrix = as.data.frame(finalmatrix, stringsAsFactors = TRUE)
return(finalmatrix)
}
|
AMGE.AMMI <- function(model, n, alpha = 0.05,
ssi.method = c("farshadfar", "rao"), a = 1) {
if (!is(model, "AMMI")) {
stop('"model" is not of class "AMMI"')
}
if (!(0 < alpha && alpha < 1)) {
stop('"alpha" should be between 0 and 1 (0 < alpha < 1)')
}
if (missing(n) || is.null(n)) {
n <- sum(model$analysis$Pr.F <= alpha, na.rm = TRUE)
}
if (length(n) != 1 || n %% 1 != 0) {
stop('"n" is not an integer vector of unit length')
}
if (n > nrow(model$analysis)) {
stop('"n" is greater than the number of IPCs in "model"')
}
ssi.method <- match.arg(ssi.method)
yresp <- setdiff(colnames(model$means), c("ENV", "GEN", "RESIDUAL"))
ge <- array(model$genXenv, dim(model$genXenv), dimnames(model$genXenv))
svdge <- svd(ge)
lambda.n <- svdge$d[1:n]
gamma.n <- svdge$u[, 1:n]
delta.n <- svdge$v[, 1:n]
ge.n <- gamma.n %*% diag(lambda.n) %*% t(delta.n)
AMGE <- rowSums(ge.n)
B <- model$means
W <- aggregate(B[, yresp], by = list(model$means$GEN), FUN = mean, na.rm = TRUE)
SSI_AMGE <- SSI(y = W$x, sp = AMGE, gen = W$Group.1,
method = ssi.method, a = a)
ranking <- SSI_AMGE
colnames(ranking) <- c("AMGE", "SSI", "rAMGE", "rY", "means")
return(ranking)
}
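## Usage sketch (assumes an AMMI fit, e.g. from agricolae::AMMI; variable
## names here are hypothetical):
## model <- agricolae::AMMI(ENV, GEN, REP, Y)
## AMGE.AMMI(model, n = 2)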
|
inst_reactlog_file <- function(file) {
system.file(file.path("reactlog", file), package = "reactlog")
}
inst_reactlog_assets <- function() {
inst_reactlog_file("reactlogAsset")
}
reactlog_render <- function(log, session_token = NULL, time = TRUE) {
log <- reactlog_upgrade(log)
template_file <- inst_reactlog_file("reactlog.html")
html <- paste(readLines(template_file, warn = FALSE), collapse = "\n")
json_blob <- as.character(
reactlog_write(log, file = NULL, session_token = session_token)
)
fixed_sub <- function(...) {
sub(..., fixed = TRUE)
}
html <- fixed_sub(
"__DATA__", paste(json_blob, collapse = "\n"),
fixed_sub(
"__TIME__", paste0("\"", time, "\""),
fixed_sub(
"<script src=\"defaultLog.js\"></script>", "",
html
)
)
)
if (requireNamespace("shiny")) {
shiny::addResourcePath(
"reactlogAsset",
inst_reactlog_assets()
)
}
file <- tempfile(fileext = ".html")
write_utf8(html, file = file)
file.copy(
inst_reactlog_assets(),
dirname(file),
recursive = TRUE
)
return(file)
}
|
roots2pars <- function(roots) {
out <- list(roots = roots,
periods = 2*pi / Arg(roots),
frequencies = Arg(roots) / (2*pi),
moduli = Mod(roots),
rates = log(Mod(roots)))
out$periods[abs(Arg(roots)) < .Machine$double.eps^.5] <- Inf
class(out) <- "fdimpars.1d"
out
}
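## Example: a unit-modulus root with argument 2*pi/12 encodes an undamped
## sinusoid of period 12:
## pars <- roots2pars(exp(2i * pi / 12))
## pars$periods  # 12
## pars$rates    # 0 (modulus 1, i.e. no damping)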
print.fdimpars.1d <- function(x, ...) {
cat(" period rate | Mod Arg | Re Im\n")
for (i in seq_along(x$roots)) {
cat(sprintf("% 9.3f % 8.6f | % 7.5f % 3.2f | % 7.5f % 7.5f\n",
x$periods[i],
x$rates[i],
x$moduli[i],
x$frequencies[i] * 2*pi,
Re(x$roots[i]),
Im(x$roots[i])))
}
}
parestimate.pairs <- function(U, normalize = FALSE) {
stopifnot(ncol(U) == 2)
U1 <- apply(U[-1, ], 2, diff)
U2 <- apply(U[-nrow(U), ], 2, diff)
scos <- rowSums(U1*U2) / sqrt(rowSums(U1*U1)) / sqrt(rowSums(U2*U2))
mres <- mad(2*pi/acos(scos)) / median(2*pi/acos(scos))
if (mres > 0.3)
warning("too big deviation of estimates, period estimates might be unreliable")
r <- exp(1i * acos(median(scos)))
if (normalize) r <- r / abs(r)
roots2pars(r)
}
tls.solve <- function(A, B) {
stopifnot(ncol(A) == ncol(B))
r <- ncol(A)
V <- svd(cbind(A, B))$v[, 1:r, drop = FALSE]
Conj(qr.solve(t(V[1:r,, drop = FALSE]), t(V[-(1:r),, drop = FALSE])))
}
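## tls.solve() returns a total least squares solution of A %*% X ~ B built
## from the leading r right singular vectors of cbind(A, B), so perturbations
## in A and B are treated symmetrically; qr.solve, by contrast, is ordinary
## least squares with A assumed exact. .shift.matrix() selects between the
## two via solve.method = c("ls", "tls").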
.cycle.permutation <- function(v, k = 0) {
n <- length(v)
k <- k %% n
if (k) {
v <- c(v[(k + 1):n], v[1:k])
}
v
}
.mdim.cycle.permutation <- function(v, ndim, k = 0) {
v <- as.array(v)
d <- dim(v)
idx <- lapply(d, seq_len)
idx[[ndim]] <- .cycle.permutation(idx[[ndim]], k = k)
do.call("[", c(list(v), idx, list(drop = FALSE)))
}
.annulate.row <- function(v, ndim, i = 1, value = 0) {
v <- as.array(v)
d <- dim(v)
idx <- lapply(d, seq_len)
idx[[ndim]] <- i
do.call("[<-", c(list(v), idx, list(value = value)))
}
.shifted.matrix.masks <- function(wmask,
ndim,
circular = FALSE) {
wmask <- as.array(wmask)
d <- dim(wmask)
mask <- wmask & .mdim.cycle.permutation(wmask, ndim, 1)
if (!circular) {
mask <- .annulate.row(mask, ndim, i = d[ndim], FALSE)
}
lind <- array(NA_integer_, dim = d)
lind[wmask] <- seq_len(sum(wmask))
rind <- .mdim.cycle.permutation(lind, ndim, 1)
list(left.mask = lind[mask], right.mask = rind[mask])
}
.shift.matrix <- function(U, wmask,
ndim,
circular = FALSE,
solve.method = c("ls", "tls")) {
solve.method <- match.arg(solve.method)
solver <- switch(solve.method,
ls = qr.solve,
tls = tls.solve)
smxs <- .shifted.matrix.masks(wmask, ndim, circular = circular)
lm.left <- U[smxs$left.mask,, drop = FALSE]
lm.right <- U[smxs$right.mask,, drop = FALSE]
solver(lm.left, lm.right)
}
.pairs <- function(x, groups,
subspace = c("column", "row"),
normalize.roots = NULL,
...,
drop) {
if (missing(groups))
groups <- 1:min(nsigma(x), nu(x))
subspace <- match.arg(subspace)
if (is.null(normalize.roots))
normalize.roots <- x$circular || inherits(x, "toeplitz.ssa")
if (is.shaped(x)) {
stop("`pairs' parameter estimation method is not implemented for shaped SSA case yet")
}
if (inherits(x, "cssa")) {
stop("`pairs' parameter estimation method is not implemented for Complex SSA case yet")
}
if (identical(subspace, "column")) {
span <- .colspan
} else if (identical(subspace, "row")) {
if (inherits(x, "mssa")) {
stop("row space `pairs' parameter estimation method is not implemented for MSSA yet")
}
span <- .rowspan
}
.maybe.continue(x, groups = groups, ...)
out <- list()
for (i in seq_along(groups)) {
group <- groups[[i]]
if (length(group) != 2)
stop("can estimate for pair of eigenvectors only using `pairs' method")
out[[i]] <- parestimate.pairs(span(x, group), normalize = normalize.roots)
}
names(out) <- .group.names(groups)
if (length(out) == 1 && drop)
out <- out[[1]]
out
}
parestimate.1d.ssa <- function(x, groups,
method = c("esprit", "pairs"),
subspace = c("column", "row"),
normalize.roots = NULL,
dimensions = NULL,
solve.method = c("ls", "tls"),
...,
drop = TRUE) {
method <- match.arg(method)
solve.method <- match.arg(solve.method)
if (missing(groups))
groups <- 1:min(nsigma(x), nu(x))
subspace <- match.arg(subspace)
if (is.null(normalize.roots))
normalize.roots <- x$circular || inherits(x, "toeplitz.ssa")
if (identical(method, "pairs")) {
.pairs(x, groups = groups,
subspace = subspace,
normalize.roots = normalize.roots,
...,
drop = drop)
} else if (identical(method, "esprit")) {
parestimate.nd.ssa(x, groups = groups,
subspace = subspace,
normalize.roots = normalize.roots,
dimensions = c(x = 1),
...,
solve.method = solve.method,
drop = drop)
}
}
parestimate.toeplitz.ssa <- parestimate.1d.ssa
parestimate.mssa <- parestimate.1d.ssa
parestimate.cssa <- parestimate.1d.ssa
.matrix.linear.combination <- function(Zs, beta = 8) {
if (length(beta) == 1) {
beta <- beta ^ seq_len(length(Zs) - 1)
}
if (length(beta) == length(Zs) - 1) {
beta <- c(1 - sum(beta), beta)
}
Z <- matrix(0., ncol = ncol(Zs[[1]]), nrow = nrow(Zs[[1]]))
for (i in seq_along(Zs)) {
Z <- Z + beta[i] * Zs[[i]]
}
Z
}
.est.exp.2desprit <- function(Zs, beta = 8) {
Z <- .matrix.linear.combination(Zs, beta)
Ze <- eigen(Z, symmetric = FALSE)
Tinv <- Ze$vectors
lapply(seq_along(Zs),
function(i) diag(qr.solve(Tinv, Zs[[i]] %*% Tinv)))
}
.simple.assignment <- function(mx) {
mx <- as.matrix(mx)
stopifnot(ncol(mx) == nrow(mx))
d <- nrow(mx)
stopifnot(all(mx > -Inf))
res <- rep(0, d)
for (k in seq_len(d)) {
maxij <- which(mx == max(mx), arr.ind = TRUE)[1, ]
res[maxij[1]] <- maxij[2]
mx[maxij[1], ] <- -Inf
mx[, maxij[2]] <- -Inf
}
stopifnot(isTRUE(all.equal(sort(res), seq_len(ncol(mx)))))
res
}
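## Example: the greedy assignment repeatedly takes the largest remaining
## entry and strikes out its row and column (deterministic):
## .simple.assignment(matrix(c(0.9, 0.1, 0.2, 0.8), 2, 2))
## #> [1] 1 2   (row 1 -> column 1, row 2 -> column 2)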
.est.exp.memp.new <- function(Zs, beta = 8) {
Z <- .matrix.linear.combination(Zs, beta)
Ze <- eigen(Z, symmetric = FALSE)
Zse <- lapply(Zs, eigen, symmetric = FALSE)
Ps <- lapply(seq_along(Zs),
function(i) .simple.assignment(t(abs(qr.solve(Ze$vectors, Zse[[i]]$vectors)))))
for (P in Ps) {
stopifnot(length(P) == length(unique(P)))
}
lapply(seq_along(Zs),
function(i) Zse[[i]]$values[Ps[[i]]])
}
.esprit <- function(U,
wmask,
circular,
normalize,
dimensions = NULL,
solve.method = c("ls", "tls"),
pairing.method = c("diag", "memp"),
beta = 8) {
wmask <- as.array(wmask)
d <- dim(wmask)
solve.method <- match.arg(solve.method)
pairing.method <- match.arg(pairing.method)
if (is.null(dimensions)) {
dimensions <- seq_along(d)
}
if (max(dimensions) > length(d)) {
stop(sprintf("some of input dimension indices exceed the actual number of object dimensions (%d)",
length(d)))
}
Zs <- lapply(dimensions,
function(ndim) {
.shift.matrix(U,
wmask = wmask,
ndim = ndim,
circular = circular[ndim],
solve.method = solve.method)
})
pairer <- switch(pairing.method, diag = .est.exp.2desprit, memp = .est.exp.memp.new)
r <- pairer(Zs, beta = beta)
for (k in seq_along(d))
if (normalize[k]) r[[k]] <- r[[k]] / abs(r[[k]])
out <- lapply(r, roots2pars)
names(out) <- names(dimensions)
if (length(names(out)) == 0 || any(names(out) == "")) {
default.names <- c("x", "y", "z", "t", "u", "s",
paste("x",
seq_len(max(dimensions)),
sep = "_"))
names(out) <- default.names[dimensions]
}
if (length(out) == 1) {
out <- out[[1]]
} else {
class(out) <- "fdimpars.nd"
}
out
}
parestimate.nd.ssa <- function(x, groups,
method = c("esprit"),
subspace = c("column", "row"),
normalize.roots = NULL,
dimensions = NULL,
solve.method = c("ls", "tls"),
pairing.method = c("diag", "memp"),
beta = 8,
...,
drop = TRUE) {
method <- match.arg(method)
stopifnot(identical(method, "esprit"))
solve.method <- match.arg(solve.method)
pairing.method <- match.arg(pairing.method)
if (missing(groups))
groups <- seq_len(min(nsigma(x), nu(x)))
.maybe.continue(x, groups = groups, ...)
subspace <- match.arg(subspace)
if (identical(subspace, "column")) {
span <- .colspan
wmask <- .wmask(x)
} else if (identical(subspace, "row")) {
span <- function(...) Conj(.rowspan(...))
wmask <- .fmask(x)
}
if (is.null(dimensions)) {
dimensions <- seq_len(.dim(x))
}
if (is.null(normalize.roots))
normalize.roots <- x$circular | inherits(x, "toeplitz.ssa")
if (length(normalize.roots) > .dim(x))
warning("incorrect argument length: length(normalize.roots) > .dim(x), only leading values will be used")
normalize.roots <- rep(normalize.roots, .dim(x))[seq_len(.dim(x))]
out <- list()
for (i in seq_along(groups)) {
group <- groups[[i]]
out[[i]] <- .esprit(span(x, group),
wmask = wmask,
circular = x$circular,
normalize = normalize.roots,
solve.method = solve.method,
pairing.method = pairing.method,
beta = beta,
dimensions = dimensions)
}
names(out) <- .group.names(groups)
if (length(out) == 1 && drop)
out <- out[[1]]
out
}
print.fdimpars.nd <- function(x, ...) {
if (length(names(x)) == 0 || any(names(x) == "")) {
names(x) <- paste("x", seq_along(x), sep = "_")
}
header <- paste(sapply(names(x),
function(name) sprintf("%8s: period rate", name)),
sep = "", collapse = " | ")
cat(header)
cat("\n")
for (i in seq_along(x[[1]]$roots)) {
row <- paste(sapply(seq_along(x),
function(k) sprintf("% 16.3f % 8.6f",
x[[k]]$periods[i],
x[[k]]$rates[i])),
sep = "", collapse = " | ")
cat(row)
cat("\n")
}
}
parestimate <- function(x, ...)
UseMethod("parestimate")
|
gce_auth <- function(new_user = FALSE, no_auto = FALSE){
.Defunct("gar_attach_auto_auth", package = "googleAuthR",
msg = "gce_auth() is defunct. Authenticate instead by downloading your JSON key and placing in a GCE_AUTH_FILE environment argument. See https://cloudyr.github.io/googleComputeEngineR/articles/installation-and-authentication.html or vignette('installation-and-authentication', package = 'googleComputeEngineR')")
}
|
xdata <- function(e=NULL)
{
tryCatch(
{
conf = fromJSON(paste(.libPaths()[1], "x.ent/www/config/ini.json", sep='/'))
path = conf$result$file;
lst_f <- xfile(sep=":")
lst_tag <- xentity();
lst_tag <-add_unique(lst_tag,xrelation())
dta <- data.frame()
reg_ent = ":\\$:"
reg_rel = ":\\$\\$:"
dta <- data.frame(file=lst_f)
text <- readLines(path)
if(is.null(e))
{
for(i in 1:length(lst_tag))
{
dta[,lst_tag[i]] <- "N/A"
}
for(i in 1:length(text))
{
f <- unlist(strsplit(text[i],":"))[1]
if(grepl(pattern=reg_ent,x=text[i]))
{
data_ele <- ""
eles <- unlist(strsplit(text[i],":"))
if(length(eles) == 4)
{
data_ele <- eles[4]
}
else if(length(eles) >= 5)
{
data_ele <- eles[4]
for(j in 5:length(eles))
{
data_ele <- paste(data_ele,eles[j],sep="; ")
}
}
dta[dta$file == f,eles[2]] <- data_ele
}
if(grepl(pattern=reg_rel,x=text[i]))
{
eles <- unlist(strsplit(text[i],reg_rel))
col = sub(pattern = paste(f,":",sep=""), replacement = "",x = eles[1])
col = gsub(":$", "",col, perl=TRUE)
if(col %in% names(dta))
{
if(dta[dta$file == f,col] == "N/A")
{
dta[dta$file == f,col] <- eles[2]
}
else
{
dta[dta$file == f,col] <- paste(dta[dta$file == f,col],eles[2],sep =";")
}
}
}
}
}
else
{
for(i in 1:length(e))
{
dta[,e[i]] <- "N/A"
}
for(i in 1:length(text))
{
f <- unlist(strsplit(text[i],":"))[1]
for(j in 1:length(e))
{
reg = paste(f,":",e[j],reg_ent,sep="")
if(grepl(pattern=reg,x=text[i]))
{
data_ele <- ""
eles <- unlist(strsplit(text[i],":"))
if(length(eles) == 4)
{
data_ele <- eles[4]
}
else if(length(eles) >= 5)
{
data_ele <- eles[4]
for(k in 5:length(eles)) # 'k' here: 'j' already indexes the enclosing loop
{
data_ele <- paste(data_ele,eles[k],sep="; ")
}
}
dta[dta$file == f,eles[2]] <- data_ele
}
reg = paste(f,":",e[j],reg_rel,sep="")
if(grepl(pattern=reg,x=text[i]))
{
result = gsub(reg, "",text[i], perl=TRUE)
if(e[j] %in% names(dta))
{
if(dta[dta$file == f,e[j]] == "N/A")
{
dta[dta$file == f,e[j]] <- result
}
else
{
dta[dta$file == f,e[j]] <- paste(dta[dta$file == f,e[j]],result,sep =";")
}
}
}
}
}
}
return(dta)
},
error=function(cond) {
message("Parameters are incorrect or there are problems in paths, please check your parameters!")
},
warning=function(cond) {
message("Parameters are incorrect or there are problems in paths, please check your parameters!")
},
finally={
rm(list=ls())
})
}
|
'cepstrum' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, resolution = 40.0,
fftLength = 0, windowShift = 5.0,
window = 'BLACKMAN', toFile = TRUE,
explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
listOfFiles <- prepareFiles(listOfFiles)
if(length(listOfFiles)==1 || !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying cepstrum to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "spectrum", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
spectrumType = 'CEP',
resolution = resolution,
fftLength = as.integer(fftLength),
windowShift = windowShift, window = window,
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
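## Usage sketch ("audio.wav" is a hypothetical file path):
## res <- cepstrum("audio.wav", toFile = FALSE)  # returns the analysis object
## # with toFile = TRUE (the default) results are written next to the inputs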
|
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
)
library(ATbounds)
nsw_treated <- read.table("http://users.nber.org/~rdehejia/data/nsw_treated.txt")
colnames(nsw_treated) <- c("treat","age","edu","black","hispanic",
"married","nodegree","RE75","RE78")
nsw_control <- read.table("http://users.nber.org/~rdehejia/data/nsw_control.txt")
colnames(nsw_control) <- c("treat","age","edu","black","hispanic",
"married","nodegree","RE75","RE78")
nsw <- rbind(nsw_treated,nsw_control)
attach(nsw)
D <- treat
Y <- (RE78 > 0)
rps <- rep(mean(D),length(D))
ate_nsw <- mean(D*Y)/mean(D)-mean((1-D)*Y)/mean(1-D)
print(ate_nsw)
model <- lm(Y ~ D)
summary(model)
confint(model)
detach(nsw)
nswre_treated <- read.table("http://users.nber.org/~rdehejia/data/nswre74_treated.txt")
colnames(nswre_treated) <- c("treat","age","edu","black","hispanic",
"married","nodegree","RE74","RE75","RE78")
nswre_control <- read.table("http://users.nber.org/~rdehejia/data/nswre74_control.txt")
colnames(nswre_control) <- c("treat","age","edu","black","hispanic",
"married","nodegree","RE74","RE75","RE78")
nswre <- rbind(nswre_treated,nswre_control)
attach(nswre)
D <- treat
Y <- (RE78 > 0)
X <- cbind(age,edu,black,hispanic,married,nodegree,RE74/1000,RE75/1000)
rps <- rep(mean(D),length(D))
ate_nswre <- mean(D*Y)/mean(D)-mean((1-D)*Y)/mean(1-D)
print(ate_nswre)
model <- lm(Y ~ D)
summary(model)
confint(model)
bns_nsw <- atebounds(Y, D, X, rps)
summary(bns_nsw)
summary(atebounds(Y, D, X, rps, Q = 2))
summary(atebounds(Y, D, X, rps, Q = 4))
print(ate_nswre)
summary(atebounds(Y, D, X, rps))
summary(atebounds(Y, D, X, rps, n_hc = ceiling(length(Y)/5)))
summary(atebounds(Y, D, X, rps, n_hc = ceiling(length(Y)/20)))
print(bns_nsw)
bns_nsw_att <- attbounds(Y, D, X, rps)
summary(bns_nsw_att)
summary(attbounds(Y, D, X, rps, Q = 2))
summary(attbounds(Y, D, X, rps, Q = 4))
summary(attbounds(Y, D, X, rps))
summary(attbounds(Y, D, X, rps, n_hc = ceiling(length(Y)/5)))
summary(attbounds(Y, D, X, rps, n_hc = ceiling(length(Y)/20)))
psid2_control <- read.table("http://users.nber.org/~rdehejia/data/psid2_controls.txt")
colnames(psid2_control) <- c("treat","age","edu","black","hispanic",
"married","nodegree","RE74","RE75","RE78")
psid <- rbind(nswre_treated,psid2_control)
detach(nswre)
attach(psid)
D <- treat
Y <- (RE78 > 0)
X <- cbind(age,edu,black,hispanic,married,nodegree,RE74/1000,RE75/1000)
rps_sp <- rep(mean(D),length(D))
bns_psid <- atebounds(Y, D, X, rps_sp)
summary(bns_psid)
summary(atebounds(Y, D, X, rps_sp, Q=1))
summary(attbounds(Y, D, X, rps_sp))
detach(psid)
Y <- RHC[,"survival"]
D <- RHC[,"RHC"]
X <- as.matrix(RHC[,-c(1,2)])
glm_ps <- stats::glm(D~X,family=binomial("logit"))
ps <- glm_ps$fitted.values
ps_treated <- ps[D==1]
ps_control <- ps[D==0]
df <- data.frame(cbind(D,ps))
colnames(df)<-c("RHC","PS")
df$RHC <- as.factor(df$RHC)
levels(df$RHC) <- c("No RHC (Control)", "RHC (Treated)")
ggplot2::ggplot(df, ggplot2::aes(x=PS, color=RHC, fill=RHC)) +
ggplot2::geom_histogram(breaks=seq(0,1,0.1),alpha=0.5,position="identity")
y1_att <- mean(D*Y)/mean(D)
att_wgt <- ps/(1-ps)
y0_att_num <- mean((1-D)*att_wgt*Y)
y0_att_den <- mean((1-D)*att_wgt)
y0_att <- y0_att_num/y0_att_den
att_ps <- y1_att - y0_att
print(att_ps)
rps <- rep(mean(D),length(D))
att_rps <- mean(D*Y)/mean(D) - mean((1-D)*Y)/mean(1-D)
print(att_rps)
Xunique <- mgcv::uniquecombs(X)
print(c("no. of unique rows:", nrow(Xunique)))
print(c("sample size :", nrow(X)))
summary(attbounds(Y, D, X, rps))
summary(atebounds(Y, D, X, rps, Q = 1))
summary(atebounds(Y, D, X, rps, Q = 2))
summary(atebounds(Y, D, X, rps, Q = 3))
summary(atebounds(Y, D, X, rps, Q = 4))
Y <- EFM[,"cesarean"]
D <- EFM[,"monitor"]
X <- as.matrix(EFM[,c("arrest", "breech", "nullipar", "year")])
year <- EFM[,"year"]
ate_rps <- mean(D*Y)/mean(D) - mean((1-D)*Y)/mean(1-D)
print(ate_rps)
rps <- rep(mean(D),length(D))
print(rps[1])
summary(atebounds(Y, D, X, rps, Q = 1, x_discrete = TRUE))
summary(atebounds(Y, D, X, rps, Q = 2, x_discrete = TRUE))
summary(atebounds(Y, D, X, rps, Q = 3, x_discrete = TRUE))
summary(atebounds(Y, D, X, rps, Q = 5, x_discrete = TRUE))
summary(atebounds(Y, D, X, rps, Q = 10, x_discrete = TRUE))
summary(atebounds(Y, D, X, rps, Q = 20, x_discrete = TRUE))
summary(atebounds(Y, D, X, rps, Q = 50, x_discrete = TRUE))
summary(atebounds(Y, D, X, rps, Q = 100, x_discrete = TRUE))
|
reml <- function(nu, skel, thetaG, sLc,
modMats, W, Bpinv, nminffx, nminfrfx, rfxlvls, rfxIncContrib2loglik,
thetaR = NULL,
tWW = NULL, RHS = NULL){
lambda <- is.null(thetaR)
Rinv <- as(solve(nu[[length(thetaG)+1]]), "symmetricMatrix")
Ginv <- lapply(thetaG, FUN = function(x){as(solve(nu[[x]]), "symmetricMatrix")})
if(lambda){
tWKRinvW <- tWW
tyRinvy <- crossprod(modMats$y)
} else{
KRinv <- kronecker(Rinv, Diagonal(x = 1, n = modMats$Zr@Dim[[2L]]))
tyRinvy <- as(crossprod(modMats$y, KRinv) %*% modMats$y, "sparseMatrix")
tWKRinv <- crossprod(W, KRinv)
tWKRinvW <- tWKRinv %*% W
RHS <- Matrix(tWKRinv %*% modMats$y, sparse = TRUE)
}
if(modMats$nG > 0){
C <- as(tWKRinvW + bdiag(c(Bpinv,
sapply(1:modMats$nG, FUN = function(u){kronecker(modMats$listGeninv[[u]], Ginv[[u]])}))), "symmetricMatrix")
} else C <- as(tWKRinvW + Bpinv, "symmetricMatrix")
if(is.null(sLc)){
sLc <- Cholesky(C, perm = TRUE, LDL = FALSE, super = FALSE)
} else sLc <- update(sLc, C)
sln <- solve(a = sLc, b = RHS, system = "A")
tyPy <- tyRinvy - crossprod(sln, RHS)
logDetC <- 2 * sum(log(sLc@x[sLc@p+1][1:sLc@Dim[[1L]]]))
sigma2e <- if(lambda) tyPy / nminffx else NA
if(lambda){
loglik <- nminfrfx * log(sigma2e)
} else{
loglik <- modMats$ny * log(nu[[thetaR]])
}
if(lambda){
logDetGfun <- function(x){rfxlvls[x] * log(as.vector(nu[[x]]*sigma2e))}
} else{
logDetGfun <- function(x){rfxlvls[x] * log(as.vector(nu[[x]]))}
}
if(modMats$nG != 0){
loglik <- loglik + sum(sapply(seq(modMats$nG), FUN = logDetGfun)) + rfxIncContrib2loglik
}
loglik <- -0.5 * (loglik + logDetC + if(lambda) nminffx else tyPy)
r <- modMats$y - W %*% sln
return(list(loglik = loglik@x,
sigma2e = if(lambda) sigma2e@x else NA,
tyPy = tyPy@x, logDetC = logDetC,
sln = sln, r = r, sLc = sLc))
}
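## reml() evaluates the REML log-likelihood by solving the mixed model
## equations C %*% sln = RHS via sparse Cholesky, where C is W'R^{-1}W plus
## the block-diagonal penalty built from Bpinv and the kronecker products of
## each generalized inverse with its Ginv. Under the "lambda"
## parameterisation (thetaR = NULL) variance components are expressed as
## ratios to the residual, so sigma2e is profiled out as tyPy / nminffx.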
em <- function(nuvin, thetaG, thetaR, conv,
modMats, nminffx, sLc, ndgeninv, sln, r){
Cinv_ii <- matrix(0, nrow = nrow(sln), ncol = 1)
if(length(thetaG) > 0){
ei <- modMats$nb + sum(sapply(modMats$Zg, FUN = ncol))
Ig <- Diagonal(n = sLc@Dim[1L], x = 1)
for(g in rev(thetaG)){
if(conv[g] == "F") next
qi <- ncol(modMats$Zg[[g]])
si <- ei - qi + 1
trace <- 0
if(ndgeninv[g]){
o <- (crossprod(sln[si:ei, , drop = FALSE], modMats$listGeninv[[g]]) %*% sln[si:ei, , drop = FALSE])@x
for(k in si:ei){
Cinv_siei_k <- solve(sLc, b = Ig[, k], system = "A")[si:ei, , drop = TRUE]
Cinv_ii[k] <- Cinv_siei_k[k-si+1]
trace <- trace + sum(modMats$listGeninv[[g]][(k-si+1), , drop = TRUE] * Cinv_siei_k)
}
} else{
o <- crossprod(sln[si:ei, , drop = FALSE])
for(k in si:ei){
Cinv_ii[k] <- solve(sLc, b = Ig[, k], system = "A")[k,]
trace <- trace + Cinv_ii[k]
}
}
nuvin[g] <- as(as(matrix((o + trace) / qi),
"symmetricMatrix"),
"dsCMatrix")
ei <- si-1
}
}
if(conv[thetaR] != "F") nuvin[thetaR] <- crossprod(modMats$y, r) / nminffx
return(list(nuv = nuvin, Cinv_ii = Cinv_ii))
}
ai <- function(nuvin, skel, thetaG,
modMats, W, sLc, sln, r,
thetaR = NULL,
sigma2e = NULL){
lambda <- is.null(thetaR)
p <- length(nuvin)
nuin <- vech2matlist(nuvin, skel)
if(lambda){
Rinv <- as(solve(matrix(sigma2e)), "symmetricMatrix")
} else{
Rinv <- as(solve(nuin[[thetaR]]), "symmetricMatrix")
}
B <- matrix(0, nrow = modMats$ny, ncol = p)
if(length(thetaG) > 0){
Ginv <- lapply(thetaG, FUN = function(x){as(solve(nuin[[x]]), "symmetricMatrix")})
si <- modMats$nb+1
for(g in thetaG){
qi <- ncol(modMats$Zg[[g]])
ei <- si - 1 + qi
Bg <- modMats$Zg[[g]] %*% sln[si:ei, , drop = FALSE] %*% Ginv[[g]]
B[cbind(Bg@i+1, g)] <- Bg@x
si <- ei+1
}
} else g <- p-1
if(g < p){
if(lambda){
B[, p] <- (modMats$y %*% Rinv)@x
} else{
B[, p] <- (r %*% Rinv)@x
}
}
if(lambda){
BRHS <- Matrix(crossprod(W, B), sparse = TRUE)
tBRinvB <- crossprod(B)
} else{
KRinv <- kronecker(Rinv, Diagonal(x = 1, n = modMats$Zr@Dim[[2L]]))
tWKRinv <- crossprod(W, KRinv)
BRHS <- Matrix(tWKRinv %*% B, sparse = TRUE)
tBRinvB <- crossprod(B, KRinv) %*% B
}
tBPB <- tBRinvB - crossprod(solve(sLc, BRHS, system = "A"), BRHS)
AI <- 0.5 * tBPB
if(lambda) AI <- AI / sigma2e
as(AI, "matrix")
}
gradFun <- function(nuvin, thetaG, modMats, Cinv, sln,
sigma2e = NULL,
r = NULL, nminfrfx = NULL){
lambda <- is.null(r)
p <- length(nuvin)
dLdnu <- matrix(0, nrow = p, ncol = 1, dimnames = list(names(nuvin), NULL))
if(!lambda) tee <- crossprod(r)
if(length(thetaG) > 0){
trCinvGeninv_gg <- tugug <- as.list(rep(0, length(thetaG)))
si <- modMats$nb+1
for(g in thetaG){
qi <- ncol(modMats$Zg[[g]])
ei <- si - 1 + qi
if(inherits(modMats$listGeninv[[g]], "ddiMatrix")){
tugug[[g]] <- crossprod(sln[si:ei, , drop = FALSE])
trCinvGeninv_gg[[g]] <- tr(Cinv[si:ei, si:ei])
} else{
tugug[[g]] <- crossprod(sln[si:ei, , drop = FALSE], modMats$listGeninv[[g]]) %*% sln[si:ei, , drop = FALSE]
trCinvGeninv_gg[[g]] <- tr(modMats$listGeninv[[g]] %*% Cinv[si:ei, si:ei])
}
si <- ei+1
}
if(lambda){
for(g in thetaG){
dLdnu[g] <- (ncol(modMats$Zg[[g]]) / nuvin[g]) - (1 / nuvin[g]^2) * (trCinvGeninv_gg[[g]] + tugug[[g]] / sigma2e)
}
} else{
dLdnu[p] <- (nminfrfx / tail(nuvin, 1)) - (tee / tail(nuvin, 1)^2)
for(g in thetaG){
dLdnu[p] <- dLdnu[p] + (1 / tail(nuvin, 1)) * (trCinvGeninv_gg[[g]] /nuvin[g])
dLdnu[g] <- (ncol(modMats$Zg[[g]]) / nuvin[g]) - (1 / nuvin[g]^2) * (trCinvGeninv_gg[[g]] + tugug[[g]])
}
}
} else{
if(!lambda) dLdnu[p] <- (nminfrfx / tail(nuvin, 1)) - (tee / tail(nuvin, 1)^2)
}
-0.5 * dLdnu
}
|
context("tpmatrix unit tests")
test_that("tpmatrix() works correctly" , {
p <- c(.7, .6)
tpmat <- tpmatrix(
C, p,
0, 1
)
n <- length(p)
expect_true(inherits(tpmat, "data.table"))
expect_equal(tpmat$s1_s1, 1 - p)
expect_equal(tpmat$s1_s2, p)
expect_equal(tpmat$s2_s1, rep(0, n))
expect_equal(tpmat$s2_s2, rep(1, n))
})
test_that("tpmatrix() works with complement argument" , {
pmat <- data.frame(s1_s1 = 0, s1_s2 = .5, s2_s1 = .3, s2_s2 = 0)
tpmat1 <- tpmatrix(pmat, complement = c("s1_s1", "s2_s2"))
expect_equal(tpmat1$s1_s1, .5)
expect_equal(tpmat1$s2_s2, .7)
tpmat2 <- tpmatrix(pmat, complement = c(1, 4))
tpmat3 <- tpmatrix(pmat, complement = c(1L, 4L))
expect_equal(tpmat1, tpmat2)
expect_equal(tpmat1, tpmat3)
})
test_that("tpmatrix() works with states argument" , {
p <- tpmatrix(
.5, .5, .5, .5,
states = c("s1", "s2"), sep = "."
)
expect_equal(
colnames(p),
c("s1.s1", "s1.s2", "s2.s1", "s2.s2")
)
})
test_that("tpmatrix() throws error if complement argument is incorrectly specified" , {
expect_error(
tpmatrix(2, complement = data.frame(2)),
"'complement' must either be a vector of integers or a character vector."
)
})
test_that("tpmatrix() throws error if it is not a square msatrix" , {
expect_error(
tpmatrix(1, 2, 3),
"tpmatrix() must be a square matrix.",
fixed = TRUE
)
})
test_that("tpmatrix() throws error if states has wrong length" , {
expect_error(
tpmatrix(1, 2, 3, 4, states = "s1"),
paste0("The length of 'states' must equal the square root of the number of ",
"elements in the transition probability matrix."),
fixed = TRUE
)
})
h <- hesim_data(strategies = data.table(strategy_id = 1:2),
patients = data.table(patient_id = 1:3))
input_data <- expand(h, by = c("strategies", "patients"))
tpmat_id <- tpmatrix_id(input_data, n_samples = 2)
p_12 <- ifelse(tpmat_id$strategy_id == 1, .6, .7)
p <- tpmatrix(
C, p_12,
0, 1
)
test_that("summarize.tpmatrix() works without unflattening" , {
ps <- summary(p)
expect_equal(colnames(ps), c("from", "to", "mean", "sd"))
expect_equal(nrow(ps), 4)
expect_equal(colnames(p), paste0(ps$from, "_", ps$to))
expect_equivalent(ps$mean, apply(p, 2, mean))
})
test_that("summarize.tpmatrix() works with variables probs arguments" , {
ps <- summary(p, prob =.5)
expect_equal(colnames(ps), c("from", "to", "mean", "sd", "50%"))
ps <- summary(p, prob = c(.25, .75, .9))
expect_equal(colnames(ps), c("from", "to", "mean", "sd", "25%", "75%", "90%"))
})
test_that("summarize.tpmatrix() works with unflattening" , {
ps <- summary(p, unflatten = TRUE)
expect_equal(colnames(ps), c("mean", "sd"))
expect_true(is.matrix(ps$mean[[1]]))
states <- attr(p, "states")
expect_equal(
ps$mean[[1]],
matrix(apply(p, 2, mean), nrow = 2, byrow = TRUE,
dimnames = list(states, states))
)
})
test_that("summarize.tpmatrix() works with ID argument" , {
ps <- summary(p, id = tpmat_id)
expect_equal(nrow(input_data) * 4, nrow(ps))
expect_equal(colnames(ps), c("strategy_id", "patient_id",
"from", "to", "mean", "sd"))
expect_true(all(ps$sd == 0))
expect_true(all(ps[strategy_id == 1 & from == "s1" & to == "s2"]$mean == .6))
expect_true(all(ps[strategy_id == 2 & from == "s1" & to == "s2"]$mean == .7))
ps <- summary(p, id = tpmat_id, unflatten = TRUE)
expect_equal(nrow(ps), nrow(input_data))
expect_equal(
unlist(ps[strategy_id == 1]$mean),
rep(c(0.4, 0.0, 0.6, 1.0), times = 3)
)
expect_equal(
unlist(ps[strategy_id == 2]$mean),
rep(c(0.3, 0.0, 0.7, 1.0), times = 3)
)
})
test_that("summarize.tpmatrix() works with ID argument and time intervals" , {
x <- expand(h, by = c("strategies", "patients"), times = c(0, 2))
tpid <- tpmatrix_id(x, n_samples = 2)
p2 <- tpmatrix(
C, ifelse(tpid$strategy_id == 1, .6, .7),
0, 1
)
ps <- summary(p2, id = tpid, probs = .9)
expect_equal(colnames(ps), c("strategy_id", "patient_id",
"time_id", "time_start", "time_stop",
"from", "to", "mean", "sd", "90%"))
expect_equal(nrow(ps), nrow(x) * 4)
ps <- summary(p2, id = tpid, unflatten = TRUE)
expect_equal(nrow(ps), nrow(x))
expect_true(all(unlist(ps$sd) == 0))
expect_equal(
unlist(ps[strategy_id == 2]$mean),
rep(c(0.3, 0.0, 0.7, 1.0), times = 6)
)
})
strategies <- data.frame(strategy_id = c(1, 2))
patients <- data.frame(patient_id = seq(1, 3),
patient_wt = c(1/2, 1/4, 1/4),
gender = c("Female", "Female", "Male"))
hesim_dat <- hesim_data(strategies = strategies,
patients = patients)
test_that("tpmatrix_id() returns errror if 'object' is not right class." , {
expect_error(tpmatrix_id(2, 1))
})
test_that("tpmatrix_id() returns errror if 'object' is not expanded correctly." , {
object <- expand(hesim_dat, by = c("strategies"))
expect_error(
tpmatrix_id(object, 1),
paste0("'object' must be expanded by 'strategy_id', 'patient_id',",
" and optionally 'time_id'.")
)
})
test_that("tpmatrix_id() returns correct numnber or rows and is the right class." , {
input_data <- expand(hesim_dat, by = c("strategies", "patients"))
tpmat_id <- tpmatrix_id(input_data, n_samples = 2)
expect_equal(nrow(tpmat_id), nrow(input_data) * 2)
expect_true(inherits(tpmat_id, "tpmatrix_id"))
})
test_that("tpmatrix_id() returns correct columns." , {
input_data <- expand(hesim_dat, by = c("strategies", "patients"),
times = c(0, 2))
tpmat_id <- tpmatrix_id(input_data, n_samples = 1)
expect_equal(
colnames(tpmat_id),
c("sample", "strategy_id", "patient_id", "patient_wt", "time_id",
"time_start", "time_stop")
)
})
tmat <- rbind(c(NA, 1, 2),
c(3, NA, 4),
c(NA, NA, NA))
q12 <- c(.8, .7)
q13 <- c(.2, .3)
q21 <- c(.9, .8)
q23 <- c(1.1, 1.2)
q <- data.frame(q12, q13, q21, q23)
qmat <- qmatrix(q, trans_mat = tmat)
test_that("qmatrix() returns a 3D array" , {
expect_true(inherits(qmat, "array"))
expect_equal(length(dim(qmat)), 3)
})
test_that("qmatrix() returns the correct diagonals" , {
expect_equal(mean(apply(qmat, 3, rowSums)), 0, tolerance = 1e-5)
})
set.seed(101)
library("msm")
qinit <- rbind(
c(0, 0.28163, 0.01239),
c(0, 0, 0.10204),
c(0, 0, 0)
)
ptid <- sample(onc3p$patient_id, 200)
fit <- msm(state_id ~ time, subject = patient_id,
data = onc3p[patient_id %in% ptid],
covariates = ~ age + strategy_name, qmatrix = qinit)
test_that("qmatrix.msm() works with factor covariates and 'newdata' is one row" , {
newdata <- data.frame(strategy_name = "New 1", age = 50)
expect_equal(
msm::qmatrix.msm(fit, newdata[1, , drop = FALSE], ci = "none"),
qmatrix(fit, newdata, uncertainty = "none")[, , 1],
check.attributes = FALSE
)
})
test_that("qmatrix.msm() works with factor covariates and 'newdata' is multiple rows" , {
newdata <- data.frame(strategy_name = c("New 1", "New 2"),
age = c(50, 55))
expect_equal(
msm::qmatrix.msm(fit, newdata[2, , drop = FALSE], ci = "none"),
qmatrix(fit, newdata, uncertainty = "none")[, , 2],
check.attributes = FALSE
)
})
test_that("qmatrix.msm() works with covariates that vary by transition" , {
fit <- update(fit, covariates = list("1-2" = ~ strategy_name + age))
newdata <- data.frame(strategy_name = c("New 1", "New 2"),
age = c(50, 55))
expect_equal(
msm::qmatrix.msm(fit, newdata[2, , drop = FALSE], ci = "none"),
qmatrix(fit, newdata, uncertainty = "none")[, , 2],
check.attributes = FALSE
)
})
test_that("qmatrix.msm() works with a hidden Markov model" , {
qinith <- rbind(
c(0, exp(-6), exp(-9)),
c(0, 0, exp(-6)),
c(0, 0, 0)
)
hmod <- list(
hmmNorm(mean = 100, sd = 16),
hmmNorm(mean = 54, sd = 18),
hmmIdent(999)
)
fith <- msm(fev ~ days, subject = ptnum,
data = fev[fev$ptnum %in% 1:20, ],
qmatrix = qinith,
covariates = ~acute,
hmodel = hmod,
hcovariates = list(~ acute, ~ acute, NULL),
hconstraint = list(acute = c(1, 1)),
death = 3,
method = "BFGS")
expect_equal(
msm::qmatrix.msm(fith, covariates = list(acute = 0), ci = "none"),
qmatrix(fith, data.frame(acute = 0), uncertainty = "none")[,,1],
check.attributes = FALSE
)
})
test_that("qmatrix.msm() returns correct number of matrices with uncertainy = 'normal'" , {
newdata <- data.frame(strategy_name = c("New 1"), age = c(55))
sim <- qmatrix(fit, newdata, uncertainty = "normal", n = 5)
expect_true(dim(sim)[3] == 5)
})
test_that("qmatrix.msm() requires 'newdata' if covariates are included in the model." , {
expect_error(
qmatrix(fit),
"'newdata' cannot be NULL if covariates are included in 'x'."
)
})
test_that("qmatrix.msm() does not require 'newdata' if no covariates are included in the model." , {
fit <- update(fit, covariates =~ 1)
expect_equal(
msm::qmatrix.msm(fit, ci = "none"),
qmatrix(fit, uncertainty = "none")[,,1],
check.attributes = FALSE
)
})
test_that("expmat() returns an array where rows sum to 1." , {
p <- expmat(qmat)
expect_true(inherits(p, "array"))
row_sums <- c(apply(p, 3, rowSums))
expect_equal(mean(row_sums), 1, tolerance = .001, scale = 1)
})
test_that("expmat() works with matrix input." , {
expect_true(inherits(expmat(qmat[,,1]), "array"))
})
test_that("expmat() works with t as vector" , {
p <- expmat(qmat, t = c(1, 1))
expect_equal(dim(p)[3], dim(qmat)[3] * 2)
expect_equal(p[,, 1], p[,, 2])
expect_equal(p[,, 3], p[,, 4])
})
test_that("expmat() is consisten with matrix multiplication " , {
z <- diag(1, 3)
p <- expmat(qmat, t = c(1, 2))
expect_equal(
z %*% p[,, 1] %*% p[, ,1],
p[,, 2]
)
})
test_that("expmat() returns error if x is not an array" , {
expect_error(
expmat("Test error"),
"'x' must be an array."
)
})
test_that("as_array3() returns a 3D array if 'x' is a square matrix", {
expect_equal(
dim(as_array3(matrix(1:16, 4, 4))),
c(2, 2, 4)
)
})
test_that("as_array3() throws error if 'x' is not a square matrix", {
expect_error(
as_array3(matrix(1:4, 2, 2)),
"'x' must contain square matrices."
)
})
p_12 <- c(.7, .5)
p_23 <- c(.1, .2)
pmat <- as_array3(tpmatrix(
C, p_12, .1,
0, C, p_23,
0, 0, 1
))
rr_12 <- runif(4, .8, 1)
rr_13 <- runif(4, .9, 1)
rr <- cbind(rr_12, rr_13)
pmat2 <- apply_rr(pmat, rr,
index = list(c(1, 2), c(1, 3)),
complement = list(c(1, 1), c(2, 2)))
test_that("apply_rr() correctly multiplies relative risks" , {
expect_equal(pmat2[1, 2, ], rr_12 * pmat[1, 2, ])
expect_equal(pmat2[1, 3, ], rr_13 * pmat[1, 3, ])
})
test_that("Row sums are correct with apply_rr()" , {
row_sums <- c(apply(pmat2, 3, rowSums))
expect_equal(mean(row_sums), 1, tolerance = .001, scale = 1)
})
test_that("'index' argument in apply_rr() has correct dimensions" , {
expect_error(
apply_rr(pmat, rr,
index = list(c(1, 2), c(1, 3), c(2, 1)),
complement = list(c(1, 1), c(2, 2))),
paste0("'index' must contain the same number of matrix elements as the ",
"number of columns in 'rr'.")
)
})
test_that("'complement' argument in apply_rr() must have correct number of matrix elements" , {
expect_error(
apply_rr(pmat, rr,
index = list(c(1, 2), c(1, 3)),
complement = list(c(1, 1), c(2, 2), c(3, 3), c(4, 4))),
paste0("The number of matrix elements in 'complement' cannot be larger than the ",
"number of rows in 'x'.")
)
})
test_that("apply_rr() can only have one complementary column for each row in matrix" , {
expect_error(
apply_rr(pmat, rr,
index = list(c(1, 2), c(1, 3)),
complement = list(c(1, 1), c(1,2))),
"There can only be one complementary column in each row."
)
})
|
library(act)
rpraat.tg <- act::export_rpraat(t=examplecorpus@transcripts[[1]])
## Not run:
# rPraat::tg.plot(rpraat.tg)
## End(Not run)
|
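# Print method for "naive_bayes" objects: a centered banner, the matched call,
# the Laplace smoothing constant, the prior class probabilities, and up to the
# first five conditional probability tables.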
print.naive_bayes <- function (x, ...) {
model <- "Naive Bayes"
n_char <- getOption("width")
str_left_right <- paste0(rep("=", floor((n_char - nchar(model)) / 2)),
collapse = "")
str_full <- paste0(str_left_right, " ", model, " ",
ifelse(n_char %% 2 != 0, "=", ""), str_left_right)
len <- nchar(str_full)
l <- paste0(rep("-", len), collapse = "")
cat("\n")
cat(str_full, "\n", "\n", "Call:", "\n")
print(x$call)
cat("\n")
cat(l, "\n", "\n")
cat( "Laplace smoothing:", x$laplace)
cat("\n")
cat("\n")
cat(l, "\n", "\n")
cat(" A priori probabilities:", "\n")
print(x$prior)
cat("\n")
cat(l, "\n", "\n")
cat(" Tables:", "\n")
tabs <- x$tables
n <- length(x$tables)
indices <- seq_len(min(5,n))
tabs <- tabs[indices]
print(tabs)
if (n > 5) {
cat("\n\n")
cat("
cat(l)
}
cat("\n\n")
}
|
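# Thin wrapper around calculate_carb() that forwards the carbonate-system
# inputs unchanged and requests the full result set (fullresult = TRUE) with no
# boron addition (badd = 0).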
carbfull<-
function(flag, var1, var2, S=35, T=25, Patm=1, P=0, Pt=0, Sit=0, k1k2='x', kf='x', ks="d", pHscale="T", b="u74", gas="potential",
NH4t = 0, HSt = 0){
RES <- calculate_carb(flag, var1, var2, S, T, Patm, P, Pt, Sit, NH4t, HSt, k1k2, kf, ks, pHscale, b, gas, badd=0, fullresult=TRUE)
return(RES)
}
|
library(HRW) ; library(mgcv)
data(WarsawApts)
cex.axisVal <- 1.8 ; cex.labVal <- 1.8
cex.legendVal <- 1.3 ; lwdVal <- 2
fitSimpSemi <- gam(areaPerMzloty ~ factor(district) +
s(construction.date,bs = "cr",k = 27),
data = WarsawApts)
print(summary(fitSimpSemi))
construcDate <- WarsawApts$construction.date
areaPerMzloty <- WarsawApts$areaPerMzloty
districtChar <- as.character(WarsawApts$district)
par(mai = c(1.02,0.9,0.82,0.42))
plot(construcDate,areaPerMzloty,type="n",bty="l",
xlab="construction date (year)",
ylab="area (square meters) per million zloty",
cex.axis = cex.axisVal,cex.lab = cex.labVal)
myPtCols <- c("deepskyblue","salmon","green3","gold")
myLnCols <- c("blue","red","darkgreen","darkorange")
points(construcDate[districtChar == "Mokotow"],
areaPerMzloty[districtChar == "Mokotow"],
lwd = lwdVal,col = myPtCols[1])
points(construcDate[districtChar == "Srodmiescie"],
areaPerMzloty[districtChar == "Srodmiescie"],
lwd = lwdVal,col = myPtCols[2])
points(construcDate[districtChar == "Wola"],
areaPerMzloty[districtChar == "Wola"],
lwd = lwdVal,col = myPtCols[3])
points(construcDate[districtChar == "Zoliborz"],
areaPerMzloty[districtChar == "Zoliborz"],
lwd = lwdVal,col = myPtCols[4])
ng <- 1001
construcDateg <- seq(min(construcDate),max(construcDate),length = ng)
fHatgMokotow <- predict(fitSimpSemi, newdata = data.frame(
construction.date = construcDateg,
district = rep("Mokotow",ng)))
lines(construcDateg,fHatgMokotow,col = myLnCols[1])
fHatgSrodmiescie <- predict(fitSimpSemi, newdata = data.frame(
construction.date = construcDateg,
district = rep("Srodmiescie",ng)))
lines(construcDateg,fHatgSrodmiescie,col = myLnCols[2])
fHatgWola <- predict(fitSimpSemi, newdata = data.frame(
construction.date = construcDateg,
district = rep("Wola",ng)))
lines(construcDateg,fHatgWola,col = myLnCols[3])
fHatgZoliborz <- predict(fitSimpSemi, newdata = data.frame(
construction.date = construcDateg,
district = rep("Zoliborz",ng)))
lines(construcDateg,fHatgZoliborz,col = myLnCols[4])
legend(1971,80,legend = c("Mokotow","Srodmiescie","Wola","Zoliborz"),
col = myPtCols,pch = rep(1,4),cex = cex.legendVal,
pt.cex = rep(1,4),pt.lwd = rep(lwdVal,4))
library(lattice)
tmp <- trellis.par.get("add.text")
tmp$cex <- 1.5
trellis.par.set("add.text",tmp)
pobj <- xyplot(areaPerMzloty ~ construction.date|district,
data = WarsawApts,as.table=TRUE,
par.settings = list(layout.heights
=list(strip=1.4)),
scales = list(cex = 1.25),
xlab= list("construction date (year)",cex=cex.labVal),
ylab= list("area (square meters) per million zloty",cex=cex.labVal),
panel=function(x,y)
{
panel.grid()
panel.xyplot(x,y)
if (panel.number() == 1)
panel.xyplot(construcDateg,fHatgMokotow,
col = "darkgreen",lwd = 2,type = "l")
if (panel.number() == 2)
panel.xyplot(construcDateg,fHatgSrodmiescie,
col = "darkgreen",lwd = 2,type = "l")
if (panel.number() == 3)
panel.xyplot(construcDateg,fHatgWola,
col = "darkgreen",lwd = 2,type = "l")
if (panel.number() == 4)
panel.xyplot(construcDateg,fHatgZoliborz,
col = "darkgreen",lwd = 2,type = "l")
}
)
print(pobj)
|
require("GPArotation")
f3 <- structure(c(0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0,0),
.Dim = c(6L, 3L), .Dimnames = list(NULL, c("PC1", "PC2", "PC3")))
f3
GPForth(f3)
Varimax(f3)
|
renv_cli_install <- function(target = NULL) {
exe <- if (renv_platform_windows()) "bin/renv.bat" else "bin/renv"
path <- system.file(exe, package = "renv")
target <- target %||% path.expand("~/bin/renv")
ensure_parent_directory(target)
file.copy(path, target)
writef("* renv binary copied to %s.", renv_path_pretty(target))
invisible(target)
}
renv_cli_exec <- function(clargs = commandArgs(trailingOnly = TRUE)) {
invisible(renv_cli_exec_impl(clargs))
}
renv_cli_exec_impl <- function(clargs) {
usage <-
length(clargs) == 0 ||
clargs[1L] %in% c("help", "--help")
if (usage)
return(renv_cli_usage())
method <- clargs[1L]
help <-
clargs[2L] %in% c("help", "--help")
if (help)
return(renv_cli_help(method))
exports <- getNamespaceExports("renv")
if (!method %in% exports)
return(renv_cli_unknown(method, exports))
args <- list(call("::", as.name("renv"), as.name(method)))
for (clarg in clargs[-1L]) {
if (grepl("^--no-", clarg)) {
key <- substring(clarg, 6L)
args[[key]] <- FALSE
}
else if (grepl("^--[^=]+=", clarg)) {
index <- regexpr("=", clarg, fixed = TRUE)
key <- substring(clarg, 3L, index - 1L)
val <- substring(clarg, index + 1L)
args[[key]] <- renv_cli_parse(val)
}
else if (grepl("^--", clarg)) {
key <- substring(clarg, 3L)
args[[key]] <- TRUE
}
else if (grepl("=", clarg, fixed = TRUE)) {
index <- regexpr("=", clarg, fixed = TRUE)
key <- substring(clarg, 1L, index - 1L)
val <- substring(clarg, index + 1L)
args[[key]] <- renv_cli_parse(val)
}
else {
args[[length(args) + 1L]] <- renv_cli_parse(clarg)
}
}
expr <- as.call(args)
eval(expr = expr, envir = globalenv())
}
renv_cli_usage <- function() {
usage <- "
Usage: renv [method] [args...]
[method] should be the name of a function exported from renv.
[args...] should be arguments accepted by that function.
Use renv [method] --help for more information about the associated function.
Examples:
renv init
renv snapshot
renv restore
renv status
renv install dplyr
renv run path/to/script.R
"
writeLines(usage, con = stderr())
}
renv_cli_help <- function(method) {
print(help(method, package = "renv"))
}
renv_cli_unknown <- function(method, exports) {
fmt <- "renv: '%s' is not a known command."
writef(fmt, method, con = stderr())
distance <- c(adist(method, exports))
names(distance) <- exports
n <- min(distance)
if (n > 2)
return(1L)
candidates <- names(distance)[distance == n]
fmt <- "did you mean %s?"
vwritef(fmt, paste(shQuote(candidates), collapse = " or "))
return(1L)
}
renv_cli_parse <- function(text) {
if (text %in% c("true", "True", "TRUE"))
return(TRUE)
else if (text %in% c("false", "False", "FALSE"))
return(FALSE)
value <- parse(text = text)[[1L]]
if (is.language(value)) text else value
}
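# For example, renv_cli_parse("true") yields TRUE, renv_cli_parse("1.5") yields
# the number 1.5, and renv_cli_parse("dplyr") stays the string "dplyr" because
# it parses to a symbol.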
|
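# The plotting helpers below assume ggplot2 is attached and that melt() comes
# from reshape2 (or data.table); neither package is loaded here.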
plotregression <- function(dat){
regdf <- NULL
regdf$X <- dat$X
regdf$Y <- dat$Y
regdf$Yhat <- dat$Yhat
regdf$time <- dat$res$time
regdf <- as.data.frame(regdf)
meltdf <- melt(regdf, id.vars = "time")
p1 <- ggplot(meltdf, aes_string("time", "value", col = "variable")) +
geom_line(size = 2) + theme_bw()
print(p1)
}
plotrho <- function(dat){
rhodf <- NULL
rhodf$time <- dat$time
rhodf$rho <- 1/dat$rho
rhodf <- as.data.frame(rhodf)
p2 <- ggplot(rhodf, aes_string("time", "rho")) + geom_line(size = 2) +
theme_bw() + ggtitle(bquote(bar(rho) == .(signif(mean(1/dat$rho),
2)))) + ylab(bquote(1/rho))
print(p2)
}
plotsbar <- function(dat){
loglikdf <- NULL
loglikdf$sbar <- dat$Smean
loglikdf$loglik <- dat$loglik
loglikdf <- as.data.frame(loglikdf)
p9 <- ggplot(loglikdf, aes_string("sbar", "loglik")) +
geom_line() + geom_point() + theme_bw() + geom_vline(xintercept = dat$sbar, linetype = "longdash") +
ggtitle(bquote(bar(S) == .(signif(dat$sbar, 2)) ~ "," ~ .(signif(dat$sbar/mean(dat$pop) * 100, 2)) ~ "%")) + xlab(bquote(bar(S)))
print(p9)
}
plotbeta <- function(dat){
betadf <- dat$contact
betadf <- as.data.frame(betadf)
p4 <- ggplot(betadf, aes_string("time", "beta")) + geom_line(size = 2) +
theme_bw() + ggtitle(bquote(bar(beta) == .(signif(mean(dat$beta),
2)) ~ "," ~ alpha == .(signif(dat$alpha, 3)))) +
ylab(bquote(beta))
if ("contact" %in% names(dat)) {
p4 <- ggplot(betadf, aes_string("time", "beta")) +
geom_line(size = 2) + geom_ribbon(aes_(ymin = ~betalow,
ymax = ~betahigh), alpha = 0.5, col = "dodgerblue",
fill = "dodgerblue") + ylim(c(min(dat$contact$betalow),
max(dat$contact$betahigh))) + theme_bw() + ggtitle(bquote(bar(beta) ==
.(signif(mean(dat$beta), 2)) ~ "," ~ alpha ==
.(signif(dat$alpha, 3)))) + ylab(bquote(beta))
if (anyNA(dat$contact)) {
p4 <- ggplot(betadf, aes_string("time", "beta")) +
geom_line(size = 2) + theme_bw() + ggtitle(bquote(bar(beta) ==
.(signif(mean(dat$beta), 2)) ~ "," ~ alpha ==
.(signif(dat$alpha, 3)))) + ylab(bquote(beta))
}
}
p4 <- p4 + xlab(sprintf("time mod %g", nrow(dat$contact)))
print(p4)
}
plotforward <- function(dat,inverse = F){
drops <- c("mean", "sd", "error", "cases", "time")
sim.only <- dat$res[, !(names(dat$res) %in% drops)]
n <- ncol(sim.only)
error <- qt(0.975, df = n - 1) * dat$res$sd/sqrt(n)
dat$res$error <- error
eb <- aes(ymax = mean + error, ymin = mean - error)
p6 <- ggplot(data = dat$res, aes_string("time")) + theme(legend.position = "none") +
geom_line(aes_string(y = "cases"), colour = "dodgerblue",
size = 1) + xlab("year") + ylab("cases") + geom_line(aes_string(y = "mean"),
colour = "orangered4", size = 1) + geom_ribbon(eb,
alpha = 0.3) + theme_bw()
inversecases <- dat$res
inversecases$cases <- -dat$res$cases
p7 <- ggplot(data = inversecases, aes_string("time")) +
theme(legend.position = "none") + geom_line(aes_string(y = "cases"),
colour = "dodgerblue", size = 1) + xlab("time") +
ylab("cases") + geom_line(aes_string(y = "mean"),
colour = "orangered4", size = 1) + geom_ribbon(eb,
alpha = 0.3) + theme_bw()
if(inverse){
print(p7)
}else{
print(p6)
}
}
|
context("tidy_table()")
test_that("tidy_table works with or without row and column names", {
expect_equal(nrow(tidy_table(Formaldehyde)), 12)
expect_equal(nrow(tidy_table(Formaldehyde, col_names = TRUE)), 14)
expect_equal(nrow(tidy_table(Formaldehyde, row_names = TRUE)), 18)
expect_equal(nrow(tidy_table(Formaldehyde, row_names = TRUE, col_names = TRUE)), 20)
})
test_that("tidy_table works with html tables", {
rowspan <- system.file("extdata", "rowspan.html", package = "unpivotr")
colspan <- system.file("extdata", "colspan.html", package = "unpivotr")
rowandcolspan <- system.file("extdata", "row-and-colspan.html", package = "unpivotr")
nested <- system.file("extdata", "nested.html", package = "unpivotr")
rowspan_correct <-
list(tibble::tribble(
~ row, ~ col, ~ data_type, ~ html,
1L, 1L, "html", "<th rowspan=\"2\">Header (1:2, 1)</th>",
2L, 1L, "html", NA,
1L, 2L, "html", "<th>Header (1, 2)</th>",
2L, 2L, "html", "<td>cell (2, 2)</td>"))
colspan_correct <-
list(tibble::tribble(
~ row, ~ col, ~ data_type, ~ html,
1L, 1L, "html", "<th colspan=\"2\">Header (1, 1:2)</th>",
2L, 1L, "html", "<td>cell (2, 1)</td>",
1L, 2L, "html", NA,
2L, 2L, "html", "<td>cell (2, 2)</td>")
)
rowandcolspan_correct <-
list(tibble::tribble(
~ row, ~ col, ~ data_type, ~ html,
1L, 1L, "html", "<th colspan=\"2\" rowspan=\"2\">Header (1:2, 1:2)</th>",
2L, 1L, "html", NA,
1L, 2L, "html", NA,
2L, 2L, "html", NA,
1L, 3L, "html", "<th>Header (2, 3)</th>",
2L, 3L, "html", "<td>cell (3, 1)</td>",
1L, 4L, "html", NA,
2L, 4L, "html", "<td>cell (3, 2)</td>",
1L, 5L, "html", NA,
2L, 5L, "html", "<td>cell (3, 3)</td>")
)
nested_correct <-
list(tibble::tribble(
~ row, ~ col, ~ data_type, ~ html,
1L, 1L, "html", "<th>Header (2, 2)(1, 1)</th>",
2L, 1L, "html", "<td>cell (2, 2)(2, 1)</td>",
1L, 2L, "html", "<th>Header (2, 2)(1, 2)</th>",
2L, 2L, "html", "<td>cell (2, 2)(2, 1)</td>")
)
rowspan_parsed <-
rowspan %>%
xml2::read_html() %>%
tidy_table()
colspan_parsed <-
colspan %>%
xml2::read_html() %>%
tidy_table()
rowandcolspan_parsed <-
rowandcolspan %>%
xml2::read_html() %>%
tidy_table()
nested_parsed <-
nested %>%
xml2::read_html() %>%
tidy_table() %>%
.[[1]] %>%
.$html %>%
.[4] %>%
xml2::read_html() %>%
tidy_table()
expect_identical(rowspan_parsed, rowspan_correct)
expect_identical(colspan_parsed, colspan_correct)
expect_identical(rowandcolspan_parsed, rowandcolspan_correct)
expect_identical(nested_parsed, nested_correct)
})
test_that("tidy_table works with all common datatypes", {
x <- tibble::tibble(
lgl = c(TRUE, FALSE),
int = c(1L, 2L),
dbl = c(1, 2),
cpl = c(1i, 2i),
date = c(as.Date("2001-01-01"), as.Date("2001-01-02")),
dttm = c(
as.POSIXct("2001-01-01 01:01:01"),
as.POSIXct("2001-01-01 01:01:02")
),
chr = c("a", "b"),
list = list(1:2, letters[1:2])
)
y <- tidy_table(x, col_names = TRUE)
expect_equal(colnames(y), c("row", "col", "data_type", sort(colnames(x))))
x_class <- purrr::map(x, class)
y_class <- purrr::map(y, class)
expect_equal(y_class[names(x_class)], x_class)
x <- tibble::tibble(
fct = factor(c("a", "b")),
ord = factor(c("c", "d"), ordered = TRUE),
list = list(1:2, list("a", "b"))
)
y <- tidy_table(x)
expect_equal(colnames(y), c("row", "col", "data_type", sort(colnames(x))))
expect_equal(class(y$fct), "list")
expect_equal(class(y$ord), "list")
expect_equal(class(y$list), "list")
expect_equal(y$fct[[1]], factor("a", levels = c("a", "b")))
expect_equal(y$fct[[3]], NULL)
expect_equal(y$ord[[2]], NULL)
expect_equal(y$ord[[4]], factor("d", levels = c("c", "d"), ordered = TRUE))
expect_equal(y$list[[4]], NULL)
expect_equal(y$list[[5]], 1:2)
expect_equal(y$list[[6]], list("a", "b"))
})
|
"negex"
|
suppress_warnings <- function(.expr, .f) {
eval.parent(substitute(
withCallingHandlers( .expr, warning = function(w) {
cm <- conditionMessage(w)
cond <- grepl(.f, cm)
if (cond) {
invokeRestart("muffleWarning")
}
})
))
}
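# Minimal usage sketch: muffle only warnings whose message matches the regular
# expression .f and let all others propagate, e.g.
# suppress_warnings(as.numeric(c("1", "a")), "NAs introduced by coercion")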
|
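# Parses a Metlin MS/MS XML export into a data frame with one row per record;
# xmlTreeParse(), xmlChildren() and xmlValue() require the XML package to be
# attached.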
readMetlin =
function(url = "http://metlin.scripps.edu/download/MSMS_test.XML",
what = c("molid" = "integer", name = "character", formula = "character", mass = "numeric", mz = "numeric"))
{
doc = xmlTreeParse(url, useInternal = TRUE)
z = xmlChildren(xmlRoot(doc))
nodes = z[ sapply(z, inherits, "XMLInternalElementNode") ]
if(length(nodes) > 1) {
}
nodes = nodes[[1]]
n = xmlSize(nodes)
ans = as.data.frame(lapply(what, function(x) get(x)(n)))
for(i in 1:n) {
node = nodes[[i]]
sapply(names(what),
function(id) {
ans[i, id] <<- as(xmlValue(node[[id]]), what[id])
})
}
ans
}
|
print.gresponse.dmm <-
function(x, ...)
{
cat("Call:\n")
print(x$call)
cat("\nPredicted response to selection using component(s) ",x$effects,"\n")
cat("\nGenetic selection differentials achieved by given psd:\n\n")
cat("Overall:\n")
print(x$overall,digits=x$digits)
cat("\n")
}
|
cma.delta.ts.arp.error.lm.ts.logLik <-
function(dat,delta=0,p=p,error.indep=FALSE,error.var.equal=FALSE)
{
re<-cma.delta.ts.arp.error.lm.ts(dat,delta=delta,p=p,error.indep=error.indep,error.var.equal=error.var.equal)
return(re$logLik.lm)
}
|
bibentries = c(
breiman_2001 = bibentry("article",
title = "Random Forests",
author = "Breiman, Leo",
year = "2001",
journal = "Machine Learning",
volume = "45",
number = "1",
pages = "5--32",
doi = "10.1023/A:1010933404324",
issn = "1573-0565"
),
chen_2016 = bibentry("inproceedings",
title = "Xgboost: A scalable tree boosting system",
author = "Chen, Tianqi and Guestrin, Carlos",
year = "2016",
booktitle = "Proceedings of the 22nd ACM SIGKDD Conference on Knowledge Discovery and Data Mining",
pages = "785--794",
doi = "10.1145/2939672.2939785",
organization = "ACM"
),
cortes_1995 = bibentry("article",
title = "Support-vector networks",
author = "Cortes, Corinna and Vapnik, Vladimir",
year = "1995",
month = "sep",
day = "1",
journal = "Machine Learning",
volume = "20",
number = "3",
pages = "273--297",
doi = "10.1007/BF00994018"
),
cover_1967 = bibentry("article",
title = "Nearest neighbor pattern classification",
author = "Cover, Thomas and Hart, Peter",
year = "1967",
journal = "IEEE transactions on information theory",
publisher = "IEEE",
volume = "13",
number = "1",
pages = "21--27",
doi = "10.1109/TIT.1967.1053964"
),
friedman_2010 = bibentry("article",
title = "Regularization Paths for Generalized Linear Models via Coordinate Descent",
author = "Jerome Friedman and Trevor Hastie and Robert Tibshirani",
year = "2010",
journal = "Journal of Statistical Software",
volume = "33",
number = "1",
pages = "1--22",
doi = "10.18637/jss.v033.i01"
),
hechenbichler_2004 = bibentry("techreport",
title = "Weighted k-nearest-neighbor techniques and ordinal classification",
author = "Hechenbichler, Klaus and Schliep, Klaus",
year = "2004",
number = "Discussion Paper 399, SFB 386",
doi = "10.5282/ubm/epub.1769",
institution = "Ludwig-Maximilians University Munich"
),
ripley_1996 = bibentry("book",
doi = "10.1017/cbo9780511812651",
year = "1996",
month = "jan",
publisher = "Cambridge University Press",
author = "Brian D. Ripley",
title = "Pattern Recognition and Neural Networks"
),
roustant_2012 = bibentry("article",
title = "{DiceKriging}, {DiceOptim}: Two {R} Packages for the Analysis of Computer Experiments by Kriging-Based Metamodeling and Optimization",
author = "Olivier Roustant and David Ginsbourger and Yves Deville",
year = "2012",
journal = "Journal of Statistical Software",
volume = "51",
number = "1",
pages = "1--55",
doi = "10.18637/jss.v051.i01"
),
samworth_2012 = bibentry("article",
title = "Optimal weighted nearest neighbour classifiers",
author = "Samworth, Richard J",
year = "2012",
journal = "The Annals of Statistics",
volume = "40",
number = "5",
pages = "2733--2763",
doi = "10.1214/12-AOS1049"
),
venables_2002 = bibentry("book",
title = "Modern Applied Statistics with S",
author = "W. N. Venables and B. D. Ripley",
year = "2002",
publisher = "Springer",
address = "New York",
url = "http://www.stats.ox.ac.uk/pub/MASS4/",
note = "ISBN 0-387-95457-0",
edition = "Fourth"
),
wright_2017 = bibentry("article",
title = "{ranger}: A Fast Implementation of Random Forests for High Dimensional Data in {C++} and {R}",
author = "Wright, Marvin N. and Ziegler, Andreas",
year = "2017",
journal = "Journal of Statistical Software",
volume = "77",
number = "1",
pages = "1--17",
doi = "10.18637/jss.v077.i01"
)
)
|
context("test_numerics_manipulations.R")
requireNamespace("data.table")
verbose <- TRUE
test_that("find_and_transform_numerics: find and transform to numeric columns that are hidden
in string wheter they have decimal separator ',' or '.'", {
data_set <- data.table(col1 = c("1.2", "1.3", "1.2", "1", "6"),
col2 = c("1,2", "1,3", "1,2", "1", "6"))
data_transformed <- find_and_transform_numerics(data_set, n_test = 5, verbose = verbose)
expect_true(is.numeric(data_transformed[["col1"]]))
expect_true(is.numeric(data_transformed[["col2"]]))
})
test_that("find_and_transform_numerics: doesn't transform to numeric character cols", {
data_set <- data.table(character_col = c("A", "B"))
data_transformed <- find_and_transform_numerics(data_set, n_test = 2, verbose = verbose)
expect_true(is.character(data_transformed[["character_col"]]))
})
test_that("private function identify_numerics: find numerics wheter they have decimal separator ',' or '.'", {
data_set <- data.table(col1 = c("1.2", "1.3", "1.2", "1", "6"),
col2 = c("1,2", "1,3", "1,2", "1", "6"))
numeric_cols <- identify_numerics(data_set, n_test = 5, verbose = verbose)
expect_equal(2, length(numeric_cols))
expect_equal("col1", numeric_cols$dont_strip)
expect_equal("col2", numeric_cols$strip)
expect_identical(identify_numerics(data_set, cols = list(), n_test = 5, verbose = verbose),
list(dont_strip = NULL, strip = NULL))
})
test_that("private function identify_numerics: if told to do nothing, do nothing.", {
data_set <- data.table(col1 = c("1.2", "1.3", "1.2", "1", "6"))
numeric_cols <- identify_numerics(data_set, cols = list(), n_test = 5, verbose = verbose)
expect_identical(numeric_cols, list(dont_strip = NULL, strip = NULL))
})
test_that("private function: identify_numerics_formats: give notstrip when numeric col hiden in character
with '.' decimal separator is thrown", {
data_set <- data.table(col = c("1.2", "1.3", "1.2", "1", "6"))
result <- identify_numerics_formats(data_set$col)
expect_equal(NUMERIC_COL_NOT_TO_STRIP, result)
})
test_that("private function: identify_numerics_formats: give strip when numeric col hiden in character with ','
decimal separator is thrown", {
data_set <- data.table(col = c("1,2", "1,3", "1,2", "1", "6"))
result <- identify_numerics_formats(data_set$col)
expect_equal(NUMERIC_COL_TO_STRIP, result)
})
test_that("private function: identify_numerics_formats: give 'Not a numeric' when col doesn't contain hidden numeric", {
data_set <- data.table(col = LETTERS)
result <- identify_numerics_formats(data_set$col)
expect_equal("Not a numeric", result)
})
test_that("private function: identify_numerics_formats: should throw error when called on not character col", {
data_set <- data.table(col = factor(c(1, 2, 3)))
expect_error(identify_numerics_formats(data_set$col),
"identify_numerics_formats: data_set should be some characters")
})
test_that("private function as.numeric_strip: should convert character containing a numeric with ','
decimal seprator into correct numeric", {
char_num <- "1,2"
expected_result <- 1.2
result <- as.numeric_strip(char_num)
expect_equal(expected_result, result)
})
|
tar_test("tar_exist_objects()", {
dir_create(dirname(path_objects(path_store_default(), "x")))
file.create(path_objects(path_store_default(), "x"))
expect_equal(tar_exist_objects(c("y", "x")), c(FALSE, TRUE))
})
tar_test("custom script and store args", {
skip_on_cran()
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
tar_script(tar_target(x, 1), script = "example/script.R")
expect_false(tar_exist_objects("x", store = "example/store"))
expect_false(file.exists("example/store"))
tar_make(
callr_function = NULL,
script = "example/script.R",
store = "example/store"
)
expect_true(tar_exist_objects("x", store = "example/store"))
expect_true(file.exists("example/store"))
expect_false(file.exists("_targets.yaml"))
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
expect_false(file.exists(path_script_default()))
expect_false(file.exists(path_store_default()))
expect_true(file.exists("example/script.R"))
tar_config_set(script = "x")
expect_equal(tar_config_get("script"), "x")
expect_true(file.exists("_targets.yaml"))
})
|
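# Compares two link sets of the same Graphab project: buffers every link,
# intersects the buffers link by link to quantify spatial overlap, and returns
# the overlap table plus the correlation between the two sets of cost
# distances.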
link_compar <- function(proj_name,
linkset1,
linkset2,
buffer_width = 200,
min_length = NULL,
proj_path = NULL){
if(!is.null(proj_path)){
chg <- 1
wd1 <- getwd()
setwd(dir = proj_path)
} else {
chg <- 0
proj_path <- getwd()
}
if(!inherits(proj_name, "character")){
if(chg == 1){setwd(dir = wd1)}
stop("'proj_name' must be a character string")
} else if (!(paste0(proj_name, ".xml") %in% list.files(path = paste0("./", proj_name)))){
if(chg == 1){setwd(dir = wd1)}
stop("The project you refer to does not exist.
Please use graphab_project() before.")
}
proj_end_path <- paste0(proj_name, "/", proj_name, ".xml")
if(!inherits(linkset1, "character")){
if(chg == 1){setwd(dir = wd1)}
stop("'linkset1' must be a character string specifying the name of the
first link set involved in the comparison.")
} else if (!(paste0(linkset1, "-links.csv") %in% list.files(path = paste0("./", proj_name)))){
if(chg == 1){setwd(dir = wd1)}
stop("The linkset you refer to does not exist.
Please use graphab_link() before.")
}
if(!inherits(linkset2, "character")){
if(chg == 1){setwd(dir = wd1)}
stop("'linkset2' must be a character string specifying the name of the
second link set involved in the comparison.")
} else if (!(paste0(linkset2, "-links.csv") %in% list.files(path = paste0("./", proj_name)))){
if(chg == 1){setwd(dir = wd1)}
stop("The linkset you refer to does not exist.
Please use graphab_link() before.")
}
if(!inherits(buffer_width, c("numeric", "integer"))){
if(chg == 1){setwd(dir = wd1)}
stop("'buffer_width' must be a numeric or integer value")
}
if(!is.null(min_length)){
if(!inherits(min_length, c("numeric", "integer"))){
if(chg == 1){setwd(dir = wd1)}
stop("'min_length' must be a numeric or an integer threshold value.")
}
}
ls1 <- suppressWarnings(sf::as_Spatial(sf::st_read(dsn = paste0(getwd(),
"/", proj_name),
layer = paste0(linkset1,
"-links"))))
ls2 <- suppressWarnings(sf::as_Spatial(sf::st_read(dsn = paste0(getwd(),
"/", proj_name),
layer = paste0(linkset2,
"-links"))))
if(!is.null(min_length)){
ls1 <- ls1[which(ls1$DistM >= min_length), ]
ls2 <- ls2[which(ls2$DistM >= min_length), ]
}
if(nrow(ls1) != nrow(ls2)){
if(chg == 1){setwd(dir = wd1)}
stop("'linkset1' and 'linkset2' must have the same number of links.")
} else if(!(all(ls1$Id %in% ls2$Id))){
if(chg == 1){setwd(dir = wd1)}
stop("'linkset1' and 'linkset2' must have the same link IDs.")
}
print(paste0("The buffer width on each side of the links has been set to ",
buffer_width, " m."))
ls1_b <- raster::buffer(ls1, width = buffer_width, dissolve = FALSE)
ls2_b <- raster::buffer(ls2, width = buffer_width, dissolve = FALSE)
data1 <- ls1_b@data
data2 <- ls2_b@data
df_res <- data.frame(id_link = NA,
area_1 = NA,
area_2 = NA,
cost_dist_1 = NA,
cost_dist_2 = NA,
euc_dist_1 = NA,
euc_dist_2 = NA,
area_overlap = NA)
df_res <- df_res[-1, ]
for(i in 1:nrow(ls1_b)){
id <- data1[i, 'Id']
ls1_bi <- ls1_b[which(data1$Id == id), ]
ls2_bi <- ls2_b[which(data2$Id == id), ]
inter_ls <- suppressWarnings(sf::st_intersection(sf::st_as_sf(ls1_bi),
sf::st_as_sf(ls2_bi)))
inter_area <- sf::st_area(inter_ls)
ls1_area <- sf::st_area(sf::st_as_sf(ls1_bi))
ls2_area <- sf::st_area(sf::st_as_sf(ls2_bi))
df_res <- rbind(df_res,
data.frame(id_link = id,
area_1 = as.numeric(ls1_area),
area_2 = as.numeric(ls2_area),
cost_dist_1 = ls1_bi$Dist,
cost_dist_2 = ls2_bi$Dist,
euc_dist_1 = ls1_bi$DistM,
euc_dist_2 = ls2_bi$DistM,
area_overlap = as.numeric(inter_area)))
}
correl <- stats::cor(df_res$cost_dist_1, df_res$cost_dist_2)
res <- list(df_res, correl)
names(res) <- c("Spatial overlap table",
"Correlation coefficient between cost distances")
if(chg == 1){
setwd(dir = wd1)
}
return(res)
}
|
expected <- eval(parse(text="structure(c(\"[\", \"shingle\", NA, \"as.data.frame\", \"shingle\", NA, \"plot\", \"shingle\", NA, \"print\", \"shingle\", NA, \"summary\", \"shingle\", NA, \"as.character\", \"shingleLevel\", NA, \"print\", \"shingleLevel\", NA, \"print\", \"trellis\", NA, \"plot\", \"trellis\", NA, \"update\", \"trellis\", NA, \"dim\", \"trellis\", NA, \"dimnames\", \"trellis\", NA, \"dimnames<-\", \"trellis\", NA, \"[\", \"trellis\", NA, \"t\", \"trellis\", NA, \"summary\", \"trellis\", NA, \"print\", \"summary.trellis\", NA, \"barchart\", \"formula\", NA, \"barchart\", \"array\", NA, \"barchart\", \"default\", NA, \"barchart\", \"matrix\", NA, \"barchart\", \"numeric\", NA, \"barchart\", \"table\", NA, \"bwplot\", \"formula\", NA, \"bwplot\", \"numeric\", NA, \"densityplot\", \"formula\", NA, \"densityplot\", \"numeric\", NA, \"dotplot\", \"formula\", NA, \"dotplot\", \"array\", NA, \"dotplot\", \"default\", NA, \"dotplot\", \"matrix\", NA, \"dotplot\", \"numeric\", NA, \"dotplot\", \"table\", NA, \"histogram\", \"formula\", NA, \"histogram\", \"factor\", NA, \"histogram\", \"numeric\", NA, \"qqmath\", \"formula\", NA, \"qqmath\", \"numeric\", NA, \"stripplot\", \"formula\", NA, \"stripplot\", \"numeric\", NA, \"qq\", \"formula\", NA, \"xyplot\", \"formula\", NA, \"xyplot\", \"ts\", NA, \"levelplot\", \"formula\", NA, \"levelplot\", \"table\", NA, \"levelplot\", \"array\", NA, \"levelplot\", \"matrix\", NA, \"contourplot\", \"formula\", NA, \"contourplot\", \"table\", NA, \"contourplot\", \"array\", NA, \"contourplot\", \"matrix\", NA, \"cloud\", \"formula\", NA, \"cloud\", \"matrix\", NA, \"cloud\", \"table\", NA, \"wireframe\", \"formula\", NA, \"wireframe\", \"matrix\", NA, \"splom\", \"formula\", NA, \"splom\", \"matrix\", NA, \"splom\", \"data.frame\", NA, \"parallelplot\", \"formula\", NA, \"parallelplot\", \"matrix\", NA, \"parallelplot\", \"data.frame\", NA, \"parallel\", \"formula\", NA, \"parallel\", \"matrix\", NA, \"parallel\", \"data.frame\", NA, \"tmd\", \"formula\", NA, \"tmd\", \"trellis\", NA, \"llines\", \"default\", NA, \"ltext\", \"default\", NA, \"lpoints\", \"default\", NA), .Dim = c(3L, 70L))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(\"[\", \"as.data.frame\", \"plot\", \"print\", \"summary\", \"as.character\", \"print\", \"print\", \"plot\", \"update\", \"dim\", \"dimnames\", \"dimnames<-\", \"[\", \"t\", \"summary\", \"print\", \"barchart\", \"barchart\", \"barchart\", \"barchart\", \"barchart\", \"barchart\", \"bwplot\", \"bwplot\", \"densityplot\", \"densityplot\", \"dotplot\", \"dotplot\", \"dotplot\", \"dotplot\", \"dotplot\", \"dotplot\", \"histogram\", \"histogram\", \"histogram\", \"qqmath\", \"qqmath\", \"stripplot\", \"stripplot\", \"qq\", \"xyplot\", \"xyplot\", \"levelplot\", \"levelplot\", \"levelplot\", \"levelplot\", \"contourplot\", \"contourplot\", \"contourplot\", \"contourplot\", \"cloud\", \"cloud\", \"cloud\", \"wireframe\", \"wireframe\", \"splom\", \"splom\", \"splom\", \"parallelplot\", \"parallelplot\", \"parallelplot\", \"parallel\", \"parallel\", \"parallel\", \"tmd\", \"tmd\", \"llines\", \"ltext\", \"lpoints\", \"shingle\", \"shingle\", \"shingle\", \"shingle\", \"shingle\", \"shingleLevel\", \"shingleLevel\", \"trellis\", \"trellis\", \"trellis\", \"trellis\", \"trellis\", \"trellis\", \"trellis\", \"trellis\", \"trellis\", \"summary.trellis\", \"formula\", \"array\", \"default\", \"matrix\", \"numeric\", \"table\", \"formula\", \"numeric\", \"formula\", \"numeric\", \"formula\", \"array\", \"default\", \"matrix\", \"numeric\", \"table\", \"formula\", \"factor\", \"numeric\", \"formula\", \"numeric\", \"formula\", \"numeric\", \"formula\", \"formula\", \"ts\", \"formula\", \"table\", \"array\", \"matrix\", \"formula\", \"table\", \"array\", \"matrix\", \"formula\", \"matrix\", \"table\", \"formula\", \"matrix\", \"formula\", \"matrix\", \"data.frame\", \"formula\", \"matrix\", \"data.frame\", \"formula\", \"matrix\", \"data.frame\", \"formula\", \"trellis\", \"default\", \"default\", \"default\", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), .Dim = c(70L, 3L)), c(2L, 1L), TRUE)"));
.Internal(`aperm`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
|
library(tinytest)
library(ggiraph)
classname_re <- "\\._CLASSNAME_\\s*"
girafe_css_re <- "\\/\\*GIRAFE CSS\\*\\/\\s*"
css_code <- "stroke: black;"
css_re <- function(x) {
paste0("\\s*\\{\\s*", x, "\\s*\\}\\s*")
}
{
css <- ggiraph:::validate_css(css_code, "text")
re <- paste0("^", classname_re, css_re(css_code), "$")
expect_true(grepl(re, css), info = "should return css prefixed with ._CLASSNAME_")
css <- ggiraph:::validate_css(css_code, "text", "text")
re <- paste0("^text", classname_re, css_re(css_code), "$")
expect_true(grepl(re, css), info = "tag name should be used")
css <- ggiraph:::validate_css(css_code, "text", c("text", "line"))
re <- paste0("^text", classname_re, ",\\s*line", classname_re, css_re(css_code), "$")
expect_true(grepl(re, css), info = "multiple tag names should be used")
expect_equal(
ggiraph:::validate_css(NULL, "text"), "",
info = "should return empty css"
)
expect_equal(
ggiraph:::validate_css("", "text"), "",
info = "should return empty css"
)
expect_error(
ggiraph:::validate_css(c("one", "two"), ""),
info = "css must be scalar character"
)
}
{
expect_equal(
ggiraph::girafe_css(""),
"/*GIRAFE CSS*/",
info = "should return just placeholder /*GIRAFE CSS*/"
)
css <- ggiraph::girafe_css(css_code)
re <- paste0("^", girafe_css_re, classname_re, css_re(css_code), "$")
expect_true(
grepl(re, css),
info = "should return the css code with placeholder"
)
css_tag_code <- "stroke: none;"
css <- ggiraph::girafe_css(css_code, text = css_tag_code)
re <- paste0("^", girafe_css_re, classname_re, css_re(css_code))
re2 <- paste0("\\s*text", classname_re, css_re(css_tag_code), "$")
expect_true(
grepl(re, css) && grepl(re2, css),
info = "should use the tag for text"
)
css <- ggiraph::girafe_css(css_code, point = css_tag_code)
re <- paste0("^", girafe_css_re, classname_re, css_re(css_code))
re2 <- paste0("\\s*circle", classname_re, css_re(css_tag_code), "$")
expect_true(
grepl(re, css) && grepl(re2, css),
info = "should use the tag for point"
)
css <- ggiraph::girafe_css(css_code, image = css_tag_code)
re <- paste0("^", girafe_css_re, classname_re, css_re(css_code))
re2 <- paste0("\\s*image", classname_re, css_re(css_tag_code), "$")
expect_true(
grepl(re, css) && grepl(re2, css),
info = "should use the tag for image"
)
css <- ggiraph::girafe_css(css_code, line = css_tag_code)
re <- paste0("^", girafe_css_re, classname_re, css_re(css_code))
re2 <- paste0(
"\\s*line", classname_re,
",\\s*polyline", classname_re,
css_re(css_tag_code), "$"
)
expect_true(
grepl(re, css) && grepl(re2, css),
info = "should use the tags for line"
)
css <- ggiraph::girafe_css(css_code, area = css_tag_code)
re <- paste0("^", girafe_css_re, classname_re, css_re(css_code))
re2 <- paste0(
"\\s*rect", classname_re,
",\\s*polygon", classname_re,
",\\s*path", classname_re,
css_re(css_tag_code), "$"
)
expect_true(
grepl(re, css) && grepl(re2, css),
info = "should use the tags for area"
)
}
{
default <- "fill:orange;stroke:gray;"
pattern <- "\\/\\*GIRAFE CSS\\*\\/"
cls_prefix <- "hover_"
name <- "opts_hover"
canvas_id <- "SVGID_"
expect_error(ggiraph:::check_css(
c("a", "b"),
default = default, cls_prefix = cls_prefix, name = name, canvas_id = canvas_id
))
expect_identical(ggiraph:::check_css(
NULL,
default = default, cls_prefix = cls_prefix, name = name, canvas_id = canvas_id
), paste0(".", cls_prefix, canvas_id, " { ", default, " }"))
}
|
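# Method for DBI::dbListFields() dispatching on a Postgres connection and a
# DBI::Id table qualifier, e.g. dbListFields(conn, Id(schema = "public",
# table = "mtcars")).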
dbListFields_PqConnection_Id <- function(conn, name, ...) {
list_fields(conn, name@name)
}
setMethod("dbListFields", c("PqConnection", "Id"), dbListFields_PqConnection_Id)
|
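# Assumed semantics: opwf() re-exposes a function under new (semantic)
# parameter names, and computeArgumentsCombination() enumerates the feasible
# argument combinations of the resulting signature.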
op_sum <- opwf(sum, c('...', 'removeNA_b_1'))
op_kronecker <- opwf(kronecker, c('arrayA_a_1', 'arrayB_a_1', 'function_f_1', 'computeDimensionNames_b_1', '...'))
op_formatdf <- opwf(format.data.frame, c('x_o_1', '...', 'justificationScheme_s_1'))
cac_sum <- computeArgumentsCombination(op_sum)
cac_kronecker <- computeArgumentsCombination(op_kronecker)
cac_formatdf <- computeArgumentsCombination(op_formatdf)
|
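# Principal components via SVD of the standardized data: reports eigenvalues,
# loadings and scores, and, when the smallest eigenvalue permits, Mahalanobis
# distances with F- and chi-square-based probabilities.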
"rg.mva" <-
function(x, main = deparse(substitute(x)))
{
if(!is.matrix(x)) stop("Not a Matrix")
n <- length(x[, 1])
p <- length(x[1, ])
matnames <- dimnames(x)
wts <- numeric(n)
wts[1:n] <- 1
nc <- n
cat(" n =", n, "\tnc =", n, "\tp =", p, "\t\tnc/p =", round(nc/p, 2), "\n")
if(nc <= 5 * p)
cat(" *** Proceed with Care, n is < 5p ***\n")
if(nc <= 3 * p)
cat(" *** Proceed With Great Care, n = ", n, ", which is < 3p ***\n")
save <- cov.wt(x, wt = wts, cor = TRUE)
xmean <- save$center
xsd <- sqrt(diag(save$cov))
temp <- sweep(x, 2, xmean, "-")
snd <- sweep(temp, 2, xsd, "/")
xsd2 <- sqrt(n) * xsd
w <- sweep(temp, 2, xsd2, "/")
wt <- t(as.matrix(w))
a <- wt %*% as.matrix(w)
b <- svd(a)
cat(" Eigenvalues:", signif(b$d, 4), "\n")
sumc <- sum(b$d)
econtrib <- 100 * (b$d/sumc)
rqscore <- w %*% b$v
vcontrib <- apply(rqscore,2,var)
sumv <- sum(vcontrib)
pvcontrib <- (100 * vcontrib)/sumv
cpvcontrib <- cumsum(pvcontrib)
b1 <- b$v * 0
diag(b1) <- sqrt(b$d)
rload <- b$v %*% b1
rcr <- rload[, ] * 0
rcr1 <- apply(rload^2, 1, sum)
rcr <- 100 * sweep(rload^2, 1, rcr1, "/")
if(b$d[p] > 0.001) {
md <- mahalanobis(x, save$center, save$cov)
temp <- (nc - p)/(p * (nc + 1))
ppm <- 1 - pf(temp * md, p, nc - p)
epm <- 1 - pchisq(md, p)
}
else {
cat(" Lowest eigenvalue < 10^-4, Mahalanobis distances not computed\n")
md <- NULL
ppm <- NULL
epm <- NULL
}
invisible(list(main = main, input = deparse(substitute(x)), proc = "cov", n = n, nc = nc,
p = p, matnames = matnames, wts = wts, mean = xmean, cov = save$cov, sd = xsd,
snd = snd, r = save$cor, eigenvalues = b$d, econtrib = econtrib, eigenvectors =
b$v, rload = rload, rcr = rcr, rqscore = rqscore, vcontrib = vcontrib, pvcontrib
= pvcontrib, cpvcontrib = cpvcontrib, md = md, ppm = ppm, epm = epm, nr = NULL)
)
}
|
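# Plot method for fixed-effect meta-analytic trees: the top panel draws the
# tree (ovals for nodes, labelled splits), the bottom panel 1.96 * SE
# confidence intervals for the terminal nodes; ggplot2 and
# gridExtra::grid.arrange() are assumed to be available.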
plot.FEmrt <- function(x, ...){
if (length(x$n) < 2) {
warning("No tree was detected")
} else{
frame <- x$tree$frame
frame0 <- frame
rownames(frame0) <- rank(as.numeric(rownames(frame)))
trans.labels <- rownames(frame)
names(trans.labels) <- rownames(frame0)
term.nodes <- rownames(frame0)[x$tree$where]
inx.pleaves <- which(frame0$var != "<leaf>")
pleaves <- rownames(frame0)[inx.pleaves]
pleaves <- pleaves[order(as.numeric(rownames(frame0)[inx.pleaves+1]))]
splits.all <- labels(x$tree, minlength = 0L)
splits <- splits.all[inx.pleaves+1][order(as.numeric(rownames(frame0)[inx.pleaves+1]))]
ntree <- data.frame(split = as.character(splits), pleaf = as.numeric(pleaves))
object <- list()
object$data <- x$data
object$data$term.node <- term.nodes
object$tree <- ntree
object$n <- table(term.nodes)
object$tree$split <- as.character(object$tree$split)
if (length(object$n) < 2) {stop("no tree was detected")}
else {
transparent_theme <- ggplot2::theme(
axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_blank()
)
transparent_theme2 <- ggplot2::theme(
axis.text.x = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_blank()
)
y <- NULL
term.node <- NULL
yi <- NULL
leaf.no <- NULL
tree <- object$tree
tree <- tree[!is.na(tree$pleaf), ]
count <- 1
nodes <- data.frame(leaf=1, pleaf=0, x=0, y=0, w=1)
for(pleaf in tree$pleaf) {
pleaf_row <- nodes[nodes$leaf == pleaf, ]
count <- count + 1
nodes <- updateNodes(
nodes,
data.frame(
leaf = count,
pleaf = pleaf,
x = pleaf_row$x - pleaf_row$w / 2,
y = pleaf_row$y - 1,
w = pleaf_row$w / 2
)
)
count <- count + 1
nodes <- updateNodes(
nodes,
data.frame(
leaf = count,
pleaf = pleaf,
x = pleaf_row$x + pleaf_row$w / 2,
y = pleaf_row$y - 1,
w = pleaf_row$w / 2
)
)
}
nodes$split = NA
nodes$split[tree$pleaf] = tree$split
nodes$x.new <- rep(NA, nrow(nodes))
inx.term <- !(nodes$leaf %in% nodes$pleaf)
nodes.term <- nodes[inx.term,]
nodes$x.new[inx.term] <- rank(nodes$x[inx.term])
nodes$leaf.no[inx.term] <- object$n[as.character(nodes$leaf[inx.term])]
for (i in min(nodes$y):-1){
inx.pleaf <- which(nodes$y == i)
coords <- sapply(split(nodes$x.new[inx.pleaf], nodes$pleaf[inx.pleaf]), mean)
leaf.no <- sapply(split(nodes$leaf.no[inx.pleaf], nodes$pleaf[inx.pleaf]), sum)
inx.replace <- names(coords[!is.na(coords)])
nodes$x.new[as.numeric(inx.replace)] <- coords[inx.replace]
nodes$leaf.no[as.numeric(inx.replace)] <- leaf.no[inx.replace]
}
nodes$x <- nodes$x.new
config.leaf_width_scale <- 0.9
x.scale <- config.leaf_width_scale / 2 *
min(sapply(split(nodes[-1, ]$x,f =nodes[-1, ]$y), function(x) min(diff(sort(x)))))
y.scale <- x.scale*diff(range(nodes$y))/diff(range(nodes$x))
vis <- ggplot()
for(i in 1:nrow(nodes)){
node <- nodes[i, ]
if(node$pleaf == 0){
next
}
parent = nodes[nodes$leaf == node$pleaf, ]
data_line = data.frame(x = c(node$x, parent$x),
y = c(node$y, parent$y))
vis <- vis + geom_line(data = data_line, aes(x, y), color = "black")
}
config.branch_text_left_dx = -0.2
config.branch_text_right_dx = 0.2
config.branch_text_left = "Yes"
config.branch_text_right = "No"
config.branch_text_size = 3
config.leaf_oval_ratio = 1.3
config.leaf_text_size = 5
config.split_text_dy = -0.33
config.split_text_size = 3
config.split_label = TRUE
for (i in 1:nrow(nodes)) {
node <- nodes[i, ]
parent = nodes[nodes$leaf == node$pleaf,]
vis <- oval_draw(vis, node$x, node$y, config.leaf_oval_ratio, x.scale, y.scale) +
geom_text(
data = data.frame(x = node$x, y = node$y),
aes(x, y),
label = paste("K =",node$leaf.no),
size = config.leaf_text_size
)
h = 1
if(!is.na(node$split)){
dy <- h * config.split_text_dy
data_text = data.frame(x = node$x, y = node$y + dy)
show_text = if (config.split_label) geom_label else geom_text
vis <- vis +
show_text(
data = data_text,
aes(x, y),
label = encodeHtml(node$split),
size = config.split_text_size
)
}
dx = h * ifelse(node$leaf %% 2 == 0,
config.branch_text_left_dx,
config.branch_text_right_dx)
data_text = data.frame(x = (node$x + parent$x) / 2 + dx,
y = (node$y + parent$y) / 2)
vis <- vis +
geom_text(
data = data_text,
aes(x, y),
label = ifelse(
node$leaf %% 2 == 0,
config.branch_text_left,
config.branch_text_right
),
size = config.branch_text_size
)
}
vis <- vis + transparent_theme
term <- nodes[is.na(nodes$split),]
term <- term[ordered(term$x.new),]
yi <- model.response(x$data)
p <- ggplot()
p <- p + geom_hline(
yintercept = c(min(yi), max(yi)),
linetype = "solid"
)
p <- p + geom_hline(
yintercept = 0,
linetype = "dashed"
)
p <- p + scale_x_discrete(limits = as.factor(term$leaf))
CI.ratio = 2
for (i in unique(term.nodes)) {
y.coord2 = frame0[i, ]$yval
x.coord2 = nodes[i, ]$x
p <- CI_draw(p, x = x.coord2, y = y.coord2, b = 1.96* x$se[trans.labels[i]], a = 1.96* x$se[trans.labels[i]]/CI.ratio)
}
p <- p + transparent_theme2
grid.arrange(vis, p, nrow = 2, as.table = TRUE, heights = c(3, 1))
}
}
}
|
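# shinydashboard UI builders: each helper injects a conditional panel into the
# page body with insertUI(), so the corresponding boxes are shown or hidden by
# the server-side output.cond flag.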
create_map <- function() {
insertUI(immediate = TRUE,
selector = ".content",
where = "beforeEnd",
ui = conditionalPanel(
condition = "output.cond == true",
div(class = "main_plots",
fluidRow(
box(
id = "box1",
width = 12,
collapsible = TRUE,
collapsed = FALSE,
title = tagList(icon("map"), "Map"),
mapdeck::mapdeckOutput(outputId = "map"))))))
}
create_summary_boxes <- function() {
insertUI(immediate = TRUE,
selector = ".content",
where = "beforeEnd",
ui = conditionalPanel(
condition = "output.cond == true",
div(class = "main_plots",
fluidRow(
valueBoxOutput("nsessions_running_box", width = 4),
valueBoxOutput("nsessions_cycling_box", width = 4),
valueBoxOutput("nsessions_swimming_box", width = 4)),
fluidRow(
valueBoxOutput("avgDistance_box", width = 4),
valueBoxOutput("avgDuration_box", width = 4),
valueBoxOutput("avgPace_box", width = 4)),
fluidRow(
valueBoxOutput("avgHeartRate_box", width = 4),
valueBoxOutput("avgTemperature_box", width = 4),
valueBoxOutput("total_elevation_gain_box", width = 4))
)))
}
create_workout_plots <- function(feature) {
fname <- switch(as.character(feature),
"distance" = "Distance",
"duration" = "Duration",
"avgSpeed" = "Average Speed",
"avgPace" = "Average Pace",
"avgCadenceRunning" = "Average Cadence Running",
"avgCadenceCycling" = "Average Cadence Cycling",
"avgPower" = "Average Power",
"avgHeartRate" = "Average Heart Rate",
"avgTemperature" = "Average Temperature",
"avgAltitude" = "Average Altitude",
"total_elevation_gain" = "Total elevation gain",
"wrRatio" = "Work-to-rest Ratio")
insertUI(selector = ".content",
where = "beforeEnd",
ui = conditionalPanel(
condition = paste0("output.", feature, " == false"),
div(class = "main_plots", id = paste0("box", feature),
fluidRow(
box(width = 12,
collapsible = TRUE,
title = tagList(icon(create_icon(feature)), fname),
plotlyOutput(paste0(feature, "_plot"),
width = "auto",
height = "auto"))))))
}
create_selected_workout_plot <- function(id, workout_features, collapsed = FALSE) {
insertUI(
selector = ".content",
where = "beforeEnd",
ui = conditionalPanel(
condition = paste0("output.", id, " == false"),
div(class = "plots", id = id,
fluidRow(
box(
width = 12,
collapsible = TRUE,
collapsed = collapsed,
title = tagList(icon("gear"),
switch(id,
"pace" = "Pace",
"heart_rate" = "Heart Rate",
"altitude" = "Altitude",
"power" = "Power",
"speed" = "Speed",
"cadence_running" = "Cadence Running",
"cadence_cycling" = "Cadence Cycling",
"altitude" = "Altitude",
"temperature" = "Temperature",
"cumulative_elevation_gain" = "Cumulative elevation gain")),
fluidRow(
column(3,
selectInput(
inputId = paste0("what2", id),
label = "Shaded feature",
multiple = FALSE,
choices = workout_features,
selected = "altitude")),
column(1,
selectizeInput(
inputId = paste0("n_changepoints", id),
label = "Changepoints",
multiple = FALSE,
choices = c(
"no" = 0,
"1" = 1,
"2" = 2,
"3" = 3,
"4" = 4,
"5" = 5,
"6" = 6,
"7" = 7,
"8" = 8,
"9" = 9,
"10" = 10,
"11" = 11,
"12" = 12),
selected = "no"))),
div(id = "workout_view_plot",
uiOutput(paste0(id, "_plot"))))))))
}
create_profiles_box <- function(inputId, plotId, choices, collapsed = FALSE) {
insertUI(
selector = ".content",
where = "beforeEnd",
ui = conditionalPanel(
condition = "output.cond == false",
div(class = "plots",
fluidRow(
box(
width = 12,
collapsible = TRUE,
collapsed = collapsed,
title = tagList(icon("gear"), "Workout concentration"),
fluidRow(
column(2, pickerInput(inputId = inputId,
label = "Features",
choices = choices,
options = list(`actions-box` = TRUE),
multiple = TRUE,
selected = c("speed")))),
uiOutput(plotId))))))
}
create_zones_box <- function(inputId, plotId, choices) {
insertUI(
selector = ".content",
where = "beforeEnd",
ui = conditionalPanel(
condition = "output.cond == false",
div(class = "plots",
fluidRow(
box(
width = 12,
collapsible = TRUE,
collapsed = FALSE,
title = tagList(icon("gear"), "Time in zones"),
fluidRow(
column(2, pickerInput(inputId = inputId,
label ="Features",
choices = choices,
options = list(`actions-box` = TRUE),
multiple = TRUE,
selected = c("speed"))),
column(2, pickerInput(inputId = "n_zones",
label = "Number of zones:",
multiple = FALSE,
choices = c("2" = 2,
"3" = 3,
"4" = 4,
"5" = 5,
"6" = 6,
"7" = 7,
"8" = 8,
"9" = 9),
options = list(`actions-box` = TRUE),
selected = "6"))),
uiOutput(plotId))))))
}
create_option_box <- function(sport_options, summary_features_available, workout_features_available) {
insertUI(
immediate = TRUE,
selector = ".content",
where = "afterBegin",
ui = div(class = "option_boxes",
fluidRow(
box(
width = 12,
collapsible = TRUE,
title = "Toolbar",
fluidRow(
column(3,
actionButton(inputId = "no_sports",
label = "Clear selection",
icon = icon("times-circle"))),
column(3,
actionButton(inputId = "all_sports",
label = "Select all",
icon = icon("times-circle"))),
column(2,
actionButton(
inputId = "sport_is_running",
label = "Running",
icon = icon("walking"))),
column(2,
actionButton(
inputId = "sport_is_cycling",
label = "Cycling",
icon = icon("bicycle"))),
column(2,
actionButton(
inputId = "sport_is_swimming",
label = "Swimming",
icon = icon("swimmer")))),
br(),
fluidRow(
column(3,
conditionalPanel(
condition = "output.cond == false",
actionButton(
inputId = "return_to_main_page",
label = "Summary view",
icon = icon("search-minus"))),
conditionalPanel(
condition = "output.cond == true",
actionButton(inputId = "plotSelectedWorkouts",
label = "Workout view",
icon = icon("search-plus")))),
column(3, actionButton(inputId = "showModalUnits",
label = "Change units",
icon = icon("balance-scale"))),
column(6,
conditionalPanel(
condition = "output.cond == true",
pickerInput(inputId = "metricsSelected",
choices = summary_features_available,
options = list(`actions-box` = TRUE),
multiple = TRUE, selected = trops()$default_summary_plots)),
conditionalPanel(
condition = "output.cond == false",
pickerInput(inputId = "workout_features_selected",
choices = workout_features_available,
options = list(`actions-box` = TRUE),
multiple = TRUE,
selected = trops()$default_workout_plots))
))))))
}
create_summary_timeline_boxes <- function() {
insertUI(
immediate = TRUE,
selector = ".content",
where = "beforeEnd",
ui = div(class = "main_plots",
fluidRow(
box(
id = "summary_box",
width = 6,
collapsible = TRUE,
title = tagList(icon("reorder"), "Workout summary"),
DTOutput("summary", height = "365px")),
box(
id = "workout_timeline_box",
width = 6,
collapsible = TRUE,
collapsed = FALSE,
title = tagList(icon("calendar"), "Workout timeline"),
plotlyOutput("timeline_plot", height = "365px")))))
}
show_change_unit_window <- function(data) {
showModal(modalDialog(
title = "Change units",
awesomeRadio("altitudeUnits", "Altitude:", c("m" = "m",
"km" = "km",
"mi" = "mi",
"ft" = "ft"),
inline = TRUE,
selected = get_selected_units("altitude", data)),
awesomeRadio("distanceUnits", "Distance:", c("m" = "m",
"km" = "km",
"mi" = "mi",
"ft" = "ft"),
inline = TRUE,
selected = get_selected_units("distance", data)),
awesomeRadio("speedUnits", "Speed:", c("m/s" = "m_per_s",
"km/h" = "km_per_h",
"ft/min" = "ft_per_min",
"ft/s" = "ft_per_s",
"mi/h" = "mi_per_h"),
inline = TRUE,
selected = get_selected_units("speed", data)),
awesomeRadio("paceUnits", "Pace:", c("min/km" = "min_per_km",
"min/mi" = "min_per_mi",
"s/min" = "s_per_m"),
inline = TRUE,
selected = get_selected_units("pace", data)),
awesomeRadio("durationUnits", "Duration:", c("seconds" = "s",
"minutes" = "min",
"hours" = "h"),
inline = TRUE,
selected = get_selected_units("duration", data)),
awesomeRadio("powerUnits", "Power:", c("W" = "W",
"kW" = "kW"),
inline = TRUE,
selected = get_selected_units("power", data)),
footer = tagList(modalButton("Cancel"),
actionButton("updateUnits", "Apply"))))
}
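# Usage sketch (illustrative, not part of the original source): these helpers
# inject UI into a shinydashboard page and are intended to be called from a
# server function once data is available, e.g.
# server <- function(input, output, session) {
#   create_option_box(sport_options, summary_features_available, workout_features_available)
#   create_map()
#   create_summary_boxes()
#   create_workout_plots("avgPace")
#   observeEvent(input$showModalUnits, show_change_unit_window(data))
# }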
|
context("multi-objective: parego")
test_that("mbo parego works", {
learner = makeLearner("regr.km", predict.type = "se", nugget.estim = TRUE)
ctrl = makeMBOControl(n.objectives = 2L)
ctrl = setMBOControlTermination(ctrl, iters = 5L)
ctrl = setMBOControlInfill(ctrl, opt.focussearch.points = 10)
ctrl = setMBOControlMultiObj(ctrl, method = "parego", parego.s = 100)
or = mbo(testfmco1, testdesmco1, learner = learner, control = ctrl)
expect_output(print(or), "Optimization path")
expect_matrix(or$pareto.front, mode = "numeric", any.missing = FALSE)
ctrl = makeMBOControl(n.objectives = 2, propose.points = 2L)
ctrl = setMBOControlTermination(ctrl, iters = 5L)
ctrl = setMBOControlInfill(ctrl, opt.focussearch.points = 5)
ctrl = setMBOControlMultiObj(ctrl, method = "parego", parego.s = 100, parego.use.margin.points = c(TRUE, TRUE))
or = mbo(testfmco1, testdesmco1, learner = learner, control = ctrl)
w = as.data.frame(or$opt.path)[-(1:10), c("parego.weight.1", "parego.weight.2")]
expect_true(all(w == 0 | w == 1))
expect_equal(1 - w[, 1], w[, 2])
ctrl = makeMBOControl(n.objectives = 3)
ctrl = setMBOControlTermination(ctrl, iters = 5L)
ctrl = setMBOControlInfill(ctrl, opt.focussearch.points = 10L)
ctrl = setMBOControlMultiObj(ctrl, method = "parego", parego.s = 100)
expect_error(mbo(testfmco1, testdesmco1, learner = learner, control = ctrl), "Objective function has")
ctrl = makeMBOControl(n.objectives = 2, propose.points = 5L)
ctrl = setMBOControlTermination(ctrl, iters = 1L)
ctrl = setMBOControlInfill(ctrl, opt.focussearch.points = 10L)
ctrl = setMBOControlMultiObj(ctrl, method = "parego", parego.s = 100)
or = mbo(testfmco1, testdesmco1, learner = learner, control = ctrl)
w = as.data.frame(or$opt.path)[, c("parego.weight.1", "parego.weight.2")]
expect_true(all(rowSums(w[-(1:10),]) == 1))
expect_numeric(w[-(1:10), 1], any.missing = FALSE, unique = TRUE)
expect_numeric(w[-(1:10), 2], any.missing = FALSE, unique = TRUE)
ctrl = makeMBOControl(n.objectives = 2, propose.points = 5L)
ctrl = setMBOControlTermination(ctrl, iters = 5L)
ctrl = setMBOControlInfill(ctrl, opt.focussearch.points = 10L)
ctrl = setMBOControlMultiObj(ctrl, method = "parego", parego.s = 100)
or = mbo(testfmco2, testdesmco2, learner = learner, control = ctrl)
expect_matrix(or$pareto.front, mode = "numeric", any.missing = FALSE)
})
|
all_na <- function(x) {
UseMethod("all_na")
}
all_na.default <- function(x) {
sum(!is.na(x)) == 0
}
all_na.data.frame <- function(x) {
as.data.frame(lapply(x, function(v) sum(!is.na(v)) == 0))
}
all_na.list <- function(x) {
lapply(x, function(v) sum(!is.na(v)) == 0)
}
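# Usage sketch for the all_na() generic above (illustrative):
# all_na(c(NA, NA))                     # TRUE
# all_na(data.frame(a = NA, b = 1:2))   # one-row data.frame: a = TRUE, b = FALSE
# all_na(list(x = c(NA, NA), y = 1))    # list(x = TRUE, y = FALSE)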
|
`smootherstep.uni` <-
function(m,C,Gmatx,Wtx,mx,Cx) {
Rx <- Gmatx * C * Gmatx + Wtx
B <- C * Gmatx / Rx
ms <- m + B*(mx - Gmatx *m)
Cs <- C + B*(Cx-Rx)*B
list(ms=ms,Cs=Cs)
}
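# Illustrative single smoothing step (hypothetical values). Under the usual
# univariate Kalman smoother conventions, m and C are the filtered mean and
# variance at time t, mx and Cx the smoothed mean and variance at time t+1,
# Gmatx the state transition and Wtx the state innovation variance:
# smootherstep.uni(m = 0.1, C = 1.2, Gmatx = 0.9, Wtx = 0.5, mx = 0.3, Cx = 0.8)
# # returns list(ms = ..., Cs = ...), the smoothed mean and variance at time t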
|
"fextreme"<-
function(x, start, densfun, distnfun, ..., distn, mlen = 1, largest = TRUE,
std.err = TRUE, corr = FALSE, method = "Nelder-Mead")
{
if (missing(x) || length(x) == 0 || !is.numeric(x))
stop("`x' must be a non-empty numeric object")
if(any(is.na(x)))
stop("`x' must not contain missing values")
if (!is.list(start))
stop("`start' must be a named list")
call <- match.call()
if(missing(densfun))
densfun <- get(paste("d", distn, sep=""), mode="function")
if(missing(distnfun))
distnfun <- get(paste("p", distn, sep=""), mode="function")
nllh <- function(p, ...) {
dvec <- dens(p, ..., log = TRUE)
if(any(is.infinite(dvec)))
return(1e6)
else
return(-sum(dvec))
}
nm <- names(start)
l <- length(nm)
f1 <- formals(densfun)
f2 <- formals(distnfun)
args <- names(f1)
mtch <- match(nm, args)
if (any(is.na(mtch)))
stop("`start' specifies unknown arguments")
formals(densfun) <- c(f1[c(1, mtch)], f1[-c(1, mtch)])
formals(distnfun) <- c(f2[c(1, mtch)], f2[-c(1, mtch)])
dens <- function(p, x, densfun, distnfun, ...)
dextreme(x, densfun, distnfun, p, ...)
if(l > 1)
body(dens) <- parse(text = paste("dextreme(x, densfun, distnfun,",
paste("p[",1:l,"]", collapse = ", "), ", ...)"))
opt <- optim(start, nllh, x = x, hessian = TRUE, ...,
densfun = densfun, distnfun = distnfun, mlen = mlen,
largest = largest, method = method)
if(is.null(names(opt$par))) names(opt$par) <- nm
if (opt$convergence != 0) {
warning("optimization may not have succeeded")
if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
}
else opt$convergence <- "successful"
if(std.err) {
tol <- .Machine$double.eps^0.5
var.cov <- qr(opt$hessian, tol = tol)
if (var.cov$rank != ncol(var.cov$qr))
stop("observed information matrix is singular; use std.err = FALSE")
var.cov <- solve(var.cov, tol = tol)
std.err <- diag(var.cov)
if(any(std.err <= 0))
stop("observed information matrix is singular; use std.err = FALSE")
std.err <- sqrt(std.err)
names(std.err) <- nm
if(corr) {
.mat <- diag(1/std.err, nrow = length(std.err))
corr <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
diag(corr) <- rep(1, length(std.err))
}
else corr <- NULL
}
else std.err <- var.cov <- corr <- NULL
structure(list(estimate = opt$par, std.err = std.err,
deviance = 2*opt$value, corr = corr, var.cov = var.cov,
convergence = opt$convergence, counts = opt$counts,
message = opt$message, call = call, data = x,
n = length(x)), class = c("extreme", "evd"))
}
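# Usage sketch (illustrative; assumes the evd package is attached so that
# rgev() and dextreme() are available):
# uvdata <- rgev(100, loc = 0.13, scale = 1.1, shape = 0.2)
# fextreme(uvdata, start = list(mean = 0, sd = 1), distn = "norm", mlen = 20)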
"forder"<-
function(x, start, densfun, distnfun, ..., distn, mlen = 1, j = 1,
largest = TRUE, std.err = TRUE, corr = FALSE, method = "Nelder-Mead")
{
if (missing(x) || length(x) == 0 || !is.numeric(x))
stop("`x' must be a non-empty numeric object")
if(any(is.na(x)))
stop("`x' must not contain missing values")
if (!is.list(start))
stop("`start' must be a named list")
call <- match.call()
if(missing(densfun))
densfun <- get(paste("d", distn, sep=""), mode="function")
if(missing(distnfun))
distnfun <- get(paste("p", distn, sep=""), mode="function")
nllh <- function(p, ...) {
dvec <- dens(p, ..., log = TRUE)
if(any(is.infinite(dvec)))
return(1e6)
else
return(-sum(dvec))
}
nm <- names(start)
l <- length(nm)
f1 <- formals(densfun)
f2 <- formals(distnfun)
args <- names(f1)
mtch <- match(nm, args)
if (any(is.na(mtch)))
stop("`start' specifies unknown arguments")
formals(densfun) <- c(f1[c(1, mtch)], f1[-c(1, mtch)])
formals(distnfun) <- c(f2[c(1, mtch)], f2[-c(1, mtch)])
dens <- function(p, x, densfun, distnfun, ...)
dorder(x, densfun, distnfun, p, ...)
if(l > 1)
body(dens) <- parse(text = paste("dorder(x, densfun, distnfun,",
paste("p[",1:l,"]", collapse = ", "), ", ...)"))
opt <- optim(start, nllh, x = x, hessian = TRUE, ..., densfun = densfun,
distnfun = distnfun, mlen = mlen, j = j, largest = largest,
method = method)
if(is.null(names(opt$par))) names(opt$par) <- nm
if (opt$convergence != 0) {
warning("optimization may not have succeeded")
if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
}
else opt$convergence <- "successful"
if(std.err) {
tol <- .Machine$double.eps^0.5
var.cov <- qr(opt$hessian, tol = tol)
if (var.cov$rank != ncol(var.cov$qr))
stop("observed information matrix is singular; use std.err = FALSE")
var.cov <- solve(var.cov, tol = tol)
std.err <- diag(var.cov)
if(any(std.err <= 0))
stop("observed information matrix is singular; use std.err = FALSE")
std.err <- sqrt(std.err)
names(std.err) <- nm
if(corr) {
.mat <- diag(1/std.err, nrow = length(std.err))
corr <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
diag(corr) <- rep(1, length(std.err))
}
else corr <- NULL
}
else std.err <- var.cov <- corr <- NULL
structure(list(estimate = opt$par, std.err = std.err,
deviance = 2*opt$value, corr = corr, var.cov = var.cov,
convergence = opt$convergence, counts = opt$counts,
message = opt$message, call = call, data = x,
n = length(x)), class = c("extreme", "evd"))
}
"fgev"<-
function(x, start, ..., nsloc = NULL, prob = NULL, std.err = TRUE,
corr = FALSE, method = "BFGS", warn.inf = TRUE)
{
call <- match.call()
if(missing(x) || length(x) == 0 || !is.numeric(x))
stop("`x' must be a non-empty numeric vector")
if(is.null(prob)) {
ft <- fgev.norm(x = x, start = start, ..., nsloc = nsloc, std.err =
std.err, corr = corr, method = method, warn.inf = warn.inf)
}
else {
if(length(prob) != 1 || !is.numeric(prob) || prob < 0 || prob > 1)
stop("`prob' should be a probability in [0,1]")
ft <- fgev.quantile(x = x, start = start, ..., nsloc = nsloc, prob = prob,
std.err = std.err, corr = corr, method = method, warn.inf = warn.inf)
}
structure(c(ft, call = call), class = c("gev", "uvevd", "evd"))
}
"fgev.norm"<-
function(x, start, ..., nsloc = NULL, std.err = TRUE, corr = FALSE, method = "BFGS", warn.inf = TRUE)
{
nlgev <- function(loc, scale, shape)
{
if(scale <= 0) return(1e6)
if(!is.null(nsloc)) {
ns <- numeric(length(loc.param))
for(i in 1:length(ns))
ns[i] <- get(loc.param[i])
loc <- drop(nslocmat %*% ns)
}
else loc <- rep(loc, length.out = length(x))
.C(C_nlgev,
x, n, loc, scale, shape, dns = double(1))$dns
}
if(!is.null(nsloc)) {
if(is.vector(nsloc)) nsloc <- data.frame(trend = nsloc)
if(nrow(nsloc) != length(x))
stop("`nsloc' and data are not compatible")
nsloc <- nsloc[!is.na(x), ,drop = FALSE]
nslocmat <- cbind(1,as.matrix(nsloc))
}
x <- as.double(x[!is.na(x)])
n <- as.integer(length(x))
loc.param <- paste("loc", c("",names(nsloc)), sep="")
param <- c(loc.param, "scale", "shape")
if(missing(start)) {
start <- as.list(numeric(length(param)))
names(start) <- param
start$scale <- sqrt(6 * var(x))/pi
start$loc <- mean(x) - 0.58 * start$scale
start <- start[!(param %in% names(list(...)))]
}
if(!is.list(start))
stop("`start' must be a named list")
if(!length(start))
stop("there are no parameters left to maximize over")
nm <- names(start)
l <- length(nm)
f <- c(as.list(numeric(length(loc.param))), formals(nlgev)[2:3])
names(f) <- param
m <- match(nm, param)
if(any(is.na(m)))
stop("`start' specifies unknown arguments")
formals(nlgev) <- c(f[m], f[-m])
nllh <- function(p, ...) nlgev(p, ...)
if(l > 1)
body(nllh) <- parse(text = paste("nlgev(", paste("p[",1:l,
"]", collapse = ", "), ", ...)"))
fixed.param <- list(...)[names(list(...)) %in% param]
if(any(!(param %in% c(nm,names(fixed.param)))))
stop("unspecified parameters")
start.arg <- c(list(p = unlist(start)), fixed.param)
if(warn.inf && do.call("nllh", start.arg) == 1e6)
warning("negative log-likelihood is infinite at starting values")
opt <- optim(start, nllh, hessian = TRUE, ..., method = method)
if(is.null(names(opt$par))) names(opt$par) <- nm
if (opt$convergence != 0) {
warning("optimization may not have succeeded")
if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
}
else opt$convergence <- "successful"
if(std.err) {
tol <- .Machine$double.eps^0.5
var.cov <- qr(opt$hessian, tol = tol)
if(var.cov$rank != ncol(var.cov$qr))
stop("observed information matrix is singular; use std.err = FALSE")
var.cov <- solve(var.cov, tol = tol)
std.err <- diag(var.cov)
if(any(std.err <= 0))
stop("observed information matrix is singular; use std.err = FALSE")
std.err <- sqrt(std.err)
names(std.err) <- nm
if(corr) {
.mat <- diag(1/std.err, nrow = length(std.err))
corr <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
diag(corr) <- rep(1, length(std.err))
}
else corr <- NULL
}
else std.err <- var.cov <- corr <- NULL
param <- c(opt$par, unlist(fixed.param))
if(!is.null(nsloc)) {
trend <- param[paste("loc", names(nsloc), sep="")]
trend <- drop(as.matrix(nsloc) %*% trend)
x2 <- x - trend
}
else x2 <- x
list(estimate = opt$par, std.err = std.err,
fixed = unlist(fixed.param), param = param,
deviance = 2*opt$value, corr = corr, var.cov = var.cov,
convergence = opt$convergence, counts = opt$counts,
message = opt$message,
data = x, tdata = x2, nsloc = nsloc,
n = length(x), prob = NULL, loc = param["loc"])
}
"fgev.quantile"<-
function(x, start, ..., nsloc = NULL, prob, std.err = TRUE, corr = FALSE, method = "BFGS", warn.inf = TRUE)
{
nlgev <- function(quantile, scale, shape)
{
if(scale <= 0) return(1e6)
quantile <- rep(quantile, length.out = length(x))
if(prob == 0 && shape >= 0) return(1e6)
if(prob == 1 && shape <= 0) return(1e6)
if(shape == 0) loc <- quantile + scale * log(-log(1-prob))
else loc <- quantile + scale/shape * (1 - (-log(1-prob))^(-shape))
if(!is.null(nsloc)) {
ns <- numeric(length(loc.param) - 1)
for(i in 1:length(ns))
ns[i] <- get(loc.param[i+1])
loc <- drop(nslocmat %*% ns) + loc
}
if(any(is.infinite(loc))) return(1e6)
.C(C_nlgev,
x, n,
loc, scale, shape, dns = double(1))$dns
}
if(is.null(nsloc)) loc.param <- "quantile"
else loc.param <- c("quantile", paste("loc", names(nsloc), sep=""))
param <- c(loc.param, "scale", "shape")
if(missing(start)) {
start <- as.list(numeric(length(param)))
names(start) <- param
start$scale <- sqrt(6 * var(x, na.rm = TRUE))/pi
start.loc <- mean(x, na.rm = TRUE) - 0.58 * start$scale
start$quantile <- start.loc - start$scale * log(-log(1-prob))
if(prob == 0) {
fpft <- fgev(x = x, ..., nsloc = nsloc, prob = 0.001, std.err =
std.err, corr = corr, method = method, warn.inf = warn.inf)
start <- as.list(fitted(fpft))
}
if(prob == 1) {
fpft <- fgev(x = x, ..., nsloc = nsloc, prob = 0.999, std.err =
std.err, corr = corr, method = method, warn.inf = warn.inf)
start <- as.list(fitted(fpft))
}
start <- start[!(param %in% names(list(...)))]
}
if(!is.list(start))
stop("`start' must be a named list")
if(!length(start))
stop("there are no parameters left to maximize over")
if(!is.null(nsloc)) {
if(is.vector(nsloc)) nsloc <- data.frame(trend = nsloc)
if(nrow(nsloc) != length(x))
stop("`nsloc' and data are not compatible")
nsloc <- nsloc[!is.na(x), ,drop = FALSE]
nslocmat <- as.matrix(nsloc)
}
x <- as.double(x[!is.na(x)])
n <- as.integer(length(x))
nm <- names(start)
l <- length(nm)
f <- c(as.list(numeric(length(loc.param))), formals(nlgev)[2:3])
names(f) <- param
m <- match(nm, param)
if(any(is.na(m)))
stop("`start' specifies unknown arguments")
formals(nlgev) <- c(f[m], f[-m])
nllh <- function(p, ...) nlgev(p, ...)
if(l > 1)
body(nllh) <- parse(text = paste("nlgev(", paste("p[",1:l,
"]", collapse = ", "), ", ...)"))
fixed.param <- list(...)[names(list(...)) %in% param]
if(any(!(param %in% c(nm,names(fixed.param)))))
stop("unspecified parameters")
start.arg <- c(list(p = unlist(start)), fixed.param)
if(warn.inf && do.call("nllh", start.arg) == 1e6)
warning("negative log-likelihood is infinite at starting values")
opt <- optim(start, nllh, hessian = TRUE, ..., method = method)
if(is.null(names(opt$par))) names(opt$par) <- nm
if (opt$convergence != 0) {
warning("optimization may not have succeeded")
if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
}
else opt$convergence <- "successful"
if(std.err) {
tol <- .Machine$double.eps^0.5
var.cov <- qr(opt$hessian, tol = tol)
if(var.cov$rank != ncol(var.cov$qr))
stop("observed information matrix is singular; use std.err = FALSE")
var.cov <- solve(var.cov, tol = tol)
std.err <- diag(var.cov)
if(any(std.err <= 0))
stop("observed information matrix is singular; use std.err = FALSE")
std.err <- sqrt(std.err)
names(std.err) <- nm
.mat <- diag(1/std.err, nrow = length(std.err))
if(corr) {
corr <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
diag(corr) <- rep(1, length(std.err))
}
else corr <- NULL
}
else {
std.err <- var.cov <- corr <- NULL
}
param <- c(opt$par, unlist(fixed.param))
if(!is.null(nsloc)) {
trend <- param[paste("loc", names(nsloc), sep="")]
trend <- drop(as.matrix(nsloc) %*% trend)
x2 <- x - trend
}
else x2 <- x
if(param["shape"] == 0)
loc <- param["quantile"] + param["scale"] * log(-log(1-prob))
else
loc <- param["quantile"] + param["scale"]/param["shape"] *
(1 - (-log(1-prob))^(-param["shape"]))
list(estimate = opt$par, std.err = std.err,
fixed = unlist(fixed.param), param = param,
deviance = 2*opt$value, corr = corr, var.cov = var.cov,
convergence = opt$convergence, counts = opt$counts,
message = opt$message, data = x, tdata = x2, nsloc = nsloc,
n = length(x), prob = prob, loc = loc)
}
"fpot"<-
function(x, threshold, model = c("gpd", "pp"), start, npp = length(x), cmax = FALSE, r = 1, ulow = -Inf, rlow = 1, mper = NULL, ..., std.err = TRUE, corr = FALSE, method = "BFGS", warn.inf = TRUE)
{
call <- match.call()
model <- match.arg(model)
if(missing(x) || length(x) == 0 || mode(x) != "numeric")
stop("`x' must be a non-empty numeric vector")
if(missing(threshold) || length(threshold) != 1 ||
mode(threshold) != "numeric")
stop("`threshold' must be a numeric value")
threshold <- as.double(threshold)
if(is.null(mper)) {
ft <- fpot.norm(x = x, threshold = threshold, model = model, start = start,
npp = npp, cmax = cmax, r = r, ulow = ulow, rlow = rlow, ...,
std.err = std.err, corr = corr, method = method, warn.inf = warn.inf)
}
else {
if(model == "pp")
stop("`mper' cannot be specified in point process models")
ft <- fpot.quantile(x = x, threshold = threshold, start =
start, npp = npp, cmax = cmax, r = r, ulow = ulow, rlow = rlow, ...,
mper = mper, std.err = std.err, corr = corr, method = method,
warn.inf = warn.inf)
}
structure(c(ft, call = call), class = c("pot", "uvevd", "evd"))
}
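# Usage sketch (illustrative; assumes the evd package for rgpd()):
# uvdata <- rgpd(100, loc = 0, scale = 1.1, shape = 0.2)
# fpot(uvdata, threshold = 1)              # GPD fit to threshold exceedances
# fpot(uvdata, threshold = 1, mper = 10)   # parameterized by the 10-period return level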
"fpot.norm"<-
function(x, threshold, model, start, npp = length(x), cmax = FALSE, r = 1, ulow = -Inf, rlow = 1, ..., std.err = TRUE, corr = FALSE, method = "BFGS", warn.inf = TRUE)
{
if(model == "gpd") {
nlpot <- function(loc, scale, shape) {
.C(C_nlgpd,
exceed, nhigh, threshold, scale, shape, dns = double(1))$dns
}
formals(nlpot) <- formals(nlpot)[2:3]
}
if(model == "pp") {
nlpot <- function(loc, scale, shape) {
.C(C_nlpp,
exceed, nhigh, loc, scale, shape, threshold, nop,
dns = double(1))$dns
}
}
nn <- length(x)
nop <- as.double(nn/npp)
if(cmax) {
exceed <- clusters(x, u = threshold, r = r, ulow = ulow, rlow = rlow,
cmax = TRUE, keep.names = FALSE)
extind <- attributes(exceed)$acs
exceed <- as.double(exceed)
nhigh <- length(exceed) ; nat <- as.integer(nhigh * extind)
extind <- 1/extind
}
else {
extind <- r <- NULL
high <- (x > threshold) & !is.na(x)
exceed <- as.double(x[high])
nhigh <- nat <- length(exceed)
}
if(!nhigh) stop("no data above threshold")
pat <- nat/nn
param <- c("scale", "shape")
if(model == "pp") param <- c("loc", param)
if(missing(start)) {
if(model == "gpd") {
start <- list(scale = 0, shape = 0)
start$scale <- mean(exceed) - threshold
}
if(model == "pp") {
start <- list(loc = 0, scale = 0, shape = 0)
start$scale <- sqrt(6 * var(x))/pi
start$loc <- mean(x) + (log(nop) - 0.58) * start$scale
}
start <- start[!(param %in% names(list(...)))]
}
if(!is.list(start))
stop("`start' must be a named list")
if(!length(start))
stop("there are no parameters left to maximize over")
nm <- names(start)
l <- length(nm)
f <- formals(nlpot)
names(f) <- param
m <- match(nm, param)
if(any(is.na(m)))
stop("`start' specifies unknown arguments")
formals(nlpot) <- c(f[m], f[-m])
nllh <- function(p, ...) nlpot(p, ...)
if(l > 1)
body(nllh) <- parse(text = paste("nlpot(", paste("p[",1:l,
"]", collapse = ", "), ", ...)"))
fixed.param <- list(...)[names(list(...)) %in% param]
if(any(!(param %in% c(nm,names(fixed.param)))))
stop("unspecified parameters")
start.arg <- c(list(p = unlist(start)), fixed.param)
if(warn.inf && do.call("nllh", start.arg) == 1e6)
warning("negative log-likelihood is infinite at starting values")
opt <- optim(start, nllh, hessian = TRUE, ..., method = method)
if(is.null(names(opt$par))) names(opt$par) <- nm
if (opt$convergence != 0) {
warning("optimization may not have succeeded")
if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
}
else opt$convergence <- "successful"
if(std.err) {
tol <- .Machine$double.eps^0.5
var.cov <- qr(opt$hessian, tol = tol)
if(var.cov$rank != ncol(var.cov$qr))
stop("observed information matrix is singular; use std.err = FALSE")
var.cov <- solve(var.cov, tol = tol)
std.err <- diag(var.cov)
if(any(std.err <= 0))
stop("observed information matrix is singular; use std.err = FALSE")
std.err <- sqrt(std.err)
names(std.err) <- nm
if(corr) {
.mat <- diag(1/std.err, nrow = length(std.err))
corr <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
diag(corr) <- rep(1, length(std.err))
}
else corr <- NULL
}
else std.err <- var.cov <- corr <- NULL
param <- c(opt$par, unlist(fixed.param))
if(model == "gpd") scale <- param["scale"]
if(model == "pp") scale <- param["scale"] + param["shape"] * (threshold -
param["loc"])
list(estimate = opt$par, std.err = std.err, fixed = unlist(fixed.param),
param = param, deviance = 2*opt$value, corr = corr, var.cov = var.cov,
convergence = opt$convergence, counts = opt$counts, message = opt$message,
threshold = threshold, cmax = cmax, r = r, ulow = ulow, rlow = rlow, npp = npp,
nhigh = nhigh, nat = nat, pat = pat, extind = extind,
data = x, exceedances = exceed, mper = NULL, scale = scale)
}
"fpot.quantile"<-
function(x, threshold, start, npp = length(x), cmax = FALSE, r = 1, ulow = -Inf, rlow = 1, mper, ..., std.err = TRUE, corr = FALSE, method = "BFGS", warn.inf = TRUE)
{
nlpot <- function(rlevel, shape)
{
if(is.infinite(mper) && shape >= 0) return(1e6)
rlevel <- rlevel - threshold
if(shape == 0) scale <- rlevel / log(adjmper)
else scale <- shape * rlevel / (adjmper^shape - 1)
.C(C_nlgpd,
exceed, nhigh, threshold, scale, shape, dns = double(1))$dns
}
nn <- length(x)
if(cmax) {
exceed <- clusters(x, u = threshold, r = r, ulow = ulow, rlow = rlow,
cmax = TRUE, keep.names = FALSE)
extind <- attributes(exceed)$acs
exceed <- as.double(exceed)
nhigh <- length(exceed) ; nat <- as.integer(nhigh * extind)
extind <- 1/extind
}
else {
extind <- r <- NULL
high <- (x > threshold) & !is.na(x)
exceed <- as.double(x[high])
nhigh <- nat <- length(exceed)
}
if(!nhigh) stop("no data above threshold")
pat <- nat/nn
adjmper <- mper * npp * nhigh/nn
if(adjmper <= 1) stop("`mper' is too small")
param <- c("rlevel", "shape")
if(missing(start)) {
start <- list(rlevel = 0, shape = 0)
stscale <- mean(exceed) - threshold
start$rlevel <- threshold + stscale*log(adjmper)
if(is.infinite(mper)) {
stmp <- 100/(npp * nhigh/nn)
fpft <- fpot(x = x, threshold = threshold, npp = npp, cmax =
cmax, r = r, ulow = ulow, rlow = rlow, mper = stmp, ...,
std.err = std.err, corr = corr, method = method, warn.inf =
warn.inf)
start <- as.list(fitted(fpft))
}
start <- start[!(param %in% names(list(...)))]
}
if(!is.list(start))
stop("`start' must be a named list")
if(!length(start))
stop("there are no parameters left to maximize over")
nm <- names(start)
l <- length(nm)
f <- formals(nlpot)
names(f) <- param
m <- match(nm, param)
if(any(is.na(m)))
stop("`start' specifies unknown arguments")
formals(nlpot) <- c(f[m], f[-m])
nllh <- function(p, ...) nlpot(p, ...)
if(l > 1)
body(nllh) <- parse(text = paste("nlpot(", paste("p[",1:l,
"]", collapse = ", "), ", ...)"))
fixed.param <- list(...)[names(list(...)) %in% param]
if(any(!(param %in% c(nm,names(fixed.param)))))
stop("unspecified parameters")
start.arg <- c(list(p = unlist(start)), fixed.param)
if(warn.inf && do.call("nllh", start.arg) == 1e6)
warning("negative log-likelihood is infinite at starting values")
opt <- optim(start, nllh, hessian = TRUE, ..., method = method)
if(is.null(names(opt$par))) names(opt$par) <- nm
if (opt$convergence != 0) {
warning("optimization may not have succeeded")
if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
}
else opt$convergence <- "successful"
if(std.err) {
tol <- .Machine$double.eps^0.5
var.cov <- qr(opt$hessian, tol = tol)
if(var.cov$rank != ncol(var.cov$qr))
stop("observed information matrix is singular; use std.err = FALSE")
var.cov <- solve(var.cov, tol = tol)
std.err <- diag(var.cov)
if(any(std.err <= 0))
stop("observed information matrix is singular; use std.err = FALSE")
std.err <- sqrt(std.err)
names(std.err) <- nm
if(corr) {
.mat <- diag(1/std.err, nrow = length(std.err))
corr <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
diag(corr) <- rep(1, length(std.err))
}
else corr <- NULL
}
else std.err <- var.cov <- corr <- NULL
param <- c(opt$par, unlist(fixed.param))
rlevel <- param["rlevel"] - threshold
if(param["shape"] == 0) scale <- rlevel / log(adjmper)
else scale <- param["shape"] * rlevel / (adjmper^param["shape"] - 1)
list(estimate = opt$par, std.err = std.err, fixed = unlist(fixed.param),
param = param, deviance = 2*opt$value, corr = corr, var.cov = var.cov,
convergence = opt$convergence, counts = opt$counts, message = opt$message,
threshold = threshold, cmax = cmax, r = r, ulow = ulow, rlow = rlow, npp = npp,
nhigh = nhigh, nat = nat, pat = pat, extind = extind,
data = x, exceedances = exceed, mper = mper, scale = scale)
}
"print.evd" <- function(x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\nCall:", deparse(x$call), "\n")
cat("Deviance:", x$deviance, "\n")
cat("\nEstimates\n")
print.default(format(x$estimate, digits = digits), print.gap = 2,
quote = FALSE)
if(!is.null(x$std.err)) {
cat("\nStandard Errors\n")
print.default(format(x$std.err, digits = digits), print.gap = 2,
quote = FALSE)
}
if(!is.null(x$corr)) {
cat("\nCorrelations\n")
print.default(format(x$corr, digits = digits), print.gap = 2,
quote = FALSE)
}
cat("\nOptimization Information\n")
cat(" Convergence:", x$convergence, "\n")
cat(" Function Evaluations:", x$counts["function"], "\n")
if(!is.na(x$counts["gradient"]))
cat(" Gradient Evaluations:", x$counts["gradient"], "\n")
if(!is.null(x$message)) cat(" Message:", x$message, "\n")
cat("\n")
invisible(x)
}
"confint.evd" <- function (object, parm, level = 0.95, ...)
{
cf <- fitted(object)
pnames <- names(cf)
if (missing(parm))
parm <- seq(along = pnames)
else if (is.character(parm))
parm <- match(parm, pnames, nomatch = 0)
if(any(!parm))
stop("`parm' contains unknown parameters")
a <- (1 - level)/2
a <- c(a, 1 - a)
pct <- paste(round(100 * a, 1), "%")
ci <- array(NA, dim = c(length(parm), 2), dimnames = list(pnames[parm],
pct))
ses <- std.errors(object)[parm]
ci[] <- cf[parm] + ses %o% qnorm(a)
ci
}
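# Usage sketch (illustrative): Wald confidence intervals from a fitted "evd"
# object, assuming e.g. fit <- fgev(x) for some numeric vector x:
# confint(fit)                               # 95% intervals for all parameters
# confint(fit, parm = "shape", level = 0.9)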
"anova.evd" <- function (object, object2, ..., half = FALSE)
{
if(missing(object)) stop("model one must be specified")
if(missing(object2)) stop("model two must be specified")
dots <- as.list(substitute(list(...)))[-1]
dots <- sapply(dots,function(x) deparse(x))
if(!length(dots)) dots <- NULL
model1 <- deparse(substitute(object))
model2 <- deparse(substitute(object2))
models <- c(model1, model2, dots)
narg <- length(models)
for(i in 1:narg) {
if(!inherits(get(models[i], envir = parent.frame()), "evd"))
stop("Use only with 'evd' objects")
}
for(i in 1:(narg-1)) {
a <- get(models[i], envir = parent.frame())
b <- get(models[i+1], envir = parent.frame())
if((!all(names(fitted(b)) %in% names(fitted(a)))) &&
(!identical(c("bilog","log"), c(a$model, b$model))) &&
(!identical(c("negbilog","neglog"), c(a$model, b$model)))) {
warning("models may not be nested")
}
}
dv <- npar <- numeric(narg)
for(i in 1:narg) {
evmod <- get(models[i], envir = parent.frame())
dv[i] <- evmod$deviance
npar[i] <- length(evmod$estimate)
}
df <- -diff(npar)
if(any(df <= 0)) stop("models are not nested")
dvdiff <- diff(dv)
if(any(dvdiff < 0)) stop("negative deviance difference")
if(half) dvdiff <- 2*dvdiff
pval <- pchisq(dvdiff, df = df, lower.tail = FALSE)
table <- data.frame(npar, dv, c(NA,df), c(NA,dvdiff), c(NA,pval))
dimnames(table) <- list(models, c("M.Df", "Deviance", "Df", "Chisq",
"Pr(>chisq)"))
structure(table, heading = c("Analysis of Deviance Table\n"),
class = c("anova", "data.frame"))
}
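# Usage sketch (illustrative): likelihood-ratio comparison of nested GEV fits
# for some numeric vector x:
# M1 <- fgev(x)               # full model
# M2 <- fgev(x, shape = 0)    # Gumbel restriction, nested within M1
# anova(M1, M2)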
"fitted.evd" <- function (object, ...) object$estimate
"std.errors" <- function (object, ...) UseMethod("std.errors")
"std.errors.evd" <- function (object, ...) object$std.err
"vcov.evd" <- function (object, ...) object$var.cov
"logLik.evd" <- function(object, ...) {
val <- -deviance(object)/2
attr(val, "df") <- length(fitted(object))
class(val) <- "logLik"
val
}
"print.pot" <- function(x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\nCall:", deparse(x$call), "\n")
cat("Deviance:", x$deviance, "\n")
cat("\nThreshold:", round(x$threshold, digits), "\n")
cat("Number Above:", x$nat, "\n")
cat("Proportion Above:", round(x$pat, digits), "\n")
if(!is.null(x$extind)) {
cat("\nClustering Interval:", x$r, "\n")
if(is.finite(x$ulow)) {
cat("Lower Threshold:", round(x$ulow, digits), "\n")
cat("Lower Clustering Interval:", x$rlow, "\n")
}
cat("Number of Clusters:", x$nhigh, "\n")
cat("Extremal Index:", round(x$extind, digits), "\n")
}
cat("\nEstimates\n")
print.default(format(x$estimate, digits = digits), print.gap = 2,
quote = FALSE)
if(!is.null(x$std.err)) {
cat("\nStandard Errors\n")
print.default(format(x$std.err, digits = digits), print.gap = 2,
quote = FALSE)
}
if(!is.null(x$corr)) {
cat("\nCorrelations\n")
print.default(format(x$corr, digits = digits), print.gap = 2,
quote = FALSE)
}
cat("\nOptimization Information\n")
cat(" Convergence:", x$convergence, "\n")
cat(" Function Evaluations:", x$counts["function"], "\n")
if(!is.na(x$counts["gradient"]))
cat(" Gradient Evaluations:", x$counts["gradient"], "\n")
if(!is.null(x$message)) cat(" Message:", x$message, "\n")
cat("\n")
invisible(x)
}
|
boxplot_monthly_compare_observations_with_ci <- function(model, use.example=FALSE) {
start_par = par()$mfrow
on.exit(par(mfrow = start_par))
resultsdir <- elt(model, "setup", "resultsdir")
model.path <- elt(model, "setup", "model.path")
model.ident <- elt(model, "setup", "model.ident")
model.name <- elt(model, "setup", "model.name")
model.variant <- elt(model, "setup", "model.variant")
obstargetdataset <- get.model.file(model.path, TARGET_DATA_DIR, file.pattern=MONTHLY_TARGET_DATA)
corefilename<-"CredInt_processed_monthly_mass"
if(use.example==TRUE){
credintervaldata <- get.example.results(model.name, model.variant, corefilename, CREDINT_DIR)
}
if(use.example==FALSE){
credpath <- makepath(resultsdir, CREDINT_DIR)
credfile <- csvname(credpath, corefilename, model.ident)
message("Reading credible interval processed data from '", credfile, "'")
if (! file.exists(credfile)) {
message("Error: cannot find credible interval output file: ", credfile)
stop("Please run the Monte-Carlo function!\n")
}
credintervaldata <- readcsv(credfile, row.names=1)
}
ntargobs<-100
monthtarget<-array(0,dim=c(ntargobs,12,10))
monlab<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
installtargdata<-function(qr,obspar,monthtarget){
data<-data.frame("Month"=monlab,"median"=rep(NA,12),"17centile"=rep(NA,12),"83centile"=rep(NA,12))
for(mmm in 1:12){
data[mmm,2:4]<-seldata[(which(seldata$Month==data$Month[mmm])),3:5]
}
for(mmm in 1:12){
sdpos<-(data[mmm,4]-data[mmm,2])/(qr/2)
sdneg<-(data[mmm,2]-data[mmm,3])/(qr/2)
for(kkk in 1:ntargobs){
rand<-rnorm(1,0,1)
if(rand<0) dev<-rand*sdneg
if(rand>=0) dev<-rand*sdpos
monthtarget[kkk,mmm,obspar]<-data[mmm,2]+dev
}
}
return(monthtarget)
}
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="surface_nitrate")
obspar<-1
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="deep_nitrate")
obspar<-2
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="surface_ammonia")
obspar<-3
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="deep_ammonia")
obspar<-4
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="surface_chlorophyll")
obspar<-5
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="omniv_zooplankton")
obspar<-6
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="carniv_zooplankton")
obspar<-7
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="larvae_susp_dep_benthos")
obspar<-8
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="larvae_carn_scav_benthos")
obspar<-9
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
seldata<-subset(obstargetdataset,obstargetdataset$Variable=="larvae_susp_dep_benthos")
seldata2<-subset(obstargetdataset,obstargetdataset$Variable=="larvae_carn_scav_benthos")
seldata[,3]<-seldata[,3]+seldata2[,3]
seldata[,4]<-seldata[,4]+seldata2[,4]
seldata[,5]<-seldata[,5]+seldata2[,5]
obspar<-10
qr<-3.5
if(seldata[1,7]==17 & seldata[1,8]==83) qr<-3.5
if(seldata[1,7]==5 & seldata[1,8]==95) qr<-7
monthtarget<-installtargdata(qr,obspar,monthtarget)
for(iii in 1:9){
credrows<- seq( ((iii-1)*(5+1))+2,((iii-1)*(5+1))+(5+1) )
modeldata2plot<-(credintervaldata[credrows,1])
for(jj in 2:12) { modeldata2plot<-c(modeldata2plot,(credintervaldata[credrows,jj]))}
array2plot<- array(dim=c(5,12),modeldata2plot)
bxpdata<-list(stats=array2plot,n=rep(100,12),conf=NULL,out=numeric(length=0))
if(iii==1) bxpdata1<-bxpdata
if(iii==2) bxpdata2<-bxpdata
if(iii==3) bxpdata3<-bxpdata
if(iii==4) bxpdata4<-bxpdata
if(iii==5) bxpdata5<-bxpdata
if(iii==6) bxpdata6<-bxpdata
if(iii==7) bxpdata7<-bxpdata
if(iii==8) bxpdata8<-bxpdata
if(iii==9) bxpdata9<-bxpdata
}
bxpdata10<-list(stats=(bxpdata9$stats+bxpdata8$stats),n=rep(100,12),conf=NULL,out=numeric(length=0))
bxpdata.all<-list(bxpdata1=bxpdata1,
bxpdata2=bxpdata2,
bxpdata3=bxpdata3,
bxpdata4=bxpdata4,
bxpdata5=bxpdata5,
bxpdata6=bxpdata6,
bxpdata7=bxpdata7,
bxpdata8=bxpdata8,
bxpdata9=bxpdata9,
bxpdata10=bxpdata10)
plotdata_mo<-function(monthtarget, bxpdata.all, obspar, monlab){
obsplot<-as.data.frame(monthtarget[,,obspar])
names(obsplot)<-monlab
bxpdata<-bxpdata.all[[paste0("bxpdata",obspar)]]
modplot<-bxpdata$stats
obschecksum_0<-sum(obsplot,na.rm=TRUE)
obschecksum1_0<-sum(monthtarget[,,1],na.rm=TRUE)
obschecksum2_0<-sum(monthtarget[,,2],na.rm=TRUE)
obschecksum3_0<-sum(monthtarget[,,3],na.rm=TRUE)
obschecksum4_0<-sum(monthtarget[,,4],na.rm=TRUE)
obschecksum_1<-sum(obsplot)
obschecksum1_1<-sum(monthtarget[,,1])
obschecksum2_1<-sum(monthtarget[,,2])
obschecksum3_1<-sum(monthtarget[,,3])
obschecksum4_1<-sum(monthtarget[,,4])
MIXFLAG<-0
MIXFLAG1<-0
MIXFLAG2<-0
MIXFLAG3<-0
MIXFLAG4<-0
if(obschecksum_0>0 & is.na(obschecksum_1)==TRUE) MIXFLAG<-1
if(obschecksum1_0>0 & is.na(obschecksum1_1)==TRUE) MIXFLAG1<-1
if(obschecksum2_0>0 & is.na(obschecksum2_1)==TRUE) MIXFLAG2<-1
if(obschecksum3_0>0 & is.na(obschecksum3_1)==TRUE) MIXFLAG3<-1
if(obschecksum4_0>0 & is.na(obschecksum4_1)==TRUE) MIXFLAG4<-1
if(obspar==1 | obspar==2){
if(MIXFLAG1==1 & MIXFLAG2==1){
ymax<- max(0, max(as.data.frame(monthtarget[,,1]),na.rm=TRUE), max(as.data.frame(monthtarget[,,2]),na.rm=TRUE), max(as.data.frame(bxpdata.all$bxpdata1$stats)), max(as.data.frame(bxpdata.all$bxpdata2$stats)),na.rm=TRUE )
} else if(MIXFLAG1==1 & MIXFLAG2==0){
ymax<- max(0, max(as.data.frame(monthtarget[,,1]),na.rm=TRUE), max(as.data.frame(monthtarget[,,2])), max(as.data.frame(bxpdata.all$bxpdata1$stats)), max(as.data.frame(bxpdata.all$bxpdata2$stats)),na.rm=TRUE )
} else if(MIXFLAG1==0 & MIXFLAG2==1){
ymax<- max(0, max(as.data.frame(monthtarget[,,1])), max(as.data.frame(monthtarget[,,2]),na.rm=TRUE), max(as.data.frame(bxpdata.all$bxpdata1$stats)), max(as.data.frame(bxpdata.all$bxpdata2$stats)),na.rm=TRUE )
} else if(MIXFLAG1==0 & MIXFLAG2==0){
ymax<- max(0, max(as.data.frame(monthtarget[,,1])), max(as.data.frame(monthtarget[,,2])), max(as.data.frame(bxpdata.all$bxpdata1$stats)), max(as.data.frame(bxpdata.all$bxpdata2$stats)),na.rm=TRUE )
}
}
if(obspar==3 | obspar==4){
if(MIXFLAG3==1 & MIXFLAG4==1){
ymax<- max(0, max(as.data.frame(monthtarget[,,3]),na.rm=TRUE), max(as.data.frame(monthtarget[,,4]),na.rm=TRUE), max(as.data.frame(bxpdata.all$bxpdata3$stats)), max(as.data.frame(bxpdata.all$bxpdata4$stats)),na.rm=TRUE )
} else if(MIXFLAG3==1 & MIXFLAG4==0){
ymax<- max(0, max(as.data.frame(monthtarget[,,3]),na.rm=TRUE), max(as.data.frame(monthtarget[,,4])), max(as.data.frame(bxpdata.all$bxpdata3$stats)), max(as.data.frame(bxpdata.all$bxpdata4$stats)),na.rm=TRUE )
} else if(MIXFLAG3==0 & MIXFLAG4==1){
ymax<- max(0, max(as.data.frame(monthtarget[,,3])), max(as.data.frame(monthtarget[,,4]),na.rm=TRUE), max(as.data.frame(bxpdata.all$bxpdata3$stats)), max(as.data.frame(bxpdata.all$bxpdata4$stats)),na.rm=TRUE )
} else if(MIXFLAG3==0 & MIXFLAG4==0){
ymax<- max(0, max(as.data.frame(monthtarget[,,3])), max(as.data.frame(monthtarget[,,4])), max(as.data.frame(bxpdata.all$bxpdata3$stats)), max(as.data.frame(bxpdata.all$bxpdata4$stats)),na.rm=TRUE )
}
}
if(obspar>4){
if(MIXFLAG==1){
ymax<- max(0, max(obsplot,na.rm=TRUE),max(as.data.frame(modplot)),na.rm=TRUE )
} else if(MIXFLAG==0){
ymax<- max(0, max(obsplot),max(as.data.frame(modplot)),na.rm=TRUE )
}
}
if(ymax==0 | is.na(ymax)==TRUE) ymax<-0.1
boxplot(obsplot,range=0,boxwex=0.25,ylim=c(0,ymax*1.1),las=1,cex.axis=1.1,yaxt="n",xaxt="n")
axis(labels=monlab, at=seq(1,12),side=1,las=1,cex.axis=1.1,padj=-0.55)
ylabs<-c("Surf.nitrate","Deep nitrate","Surf.ammonia","Deep ammonia",
"Chlorophyll","Omniv.zoo","Carniv.zoo","Larv.s/d.benth.",
"Larv.c/s.benth.","Benthos larvae (all)")
yunit<-ifelse(obspar==5,"mg.m^-3","mMN.m^-3")
axis(side=2,cex.lab=1.0,las=1)
mtext(ylabs[obspar],cex=0.8,side=2,line=4)
mtext(parse(text=yunit),cex=0.6,side=2,line=2.7)
bxp(bxpdata,add=TRUE,boxwex=0.25,at=1:12+0.35,yaxt="n",xaxt="n",
boxcol="red",whiskcol="red",whisklty="solid",medcol="red",staplecol="red")
}
par(mfrow=c(4,2))
par(mar=c(3,6,0.6,0.5))
plotdata_mo(monthtarget, bxpdata.all, 1, monlab)
plotdata_mo(monthtarget, bxpdata.all, 2, monlab)
plotdata_mo(monthtarget, bxpdata.all, 3, monlab)
plotdata_mo(monthtarget, bxpdata.all, 4, monlab)
plotdata_mo(monthtarget, bxpdata.all, 5, monlab)
plotdata_mo(monthtarget, bxpdata.all, 6, monlab)
plotdata_mo(monthtarget, bxpdata.all, 7, monlab)
plotdata_mo(monthtarget, bxpdata.all, 10, monlab)
legend(grconvertX(0.425, "ndc", "user"), grconvertY(0.045, "ndc", "user"),
c("observations","model"), fill = c("black","red"), ncol=2, bty="n", xpd = NA)
}
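# Usage sketch (illustrative; assumes a StrathE2E-style model object whose
# setup element provides resultsdir, model.path, model.ident, model.name and
# model.variant, and that the Monte-Carlo credible-interval outputs exist):
# boxplot_monthly_compare_observations_with_ci(model)
# boxplot_monthly_compare_observations_with_ci(model, use.example = TRUE)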
|
poTest <- function(model, ...){
UseMethod("poTest")
}
poTest.polr <- function(model, ...){
if (model$method != "logistic") stop("test for proportional odds is only for the logistic model")
X <- model.matrix(model)
y <- model.frame(model)[, 1]
levels <- levels(y)
k <- length(levels)
p <- ncol(X) - 1
y <- as.numeric(y)
models <- vector(k - 1, mode="list")
for (j in 1:(k - 1)){
models[[j]] <- glm(y > j ~ X - 1, family=binomial)
}
vcov <- matrix(0, (k - 1)*p, (k - 1)*p)
for (el in 1:(k - 1)){
for (j in 1:el){
W.j.el <- fitted(models[[el]]) - fitted(models[[j]])*fitted(models[[el]])
W.el.el <- fitted(models[[el]]) - fitted(models[[el]])^2
W.j.j <- fitted(models[[j]]) - fitted(models[[j]])^2
V <- solve(t(X * W.j.j) %*% X) %*% (t(X * W.j.el) %*% X) %*% solve(t(X * W.el.el) %*% X)
subs.j <- (j - 1)*p + 1:p
subs.el <- (el - 1)*p + 1:p
vcov[subs.j, subs.el] <- vcov[subs.el, subs.j] <- V[-1, -1]
}
}
beta <- unlist(lapply(models, function(m) coef(m)[-1]))
D <- matrix(0, (k - 2)*p, (k - 1)*p)
I <- diag(p)
for (j in 1:(k - 2)){
subs.j <- (j - 1)*p + 1:p
subs.el <- j*p + 1:p
D[subs.j, 1:p] <- I
D[subs.j, subs.el] <- -I
}
chisq <- t(D %*% beta) %*% solve(D %*% vcov %*% t(D)) %*% (D %*% beta)
df <- (k - 2)*p
chisq.p <- numeric(p)
zeros <- matrix(0, k - 2, (k - 1)*p)
D.p <- vector(p, mode="list")
for (i in 1:p){
DD <- zeros
j <- 1:(k - 2)
DD[j, i] <- 1
DD[cbind(j, j*p + i)] <- -1
chisq.p[i] <- t(DD %*% beta) %*% solve(DD %*% vcov %*% t(DD)) %*% (DD %*% beta)
D.p[[i]] <- DD
}
b <- coef(model)
coef.names <- names(b)
b <- cbind(b, matrix(beta, ncol = k - 1))
colnames(b) <- c("b[polr]", paste0("b[>", levels[1:(k - 1)], "]"))
result <- list(call=model$call, coef.names=coef.names, b=b,
vcov=vcov, D=D, chisq=as.vector(chisq), df=df,
D.p=D.p, chisq.p=chisq.p, df.p = k - 2)
class(result) <- "poTest"
result
}
print.poTest <- function(x, digits=3, ...){
cat("\nTests for Proportional Odds\n")
print(x$call)
cat("\n")
names <- c("Overall", x$coef.names)
chisq <- c(x$chisq, x$chisq.p)
df <- c(x$df, rep(x$df.p, length(x$chisq.p)))
pval <- pchisq(chisq, df, lower.tail=FALSE)
table <- cbind(chisq, df, pval)
colnames(table) <- c("Chisquare", "df", "Pr(>Chisq)")
b <- x$b
b <- rbind(rep(NA, ncol(b)), b)
table <- cbind(b, table)
rownames(table) <- names
printCoefmat(table, P.values=TRUE, has.Pvalue=TRUE, tst.ind = ncol(b) + 1,
na.print="", digits=digits)
invisible(x)
}
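# Usage sketch (illustrative; assumes MASS for polr() and its housing data):
# library(MASS)
# m <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)
# poTest(m)   # overall and per-coefficient tests of proportional odds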
|
tuning.gPLS.X <- function(X,Y,folds=10,validation=c("Mfold","loo"),ncomp,keepX=NULL,grid.X,setseed,progressBar=FALSE,ind.block.x){
choicesetseed <- setseed
if(length(keepX)>(ncomp-1)) stop("The length of keepX should be less than ncomp")
k <- 0
res <- rep(0,length(grid.X))
for (i in grid.X){
if(is.null(keepX)) keepX1 <- rep(i,ncomp) else keepX1 <- c(keepX,rep(i,ncomp-length(keepX)))
k <- k+1
cond <- TRUE
while (cond) {
model.gpls <- gPLS(X,Y,ncomp=ncomp,mode="regression",keepX=keepX1,ind.block.x=ind.block.x)
res.perf.gpls <- try(perf(model.gpls,criterion="MSEP",validation=validation,folds = folds,setseed=choicesetseed,progressBar=progressBar),silent=FALSE)
if (class(res.perf.gpls)[1]=="try-error"){ cond <- TRUE;choicesetseed=choicesetseed+1 } else {cond <- FALSE}
}
res[k] <- sum(res.perf.gpls$MSEP[,ncomp])
}
ind <- which.min(res)
keepX <- grid.X[ind]
return(list(MSEP=res,keepX=keepX))
}
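# Usage sketch (illustrative; assumes the sgPLS package supplies gPLS() and
# perf(), a predictor matrix X, response Y, and ind.block.x giving the end
# positions of the X variable blocks):
# tun <- tuning.gPLS.X(X, Y, folds = 5, validation = "Mfold", ncomp = 2,
#                      grid.X = 1:5, setseed = 1, ind.block.x = ind.block.x)
# tun$keepX   # grid value minimizing the summed MSEP over folds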
|
tt_load_gh <- function(x, week, auth = github_pat()) {
if (!get_connectivity()) {
check_connectivity(rerun = TRUE)
if (!get_connectivity()) {
message("Warning - No Internet Connectivity")
return(NULL)
}
}
if (rate_limit_check() == 0) {
return(NULL)
}
if (missing(x)) {
on.exit({
tt_available(auth = auth)
})
stop("Enter either the year or date of the TidyTuesday Data to extract!")
}
tt_date <- tt_check_date(x, week)
message("--- Compiling
tt_compilation <- tt_compile(tt_date)
n_files <- as.character(nrow(tt_compilation$files))
are_is <- switch( n_files,
"0" = "are",
"1" = "is",
"are")
file_s <- switch( n_files,
"0" = "files",
"1" = "file",
"files")
n_files <- ifelse( n_files == 0, "no", n_files)
message("--- There ",are_is," ", n_files, " ", file_s," available ---")
structure(
tt_compilation$files$data_files,
".files" = tt_compilation$files,
".readme" = tt_compilation$readme,
".date" = tt_date,
class = "tt"
)
}
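# Usage sketch (illustrative; requires internet access and a GitHub PAT):
# tt <- tt_load_gh("2019-01-15")     # by date
# tt <- tt_load_gh(2019, week = 3)   # or by year and week number
# attr(tt, ".files")                 # metadata on the files available that week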
|
form_resupport <- function(f, support, method = "reflect") {
assert_pdqr_fun(f)
assert_missing(support, "vector for support")
assert_support(support, allow_na = TRUE)
assert_method(method, methods_resupport)
disable_asserting_locally()
if (all(is.na(support))) {
return(f)
}
supp <- coalesce_pair(support, meta_support(f))
if (supp[1] > supp[2]) {
stop_collapse(
"After imputing `NA`s `support` equals (",
supp[1], ", ", supp[2], ") which is not proper."
)
}
switch(
method,
reflect = resupport_reflect(f, supp),
trim = resupport_trim(f, supp),
winsor = resupport_winsor(f, supp),
linear = resupport_linear(f, supp)
)
}
methods_resupport <- c("reflect", "trim", "winsor", "linear")
resupport_reflect <- function(f, support) {
f_supp <- meta_support(f)
f_x_tbl <- meta_x_tbl(f)
x_tbl_list <- list(f_x_tbl)
if (support[1] > f_supp[1]) {
x_tbl_list <- c(x_tbl_list, list(reflect_x_tbl(f_x_tbl, support[1])))
}
if (support[2] < f_supp[2]) {
x_tbl_list <- c(x_tbl_list, list(reflect_x_tbl(f_x_tbl, support[2])))
}
x_tbl <- stack_x_tbl(x_tbl_list)
res <- new_pdqr_by_ref(f)(x_tbl, meta_type(f))
form_resupport(res, support, "trim")
}
resupport_trim <- function(f, support) {
switch(
meta_type(f),
discrete = resupport_trim_dis(f, support),
continuous = resupport_trim_con(f, support)
)
}
resupport_trim_dis <- function(f, support) {
res_x_tbl <- filter_x_tbl(meta_x_tbl(f), support)
res_x_tbl <- res_x_tbl[, c("x", "prob")]
if (sum(res_x_tbl[["prob"]]) <= 0) {
stop_resupport_zero_tot_prob()
}
new_pdqr_by_ref(f)(res_x_tbl, "discrete")
}
resupport_trim_con <- function(f, support) {
d_f <- as_d(f)
edge_y <- d_f(support)
x_tbl_plus <- union_inside_x_tbl(
x_tbl_orig = meta_x_tbl(f),
x_tbl_new = data.frame(x = support, y = edge_y)
)
res_x_tbl <- filter_x_tbl(x_tbl_plus, support)
if (trapez_integral(res_x_tbl[["x"]], res_x_tbl[["y"]]) <= 0) {
stop_resupport_zero_tot_prob()
}
new_pdqr_by_ref(f)(res_x_tbl, "continuous")
}
resupport_winsor <- function(f, support) {
if (support[1] == support[2]) {
return(new_pdqr_by_ref(f)(support[1], meta_type(f)))
}
switch(
meta_type(f),
discrete = resupport_winsor_dis(f, support),
continuous = resupport_winsor_con(f, support)
)
}
resupport_winsor_dis <- function(f, support) {
f_x_tbl <- meta_x_tbl(f)
x <- f_x_tbl[["x"]]
x[x <= support[1]] <- support[1]
x[x >= support[2]] <- support[2]
f_x_tbl[["x"]] <- x
new_pdqr_by_ref(f)(f_x_tbl, "discrete")
}
resupport_winsor_con <- function(f, support, h = 1e-8) {
p_f <- as_p.pdqr(f)
f_x_tbl <- meta_x_tbl(f)
f_supp <- meta_support(f)
if (support[1] >= f_supp[2]) {
return(new_pdqr_by_ref(f)(support[1], meta_type(f)))
}
if (support[2] <= f_supp[1]) {
return(new_pdqr_by_ref(f)(support[2], meta_type(f)))
}
x_tbl <- f_x_tbl
if (support[1] > f_supp[1]) {
x_tbl <- add_x_tbl_knots(x_tbl, support[1] + c(0, h))
x_tbl <- filter_x_tbl(x_tbl, c(support[1], f_supp[2]))
tail_prob <- p_f(support[1])
x_tbl <- increase_tail_weight(x_tbl, tail_prob, "left")
}
if (support[2] < f_supp[2]) {
x_tbl <- add_x_tbl_knots(x_tbl, support[2] - c(h, 0))
x_tbl <- filter_x_tbl(x_tbl, c(f_supp[1], support[2]))
tail_prob <- 1 - p_f(support[2])
x_tbl <- increase_tail_weight(x_tbl, tail_prob, "right")
}
new_pdqr_by_ref(f)(x_tbl, "continuous")
}
increase_tail_weight <- function(x_tbl, by_prob, edge) {
n <- nrow(x_tbl)
x <- x_tbl[["x"]]
y <- x_tbl[["y"]]
if (edge == "left") {
present_prob <- (y[1] + y[2]) * (x[2] - x[1]) / 2
to_prob <- present_prob + by_prob
y[1] <- 2 * to_prob / (x[2] - x[1]) - y[2]
} else if (edge == "right") {
present_prob <- (y[n - 1] + y[n]) * (x[n] - x[n - 1]) / 2
to_prob <- present_prob + by_prob
y[n] <- 2 * to_prob / (x[n] - x[n - 1]) - y[n - 1]
}
data.frame(x = x, y = y)
}
resupport_linear <- function(f, support) {
if (support[1] == support[2]) {
return(new_pdqr_by_ref(f)(support[1], meta_type(f)))
}
f_supp <- meta_support(f)
if (f_supp[1] == f_supp[2]) {
stop_collapse(
"Can't resupport from single point support to interval one."
)
}
res_x_tbl <- meta_x_tbl(f)
res_x_tbl[["x"]] <- extrap_lin(
x_1 = f_supp[1], x_2 = f_supp[2],
y_1 = support[1], y_2 = support[2],
x_target = res_x_tbl[["x"]]
)
new_pdqr_by_ref(f)(res_x_tbl, meta_type(f))
}
stop_resupport_zero_tot_prob <- function() {
stop_collapse(
"Output of `form_resupport()` will not have positive total probability."
)
}
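# Usage sketch (illustrative; assumes a pdqr-style distribution function, e.g.
# f <- new_d(data.frame(x = 0:2, y = c(0, 1, 0)), type = "continuous")):
# form_resupport(f, c(0.5, NA), method = "trim")     # NA keeps f's own edge
# form_resupport(f, c(0.5, 1.5), method = "winsor")  # mass piles at the edges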
|
locally.weighted.polynomial <- function(x, y, h=NA, x.grid=NA, degree=1, kernel.type='Normal'){
x.grid <- x.grid.create(x.grid, x,y);
if(is.na(h)){
h <- stats::bw.SJ(x);
}
out <- NULL;
out$data <- NULL;
out$data$x <- x;
out$data$y <- y;
out$data$n <- length(x);
out$h <- h;
out$x.grid <- x.grid;
out$effective.sample.sizes <- rep(NA, length(x.grid));
out$Beta <- matrix(nrow=degree+1, ncol=length(x.grid));
unscaled.var <- out$Beta;
out$Beta.var <- out$Beta;
var.y.given.x <- rep(0, length(x.grid) );
count <- 1;
for(x.star in x.grid){
X <- NULL;
for( i in 0:degree ){
X <- cbind(X, (x-x.star)^i );
}
w <- kernel.h(x-x.star, h, type=kernel.type);
w2 <- w^2;
XtW <- t( apply(X,2,function(x){x*w}) );
XtW2 <- t( apply(X,2,function(x){x*w2}) );
XtWXinv <- tryCatch(solve( XtW %*% X ),
error=function(e){NULL})
if( is.null(XtWXinv) ){
out$Beta[, count] <- rep(NA, degree+1);
unscaled.var[, count] <- rep(NA, degree+1);
}else{
beta <- XtWXinv %*% XtW %*% y;
out$Beta[,count] <- beta;
unscaled.var[,count] <- diag(XtWXinv %*% XtW2%*%X %*% t(XtWXinv));
}
count <- count+1;
}
index <- which(!is.na(out$Beta[1,]))
if(length(index) > 2 ){
x.grid.small <- x.grid[index]
interp <- splines::interpSpline(x.grid.small, out$Beta[1,index])
out$residuals <- y - stats::predict(interp, x)$y
for(i in 1:length(x.grid.small) ){
divisor <- 0
for(j in 1:length(x)){
kernel <- kernel.h(x.grid.small[i]-x[j], h, type=kernel.type)
var.y.given.x[i] <- var.y.given.x[i] + (out$residuals[j])^2 * kernel
divisor <- divisor + kernel
}
var.y.given.x[i] <- var.y.given.x[i] / divisor;
}
}
index <- which(var.y.given.x == 0)
var.y.given.x[index] <- NA
for( i in 1:length(x.grid) ){
sum <- 0;
for( j in 1:length(x) ){
sum <- sum + kernel.h(x.grid[i] - x[j], h, type=kernel.type);
}
out$effective.sample.sizes[i] <- sum / kernel.h(0, h, type=kernel.type);
}
for( i in 1:length(x.grid) ){
out$Beta.var[, i] <- unscaled.var[,i] * var.y.given.x[i];
}
class(out) <- 'LocallyWeightedPolynomial';
return(out);
}
plot.LocallyWeightedPolynomial <- function(x, derv=0, CI.method=2, alpha=.05, use.ess=TRUE, draw.points=TRUE, ...){
index <- derv+1;
intervals <- calc.CI.LocallyWeightedPolynomial(x, derv=derv, CI.method, alpha=alpha, use.ess);
if( any( is.na(intervals$lower.CI) ) ){
stop('Too few data points to perform a valid local regression. Set use.ess=F to ignore the issue or select a larger bandwidth.');
}
y.M <- max( intervals$upper.CI );
y.m <- min( intervals$lower.CI );
if(derv==0 & draw.points==TRUE){
y.M <- max( c(y.M, x$data$y) );
y.m <- min( c(y.m, x$data$y) );
}
temp <- range(x$x.grid);
x.m <- temp[1];
x.M <- temp[2];
graphics::plot( c(x.m, x.M), c(y.m, y.M), type='n', ...);
graphics::lines(x$x.grid, intervals$upper.CI );
graphics::lines(x$x.grid, intervals$lower.CI );
graphics::polygon(c(x$x.grid, rev(x$x.grid)), c(intervals$upper.CI, rev(intervals$lower.CI)),
col='light grey');
graphics::lines(x$x.grid, intervals$estimated, type='l', lwd=2);
if(derv >= 1){
graphics::lines( range(x$x.grid), c(0,0) );
}
if( derv==0 & draw.points == TRUE ){
graphics::points(x$data$x, x$data$y, ...);
}
}
calc.CI.LocallyWeightedPolynomial <- function(model, derv=0, CI.method=2, alpha=.05, use.ess=TRUE){
out <- NULL;
  out$lower.CI <- rep(NA, length(model$x.grid));
out$estimated <- out$lower.CI;
out$upper.CI <- out$lower.CI;
index <- derv+1;
if( CI.method == 1 ){
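    # Note: this branch uses model$degrees.freedom, which is not set by
    # locally.weighted.polynomial() above; the caller must attach it to the
    # model object or qt() will fail.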
t.star <- stats::qt(1-alpha/2, df=model$degrees.freedom);
out$estimated <- model$Beta[index,];
out$lower.CI <- out$estimated - t.star*sqrt( model$Beta.var[index,] );
out$upper.CI <- out$estimated + t.star*sqrt( model$Beta.var[index,] );
}
if( CI.method == 2 ){
g <- length(model$x.grid);
h <- model$h;
delta <- model$x.grid[2] - model$x.grid[1];
theta <- 2* stats::pnorm( sqrt((1+2*derv)*log(g)) * delta/(2*h) ) -1;
temp.star <- stats::qnorm( (1-alpha/2)^(1/(theta*g)) );
out$estimated <- model$Beta[index,] * factorial(derv);
out$lower.CI <- out$estimated - temp.star*sqrt(model$Beta.var[index,]) * factorial(derv);
out$upper.CI <- out$estimated + temp.star*sqrt(model$Beta.var[index,]) * factorial(derv);
}
if( use.ess == TRUE){
index <- which( model$effective.sample.sizes < 5 );
out$lower.CI[index] <- NA;
out$upper.CI[index] <- NA;
}
return(out);
}
kernel.h <- function(x, h, type='Normal'){
if(type == 'Normal'){
out <- stats::dnorm(x/h) / h;
}else if(type == 'Epanechnikov'){
out <- (1/beta(.5,2)) * ( positive.part( (1-(x/h)^2) ) );
}else if( type == 'biweight' ){
out <- (1/beta(.5,3)) * ( positive.part( (1-(x/h)^2) ) )^2;
}else if( type == 'triweight' ){
out <- (1/beta(.5,4)) * ( positive.part( (1-(x/h)^2) ) )^3;
}else{
out <- rep(1, length(x));
out[ abs(x) > h ] <- 0;
}
return(out);
}
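# Quick sketch of the kernels above (bandwidth and grid are arbitrary; any
# unrecognised type falls through to the boxcar branch):
# u <- seq(-2, 2, by = 0.5)
# kernel.h(u, h = 1, type = 'Normal')
# kernel.h(u, h = 1, type = 'Epanechnikov')
# kernel.h(u, h = 1, type = 'uniform')   # boxcar on [-h, h]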
x.grid.create <- function(x.grid, x, y, grid.length=41){
if(all(is.na(x.grid))){
out <- seq(min(x), max(x), length=grid.length);
}else if( length(x.grid) == 1 ){
out <- seq(min(x), max(x), length=x.grid);
}else{
out <- x.grid;
}
return(out);
}
find.states <- function(intervals){
n <- length(intervals$lower.CI);
out <- rep(NA, n);
for( i in 1:n ){
out[i] <- find.state( intervals$lower.CI[i], intervals$upper.CI[i] );
}
return(out);
}
find.state.changes <- function(intervals){
out <- NULL;
out$indices <- NULL;
out$state <- NULL;
lower.CI <- intervals$lower.CI;
upper.CI <- intervals$upper.CI;
count <- 1;
out$state[count] <- find.state(lower.CI[1], upper.CI[1]);
out$indices[count] <- 1;
continue <- TRUE;
while(continue == TRUE){
if(out$state[count] == 1){
compare <- lower.CI < 0;
}else if(out$state[count] == -1){
compare <- upper.CI > 0;
} else{
compare <- upper.CI < 0 | lower.CI > 0
}
index <- match( TRUE, compare );
if( is.na(index) ){
continue <- FALSE;
}else{
count <- count + 1;
lower.CI <- lower.CI[ -1*1:(index-1) ];
upper.CI <- upper.CI[ -1*1:(index-1) ];
out$state[count] <- find.state(lower.CI[1], upper.CI[1]);
out$indices[count] <- out$indices[count-1] + index - 1;
}
}
return(out);
}
find.state <- function(lower.CI, upper.CI){
if( is.na(lower.CI) ){
state <- 2;
}else if( lower.CI > 0 ){
state <- 1;
}else if( upper.CI < 0 ){
state <- -1;
}else{
state <- 0;
}
return(state);
}
positive.part <- function(x){
out <- x;
out[ out<0 ] <- 0;
return(out);
}
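# Toy illustration of the state utilities above (CI values are made up):
# intervals <- list(lower.CI = c( 0.2,  0.1, -0.3, -0.5, -0.1),
#                   upper.CI = c( 0.8,  0.6,  0.2, -0.1,  0.4))
# find.states(intervals)         # 1 = sig. positive, -1 = sig. negative,
#                                # 0 = indeterminate, 2 = no valid estimate
# find.state.changes(intervals)  # indices at which the state switches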
|
convertNWSFireZones <- function(
nameOnly = FALSE,
simplify = TRUE
) {
dataDir <- getSpatialDataDir()
datasetName <- 'NWSFireZones'
if (nameOnly)
return(datasetName)
url <- "https://www.weather.gov/source/gis/Shapefiles/WSOM/fz03mr20.zip"
filePath <- file.path(dataDir, basename(url))
utils::download.file(url, filePath)
utils::unzip(filePath, exdir = file.path(dataDir, 'NWSFireZones'))
dsnPath <- file.path(dataDir, 'NWSFireZones')
shpName <- 'fz03mr20'
SPDF <- convertLayer(
dsn = dsnPath,
layerName = shpName,
encoding = 'UTF-8'
)
SPDF@data <-
dplyr::select(
.data = SPDF@data,
stateCode = .data$STATE,
weatherForecastOffice = .data$CWA,
zoneNumber = .data$ZONE,
name = .data$NAME,
zoneID = .data$STATE_ZONE,
longitude = .data$LON,
latitude = .data$LAT
)
SPDF <- organizePolygons(
SPDF,
uniqueID = 'zoneID',
sumColumns = c('longitude', 'latitude')
)
if ( !cleangeo::clgeo_IsValid(SPDF) ) {
SPDF <- cleangeo::clgeo_Clean(SPDF, verbose = TRUE)
}
message("Saving full resolution version...\n")
assign(datasetName, SPDF)
save(list = c(datasetName), file = paste0(dataDir, '/', datasetName, '.rda'))
rm(list = datasetName)
if ( simplify ) {
message("Simplifying to 5%...\n")
SPDF_05 <- rmapshaper::ms_simplify(SPDF, 0.05)
SPDF_05@data$rmapshaperid <- NULL
if ( !cleangeo::clgeo_IsValid(SPDF_05) ) {
SPDF_05 <- cleangeo::clgeo_Clean(SPDF_05)
}
datasetName_05 <- paste0(datasetName, "_05")
message("Saving 5% version...\n")
assign(datasetName_05, SPDF_05)
save(list = datasetName_05, file = paste0(dataDir,"/", datasetName_05, '.rda'))
rm(list = c("SPDF_05",datasetName_05))
message("Simplifying to 2%...\n")
SPDF_02 <- rmapshaper::ms_simplify(SPDF, 0.02)
SPDF_02@data$rmapshaperid <- NULL
if ( !cleangeo::clgeo_IsValid(SPDF_02) ) {
SPDF_02 <- cleangeo::clgeo_Clean(SPDF_02)
}
datasetName_02 <- paste0(datasetName, "_02")
message("Saving 2% version...\n")
assign(datasetName_02, SPDF_02)
save(list = datasetName_02, file = paste0(dataDir,"/", datasetName_02, '.rda'))
rm(list = c("SPDF_02",datasetName_02))
message("Simplifying to 1%...\n")
SPDF_01 <- rmapshaper::ms_simplify(SPDF, 0.01)
SPDF_01@data$rmapshaperid <- NULL
if ( !cleangeo::clgeo_IsValid(SPDF_01) ) {
SPDF_01 <- cleangeo::clgeo_Clean(SPDF_01)
}
datasetName_01 <- paste0(datasetName, "_01")
message("Saving 1% version...\n")
assign(datasetName_01, SPDF_01)
save(list = datasetName_01, file = paste0(dataDir,"/", datasetName_01, '.rda'))
rm(list = c("SPDF_01",datasetName_01))
}
unlink(filePath, force = TRUE)
unlink(dsnPath, recursive = TRUE, force = TRUE)
return(invisible(datasetName))
}
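# Usage sketch (assumes a MazamaSpatialUtils-style workflow; the directory
# path is illustrative):
# setSpatialDataDir("~/Data/Spatial")
# convertNWSFireZones()                # writes NWSFireZones.rda plus _05/_02/_01
# loadSpatialData("NWSFireZones")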
|
library(sp)
library(gstat)
# Allier soil data: field capacity (fc) and porosity (por) at five locations
x <- c(215, 330, 410, 470, 545)
y <- c(230, 310, 330, 340, 365)
fc <- c(0.211, 0.251, 0.281, 0.262, 0.242)
por <- c(0.438, 0.457, 0.419, 0.430, 0.468)
Allier <- data.frame(x, y, fc, por)
coordinates(Allier) = ~x+y
# linear model of coregionalization: direct and cross variograms share a
# spherical structure with range 480
g <- gstat(id="fc", formula=fc~1, data=Allier,
           model=vgm(0.00247, "Sph", 480, 0.00166))
g <- gstat(g, id="por", formula=por~1, data=Allier,
           model=vgm(0.00239, "Sph", 480, 0.00118))
g <- gstat(g, id=c("fc", "por"),
           model=vgm(0.00151, "Sph", 480, -0.00124))
# cokriging at a single point, solving the kriging system with and without the
# Choleski decomposition; the two predictions should agree
g$set = list(choleski = 0)
A <- predict(g, SpatialPoints(data.frame(x=450, y=350)), debug = 32)
g$set = list(choleski = 1)
B <- predict(g, SpatialPoints(data.frame(x=450, y=350)), debug = 32)
all.equal(A, B)
|
"berry_2019"
"bays2009_full"
"bays2009_sample"
"oberauer_2017"
|
test_that("new token computes expires_at", {
time <- Sys.time()
token <- oauth_token("xyz", expires_in = 10, .date = time)
expect_s3_class(token, "httr2_token")
expect_equal(token$expires_at, as.numeric(time + 10))
})
test_that("printing token redacts access and refresh token", {
expect_snapshot({
oauth_token(access_token = "secret", refresh_token = "secret")
})
})
test_that("can compute token expiry", {
token <- oauth_token("xyz")
expect_equal(token_has_expired(token), FALSE)
token <- oauth_token("xyz", expires_in = 8, .date = Sys.time() - 10)
expect_equal(token_has_expired(token), TRUE)
token <- oauth_token("xyz", expires_in = 10, .date = Sys.time())
expect_equal(token_has_expired(token), FALSE)
})
|
check_input_data <- function(arg_types, geoData = NULL, rtData = NULL){
if(!is.null(geoData)){
if (!'sf' %in% unlist(arg_types['geoData'])){stop('geoData must be an sf object')}
}
if(!is.null(rtData)){
if (!'list' %in% unlist(arg_types['rtData'])){stop('rtData must be a list object')}
}
rt_expected_names <- c("summaryData", "rtData", "casesInfectionData", "casesReportData", "obsCasesData")
if(!check_rtData_structure(rtData, rt_expected_names)){stop("Each level of rtData must include ", paste(rt_expected_names, collapse = ' '), ". Missing items should be NULL.")}
expected_columns <- list(geoData = c('sovereignt', 'geometry'),
rtData = c('region','date','type','median','lower_90','upper_90','lower_50','upper_50'),
obsCasesData = c('region','date','confirm')
)
if (!is.null(geoData)){
if (!check_geoData_columns(geoData, expected_columns[['geoData']])){stop("geoData missing required columns. geoData must contain: ", paste(expected_columns[['geoData']], collapse = ' '))}
}
if (!check_obsCasesData_columns(rtData, expected_columns[['obsCasesData']])){stop("obsCasesData missing required columns. obsCasesData must contain: ", paste(expected_columns[['obsCasesData']], collapse = ' '))}
if (!check_rtData_columns(rtData, expected_columns[['rtData']])){stop("rtData missing required columns. rtData, casesInfectionData, casesReportData must contain: ", paste(expected_columns[['rtData']], collapse = ' '))}
return(TRUE)
}
check_rtData_structure <- function(rtData, expected_names){
agreement <- c()
for (source in names(rtData)){
agreement <- append(agreement, identical(names(rtData[[source]]), expected_names))
}
return(sum(agreement) == length(agreement))
}
check_geoData_columns <- function(geoData, expected_columns){
return(length(setdiff(expected_columns, colnames(geoData))) == 0)
}
check_obsCasesData_columns <- function(rtData, expected_columns){
agreement <- c()
for (source in names(rtData)){
if (!is.null(rtData[[source]][['obsCasesData']])){
agreement <- append(agreement, length(setdiff(expected_columns, colnames(rtData[[source]][['obsCasesData']]))) == 0)
}
}
return(sum(agreement) == length(agreement))
}
check_rtData_columns <- function(rtData, expected_columns){
agreement <- c()
for (source in names(rtData)){
if (!is.null(rtData[[source]][['rtData']])){
agreement <- append(agreement, length(setdiff(expected_columns, colnames(rtData[[source]][['rtData']]))) == 0)
}
if (!is.null(rtData[[source]][['casesInfectionData']])){
agreement <- append(agreement, length(setdiff(expected_columns, colnames(rtData[[source]][['casesInfectionData']]))) == 0)
}
if (!is.null(rtData[[source]][['casesReportData']])){
agreement <- append(agreement, length(setdiff(expected_columns, colnames(rtData[[source]][['casesReportData']]))) == 0)
}
}
return(sum(agreement) == length(agreement))
}
check_geoData_names <- function(geoData, rtData){
  # use the first non-NULL estimate data frame (rtData, casesInfectionData or
  # casesReportData) from the first source as a sample of region names
  rtSample <- rtData[[1]][[which(unlist(sapply(rtData[[1]], function(x){return(!is.null(x))})[2:4]))[1] + 1]]
name_diff <- setdiff(rtSample$region, geoData$sovereignt)
name_warning_geoData(name_diff)
}
name_warning_geoData <- function(name_diff){
if (length(name_diff) > 0 & length(name_diff) <= 5){
warning('The following names are present in the estimates but not in the GeoData: ', paste(name_diff, collapse = ', '), '.')
} else if (length(name_diff) > 5) {
warning('The following names are present in the estimates but not in the GeoData: ', paste(name_diff[1:5], collapse = ', '), ' ... and ', length(name_diff) - 5, ' more.')
}
}
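# Minimal passing example for the checks above (toy one-region inputs):
# est <- data.frame(region = "A", date = Sys.Date(), type = "estimate",
#                   median = 1, lower_90 = 0.8, upper_90 = 1.2,
#                   lower_50 = 0.9, upper_50 = 1.1)
# obs <- data.frame(region = "A", date = Sys.Date(), confirm = 10)
# rt <- list(source1 = list(summaryData = NULL, rtData = est,
#                           casesInfectionData = NULL, casesReportData = NULL,
#                           obsCasesData = obs))
# check_input_data(arg_types = list(rtData = "list"), rtData = rt)  # TRUE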
|
cdf.fitMOSgev0 <-
function(fit, ensembleData, values, dates = NULL, randomizeATzero = FALSE, ...)
{
gini.md <- function(x,na.rm=FALSE) {
if(na.rm & any(is.na(x))) x <- x[!is.na(x)]
n <-length(x)
return(4*sum((1:n)*sort(x,na.last=TRUE))/(n^2)-2*mean(x)*(n+1)/n)
}
M <- matchEnsembleMembers(fit,ensembleData)
nForecasts <- ensembleSize(ensembleData)
if (!all(M == 1:nForecasts)) ensembleData <- ensembleData[,M]
M <- apply(ensembleForecasts(ensembleData), 1, function(z) all(is.na(z)))
ensembleData <- ensembleData[!M,]
nObs <- nrow(ensembleData)
if (!is.null(dates)) warning("dates ignored")
CDF <- matrix(NA, nObs, length(values))
dimnames(CDF) <- list(ensembleObsLabels(ensembleData),as.character(values))
ensembleData <- ensembleForecasts(ensembleData)
x <- c(fit$a,fit$B)
A <- cbind(rep(1,nObs),ensembleData)
SHAPE <- fit$q
S <- fit$s
S.sq <- apply(ensembleData,1,gini.md, na.rm = TRUE)
MEAN <- A%*%x + S*rowMeans(ensembleData==0, na.rm = TRUE)
SCALE <- rep(fit$c,nObs) + rep(fit$d,nObs)*S.sq
LOC <- MEAN - SCALE*(gamma(1-SHAPE)-1)/SHAPE
for (i in 1:length(values)){
if (randomizeATzero & (values[i]==0)){
cdfval <- pgev(0, loc=LOC, scale=SCALE, shape=SHAPE)
CDF[,i] <- runif(nObs,0,cdfval)
}
else {
CDF[,i] <- pgev(values[i], loc=LOC, scale=SCALE, shape=SHAPE)
}
}
CDF
}
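# Usage sketch (assumes ensembleMOS-style objects; fitMOSgev0()/controlMOSgev0()
# and ensembleData follow that package's conventions and are not defined here):
# fit <- fitMOSgev0(ensembleData, control = controlMOSgev0())
# cdf.fitMOSgev0(fit, ensembleData, values = c(0, 1, 5), randomizeATzero = TRUE)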
|
bridgeHand <- function(handNumber = "auto", seat = FALSE, createGraphic = TRUE, LTC = "original", ...) {
if (handNumber != "auto" & !is.numeric(handNumber)) {
stop("Only numeric seeds allowed for handNumbers")
}
  if (length(handNumber) > 1) {
    return(lapply(handNumber, bridgeHand, seat = seat, createGraphic = createGraphic, LTC = LTC, ...))
  }
args <- as.list(list(...))
if ("HCValues" %in% names(args)) {
HCValues <- args$HCValues
stopifnot(length(HCValues) == 5, all(is.numeric(HCValues)))
} else {
HCValues <- c(4, 3, 2, 1, 0)
}
if ("shapeValues" %in% names(args)) {
    shapeValues <- args$shapeValues
stopifnot(length(shapeValues) == 8, all(is.numeric(shapeValues)))
} else {
shapeValues <- c(3, 2, 1, 0, 0, 1, 2, 3)
}
if ("LTC" != FALSE) {
LTCSchema <- LTC
} else {
LTCSchema <- FALSE
}
if ("wackyFrom" %in% names(args)) {
wackyFrom <- args$wackyFrom
stopifnot(length(wackyFrom) == 1, all(is.numeric(wackyFrom)))
} else {
wackyFrom <- 1
}
if ("wackyTo" %in% names(args)) {
wackyTo <- args$wackyTo
stopifnot(length(wackyTo) == 1, all(is.numeric(wackyTo)))
} else {
wackyTo <- 1
}
suits <- c("S", "H", "D", "C")
compassPoints <- c("N", "E", "S", "W")
if (handNumber != "auto") {
handNo <- handNumber
} else {
handNo <- round(runif(1) * 10000000, 0)
}
set.seed(handNo)
vuln <- c("None", "NS", "EW", "Both")[sample(1:4, 1)]
if (seat != FALSE) {
dealer <- "S"
} else {
dealer <- compassPoints[handNo %% 4 + 1]
}
pack <- expand.grid(rank = c("A", 2:9, "T", "J", "Q", "K"), suit = suits) %>%
as_tibble(.name_repair = "minimal") %>%
mutate(card = paste(suit, rank, sep = "-"))
for (i in 1:4) {
temp <- sample(pack$card, 13,
replace = FALSE,
prob = rep(seq(from = wackyFrom, to = wackyTo, length.out = 13), 5 - i)
) %>%
as_tibble(.name_repair = "minimal") %>%
separate(value, sep = "-", into = c("suit", "rank")) %>%
mutate(
suit = factor(suit, levels = c("S", "H", "D", "C")),
rank = factor(rank, levels = c("A", "K", "Q", "J", "T", 9:2, " "))
) %>%
arrange(suit, rank) %>%
unite("card", sep = "-")
colnames(temp) <- compassPoints[i]
assign(glue::glue("hand{i}"), temp)
pack <- pack %>%
filter(!card %in% unname(unlist(temp)))
}
pack <- hand1 %>%
cbind(hand2) %>%
cbind(hand3) %>%
cbind(hand4)
for (j in compassPoints) {
temp_hand <- pack[j] %>%
bind_cols(order = 1:13, .) %>%
separate(!!j, sep = "-", into = c("suit", "rank")) %>%
mutate(suit = factor(suit, levels = c("S", "H", "D", "C")), rank = factor(rank, levels = c("A", "K", "Q", "J", "T", 9:2, " ", "10"))) %>%
arrange(suit, rank) %>%
pivot_wider(names_from = "suit", values_from = "rank") %>%
select(-order)
if (all(!colnames(temp_hand) %in% "S")) {
temp_hand <- cbind(temp_hand, S = c("Void", rep(NA, 12)))
}
if (all(!colnames(temp_hand) %in% "H")) {
temp_hand <- cbind(temp_hand, H = c("Void", rep(NA, 12)))
}
if (all(!colnames(temp_hand) %in% "D")) {
temp_hand <- cbind(temp_hand, D = c("Void", rep(NA, 12)))
}
if (all(!colnames(temp_hand) %in% "C")) {
temp_hand <- cbind(temp_hand, C = c("Void", rep(NA, 12)))
}
temp_hand <- temp_hand %>%
select(S, H, D, C)
for (i in suits) {
temp_suit <- na.omit(temp_hand[i])
while (nrow(temp_suit) < 13) {
temp_suit <- rbind(temp_suit, NA)
}
temp_hand[i] <- temp_suit
}
temp_hand <- temp_hand %>%
filter_all(any_vars(!is.na(.)))
temp_hand <- temp_hand %>%
replace(is.na(.), " ")
assign(glue::glue("hand{j}"), temp_hand)
}
if (seat != FALSE) {
otherHands <- setdiff(compassPoints, seat)
hand_temp_S <- get(glue::glue("hand{seat}"))
hand_temp_W <- get(glue::glue("hand{otherHands[1]}"))
hand_temp_N <- get(glue::glue("hand{otherHands[2]}"))
hand_temp_E <- get(glue::glue("hand{otherHands[3]}"))
handS <- hand_temp_S
handW <- hand_temp_W
handN <- hand_temp_N
handE <- hand_temp_E
}
names(HCValues) <- c("A", "K", "Q", "J", "10")
points <- tibble(Hand = compassPoints, HC = 0L, Shape = 0L, LTC = 0L)
for (i in compassPoints) {
temp <- get(glue::glue("hand{i}")) %>%
rowid_to_column() %>%
pivot_longer(-rowid) %>%
filter(value != " ") %>%
select(value) %>%
table() %>%
as_tibble(.name_repair = "minimal")
points[points$Hand == i, "HC"] <- round(sum(
unname(unlist(temp[temp$. == "A", "n"])) * HCValues[["A"]],
unname(unlist(temp[temp$. == "K", "n"])) * HCValues[["K"]],
unname(unlist(temp[temp$. == "Q", "n"])) * HCValues[["Q"]],
unname(unlist(temp[temp$. == "J", "n"])) * HCValues[["J"]],
unname(unlist(temp[temp$. == "T", "n"])) * HCValues[["10"]]
), 0)
}
for (i in compassPoints) {
hand_shape <- get(glue::glue("hand{i}")) %>%
rowid_to_column() %>%
pivot_longer(-rowid) %>%
filter(value != " ") %>%
group_by(name) %>%
summarise(shape = max(rowid), .groups = "drop") %>%
ungroup() %>%
select(shape) %>%
unname() %>%
unlist()
temp_points <-
sum(shapeValues[2] * (hand_shape == 1)) +
sum(shapeValues[3] * (hand_shape == 2)) +
sum(shapeValues[4] * (hand_shape == 3)) +
sum(shapeValues[5] * (hand_shape == 4)) +
sum(shapeValues[6] * (hand_shape == 5)) +
sum(shapeValues[7] * (hand_shape == 6)) +
sum(shapeValues[8] * (hand_shape == 7))
temp_points <- temp_points + sum(get(glue::glue("hand{i}"))[1, ] == "Void") * shapeValues[1]
points[points$Hand == i, "Shape"] <- temp_points
}
points <- points %>%
rowwise() %>%
mutate(Total = sum(HC + Shape)) %>%
relocate(LTC, .after = Total)
if (LTCSchema != FALSE) {
for (i in compassPoints) {
current_hand <- get(glue::glue("hand{i}")) %>%
slice(1:3) %>%
mutate(across(.cols = everything(), as.character)) %>%
mutate(across(.cols = everything(), ~ ifelse(.x %in% c("A", "K", "Q", " ", "Void"), .x, "x")))
ltc <- ifelse(any(stringr::str_detect(unname(unlist(current_hand)), "A")), 0, 1)
for (j in suits) {
suit_shape <- select(current_hand, all_of(j)) %>%
unname() %>%
unlist() %>%
glue::glue_collapse()
if (LTCSchema == "original") {
temp_ltc <- 0 +
stringr::str_count(suit_shape, "Void|A |AK |AKQ") * 0 +
stringr::str_count(suit_shape, "AQ |Ax |AKx|AQx|K |KQ |Kx |KQx|Q |x ") * 1 +
stringr::str_count(suit_shape, "Axx|Kxx|Qx |Qxx|xx ") * 2 +
stringr::str_count(suit_shape, "xxx") * 3
} else if (LTCSchema == "new") {
temp_ltc <- 0 +
stringr::str_count(suit_shape, "Void|A |AK |AKQ") * 0 +
stringr::str_count(suit_shape, "AKx") * 0.5 +
stringr::str_count(suit_shape, "AQx|Ax |AQ ") * 1 +
stringr::str_count(suit_shape, "K |Q |x |Axx|KQx|KQ |Kx ") * 1.5 +
stringr::str_count(suit_shape, "Kxx") * 2 +
stringr::str_count(suit_shape, "Qx |Qxx|xx ") * 2.5 +
stringr::str_count(suit_shape, "xxx") * 3
}
ltc <- ltc + ceiling(temp_ltc)
}
points[points$Hand == i, "LTC"] <- ltc
}
  } else {
    points$LTC <- NA
  }
  if (LTCSchema != FALSE) {
    points <- points %>%
      rename_with(~ glue::glue("LTC ({stringr::str_to_title(LTCSchema)})"), LTC)
  }
if (createGraphic) {
hand_graphic <- createGraphic(handNo, handN, handE, handS, handW, dealer, vuln, points)
} else {
hand_graphic <- "Not requested"
}
handShapes <- tibble(
N = colSums(handN != " "),
E = colSums(handE != " "),
S = colSums(handS != " "),
W = colSums(handW != " ")
)
invisible(list(
id = handNo,
dealer = dealer,
graphic = hand_graphic,
handPoints = points[, c("Hand", "HC")],
handShapes = handShapes,
vuln = vuln
))
}
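# Usage sketch (seed and options are arbitrary; the graphic is skipped):
# hand <- bridgeHand(handNumber = 42, createGraphic = FALSE, LTC = "new")
# hand$handPoints   # high-card points per seat
# hand$handShapes   # suit lengths per seat
# hand$dealer; hand$vuln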
|
schernoff <- function(data, xvar=character(0), ...) {
main <- paste(deparse(substitute(data), 500), collapse = "\n")
if (is.data.frame(data)) data <- as.matrix(data[,sapply(data, is.numeric)])
stopifnot("matrix" %in% class(data))
if (is.null(colnames(data))) colnames(data) <- sprintf("%s[,%.0f]", main, 1:ncol(data))
dvar <- dimnames(data)[[2]]
if (length(xvar)) {
xvar <- xvar[xvar %in% dvar]
dvar <- setdiff(dvar, xvar)
} else {
xvar <- order_andrews(data)
dvar <- NULL
}
choices <- as.list(1:10)
names(choices) <- sprintf("%.0fx%.0f", 1:10, 1:10)
maxmin <- rbind(apply(data, 2, max), apply(data, 2, min))
hc <- hclust(dist(data), method="ward.D2")
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
shinyApp(
ui = dashboardPage(
dashboardHeader(title="Chernoff faces"),
dashboardSidebar(
tags$style( HTML(".black-text .rank-list-item { color:
bucket_list(
header = NULL,
group_name = "bucket_var_group",
orientation = "vertical",
class = c("default-sortable", "black-text"),
add_rank_list(
text = "Variable(s)",
labels = dvar,
input_id = "dvar"
),
add_rank_list(
text = "Selected variable(s)",
labels = xvar,
input_id = "xvar"
)
)
),
dashboardBody(
fluidRow(
box(plotOutput("plot")),
box(selectInput("size", "Size", choices=choices, selected=3),
sliderInput("page", "Page", value=1, min=1, max=ceiling(nrow(data)/9), step=1)
),
box(verbatimTextOutput("command"), title="Basic R code")
))
),
server = function(input, output, session) {
rv <- reactiveValues(size=c(3,3), n=1)
observeEvent (input$size, {
page <- as.numeric(isolate(input$page))
size <- as.numeric(input$size)
n <- 1
max <- ceiling(nrow(data)/(n*size*size))
if (page>max) page <- max
updateSliderInput(session, "page", value = page, max = max)
rv$size <- c(size, size)
})
observeEvent (input$n, {
page <- as.numeric(isolate(input$page))
size <- as.numeric(isolate(input$size))
n <- 1
max <- ceiling(nrow(data)/(n*size*size))
if (page>max) page <- max
updateSliderInput(session, "page", value = page, max = max)
rv$n <- n
})
output$plot <- renderPlot({
if ((length(input$xvar)>1)) {
par(mfrow=rv$size, mar=c(0,0,1,0))
first <- (as.numeric(input$page)-1)*prod(rv$size)+1
last <- min(first+prod(rv$size)-1, nrow(data))
args <- list(...)
args$xy <- data[hc$order[first:last],input$xvar]
args$scale <- TRUE
args$labels <- as.character(hc$order[first:last])
args$nr <- rv$size[1]
args$nc <- rv$size[2]
if (is.null(args$main)) args$main <- main
do.call("PlotFaces", args)
}
})
output$command <- renderText({
txt <- "At least two variables are required for a plot!"
        if (length(input$xvar)>1) {
          txt <- c("# Basic R code for the current variable selection\n",  # placeholder header string
                   paste0("x <- c(", paste0('"', input$xvar, '"', collapse=", "), ")\n"),
                   sprintf("PlotFaces(%s[,x])\n", main))
}
txt
})
}
)
}
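# Usage sketch (launches a Shiny dashboard; requires shiny, shinydashboard and
# sortable, plus a PlotFaces() implementation on the search path as assumed above):
# if (interactive()) schernoff(iris)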
|
context("Creation RDML object from different file types or user input")
PATH <- path.package("RDML")
test_that("RDML can be created from BioRad .rdml", {
filename <- paste0(PATH, "/extdata/", "BioRad_qPCR_melt.rdml")
rdml <- RDML$new(filename)
expect_class(rdml, "RDML")
})
test_that("RDML can be created from StepOne .rdml", {
filename <- paste0(PATH, "/extdata/", "stepone_std.rdml")
rdml <- RDML$new(filename)
expect_class(rdml, "RDML")
})
test_that("RDML can be created from LightCycler .rdml", {
filename <- paste0(PATH, "/extdata/", "lc96_bACTXY.rdml")
rdml <- RDML$new(filename)
expect_class(rdml, "RDML")
})
|
hnlmix <- function(y=NULL, distribution="normal", mixture="normal",
random=NULL, nest=NULL, mu=NULL, shape=NULL, linear=NULL,
pmu=NULL, pshape=NULL, pmix=NULL, prandom=NULL, delta=1, common=FALSE,
envir=parent.frame(), print.level=0, typsize=abs(p),
ndigit=10, gradtol=0.00001, stepmax=10*sqrt(p%*%p), steptol=0.00001,
iterlim=100, fscale=1, eps=1.0e-4, points=5){
call <- sys.call()
distribution <- match.arg(distribution,c("binomial","beta binomial",
"double binomial","mult binomial","Poisson","negative binomial",
"double Poisson","mult Poisson","gamma count","Consul","logarithmic",
"geometric","normal","inverse Gauss","logistic","exponential","gamma",
"Weibull","extreme value","Pareto","Cauchy","Laplace","Levy","beta",
"simplex","two-sided power"))
shp <- distribution!="binomial"&&distribution!="Poisson"&&
distribution!="exponential"&&distribution!="geometric"&&
distribution!="logarithmic"
bindata <- distribution=="binomial"||distribution=="double binomial"||
distribution=="beta binomial"||distribution=="mult binomial"
mixture <- match.arg(mixture,c("normal","logistic","gamma","inverse gamma",
"inverse Gauss","Weibull","beta"))
mean0 <- if(mixture=="normal"||mixture=="logistic"||mixture=="Cauchy"||
mixture=="Laplace")0 else if(mixture=="beta")2 else 1
fixed <- !is.null(pmix)
if(fixed&&pmix<0)stop("pmix must be positive")
if(is.null(random))stop("name of random parameter must be supplied")
if(!is.character(random))stop("random must be the name of a parameter")
if(length(random)>1)stop("only one random parameter allowed")
if(common&&!is.null(linear))
stop("linear cannot be used with common parameters")
npl <- length(pmu)
nps <- length(pshape)
np1 <- npl+nps
respenv <- exists(deparse(substitute(y)),envir=parent.frame())&&
inherits(y,"repeated")&&!inherits(envir,"repeated")
if(respenv){
if(dim(y$response$y)[2]>1)
stop("hnlmix only handles univariate responses")
if(!is.null(y$NAs)&&any(y$NAs))
stop("hnlmix does not handle data with NAs")}
envname <- if(respenv)deparse(substitute(y))
else if(!is.null(class(envir)))deparse(substitute(envir))
else NULL
if(!inherits(mu,"formula"))stop("mu must be a formula")
if(shp&&!is.null(shape)&&!inherits(shape,"formula"))
stop("shape must be a formula")
lin1 <- lin2 <- NULL
if(is.list(linear)){
lin1 <- linear[[1]]
lin2 <- linear[[2]]}
else lin1 <- linear
if(inherits(lin1,"formula")&&is.null(mu)){
mu <- lin1
lin1 <- NULL}
if(inherits(lin2,"formula")&&is.null(shape)){
shape <- lin2
lin2 <- NULL}
if(inherits(lin1,"formula")){
lin1model <- if(respenv){
if(!is.null(attr(finterp(lin1,.envir=y,.name=envname),"parameters")))
attr(finterp(lin1,.envir=y,.name=envname),"model")}
else {if(!is.null(attr(finterp(lin1,.envir=envir,.name=envname),"parameters")))
attr(finterp(lin1,.envir=envir,.name=envname),"model")}}
else lin1model <- NULL
if(inherits(lin2,"formula")){
lin2model <- if(respenv){
if(!is.null(attr(finterp(lin2,.envir=y,.name=envname),"parameters")))
attr(finterp(lin2,.envir=y,.name=envname),"model")}
else {if(!is.null(attr(finterp(lin2,.envir=envir,.name=envname),"parameters")))
attr(finterp(lin2,.envir=envir,.name=envname),"model")}}
else lin2model <- NULL
lin1a <- lin2a <- mu2 <- sh2 <- NULL
if(respenv||inherits(envir,"repeated")||inherits(envir,"tccov")||inherits(envir,"tvcov")){
if(is.null(envname))envname <- deparse(substitute(envir))
if(inherits(mu,"formula")){
mu2 <- if(respenv)finterp(mu,.envir=y,.name=envname,.args=random)
else finterp(mu,.envir=envir,.name=envname,.args=random)}
if(inherits(shape,"formula")){
sh2 <- if(respenv)finterp(shape,.envir=y,.name=envname)
else finterp(shape,.envir=envir,.name=envname)}
if(inherits(lin1,"formula")){
lin1a <- if(respenv)finterp(lin1,.envir=y,.name=envname)
else finterp(lin1,.envir=envir,.name=envname)}
if(inherits(lin2,"formula")){
lin2a <- if(respenv)finterp(lin2,.envir=y,.name=envname)
else finterp(lin2,.envir=envir,.name=envname)}
if(is.function(mu)){
if(is.null(attr(mu,"model"))){
tmp <- parse(text=deparse(mu)[-1])
mu <- if(respenv)fnenvir(mu,.envir=y,.name=envname)
else fnenvir(mu,.envir=envir,.name=envname)
mu2 <- mu
attr(mu2,"model") <- tmp}
else mu2 <- mu}
if(is.function(shape)){
if(is.null(attr(shape,"model"))){
tmp <- parse(text=deparse(shape)[-1])
shape <- if(respenv)fnenvir(shape,.envir=y,.name=envname)
else fnenvir(shape,.envir=envir,.name=envname)
sh2 <- shape
attr(sh2,"model") <- tmp}
else sh2 <- shape}}
else {
if(is.function(mu)&&is.null(attr(mu,"model")))mu <- fnenvir(mu)
if(is.function(shape)&&is.null(attr(shape,"model")))
shape <- fnenvir(shape)}
if(inherits(lin1,"formula")){
tmp <- attributes(if(respenv)finterp(lin1,.envir=y,.name=envname)
else finterp(lin1,.envir=envir,.name=envname))
lf1 <- length(tmp$parameters)
if(!is.character(tmp$model))stop("linear must be a W&R formula")
else if(length(tmp$model)==1)stop("linear must contain covariates")
rm(tmp)}
else lf1 <- 0
if(inherits(lin2,"formula")){
tmp <- attributes(if(respenv)finterp(lin2,.envir=y,.name=envname)
else finterp(lin2,.envir=envir,.name=envname))
lf2 <- length(tmp$parameters)
if(!is.character(tmp$model))stop("linear must be a W&R formula")
else if(length(tmp$model)==1)stop("linear must contain covariates")
rm(tmp)}
else lf2 <- 0
if(lf1>0)random <- c(random,"linear")
mu3 <- if(respenv)finterp(mu,.envir=y,.name=envname,.args=random)
else finterp(mu,.envir=envir,.name=envname,.args=random)
npt1 <- length(attr(mu3,"parameters"))
if(is.character(attr(mu3,"model")))stop("mu cannot be a W&R formula")
if(npl!=npt1&&!common&&lf1==0){
cat("\nParameters are ")
cat(attr(mu3,"parameters"),"\n")
stop(paste("pmu should have",npt1,"estimates"))}
if(is.list(pmu)){
if(!is.null(names(pmu))){
o <- match(attr(mu3,"parameters"),names(pmu))
pmu <- unlist(pmu)[o]
if(sum(!is.na(o))!=length(pmu))stop("invalid estimates for mu - probably wrong names")}
else pmu <- unlist(pmu)}
if(lf1>0){
dm1 <- if(respenv)wr(lin1,data=y)$design
else wr(lin1,data=envir)$design
if(is.null(mu2))mu2 <- mu3
mu1 <- function(p,random)mu3(p,random,dm1%*%p[(npt1+1):(npt1+lf1)])}
else {
mu1 <- mu3
rm(mu3)}
nlp <- npt1+lf1
if(!common&&nlp!=npl)stop(paste("pmu should have",nlp,"initial estimates"))
npl1 <- if(common&&!inherits(shape,"formula")) 1 else nlp+1
sh3 <- NULL
if(inherits(shape,"formula")){
old <- if(common)mu1 else NULL
mufn <- if(lf2>0) "linear" else NULL
sh3 <- if(respenv)finterp(shape,.envir=y,.start=npl1,.name=envname,.old=old,.args=mufn)
else finterp(shape,.envir=envir,.start=npl1,.name=envname,.old=old,.args=mufn)
npt2 <- length(attr(sh3,"parameters"))
if(is.character(attr(sh3,"model")))stop("shape cannot be a W&R formula")
if(nps!=npt2&&!common&&lf2==0){
cat("\nParameters are ")
cat(attr(sh3,"parameters"),"\n")
stop(paste("pshape should have",npt2,"estimates"))}
if(is.list(pshape)){
if(!is.null(names(pshape))){
o <- match(attr(sh3,"parameters"),names(pshape))
pshape <- unlist(pshape)[o]
if(sum(!is.na(o))!=length(pshape))stop("invalid estimates for shape - probably wrong names")}
else pshape <- unlist(pshape)}}
else if(is.null(shape)&&shp){
sh3 <- function(p) p[npl1]*rep(1,n)
sh2 <- fnenvir(function(p) p[1]*rep(1,n))
npt2 <- 1}
if(lf2>0){
dm2 <- if(respenv)wr(lin2,data=y)$design
else wr(lin2,data=envir)$design
if(is.null(sh2))sh2 <- sh3
sh1 <- function(p) sh3(p,dm2%*%p[(npl1+lf2-1):np])}
else {
sh1 <- sh3
rm(sh3)}
if(shp){
nlp <- npt2+lf2
if(!common&&nlp!=nps)stop(paste("pshape should have",nlp,"initial estimates"))}
if(common){
nlp <- length(unique(c(attr(mu1,"parameters"),attr(sh1,"parameters"))))
if(nlp!=npl)stop(paste("with a common parameter model, pmu should contain",nlp,"estimates"))}
type <- "unknown"
if(respenv){
if(inherits(envir,"repeated")&&(length(nobs(y))!=length(nobs(envir))||any(nobs(y)!=nobs(envir))))
stop("y and envir objects are incompatible")
if(!is.null(y$response$delta))
delta <- as.vector(y$response$delta)
nest <- covind(y)
type <- y$response$type
envir <- y$response
y <- response(y)}
else if(inherits(envir,"repeated")){
if(!is.null(envir$NAs)&&any(envir$NAs))
stop("hnlmix does not handle data with NAs")
cn <- deparse(substitute(y))
if(length(grep("\"",cn))>0)cn <- y
if(length(cn)>1)stop("only one y variable allowed")
col <- match(cn,colnames(envir$response$y))
if(is.na(col))stop(paste("response variable",cn,"not found"))
nest <- covind(envir)
type <- envir$response$type[col]
y <- envir$response$y[,col]
if(!is.null(envir$response$n)&&!all(is.na(envir$response$n[,col])))
y <- cbind(y,envir$response$n[,col]-y)
else if(!is.null(envir$response$censor)&&!all(is.na(envir$response$censor[,col])))
y <- cbind(y,envir$response$censor[,col])
if(!is.null(envir$response$delta))
delta <- as.vector(envir$response$delta[,col])
envir <- envir$response
envir$y <- envir$y[,col,drop=FALSE]
if(!is.null(envir$n))envir$n <- envir$n[,col,drop=FALSE]
else if(!is.null(envir$censor))envir$censor <- envir$censor[,col,drop=FALSE]
if(!is.null(envir$delta))envir$delta <- envir$delta[,col,drop=FALSE]}
else if(inherits(y,"response")){
if(dim(y$y)[2]>1)stop("hnlmix only handles univariate responses")
if(!is.null(y$delta))delta <- as.vector(y$delta)
nest <- covind(y)
type <- y$type
envir <- y
y <- response(y)}
unest <- unique(nest)
nnest <- length(unest)
unest <- unest[-nnest]
if(is.null(nest)||nnest==1)stop("appropriate nest indicator required")
if(is.null(prandom))
stop(paste("one or",nnest,"values must be supplied for prandom"))
if(length(prandom)==1)prandom <- rep(prandom,nnest)
else if(length(prandom)!=nnest)
stop(paste(nnest,"values must be supplied for prandom"))
if(mean0==1){
if(any(prandom<=0))stop("prandom must all be positive")
prandom <- log(prandom)}
else if(mean0==2){
if(any(prandom<=0|prandom>=1))stop("prandom must all be in (0,1)")
prandom <- log(prandom/(1-prandom))}
p <- c(pmu,pshape,prandom[-nnest])
np <- length(p)
if(any(is.na(y)))stop("NAs in y - use rmna")
if(bindata){
if(type!="unknown"&&type!="nominal")stop("nominal data required")
if((is.vector(y)||(length(dim(y))==2&&dim(y)[2]==1))&&all(y==0|y==1))
y <- cbind(y,1-y)
if(length(dim(y))!=2||dim(y)[2]!=2)
stop(paste("Two column matrix required for response: successes and failures"))
if(any(y<0))stop("All response values must be positive")
n <- dim(y)[1]
nn <- y[,1]+y[,2]
censor <- FALSE}
else {
censor <- length(dim(y))==2&&dim(y)[2]==2
if(censor&&all(y[,2]==1)){
y <- y[,1]
censor <- FALSE}
if(!censor)if(!is.vector(y,mode="numeric"))stop("y must be a vector")
if(censor&&(distribution=="beta"||distribution=="simplex"||
distribution=="two-sided power"||distribution=="gamma count"||
distribution=="gamma count"||distribution=="logarithmic"))
stop("Censoring not allowed for this distribution")
if(distribution=="double Poisson"||distribution=="mult Poisson")
my <- if(censor)3*max(y[,1]) else 3*max(y)
n <- if(length(dim(y))==2)dim(y)[1] else length(y)}
if(distribution=="inverse Gauss"||distribution=="exponential"||
distribution=="gamma"||distribution=="Weibull"||
distribution=="extreme value"){
if(type!="unknown"&&type!="duration"&&type!="continuous")
stop("duration data required")
if((censor&&any(y[,1]<=0))||(!censor&&any(y<=0)))
stop("All response values must be > 0")}
else if(distribution=="Poisson"||distribution=="negative binomial"||
distribution=="gamma count"||distribution=="double Poisson"||
distribution=="mult Poisson"){
if(type!="unknown"&&type!="discrete")stop("discrete data required")
if(any(y<0))stop("All response values must be >= 0")}
else if(distribution=="logarithmic"){
if(type!="unknown"&&type!="discrete")stop("discrete data required")
if(any(y<1))stop("All response values must be integers > 0")}
else if(distribution=="beta"||distribution=="simplex"||
distribution=="two-sided power"){
if(type!="unknown"&&type!="continuous")stop("continuous data required")
if(any(y<=0)||any(y>=1))
stop("All response values must lie between 0 and 1")}
else if(!bindata&&type!="unknown"&&type!="continuous"&&type!="duration")
stop("continuous data required")
if(censor){
y[,2] <- as.integer(y[,2])
if(any(y[,2]!=-1&y[,2]!=0&y[,2]!=1))
stop("Censor indicator must be -1s, 0s, and 1s")
cc <- ifelse(y[,2]==1,1,0)
rc <- ifelse(y[,2]==0,1,ifelse(y[,2]==-1,-1,0))
lc <- ifelse(y[,2]==-1,0,1)}
else cc <- 1
wt <- rep(1,n)
if(length(delta)==1)delta <- rep(delta,n)
else if(length(delta)!=n)
stop("delta must be the same length as the other variables")
delta2 <- mean(delta)
if(any(is.na(mu1(pmu,0))))stop("The location model returns NAs: probably invalid initial values")
if(distribution=="Levy"&&any(y<=mu1(p)))
stop("location parameter must be strictly less than corresponding observation")
if(shp&&any(is.na(sh1(p))))
stop("The shape model returns NAs: probably invalid initial values")
if(distribution=="Pareto"&&exp(sh1(p))<=1)stop("shape parameters must be > 0")
if(!censor)fcn <- switch(distribution,
binomial=function(m,p) dbinom(y[,1],nn,m,TRUE),
"beta binomial"=function(m,p){
s <- exp(sh1(p))
t <- s*m
u <- s*(1-m)
lbeta(y[,1]+t,y[,2]+u)-lbeta(t,u)+lchoose(nn,y[,1])},
"double binomial"=function(m,p)
.C("ddb_c",as.integer(y[,1]),as.integer(nn),
as.double(m),as.double(exp(sh1(p))),
as.integer(n),as.double(wt),res=double(n),
PACKAGE="repeated")$res,
"mult binomial"=function(m,p)
.C("dmb_c",as.integer(y[,1]),as.integer(nn),
as.double(m),as.double(exp(sh1(p))),
as.integer(n),as.double(wt),res=double(n),
PACKAGE="repeated")$res,
Poisson=function(m,p)dpois(y,m,TRUE),
"negative binomial"=function(m,p)dnbinom(y,exp(sh1(p)),mu=m,log=TRUE),
"double Poisson"=function(m,p)
.C("ddp_c",as.integer(y),as.integer(my),as.double(m),
as.double(exp(sh1(p))),as.integer(n),as.double(wt),
res=double(n),
PACKAGE="repeated")$res,
"mult Poisson"=function(m,p)
.C("dmp_c",as.integer(y),as.integer(my),
as.double(m),as.double(exp(sh1(p))),
as.integer(n),as.double(wt),res=double(n),
PACKAGE="repeated")$res,
"gamma count"=function(m,p){
s <- exp(sh1(p))
u <- m*s
ifelse(y==0,pgamma(u,(y+1)*s,1,lower.tail=FALSE,log.p=TRUE),
log(pgamma(u,y*s+(y==0),1)-pgamma(u,(y+1)*s,1)))},
Consul=function(m,p){
s <- exp(sh1(p))
log(m)-(m+y*(s-1))/s+(y-1)*log(m+y*(s-1))-y*log(s)-
lgamma(y+1)},
logarithmic=function(m,p){
m <- 1/(1+exp(-m))
y*log(m)-log(y)-log(-log(1-m))},
geometric=function(m,p)y*log(m)-(y+1)*log(1+m),
normal=function(m,p)dnorm(y,m,exp(sh1(p)/2),TRUE),
"inverse Gauss"=function(m,p){
t <- sh1(p)
s <- exp(t)
-(t+(y-m)^2/(y*s*m^2)+log(2*pi*y^3))/2},
logistic=function(m,p)dlogis(y,m,exp(sh1(p))*sqrt(3)/pi,TRUE),
Cauchy=function(m,p)dcauchy(y,m,exp(sh1(p)/2),TRUE),
Laplace=function(m,p){
t <- sh1(p)
s <- exp(t)
-abs(y-m)/s-t-log(2)},
Pareto=function(m,p){
s <- exp(sh1(p))
t <- 1/(m*(s-1))
log(s*t)-(s+1)*log(1+y*t)},
exponential=function(m,p)dexp(y,1/m,TRUE),
gamma=function(m,p){
s <- exp(sh1(p))
dgamma(y,s,scale=m/s,log=TRUE)},
Weibull=function(m,p)dweibull(y,exp(sh1(p)),m,TRUE),
"extreme value"=function(m,p)y+dweibull(exp(y),exp(sh1(p)),m,TRUE),
beta=function(m,p){
s <- exp(sh1(p))
m <- m*s
s <- s-m
dbeta(y,m,s,log=TRUE)},
simplex=function(m,p){
t <- sh1(p)
s <- exp(t)
-(((y-m)/(m*(1-m)))^2/(y*(1-y)*s)+t+3*log(y*(1-y))+
log(2*pi))/2},
"two-sided power"=function(m,p){
t <- sh1(p)
s <- exp(t)
t+(s-1)*ifelse(y<m,log(y/m),log((1-y)/(1-m)))})
else fcn <- switch(distribution,
Poisson=function(m,p)cc*dpois(y[,1],m,TRUE)+log(lc-rc*ppois(y[,1],m)),
"negative binomial"=function(m,p){
s <- exp(sh1(p))
cc*dnbinom(y[,1],s,mu=m,log=TRUE)+log(lc-rc*pnbinom(y[,1],s,m))},
geometric=function(m,p)
cc*(y[,1]*log(m)-(y[,1]+1)*log(1+m))+
log(lc-rc*pgeom(y[,1],1/(1+m))),
normal=function(m,p){
s <- exp(sh1(p)/2)
cc*dnorm(y[,1],m,s,TRUE)+log(lc-rc*pnorm(y[,1],m,s))},
"inverse Gauss"=function(m,p){
s <- exp(sh1(p))
v <- sqrt(s*y[,1]/2)
-cc*(log(s)+(y[,1]-m)^2/(y[,1]*s*m^2)+log(2*pi*y[,1]^3))/2+
log(lc-rc*(pnorm((y[,1]/m-1)/v)+
exp(2/(m*s))*pnorm(-(y[,1]/m+1)/v)))},
logistic=function(m,p){
s <- exp(sh1(p))
cc*dlogis(y[,1],m,s*sqrt(3)/pi,TRUE)+
log(lc-rc*plogis(y[,1],m,s*sqrt(3)/pi))},
Cauchy=function(m,p){
s <- exp(sh1(p)/2)
cc*dcauchy(y[,1],m,s,TRUE)+log(lc-rc*pcauchy(y[,1],m,s))},
Laplace=function(m,p){
v <- sh1(p)
s <- exp(v)
u <- abs(y[,1]-m)/s
t <- exp(-u)/2
-cc*(u+v+log(2))+log(lc-rc*(ifelse(u<0,t,1-t)))},
Pareto=function(m,p){
s <- exp(sh1(p))
t <- 1/(m*(s-1))
cc*(log(s*t)-(s+1)*log(1+y[,1]*t))+
log(lc-rc*((1+y[,1]/(m*(s-1)))^(-s)))},
exponential=function(m,p)
cc*dexp(y[,1],1/m,TRUE)+log(lc-rc*pexp(y[,1],1/m)),
gamma=function(m,p){
s <- exp(sh1(p))
t <- m/s
cc*dgamma(y[,1],s,scale=t,log=TRUE)+
log(lc-rc*pgamma(y[,1],s,scale=t))},
Weibull=function(m,p){
s <- exp(sh1(p))
cc*dweibull(y[,1],s,m,TRUE)+log(lc-rc*pweibull(y[,1],s,m))},
"extreme value"=function(m,p){
s <- exp(sh1(p))
yy <- exp(y[,1])
cc*(y[,1]+dweibull(yy,s,m,TRUE))+log(lc-rc*pweibull(yy,s,m))})
mix <- switch(mixture,
normal=function(r,ss)dnorm(r,0,sqrt(ss),log=TRUE),
logistic=function(r,ss)dlogis(r,0,sqrt(3*ss)/pi,log=TRUE),
Cauchy=function(r,ss){
dcauchy(r,0,ss,log=TRUE)},
Laplace=function(r,ss){
-abs(r)/ss-log(ss)-log(2)},
gamma=function(r,ss)dgamma(r,1/ss,scale=ss,log=TRUE),
"inverse gamma"=function(r,ss)dgamma(1/r,1/ss,scale=ss,log=TRUE)/r^2,
"inverse Gauss"=function(r,ss)
-(log(ss)+(r-1)^2/(r*ss)+log(2*pi*r^3))/2,
Weibull=function(r,ss){
fn <- function(z)gamma(1+2/z)-gamma(1+1/z)^2-ss
s <- uniroot(fn,c(0.02,20))$root
dweibull(r,s,1,log=TRUE)},
beta=function(r,ss){
s <- 0.5/ss
dbeta(r,s,s,log=TRUE)})
like <- if(mean0==0)function(p){
r <- c(p[np1+unest],-sum(p[np1+unest]))
ss <- if(!fixed)sum(r^2)/nnest else pmix
m <- mu1(p,r[nest])
var0 <- if(censor)sum((y[,1]-m)^2)/n
else if(bindata)sum((y[,1]-m*nn)^2)/n
else sum((y-m)^2)/n
-sum(fcn(m,p))-sum(mix(r,var0+ss))}
else if(mean0==1) function(p){
r <- exp(p[np1+unest])
r <- c(r,nnest-sum(r))
ss <- if(!fixed)sum((r-1)^2)/nnest else pmix
m <- mu1(p,r[nest])
var0 <- if(censor)sum((y[,1]-m)^2)/n
else if(bindata)sum((y[,1]-m*nn)^2)/n
else sum((y-m)^2)/n
-sum(fcn(m,p))-sum(mix(r,var0+ss))}
else function(p){
r <- 1/(1+exp(-p[np1+unest]))
r <- c(r,0.5*nnest-sum(r))
ss <- if(!fixed)sum((r-0.5)^2)/nnest else pmix
m <- mu1(p,r[nest])
var0 <- if(censor)sum((y[,1]-m)^2)/n
else if(bindata)sum((y[,1]-m*nn)^2)/n
else sum((y-m)^2)/n
-sum(fcn(m,p))-sum(mix(r,var0+ss))}
tlike <- function(p){
if(mean0==0)r <- c(p[np1+unest],-sum(p[np1+unest]))
else if(mean0==1){
r <- exp(p[np1+unest])
r <- c(r,nnest-sum(r))}
else if(mean0==2){
r <- 1/(1+exp(-p[np1+unest]))
r <- c(r,0.5*nnest-sum(r))}
m <- mu1(p,r[nest])
-sum(fcn(m,p)+cc*log(delta))}
tmp <- like(p)
if(is.na(tmp)||abs(tmp)==Inf)
stop("Likelihood returns Inf or NA: invalid initial values, wrong model, or probabilities too small to calculate")
if(fscale==1)fscale <- tmp
z0 <- nlm(like,p=p,hessian=TRUE,print.level=print.level,typsize=typsize,
ndigit=ndigit,gradtol=gradtol,stepmax=stepmax,steptol=steptol,
iterlim=iterlim,fscale=fscale)
z0$minimum <- z0$minimum-sum(log(delta))
if(np==0)cov <- NULL
else if(np==1)cov <- 1/z0$hessian
else {
a <- if(any(is.na(z0$hessian))||any(abs(z0$hessian)==Inf))0
else qr(z0$hessian)$rank
if(a==np)cov <- solve(z0$hessian)
else cov <- matrix(NA,ncol=np,nrow=np)}
se <- sqrt(diag(cov))
maxlike <- sum(tlike(z0$estimate))
if(mean0==0){
r <- c(z0$estimate[np1+unest],-sum(z0$estimate[np1+unest]))
var <- sum(r^2)/nnest}
else if(mean0==1){
r <- exp(z0$estimate[np1+unest])
r <- c(r,nnest-sum(r))
if(mean0==1)var <- sum((r-1)^2)/nnest}
else if(mean0==2){
r <- 1/(1+exp(-z0$estimate[np1+unest]))
r <- c(r,0.5*nnest-sum(r))
var <- sum((r-0.5)^2)/nnest}
pred <- mu1(z0$estimate,if(mean0==0)0 else if(mean0==1)1 else 0.5)
if(bindata)pred <- nn*pred
rpred <- mu1(z0$estimate,r[nest])
if(bindata)rpred <- nn*rpred
var0 <- if(censor||bindata)sum((y[,1]-rpred)^2)/n else sum((y-rpred)^2)/n
if(!fixed)pmix <- switch(mixture,
normal=var,
logistic=sqrt(3*var)/pi,
gamma=1/var,
"inverse gamma"=1/var,
"inverse Gauss"=var,
Weibull={
fn <- function(z)gamma(1+2/z)-gamma(1+1/z)^2-var
uniroot(fn,c(0.02,20))$root},
beta=0.125/var-0.5)
if(!is.null(mu2))mu1 <- mu2
if(!is.null(sh2))sh1 <- sh2
if(!is.null(lin1a))lin1 <- lin1a
if(!is.null(lin2a))lin2 <- lin2a
z1 <- list(
call=call,
response=envir,
delta=delta,
distribution=distribution,
mixture=mixture,
mixvar=c(var0,var),
mu=mu1,
shape=sh1,
linear=list(lin1,lin2),
linmodel=list(lin1model,lin2model),
common=common,
maxlike=maxlike,
penalty=z0$minimum-maxlike,
pred=pred,
rpred=rpred,
aic=maxlike+np-1,
df=n-np+1,
coefficients=z0$estimate,
npl=npl,
npr=nnest-1,
nps=nps,
pmix=pmix,
fixed=fixed,
se=se,
cov=cov,
corr=cov/(se%o%se),
gradient=z0$gradient,
iterations=z0$iterations,
code=z0$code)
class(z1) <- c("hnlm","recursive")
return(z1)}
deviance.hnlm <- function(object, ...) 2*object$maxlike
fitted.hnlm <- function(object, recursive=TRUE, ...) if(recursive) object$rpred else object$pred
residuals.hnlm <- function(object, recursive=TRUE, ...)
if(recursive) object$response$y-object$rpred else object$response$y-object$pred
print.hnlm <- function(x, correlation=TRUE, ...) {
z<-x
sht <- z$nps>0||!is.null(z$shape)
npl <- z$npl
np1 <- z$npl+1
np1a <- z$npl+1
np2 <- z$npl+z$nps
np3 <- np2
np4 <- np3+1
np <- z$npl+z$nps+z$npr
mean0 <- if(z$mixture=="normal"||z$mixture=="logistic"||z$mixture=="Cauchy"||
z$mixture=="Laplace")0 else if(z$mixture=="beta")2 else 1
cat("\nCall:",deparse(z$call),sep="\n")
cat("\n")
if(z$code>2)cat("Warning: no convergence - error",z$code,"\n\n")
if(!is.null(z$dist))cat(z$dist,"distribution\n\n")
cat(z$mixture,"mixing distribution\n\n")
if(z$npl>0||!is.null(z$mu)){
cat("Location function:\n")
if(!is.null(attr(z$mu,"formula")))
cat(deparse(attr(z$mu,"formula")),sep="\n")
else if(!is.null(attr(z$mu,"model"))){
t <- deparse(attr(z$mu,"model"))
t[1] <- sub("expression\\(","",t[1])
t[length(t)] <- sub("\\)$","",t[length(t)])
cat(t,sep="\n")}
if(!is.null(z$linear[[1]])){
cat("Linear part:\n")
print(z$linear[[1]])}}
if(sht){
cat("\nLog shape function:\n")
if(!is.null(attr(z$shape,"formula")))
cat(deparse(attr(z$shape,"formula")),sep="\n")
else if(!is.null(attr(z$shape,"model"))){
t <- deparse(attr(z$shape,"model"))
t[1] <- sub("expression\\(","",t[1])
t[length(t)] <- sub("\\)$","",t[length(t)])
cat(t,sep="\n")}
if(!is.null(z$linear[[2]])){
cat("Linear part:\n")
print(z$linear[[2]])}
if(!is.null(z$family)){
cat("\n(Log) family function:\n")
if(!is.null(attr(z$family,"formula")))
cat(deparse(attr(z$family,"formula")),sep="\n")
else if(!is.null(attr(z$family,"model"))){
t <- deparse(attr(z$family,"model"))
t[1] <- sub("expression\\(","",t[1])
t[length(t)] <- sub("\\)$","",t[length(t)])
cat(t,sep="\n")}
if(!is.null(z$linear[[3]])){
cat("Linear part:\n")
print(z$linear[[3]])}}}
cat("\n-Log likelihood ",z$maxlike,"\n")
cat("Penalty ",z$penalty,"\n")
cat("Degrees of freedom",z$df,"\n")
cat("AIC ",z$aic,"\n")
cat("Iterations ",z$iterations,"\n\n")
if(npl>0){
if(z$common)cat("Common parameters:\n")
else cat("Location parameters:\n")
cname <- if(is.character(attr(z$mu,"model")))attr(z$mu,"model")
else if(length(grep("linear",attr(z$mu,"parameters")))>0)
attr(z$mu,"parameters")[grep("\\[",attr(z$mu,"parameters"))]
else attr(z$mu,"parameters")
if(!is.null(z$linmodel[[1]]))cname <- c(cname,z$linmodel[[1]])
coef.table <- cbind(z$coefficients[1:npl],z$se[1:npl])
if(!z$common){
dimnames(coef.table) <- list(cname, c("estimate", "se"))
print.default(coef.table,digits=4,print.gap=2)
cname <- coef.table <- NULL}}
if(z$common||z$nps>0){
if(!is.null(z$shape))cname <- c(cname,
if(is.character(attr(z$shape,"model")))
attr(z$shape,"model")
else if(length(grep("linear",attr(z$shape,"parameters")))>0||
length(grep("mu",attr(z$shape,"parameters")))>0)
attr(z$shape,"parameters")[grep("\\[",attr(z$shape,"parameters"))]
else attr(z$shape,"parameters"))
if(!is.null(z$linmodel[[2]]))cname <- c(cname,z$linmodel[[2]])
if(!z$common)coef.table <- cbind(z$coefficients[np1a:np2],
z$se[np1a:np2])
if(z$common&&is.null(z$family)){
dimnames(coef.table) <- list(unique(cname),c("estimate","se"))
print.default(coef.table,digits=4,print.gap=2)}
if(is.null(z$shape)&&z$nps==1){
coef.table <- cbind(z$coefficients[np2],z$se[np2])
cname <- " "}}
if(z$nps>0&&!z$common){
cat("\nShape parameters:\n")
dimnames(coef.table) <- list(cname,c("estimate","se"))
print.default(coef.table,digits=4,print.gap=2)
cname <- coef.table <- NULL}
if(!is.null(z$family)){
if(!z$common)cat("\nFamily parameters:\n")
cname <- c(cname,if(is.character(attr(z$family,"model")))
attr(z$family,"model")
else if(length(grep("linear",attr(z$family,"parameters")))>0)
attr(z$family,"parameters")[grep("\\[",attr(z$family,"parameters"))]
else attr(z$family,"parameters"))
if(!is.null(z$linmodel[[3]]))cname <- c(cname,z$linmodel[[3]])
if(z$common){
dimnames(coef.table) <- list(unique(cname),c("estimate","se"))
print.default(coef.table,digits=4,print.gap=2)}
else {
coef.table <- cbind(z$coefficients[np3:np],z$se[np3:np])
dimnames(coef.table) <- list(cname,c("estimate","se"))
print.default(coef.table,digits=4,print.gap=2)}}
if(z$fixed)
cat("\nFixed mixing shape parameter:",z$pmix,"\n")
else
cat("\nMixing shape parameter:",z$pmix,"\n")
cat("\nVariances: conditional = ",z$mixvar[1],", mixing = ",z$mixvar[2],"\n",
sep="")
cat("\nRandom effect parameters:\n")
r <- c(z$coefficients[np4:np],-sum(z$coefficients[np4:np]))
if(mean0==1){
rr <- exp(z$coefficients[np4:np])
rr <- c(rr,z$npr+1-sum(rr))
r[z$npr+1] <- log(rr[z$npr+1])}
else if(mean0==2){
rr <- 1/(1+exp(-z$coefficients[np4:np]))
rr <- c(rr,0.5*z$npr+0.5-sum(rr))
r[z$npr+1] <- log(rr[z$npr+1]/(1-rr[z$npr+1]))}
coef.table <- cbind(r,c(z$se[np4:np],NA))
if(mean0)coef.table <- cbind(coef.table,rr)
dimnames(coef.table) <- list(1:(z$npr+1),
c(if(mean0)"estimate"else"effect","se",if(mean0)"effect"))
print.default(coef.table,digits=4,print.gap=2)
if(correlation){
cat("\nCorrelations:\n")
dimnames(z$corr) <- list(seq(1,np),seq(1,np))
print.default(z$corr,digits=4)}
invisible(z)}
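# Usage sketch for hnlmix (toy nested gamma data; formula, parameter names and
# starting values are illustrative only):
# id <- rep(1:8, each = 5)
# y <- rgamma(40, shape = 2, scale = 1)
# fit <- hnlmix(y, distribution = "gamma", mixture = "normal", random = "rand",
#               nest = id, mu = ~ exp(a + rand), pmu = c(a = 0), pshape = 1,
#               prandom = 0)
# print(fit)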
|
structure(list(username = c("unittestphifree", "wbeasleya"),
email = c("[email protected]", "[email protected]"
), firstname = c("Unit Test", "Will"), lastname = c("PHI Free",
"Beasley_A"), expiration = structure(c(NA_real_, NA_real_
), class = "Date"), data_access_group = c(NA_character_,
NA_character_), data_access_group_id = c(NA_character_, NA_character_
), design = c(FALSE, TRUE), user_rights = c(FALSE, TRUE),
data_access_groups = c(FALSE, TRUE), data_export = c("1",
"1"), reports = c(TRUE, TRUE), stats_and_charts = c(TRUE,
TRUE), manage_survey_participants = c(TRUE, TRUE), calendar = c(TRUE,
TRUE), data_import_tool = c(FALSE, TRUE), data_comparison_tool = c(FALSE,
TRUE), logging = c(FALSE, TRUE), file_repository = c(TRUE,
TRUE), data_quality_create = c(FALSE, TRUE), data_quality_execute = c(FALSE,
TRUE), api_export = c(TRUE, FALSE), api_import = c(FALSE,
FALSE), mobile_app = c(FALSE, FALSE), mobile_app_download_data = c(FALSE,
FALSE), record_create = c(TRUE, TRUE), record_rename = c(FALSE,
FALSE), record_delete = c(FALSE, FALSE), lock_records_all_forms = c(FALSE,
FALSE), lock_records = c(FALSE, FALSE), lock_records_customization = c(FALSE,
FALSE)), row.names = c(NA, -2L), class = c("tbl_df", "tbl",
"data.frame"))
|
aSECF <- function(integrands,samples,derivatives, polyorder = NULL, steinOrder = NULL, kernel_function = NULL, sigma = NULL, K0 = NULL,nystrom_inds = NULL, est_inds = NULL, apriori = NULL, conjugate_gradient = TRUE, reltol = 1e-02, diagnostics = FALSE){
N <- NROW(samples)
N_expectations <- NCOL(integrands)
if (is.null(ncol(integrands))){
integrands <- matrix(integrands,nrow=N,ncol=1)
}
if (is.null(ncol(samples))){
samples <- matrix(samples,nrow=N,ncol=1)
derivatives <- matrix(derivatives,nrow=N,ncol=1)
}
if (is.null(est_inds)){
inds_unique <- !duplicated(samples)
samples <- samples[inds_unique,,drop=FALSE]
derivatives <- derivatives[inds_unique,,drop=FALSE]
integrands <- integrands[inds_unique,,drop=FALSE]
N <- sum(inds_unique)
} else{
inds_all <- 1:N
to_remove <- est_inds[duplicated(samples[est_inds,,drop=FALSE])]
num_to_remove <- length(to_remove)
if (num_to_remove!=0){
N_new <- N - num_to_remove
samples <- samples[-to_remove,,drop=FALSE]
derivatives <- derivatives[-to_remove,,drop=FALSE]
integrands <- integrands[-to_remove,,drop=FALSE]
      inds_all[-to_remove] <- 1:N_new   # map old row index -> new row index
      est_inds <- inds_all[setdiff(est_inds, to_remove)]
N <- N_new
}
}
d <- NCOL(samples)
if (!is.null(est_inds)){
N <- length(est_inds)
}
if (!is.null(polyorder)){
if (choose(d+polyorder,d) >= N){
stop("The polyorder is too high for this sample size.")
}
} else if ((d >= N) && is.null(apriori)){
stop("The dimension is too large for this sample size. Consider increasing the sample size or using the apriori argument.")
} else if (length(apriori) >= N){
stop("The dimension is too large for this sample size. Consider reducing the number of terms in the apriori argument.")
}
if (is.null(est_inds)){
temp <- aSECF_cpp_prep(integrands, samples, derivatives, getX = getX, polyorder, steinOrder, kernel_function, sigma, K0, apriori, nystrom_inds, conjugate_gradient)
A <- temp$A
b <- temp$b
B2 <- temp$B2
cond_no <- temp$cond_no
m0 <- temp$m0
Q <- NCOL(temp$phi)
if (diagnostics){
B1 <- temp$B1
ny_inds <- temp$ny_inds
a <- matrix(NaN,nrow=m0,ncol=N_expectations)
beta <- matrix(NaN,nrow=Q,ncol=N_expectations)
}
    expectation <- rep(NaN, N_expectations)
iter <- rep(NaN,N_expectations)
for (i in 1:N_expectations){
if (conjugate_gradient){
B2_inv <- solve(B2)
xinit <- c(rep(0,m0),B2_inv[,1]*mean(integrands[,i]))
ab_tilde <- lsolve.cg(A, b[,i], xinit = xinit, reltol = reltol, preconditioner = diag(ncol(A)), adjsym = TRUE, verbose = FALSE)
expectation[i] <- matrix(B2[1,],nrow=1)%*%ab_tilde$x[(m0+1):(m0+Q)]
iter[i] <- ab_tilde$iter
} else{
ab_tilde <- solve(nearPD(A),b[,i])
expectation[i] <- ab_tilde[m0+1]
iter[i] <- NaN
}
if (diagnostics){
a[,i] <- B1%*%ab_tilde$x[1:m0]
beta[,i] <- B2%*%ab_tilde$x[(m0+1):(m0+Q)]
}
}
if (conjugate_gradient){
if (diagnostics){
res <- list(expectation = expectation, cond_no=cond_no, iter = iter, a = a, b = beta, ny_inds = ny_inds)
} else{
res <- list(expectation = expectation, cond_no=cond_no, iter = iter)
}
} else{
if (diagnostics){
res <- list(expectation = expectation, a = a, b = beta, ny_inds = ny_inds)
} else{
res <- list(expectation = expectation)
}
}
} else{
res <- aSECF_unbiased_cpp_prep(integrands, samples, derivatives, est_inds, getX = getX, aSECF_mse_linsolve = aSECF_mse_linsolve, polyorder, steinOrder, kernel_function, sigma, K0, apriori, nystrom_inds, conjugate_gradient, reltol, diagnostics)
}
return(res)
}
CF <- function(integrands, samples, derivatives, steinOrder = NULL, kernel_function = NULL, sigma = NULL, K0 = NULL, est_inds = NULL, one_in_denom = FALSE, diagnostics = FALSE){
N <- NROW(samples)
d <- NCOL(samples)
if (is.null(ncol(integrands))){
integrands <- matrix(integrands,nrow=N,ncol=1)
}
if (is.null(ncol(samples))){
samples <- matrix(samples,nrow=N,ncol=1)
derivatives <- matrix(derivatives,nrow=N,ncol=1)
}
if (is.null(est_inds)){
inds_unique <- !duplicated(samples)
samples <- samples[inds_unique,,drop=FALSE]
derivatives <- derivatives[inds_unique,,drop=FALSE]
integrands <- integrands[inds_unique,,drop=FALSE]
N <- sum(inds_unique)
} else{
inds_all <- 1:N
to_remove <- est_inds[duplicated(samples[est_inds,,drop=FALSE])]
num_to_remove <- length(to_remove)
if (num_to_remove!=0){
N_new <- N - num_to_remove
samples <- samples[-to_remove,,drop=FALSE]
derivatives <- derivatives[-to_remove,,drop=FALSE]
integrands <- integrands[-to_remove,,drop=FALSE]
      inds_all[-to_remove] <- 1:N_new   # map old row index -> new row index
      est_inds <- inds_all[setdiff(est_inds, to_remove)]
N <- N_new
}
}
if (is.null(est_inds)){
temp <- CF_cpp(integrands, samples, derivatives, steinOrder, kernel_function, sigma, K0, one_in_denom, diagnostics)
} else{
temp <- CF_unbiased_cpp(integrands, samples, derivatives, est_inds, steinOrder, kernel_function, sigma, K0, one_in_denom, diagnostics)
}
return (temp)
}
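# Usage sketch for CF() on a standard-normal toy target, where the score is
# d/dx log p(x) = -x (kernel settings left at their defaults):
# set.seed(1)
# x <- matrix(rnorm(100))
# u <- -x                 # derivatives of the log-target at the samples
# f <- x^2                # integrand; E[X^2] = 1
# CF(f, x, u)$expectation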
CF_crossval <- function(integrands, samples, derivatives, steinOrder = NULL, kernel_function = NULL, sigma_list = NULL, K0_list = NULL, est_inds = NULL, log_weights = NULL, one_in_denom = FALSE, folds = NULL, diagnostics = FALSE){
N <- NROW(samples)
d <- NCOL(samples)
if (is.null(ncol(integrands))){
integrands <- matrix(integrands,nrow=N,ncol=1)
}
if (is.null(ncol(samples))){
samples <- matrix(samples,nrow=N,ncol=1)
derivatives <- matrix(derivatives,nrow=N,ncol=1)
}
if (is.null(est_inds)){
inds_unique <- !duplicated(samples)
if (sum(inds_unique)!=N){
inds <- order(samples[inds_unique,1])
num_dups <- data.frame(temp_for_dups=samples[,1]) %>% group_by(temp_for_dups) %>% group_size()
if (!is.null(log_weights)){
log_weights <- log_weights[inds_unique]
log_weights <- log_weights + log(num_dups[order(inds)])
}
samples <- samples[inds_unique,,drop=FALSE]
derivatives <- derivatives[inds_unique,,drop=FALSE]
integrands <- integrands[inds_unique,,drop=FALSE]
N <- sum(inds_unique)
if (!is.null(K0_list)){
for (jj in 1:length(K0_list)){
K0_list[[jj]] <- K0_list[[jj]][inds_unique,inds_unique]
}
}
}
} else{
inds_all <- 1:N
to_remove <- est_inds[duplicated(samples[est_inds,,drop=FALSE])]
num_to_remove <- length(to_remove)
if (num_to_remove!=0){
N_new <- N - num_to_remove
samples <- samples[-to_remove,,drop=FALSE]
derivatives <- derivatives[-to_remove,,drop=FALSE]
integrands <- integrands[-to_remove,,drop=FALSE]
      inds_all[-to_remove] <- 1:N_new   # map old row index -> new row index
      est_inds <- inds_all[setdiff(est_inds, to_remove)]
N <- N_new
if (!is.null(K0_list)){
for (jj in 1:length(K0_list)){
K0_list[[jj]] <- K0_list[[jj]][-to_remove,-to_remove]
}
}
}
}
if (is.null(log_weights)){
temp <- CF_crossval_cpp(integrands, samples, derivatives, steinOrder, kernel_function, sigma_list, K0_list, folds, est_inds, NULL, one_in_denom, diagnostics)
} else {
temp <- CF_crossval_cpp(integrands, samples, derivatives, steinOrder, kernel_function, sigma_list, K0_list, folds, est_inds, exp(log_weights), one_in_denom, diagnostics)
}
return (temp)
}
SECF <- function(integrands,samples,derivatives, polyorder = NULL, steinOrder = NULL, kernel_function = NULL, sigma = NULL, K0 = NULL, est_inds = NULL,apriori = NULL, diagnostics = FALSE){
N <- NROW(samples)
N_expectations <- NCOL(integrands)
if (is.null(ncol(integrands))){
integrands <- matrix(integrands,nrow=N,ncol=1)
}
if (is.null(ncol(samples))){
samples <- matrix(samples,nrow=N,ncol=1)
derivatives <- matrix(derivatives,nrow=N,ncol=1)
}
if (is.null(est_inds)){
inds_unique <- !duplicated(samples)
samples <- samples[inds_unique,,drop=FALSE]
derivatives <- derivatives[inds_unique,,drop=FALSE]
integrands <- integrands[inds_unique,,drop=FALSE]
N <- sum(inds_unique)
} else{
inds_all <- 1:N
to_remove <- est_inds[duplicated(samples[est_inds,,drop=FALSE])]
num_to_remove <- length(to_remove)
if (num_to_remove!=0){
N_new <- N - num_to_remove
samples <- samples[-to_remove,,drop=FALSE]
derivatives <- derivatives[-to_remove,,drop=FALSE]
integrands <- integrands[-to_remove,,drop=FALSE]
      inds_all[-to_remove] <- 1:N_new   # map old row index -> new row index
      est_inds <- inds_all[setdiff(est_inds, to_remove)]
N <- N_new
}
}
d <- NCOL(samples)
if (!is.null(est_inds)){
N <- length(est_inds)
}
if (!is.null(polyorder)){
if (choose(d+polyorder,d) >= N){
stop("The polyorder is too high for this sample size.")
}
} else if ((d >= N) && is.null(apriori)){
stop("The dimension is too large for this sample size. Consider increasing the sample size or using the apriori argument.")
} else if (length(apriori) >= N){
stop("The dimension is too large for this sample size. Consider reducing the number of terms in the apriori argument.")
}
if (is.null(est_inds)){
temp <- SECF_cpp(integrands, samples, derivatives, getX = getX, polyorder, steinOrder, kernel_function, sigma, K0, apriori, diagnostics)
} else{
temp <- SECF_unbiased_cpp(integrands, samples, derivatives, est_inds, getX = getX, polyorder, steinOrder, kernel_function, sigma, K0, apriori, diagnostics)
}
return(temp)
}
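# Usage sketch (not run on source): estimating E[X^2] = 1 for a standard
# Gaussian with the semi-exact control functional defined above. The returned
# list is assumed to carry an expectation element, as used by aSECF_crossval
# further below.
if (FALSE) {
  set.seed(1)
  x <- matrix(rnorm(500), ncol = 1)
  res <- SECF(integrands = x^2, samples = x, derivatives = -x, polyorder = 2)
  res$expectation
}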
SECF_crossval <- function(integrands,samples,derivatives, polyorder = NULL, steinOrder = NULL, kernel_function = NULL, sigma_list = NULL, K0_list = NULL, est_inds = NULL, apriori = NULL, folds = NULL, diagnostics = FALSE){
N <- NROW(samples)
N_expectations <- NCOL(integrands)
if (is.null(ncol(integrands))){
integrands <- matrix(integrands,nrow=N,ncol=1)
}
if (is.null(ncol(samples))){
samples <- matrix(samples,nrow=N,ncol=1)
derivatives <- matrix(derivatives,nrow=N,ncol=1)
}
if (is.null(est_inds)){
inds_unique <- !duplicated(samples)
samples <- samples[inds_unique,,drop=FALSE]
derivatives <- derivatives[inds_unique,,drop=FALSE]
integrands <- integrands[inds_unique,,drop=FALSE]
N <- sum(inds_unique)
} else{
inds_all <- 1:N
to_remove <- est_inds[duplicated(samples[est_inds,,drop=FALSE])]
num_to_remove <- length(to_remove)
if (num_to_remove!=0){
N_new <- N - num_to_remove
samples <- samples[-to_remove,,drop=FALSE]
derivatives <- derivatives[-to_remove,,drop=FALSE]
integrands <- integrands[-to_remove,,drop=FALSE]
# re-map est_inds to the new row numbering, dropping the removed duplicates
inds_all[-to_remove] <- 1:N_new
est_inds <- inds_all[setdiff(est_inds, to_remove)]
N <- N_new
}
}
d <- NCOL(samples)
if (!is.null(est_inds)){
N <- length(est_inds)
}
if (is.null(folds)){
N_perfit <- floor(0.8*N)
} else{
N_perfit <- floor((folds-1)/folds*N)
}
if (!is.null(polyorder)){
if (choose(d+polyorder,d) >= N_perfit){
stop("The polyorder is too high for this sample size and number of folds.")
}
} else if ((d >= N_perfit) && is.null(apriori)){
stop("The dimension is too large for this sample size and number of folds. Consider increasing the sample size, reducing the number of cross-validation folds or using the apriori argument.")
} else if (length(apriori) >= N_perfit){
stop("The dimension is too large for this sample size and number of folds. Consider reducing the number of cross-validation folds or reducing the number of terms in the apriori argument.")
}
temp <- SECF_crossval_cpp(integrands, samples, derivatives, getX = getX, polyorder, steinOrder, kernel_function, sigma_list, K0_list, apriori, folds, est_inds, diagnostics)
return(temp)
}
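# Usage sketch (not run on source): cross-validating over a grid of kernel
# length-scales; when folds is NULL the feasibility checks above assume 80%
# of the samples per fit.
if (FALSE) {
  set.seed(1)
  x <- matrix(rnorm(500), ncol = 1)
  res <- SECF_crossval(integrands = x^2, samples = x, derivatives = -x,
                       polyorder = 1, sigma_list = list(0.5, 1, 2), folds = 5)
}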
aSECF_mse_linsolve <- function(integrands,samples,derivatives, polyorder = NULL, steinOrder = NULL, kernel_function = NULL, sigma = NULL, K0 = NULL, apriori = NULL, nystrom_inds = NULL, conjugate_gradient = TRUE, reltol = 1e-02){
N <- NROW(samples)
N_expectations <- NCOL(integrands)
if (is.null(ncol(integrands))){
integrands <- matrix(integrands,nrow=N,ncol=1)
}
if (is.null(ncol(samples))){
samples <- matrix(samples,nrow=N,ncol=1)
derivatives <- matrix(derivatives,nrow=N,ncol=1)
}
temp <- aSECF_cpp_prep(integrands, samples, derivatives, getX = getX, polyorder, steinOrder, kernel_function, sigma, K0, apriori, nystrom_inds, conjugate_gradient)
A <- temp$A
b <- temp$b
B1 <- temp$B1
B2 <- temp$B2
cond_no <- temp$cond_no
m0 <- temp$m0
Q <- NCOL(temp$phi)
expectation <- rep(NaN, N_expectations)
ab_tilde <- list()
for (i in 1:N_expectations){
ab_tilde[[i]] <- list()
if (conjugate_gradient){
B2_inv <- solve(B2)
xinit <- c(rep(0,m0),B2_inv[,1]*mean(integrands[,i]))
# conjugate-gradient solve of the preconditioned system via Rlinsolve::lsolve.cg
temp <- lsolve.cg(A, b[,i], xinit = xinit, reltol = reltol, preconditioner = diag(ncol(A)), adjsym = TRUE, verbose = FALSE)
temp$x[1:m0] <- B1%*%temp$x[1:m0]
temp$x[(m0+1):(m0+Q)] <- B2%*%temp$x[(m0+1):(m0+Q)]
ab_tilde[[i]]$sol <- temp$x
ab_tilde[[i]]$iter <- temp$iter
ab_tilde[[i]]$cond_no <- cond_no
} else{
# direct solve against the nearest positive-definite approximation of A (Matrix::nearPD)
ab_tilde[[i]]$sol <- solve(Matrix::nearPD(A)$mat, b[,i])
}
}
return(ab_tilde)
}
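# Minimal sketch of the conjugate-gradient path used above (not run on
# source; Rlinsolve::lsolve.cg is assumed available, as in aSECF_mse_linsolve).
if (FALSE) {
  library(Rlinsolve)
  A <- crossprod(matrix(rnorm(25), 5, 5)) + diag(5)  # symmetric positive-definite test matrix
  b <- rnorm(5)
  sol <- lsolve.cg(A, b, reltol = 1e-8, verbose = FALSE)
  max(abs(A %*% sol$x - b))  # residual should be near zero
}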
aSECF_crossval <- function(integrands,samples,derivatives, polyorder = NULL, steinOrder = NULL, kernel_function = NULL, sigma_list = NULL, est_inds = NULL, apriori = NULL, num_nystrom = NULL, conjugate_gradient = TRUE, reltol = 1e-02, folds = NULL, diagnostics = FALSE){
N <- NROW(samples)
N_expectations <- NCOL(integrands)
if (is.null(ncol(integrands))){
integrands <- matrix(integrands,nrow=N,ncol=1)
}
if (is.null(ncol(samples))){
samples <- matrix(samples,nrow=N,ncol=1)
derivatives <- matrix(derivatives,nrow=N,ncol=1)
}
if (is.null(est_inds)){
inds_unique <- !duplicated(samples)
samples <- samples[inds_unique,,drop=FALSE]
derivatives <- derivatives[inds_unique,,drop=FALSE]
integrands <- integrands[inds_unique,,drop=FALSE]
N <- sum(inds_unique)
} else{
inds_all <- 1:N
to_remove <- est_inds[duplicated(samples[est_inds,,drop=FALSE])]
num_to_remove <- length(to_remove)
if (num_to_remove!=0){
N_new <- N - num_to_remove
samples <- samples[-to_remove,,drop=FALSE]
derivatives <- derivatives[-to_remove,,drop=FALSE]
integrands <- integrands[-to_remove,,drop=FALSE]
# re-map est_inds to the new row numbering, dropping the removed duplicates
inds_all[-to_remove] <- 1:N_new
est_inds <- inds_all[setdiff(est_inds, to_remove)]
N <- N_new
}
}
d <- NCOL(samples)
N_total <- N  # keep the full sample count before N becomes the estimation-set size
if (!is.null(est_inds)){
N <- length(est_inds)
}
if (is.null(folds)){
N_perfit <- floor(0.8*N)
} else{
N_perfit <- floor((folds-1)/folds*N)
}
if (!is.null(polyorder)){
if (choose(d+polyorder,d) >= N_perfit){
stop("The polyorder is too high for this sample size and number of folds.")
}
} else if ((d >= N_perfit) && is.null(apriori)){
stop("The dimension is too large for this sample size and number of folds. Consider increasing the sample ize, reducing the number of cross-validation folds or using the apriori argument.")
} else if (length(apriori) >= N_perfit){
stop("The dimension is too large for this sample size and number of folds. Consider reducing the number of cross-validation folds or reducing the number of terms in the apriori argument.")
}
if (is.null(num_nystrom)){
num_nystrom <- ceiling(sqrt(N))
}
mse <- aSECF_crossval_cpp(integrands, samples, derivatives, getX = getX, aSECF_mse_linsolve = aSECF_mse_linsolve, num_nystrom = num_nystrom, polyorder, steinOrder, kernel_function, sigma_list, apriori, folds, conjugate_gradient, reltol = reltol, est_inds = est_inds)
opt_indices <- apply(mse,1,which.min)
expectation <- rep(NaN,N_expectations)
if (conjugate_gradient){
cond_no <- iter <- rep(NaN,N_expectations)
}
if (!is.null(est_inds)){
f_true <- f_hat <- matrix(NaN,nrow=N_total-length(est_inds),ncol=N_expectations)
}
if (diagnostics){
a <- matrix(NaN,nrow=num_nystrom,ncol=N_expectations)
ny_inds <- matrix(NaN,nrow=num_nystrom,ncol=N_expectations)
}
for (j in unique(opt_indices)){
inds <- which(opt_indices==j)
nystrom_inds <- sample(1:N,num_nystrom)
temp <- aSECF(matrix(integrands[,inds],ncol=length(inds)), samples, derivatives, polyorder, steinOrder, kernel_function, sigma_list[[j]], NULL, nystrom_inds, est_inds, apriori, conjugate_gradient, reltol, diagnostics)
if(diagnostics & (j==(unique(opt_indices))[1])){
b <- matrix(NaN,nrow=length(temp$b),ncol=N_expectations)
}
expectation[inds] <- temp$expectation
if (!is.null(est_inds)){
f_true[,inds] <- temp$f_true
f_hat[,inds] <- temp$f_hat
}
if (conjugate_gradient){
cond_no[inds] <- temp$cond_no
iter[inds] <- temp$iter
}
if (diagnostics){
a[,inds] <- temp$a
b[,inds] <- temp$b
if (length(inds)==1){
ny_inds[,inds] <- nystrom_inds
} else{
for (zz in 1:length(inds)){
ny_inds[,inds[zz]] <- nystrom_inds
}
}
}
}
if (!diagnostics){
if (conjugate_gradient & !is.null(est_inds)){
return(list(expectation=expectation,f_true=f_true,f_hat=f_hat,iter=iter,cond_no=cond_no,mse=mse,optinds=opt_indices))
} else if (conjugate_gradient){
return(list(expectation=expectation,iter=iter,cond_no=cond_no,mse=mse,optinds=opt_indices))
} else{
return(list(expectation=expectation,mse=mse,optinds=opt_indices))
}
} else {
if (conjugate_gradient & !is.null(est_inds)){
return(list(expectation=expectation,f_true=f_true,f_hat=f_hat,iter=iter,cond_no=cond_no,mse=mse,optinds=opt_indices,
a=a,b=b,ny_inds=ny_inds))
} else if (conjugate_gradient){
return(list(expectation=expectation,iter=iter,cond_no=cond_no,mse=mse,optinds=opt_indices,
a=a,b=b,ny_inds=ny_inds))
} else{
return(list(expectation=expectation,mse=mse,optinds=opt_indices,
a=a,b=b,ny_inds=ny_inds))
}
}
}
Phi_fn <- function(samples,derivatives,polyorder=NULL,apriori=NULL){
return(Phi_fn_cpp(samples,derivatives, getX = getX, polyorder,apriori))
}
|
areapart=function(data, G, cell.size=1){
if(!is.matrix(data) & !spatstat.geom::is.ppp(data))
stop("For grid data, please provide the dataset as a matrix;
for point pattern data, please provide the dataset as a ppp object")
if(is.matrix(data))
{
# convert the grid to a marked point pattern: one point per cell centre,
# with rows read top-to-bottom so that matrix row 1 maps to the largest y
ncl=ncol(data); nrw=nrow(data)
W=spatstat.geom::owin(xrange=c(0, ncl*cell.size), yrange=c(0,nrw*cell.size))
xx.c=seq(cell.size/2, (ncl*cell.size-cell.size/2), l=ncl)
yy.c=rev(seq(cell.size/2, (nrw*cell.size-cell.size/2), l=nrw))
coords=expand.grid(yy.c, xx.c)
data.pp=spatstat.geom::ppp(x=coords[,2], y=coords[,1], window=W)
spatstat.geom::marks(data.pp)=c(data)
}
if (spatstat.geom::is.ppp(data)) {
W=data$window
data.pp=data
if(is.null(spatstat.geom::marks(data))) spatstat.geom::marks(data.pp)=rep(1, spatstat.geom::npoints(data))
}
if(length(G)==1){
# G random generator points for the partition (runifpoint was exported by
# spatstat.core; in current spatstat releases it lives in spatstat.random)
part.pp=spatstat.core::runifpoint(G, W)
part.coord=cbind(x=part.pp$x, y=part.pp$y, id=1:G)
} else {
if(min(G[,1])<W$xrange[1]|max(G[,1])>W$xrange[2]|
min(G[,2])<W$yrange[1]|max(G[,2])>W$yrange[2])
stop("The given coordinates for the area partition are outside the boundaries of the data observation window")
part.pp=spatstat.geom::ppp(G[,1], G[,2], W)
part.coord=cbind(x=part.pp$x, y=part.pp$y, id=1:nrow(G))
}
near.neigh=spatstat.geom::nncross(data.pp, part.pp)
data.coord.area=data.frame(data.pp$x, data.pp$y, data.pp$marks, near.neigh$which)
colnames(data.coord.area)=c("x", "y", "cat", "area")
return(list(G.pp=part.pp, data.assign=data.coord.area))
}
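# Usage sketch (not run on source): partition a 10 x 10 binary grid into
# G = 5 random areas and inspect the cell-to-area assignment.
if (FALSE) {
  set.seed(1)
  m <- matrix(rbinom(100, 1, 0.3), nrow = 10)
  ap <- areapart(m, G = 5, cell.size = 1)
  table(ap$data.assign$area)  # number of grid cells assigned to each area
}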
batty=function(data, category=1, cell.size=1, partition=10){
if(!is.matrix(data) & !spatstat.geom::is.ppp(data))
stop("For grid data, please provide the dataset as a matrix;
for point pattern data, please provide the dataset as a ppp object")
if(spatstat.geom::is.ppp(data) & !spatstat.geom::is.marked(data) & category!=1)
stop("Since data do not have different categories, please set category to the default 1")
if(is.matrix(data)) datavec=c(data) else
if(spatstat.geom::is.marked(data)) datavec=spatstat.geom::marks(data) else
datavec=rep(1, spatstat.geom::npoints(data))
if(is.factor(datavec)) datavec=as.character(datavec)
if(length(which(unique(datavec)==category))==0)
stop("Please choose a category that is present in the dataset.
If the point pattern is unmarked, category must be set to 1")
datavec=as.numeric(datavec==category)
datavec[is.na(datavec)]=0
if(is.matrix(data))
{
ncl=ncol(data); nrw=nrow(data)
W=spatstat.geom::owin(xrange=c(0, ncl*cell.size), yrange=c(0,nrw*cell.size))
xx.c=seq(cell.size/2, (ncl*cell.size-cell.size/2), l=ncl)
yy.c=rev(seq(cell.size/2, (nrw*cell.size-cell.size/2), l=nrw))
coords=expand.grid(yy.c, xx.c)
data.pp=spatstat.geom::ppp(x=coords[,2], y=coords[,1], window=W)
spatstat.geom::marks(data.pp)=datavec
}
if (spatstat.geom::is.ppp(data)) {
W=data$window
data.pp=data
spatstat.geom::marks(data.pp)=datavec
}
if(is.numeric(partition) | is.matrix(partition))
areap=spatstat.geom::dirichlet(areapart(data, G=partition, cell.size=cell.size)$G.pp) else
if(is.list(partition)) {
if(names(partition)[1]=="G.pp" & names(partition)[2]=="data.assign")
areap=spatstat.geom::dirichlet(partition$G.pp)
if(names(partition)[1]=="tiles" & names(partition)[2]=="n")
areap=partition
} else
if(spatstat.geom::is.tess(partition)) {
if(partition$window$xrange[1]!=W$xrange[1] | partition$window$xrange[2]!=W$xrange[2] |
partition$window$yrange[1]!=W$yrange[1] | partition$window$yrange[2]!=W$yrange[2])
stop("The given partition is not on the same observation window as the data")
if(is.null(partition$tiles)) stop("If a tessellation is provided, it should contain tiles")
areap=partition
} else stop("please provide the area partition object in an accepted format.
If a tessellation is provided, it should contain tiles")
n.G=areap$n
tot.pG=sum(datavec)
pg=Tg=numeric(n.G)
for(gg in 1:n.G)
{
subd=data.pp[areap$tiles[[gg]]]
datatab=table(spatstat.geom::marks(subd))
if(length(datatab[which(names(datatab)==1)])==1)
pg[gg]=datatab[which(names(datatab)==1)]/tot.pG
Tg[gg]=spatstat.geom::area.owin(areap$tiles[[gg]])
# area.owin() may return NA for mask windows; estimate the tile area from
# the share of TRUE pixels in the binary mask instead
if(is.na(Tg[gg]))
Tg[gg]=table(areap$tiles[[gg]]$m)[which(names(table(areap$tiles[[gg]]$m))=="TRUE")]*
spatstat.geom::area.owin(data.pp$window)/(nrow(areap$tiles[[gg]]$m)*ncol(areap$tiles[[gg]]$m))
}
G.count=data.frame(1:n.G, pg*tot.pG, pg, Tg)
colnames(G.count)=c("area.id", "abs.freq", "rel.freq", "area.size")
if(sum(Tg)==1)
{
Tg=Tg*100
warning("The total observation area is 1, which returns problems in the computation of Batty's entropy, since the maximum is log(1)=0.
For this reason, during the computation all areas are multiplied by 100." )
}
batty.terms=ifelse(G.count[,2]>0,G.count[,3]*log(Tg/G.count[,3]),0)
batty.ent=sum(batty.terms)
return(list(areas.tess=areap, areas.freq=G.count, batty=batty.ent, rel.batty=batty.ent/log(sum(Tg))))
}
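# Usage sketch (not run on source): Batty's spatial entropy for the presence
# category of a binary grid over a 10-area random partition.
if (FALSE) {
  set.seed(1)
  m <- matrix(rbinom(100, 1, 0.3), nrow = 10)
  b <- batty(m, category = 1, cell.size = 1, partition = 10)
  b$batty      # absolute entropy
  b$rel.batty  # relative to log(total area size)
}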
karlstrom=function(data, category=1, cell.size=1, partition=10, neigh=4, method="number"){
if(!is.matrix(data) & !spatstat.geom::is.ppp(data))
stop("For grid data, please provide the dataset as a matrix;
for point pattern data, please provide the dataset as a ppp object")
if(spatstat.geom::is.ppp(data) & !spatstat.geom::is.marked(data) & category!=1)
stop("Since data do not have different categories, please set category to the default 1")
if(is.matrix(data)) datavec=c(data) else
if(spatstat.geom::is.marked(data)) datavec=spatstat.geom::marks(data) else
datavec=rep(1, spatstat.geom::npoints(data))
if(is.factor(datavec)) datavec=as.character(datavec)
if(length(which(unique(datavec)==category))==0)
stop("Please choose a category that is present in the dataset.
If the point pattern is unmarked, category must be set to 1")
datavec=as.numeric(datavec==category)
datavec[is.na(datavec)]=0
if(is.matrix(data))
{
ncl=ncol(data); nrw=nrow(data)
W=spatstat.geom::owin(xrange=c(0, ncl*cell.size), yrange=c(0,nrw*cell.size))
xx.c=seq(cell.size/2, (ncl*cell.size-cell.size/2), l=ncl)
yy.c=rev(seq(cell.size/2, (nrw*cell.size-cell.size/2), l=nrw))
coords=expand.grid(yy.c, xx.c)
data.pp=spatstat.geom::ppp(x=coords[,2], y=coords[,1], window=W)
spatstat.geom::marks(data.pp)=datavec
}
if (spatstat.geom::is.ppp(data)) {
W=data$window
data.pp=data
spatstat.geom::marks(data.pp)=datavec
}
if(is.numeric(partition) | is.matrix(partition))
areap=spatstat.geom::dirichlet(areapart(data, G=partition, cell.size=cell.size)$G.pp) else
if(is.list(partition)) {
if(names(partition)[1]=="G.pp" & names(partition)[2]=="data.assign")
areap=spatstat.geom::dirichlet(partition$G.pp)
if(names(partition)[1]=="tiles" & names(partition)[2]=="n")
areap=partition
} else
if(spatstat.geom::is.tess(partition)) {
if(partition$window$xrange[1]!=W$xrange[1] | partition$window$xrange[2]!=W$xrange[2] |
partition$window$yrange[1]!=W$yrange[1] | partition$window$yrange[2]!=W$yrange[2])
stop("The given partition is not on the same observation window as the data")
areap=partition
} else stop("please provide the area partition object in an accepted format")
n.G=areap$n
centroids=matrix(unlist(lapply(areap$tiles, spatstat.geom::centroid.owin)), byrow=T, ncol=2)
end=spatstat.geom::ppp(centroids[,1], centroids[,2], window=W)
maxdist=sqrt(diff(data.pp$window$xrange)^2+diff(data.pp$window$yrange)^2)
mindist=min(spatstat.geom::nndist(end))
if (method=="number" & neigh%%1!=0) stop("If method=number, neigh must be an integer")
if (method=="number" & neigh>n.G) stop("The number of neighbours cannot exceed the number of sub-areas")
if (method=="distance" & neigh>=maxdist)
warning("The chosen neighbourhood distance is larger than the observation area.
All areas will be neighbours of all other areas, i.e. all ptilde will be equal")
if (method=="distance" & neigh<mindist)
warning("The chosen neighbourhood distance is smaller than the minimum distance between areas.
All areas will have 0 neighbours, i.e. all ptildeg will be equal to pg")
neigh.indlist=vector("list", n.G)
for(gg in 1:n.G)
{
start=spatstat.geom::ppp(centroids[gg,1], centroids[gg,2], window=W)
cdist=spatstat.geom::crossdist(start,end)
if (method=="number") neigh.indlist[[gg]]=which(cdist<=sort(cdist)[neigh]) else
if (method=="distance") neigh.indlist[[gg]]=which(cdist<=neigh) else
stop("Method should be set to either number or distance. If method=number, neigh must be integer.")
}
tot.pG=sum(datavec)
pg=Tg=ptildeg=numeric(n.G)
for(gg in 1:n.G)
{
subd=data.pp[areap$tiles[[gg]]]
datatab=table(spatstat.geom::marks(subd))
if(length(datatab[which(names(datatab)==1)])==1)
pg[gg]=datatab[which(names(datatab)==1)]/tot.pG
Tg[gg]=spatstat.geom::area.owin(areap$tiles[[gg]])
}
for(gg in 1:n.G)
ptildeg[gg]=mean(pg[neigh.indlist[[gg]]])
G.count=data.frame(1:n.G, pg*tot.pG, pg, ptildeg, Tg)
colnames(G.count)=c("area.id", "abs.freq", "rel.freq", "neigh.mean","area.size")
karl.terms=ifelse(G.count[,3]>0&G.count[,4]>0,G.count[,3]*log(1/G.count[,4]),0)
karl.ent=sum(karl.terms)
# numerical guard: the entropy cannot exceed its theoretical maximum log(n.G)
if(karl.ent>log(n.G)) karl.ent=log(n.G)-1e-05
return(list(areas.tess=areap, areas.freq=G.count,
karlstrom=karl.ent, rel.karl=karl.ent/log(n.G)))
}
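# Usage sketch (not run on source): Karlstrom and Ceccato's entropy with the
# 4 nearest areas (including the area itself) as neighbourhood.
if (FALSE) {
  set.seed(1)
  m <- matrix(rbinom(100, 1, 0.3), nrow = 10)
  k <- karlstrom(m, category = 1, partition = 10, neigh = 4, method = "number")
  k$rel.karl  # in [0, 1], relative to the maximum log(n.G)
}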
|
expected <- eval(parse(text="c(NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_)"));
test(id=0, code={
argv <- eval(parse(text="list(c(\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\"), NA_real_, NA_integer_, NULL)"));
.Internal(match(argv[[1]], argv[[2]], argv[[3]], argv[[4]]));
}, o=expected);
|
setGeneric("residuals")
setMethod("residuals",
"movG",
function(object,...)
{
object <- object@adjModel
callGeneric(object)
})
|
Rho1 <-
function(R,S) {
if (checking(R)==1 & checking(S)==1) {
if (all(R[,1,1]==S[,1,1])==FALSE) {
print("the fuzzy numbers of the two arrays must have the same alpha-levels")
}
else {
r=dim(R)[3]
s=dim(S)[3]
rho1=matrix(nrow=r,ncol=s)
rho1dist<-function(x){
# integral of |f| on [0,1] via Simpson's rule on each of the k sub-intervals,
# with the midpoint value approximated by the endpoint average:
# delta/6 * (|f_i| + 4|(f_i + f_{i+1})/2| + |f_{i+1}|), where 4|mid| = 2|y|
k<-length(x)-1
delta<-1/k
y<-x[1:k]+x[2:(k+1)]
values<-abs(x[1:k])+abs(x[2:(k+1)])+2*abs(y)
integral<-sum(values)*delta/6
return(integral)
}
for (i in 1:r) {
for (j in 1:s) {
inf=R[,2,i]-S[,2,j]
sup=R[,3,i]-S[,3,j]
rho1[i,j]=(rho1dist(inf)+rho1dist(sup))*0.5
}
}
return(rho1)
}
}
}
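# Usage sketch (not run on source): rho1 distance between two arrays of fuzzy
# numbers, each of dimension (alpha-levels) x (alpha, inf, sup) x (count); the
# helper checking() is defined elsewhere and must accept both arrays.
if (FALSE) {
  alphas <- seq(0, 1, by = 0.1)
  R <- array(c(alphas, 0 + alphas, 4 - alphas), dim = c(11, 3, 1))
  S <- array(c(alphas, 1 + alphas, 5 - alphas), dim = c(11, 3, 1))
  Rho1(R, S)  # shifted copies: the distance should equal the shift, 1
}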
|
expectreg_loclin_trivariate<-function(Z,X1,X2,Y,omega,h,kernel=gaussK)
{
GRIDZ=Z
GRID_X1=X1
GRID_X2=X2
TAU<-array(0,dim=c(length(GRIDZ)))
LAMBDA1<-array(0,dim=c(length(GRIDZ)))
LAMBDA2<-array(0,dim=c(length(GRIDZ)))
LAMBDA3<-array(0,dim=c(length(GRIDZ)))
Grid=cbind(GRIDZ,GRID_X1,GRID_X2)
# one initial OLS fit supplies the starting values for the iterative reweighting
fit_init=lm(Y~Z+X1+X2)$coefficients
a_first=fit_init[1]
b_first=fit_init[2]
c_first=fit_init[3]
d_first=fit_init[4]
for(i in 1:length(GRIDZ))
{
z=Grid[i,1]
x1=Grid[i,2]
x2=Grid[i,3]
a=a_first
b=b_first
c=c_first
d=d_first
closea = FALSE
while(closea == FALSE)
{
weight<-numeric(length(Z))
for(l in 1:length(Z))
{
if(Y[l]<=a+(b*(Z[l]-z))+(c*(X1[l]-x1))+(d*(X2[l]-x2)))
{
weight[l]=(1-omega)*(1/h^3)*kernel(((Z[l]-z)/h))*kernel(((X1[l]-x1)/h))*kernel(((X2[l]-x2)/h))
}
else
{
weight[l]=omega*(1/h^3)*kernel(((Z[l]-z)/h))*kernel(((X1[l]-x1)/h))*kernel(((X2[l]-x2)/h))
}
}
W=diag(weight)
D=cbind(1,Z-z,X1-x1,X2-x2)
# weighted least-squares update: solve once, then read off all four coefficients
coef_hat<-(matrix.inverse(t(D)%*%W%*%D))%*%t(D)%*%W%*%Y
tau1<-coef_hat[1,]
lambda1<-coef_hat[2,]
lambda2<-coef_hat[3,]
lambda3<-coef_hat[4,]
closea<-abs(tau1-a)<1e-06
a=tau1
b=lambda1
c=lambda2
d=lambda3
}
TAU[i]=a
LAMBDA1[i]=b
LAMBDA2[i]=c
LAMBDA3[i]=d
}
return(TAU)
}
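# Usage sketch (not run on source): omega = 0.5 gives local-linear mean
# regression as a special case; gaussK is assumed to come from the locpol
# package and matrix.inverse from matrixcalc.
if (FALSE) {
  library(locpol); library(matrixcalc)
  set.seed(1)
  n <- 200
  Z <- rnorm(n); X1 <- rnorm(n); X2 <- rnorm(n)
  Y <- 1 + Z + X1 - X2 + rnorm(n, sd = 0.2)
  tau <- expectreg_loclin_trivariate(Z, X1, X2, Y, omega = 0.5, h = 0.8)
}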
|