QLMDe <- function(
x, distname = c('norm', 'GH'), K, data.name = deparse1(substitute(x)),
constraint = character(),
p = QLMDp(obs = x),
init = clust_fmx(x, distname = distname, K = K, constraint = constraint),
tol = .Machine$double.eps^.25, maxiter = 1000,
...
) {
distname <- match.arg(distname)
if (length(K) != 1L || !is.numeric(K) || is.na(K) || K <= 0L) stop('number of components must be a length-1 positive integer')
if (!is.integer(K)) stop('number of components must be a length-1 positive integer (e.g., use integer `2L` instead of numeric/double `2` for a 2-component mixture model)')
if (!is.vector(x, mode = 'double')) stop('`x` must be a double vector')
if (anyNA(x)) stop('observations `x` must not contain NA_real_')
if (!is.character(data.name) || length(data.name) != 1L || anyNA(data.name) || !nzchar(data.name)) stop('data.name must be length-1 character')
interval <- c(min(x), max(x))
if (anyNA(p)) stop('`p` must not contain NA_real_')
p <- sort.int(unique_allequal(p))
if (!inherits(init, what = 'fmx')) stop('`init` must be \'fmx\' object (e.g., a returned object from ?clust_fmx)')
if (init@distname != distname || dim(init@parM)[1L] != K) stop('`init` is not a ', distname, '-', K, ' fit.')
q_init <- qfmx(p = p, interval = interval, distname = distname, K = K, parM = init@parM, w = init@w)
if (any(id1 <- is.infinite(q_init))) {
if (all(id1)) stop('all quantiles at the starting values are infinite; starting values may be too far from the data')
p <- p[!id1]
}
q_obs <- quantile(x, probs = p)
x_kern <- density.default(x)
x_epdf <- approxfun(x = x_kern$x, y = x_kern$y)
d_obs <- x_epdf(q_obs)
if (anyNA(d_obs)) stop('empirical density must not contain NA_real_')
# use a separate near-zero threshold so the user-supplied `tol` is not overwritten
d_tol <- sqrt(sqrt(.Machine$double.eps))
if (all(d0 <- (abs(d_obs) < d_tol))) stop('must have at least one positive density')
if (any(d0)) {
p <- p[!d0]
d_obs <- d_obs[!d0]
q_obs <- q_obs[!d0]
}
npar <- K * switch(distname, norm = 2L, GH = 4L) + (K - 1L)
if (length(p) < npar) {
stop('Using ', length(p), ' matching-quantiles to estimate a mixture distribution with ', npar, ' independent parameters. ',
'Try increasing `p` (see ?QLMDp for detail).')
}
qvv <- quantile_vcov(p = p, d = d_obs)
qvv_inv <- chol2inv(chol.default(qvv))
parRun <- fmx2dbl(init)
id_constr <- fmx_constraint_user(distname = distname, K = K, user = constraint)
has_constr <- (length(id_constr) > 0L)
par_init <- if (has_constr) parRun[-id_constr] else parRun
if (any(is.infinite(par_init))) {
par_init[(par_init < 0) & is.infinite(par_init)] <- -5
if (any((par_init > 0) & is.infinite(par_init))) stop('unexpected +Inf in starting parameter values')
}
max_return <- .Machine$double.xmax
fn <- if (K == 1L) {
switch(distname, norm = function(x) {
q <- qnorm(p, mean = x[1L], sd = exp(x[2L]), lower.tail = TRUE, log.p = FALSE)
if (any(is.infinite(q))) stop('1-comp normal, infinite `q` should not be returned from any `p` between 0 to 1, see `qnorm(.Machine$double.eps)`')
return(mahalanobis_int(x = q, center = q_obs, invcov = qvv_inv))
}, GH = function(x) {
if (has_constr) parRun[-id_constr] <- x else parRun <- x
q <- qGH(p, A = parRun[1L], B = exp(parRun[2L]), g = parRun[3L], h = exp(parRun[4L]), lower.tail = TRUE, log.p = FALSE)
if (any(is.infinite(q))) return(max_return)
return(mahalanobis_int(x = q, center = q_obs, invcov = qvv_inv))
})
} else {
Kseq <- seq_len(K)
Kseq1 <- seq_len(K - 1L)
switch(distname, norm = {
id_w <- 2L*K + Kseq1
function(x) {
.pM <- array(x[seq_len(2L*K)], dim = c(K, 2L))
t_w <- t.default(pmlogis_first(x[id_w]))
sdinv <- 1 / exp(.pM[,2L])
eff <- cumsum(c(.pM[1L,1L], exp(.pM[2:K,1L]))) * sdinv
q <- vuniroot2(y = p, f = function(q) {
z <- tcrossprod(sdinv, q) - eff
c(t_w %*% pnorm(z))
}, interval = interval)
if (any(is.infinite(q))) return(max_return)
return(mahalanobis_int(x = q, center = q_obs, invcov = qvv_inv))
}
}, GH = {
id_w <- 4L*K + Kseq1
function(x) {
if (has_constr) parRun[-id_constr] <- x else parRun <- x
.pM <- array(parRun[seq_len(4L*K)], dim = c(K, 4L))
t_w <- t.default(pmlogis_first(parRun[id_w]))
g <- .pM[,3L]
h <- exp(.pM[,4L])
sdinv <- 1 / exp(.pM[,2L])
eff <- cumsum(c(.pM[1L,1L], exp(.pM[2:K,1L]))) * sdinv
q <- vuniroot2(y = p, f = function(q) {
z <- q0 <- tcrossprod(sdinv, q) - eff
for (i in Kseq) z[i,] <- .qGH2z(q0 = q0[i,], g = g[i], h = h[i], tol = tol, maxiter = maxiter)
c(t_w %*% pnorm(z))
}, interval = interval, tol = tol, maxiter = maxiter)
if (any(is.infinite(q))) return(max_return)
return(mahalanobis_int(x = q, center = q_obs, invcov = qvv_inv))
}
})
}
y <- optim(par = par_init, fn = fn, ...)
if (has_constr) parRun[-id_constr] <- y$par else parRun <- y$par
ret <- dbl2fmx(x = parRun, K = K, distname = distname)
new(
Class = 'fmx_QLMDe',
data = x, data.name = data.name,
distname = distname, parM = ret$parM, w = ret$w,
quantile_vv = qvv,
epdf = x_epdf,
p = p,
init = init,
optim = y
)
}
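# A minimal usage sketch for QLMDe() (illustrative; `QLMDp` and `clust_fmx`
# supply the default probe probabilities and starting values):
#   set.seed(1)
#   x <- c(rnorm(500), rnorm(500, mean = 4))
#   fit <- QLMDe(x, distname = 'norm', K = 2L)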
npar_fmx <- function(x) {
dm <- dim(x@parM)
(dm[2L] + 1L) * dm[1L] - 1L - length(attr(fmx_constraint(x), which = 'user', exact = TRUE))
}
print.fmx_QLMDe <- function(x, ...) {
parM <- x@parM
K <- dim(parM)[1L]
dimnames(parM)[[1L]] <- paste0(seq_len(K), '-comp.')
parM[] <- sprintf(fmt = '%.2f', parM)
obj <- if (K == 1L) parM else cbind(parM, w = sprintf(fmt = '%.1f%%', x@w*1e2))
heading <- paste0(K, '-Component Mixture of ', switch(x@distname, norm = 'Normal', GH = 'Tukey\'s G-&-H'), ' Distribution')
ci <- confint.fmx_QLMDe(x, parm = 'user')
id_constr <- fmx_constraint(x)
if (length(ci) && !anyNA(ci)) {
ci0 <- sprintf(fmt = '(%.2f~%.2f)', ci[,1L], ci[,2L])
if (length(id_constr)) {
obj[id_constr] <- '.'
obj[-id_constr] <- paste(obj[-id_constr], ci0)
} else obj[] <- paste(obj, ci0)
heading <- paste0(heading, ' (w. 95% Confidence Intervals)')
} else heading <- paste0('Malformed ', heading)
cat('\n ', heading, '\n\n', sep = '')
print.default(obj, quote = FALSE)
if (length(id_constr)) cat('\nwhere ', sQuote('.'), ' denotes an enforced constraint\n', sep = '')
cat('\n')
if (inherits(aod <- attr(x, which = 'anova', exact = TRUE), what = 'anova')) {
print(aod)
cat('\n')
}
print(autoplot.fmx_QLMDe(x))
cat('\n')
return(invisible(x))
}
fmx2dbl <- function(x, distname = x@distname, parM = x@parM, K = dim(parM)[1L], w = x@w, ...) {
w_val <- qmlogis_first(w)
if (!all(is.finite(w_val))) stop('NA or Inf in proportions indicates a degenerate mixture (one or more components have 0% mixing proportion)')
w_nm <- if (K == 1L) character() else paste0('logit', 2:K)
id <- transLog(distname)
parM[, id] <- log(parM[, id])
argnm <- switch(distname, norm = c('mean', 'sdlog'), GH = c('A', 'Blog', 'g', 'hlog'), stop('distribution ', sQuote(distname), ' not supported yet'))
if (K > 1L) {
parM[2:K, 1L] <- log(parM[2:K, 1L] - parM[1:(K-1L), 1L])
locnm <- c(paste0(argnm[1L], 1L), paste0('log\u0394', seq_len(K)[-1L]))
} else locnm <- paste0(argnm[1L], seq_len(K))
out <- c(parM, w_val)
names(out) <- c(locnm, paste0(rep(argnm[-1L], each = K), seq_len(K)), w_nm)
return(out)
}
dbl2fmx <- function(x, K, distname, argnm = dist_anm(distname), ...) {
nx <- length(x)
n_dist <- nx - (K - 1L)
w <- if (K == 1L) 1 else unname(pmlogis_first(x[(n_dist + 1L):nx]))
.pM <- array(x[seq_len(n_dist)], dim = c(K, n_dist/K), dimnames = list(NULL, argnm))
id <- transLog(distname)
.pM[, id] <- exp(.pM[, id, drop = FALSE])
if (K > 1L) .pM[,1L] <- cumsum(c(.pM[1L,1L], exp(.pM[2:K,1L])))
list(parM = .pM, w = w)
}
transLog <- function(distname) {
switch(distname, norm = {
2L
}, GH = {
c(2L, 4L)
}, stop('distribution ', sQuote(distname), ' not supported yet'))
}
quantile_vcov <- function(p, d) {
if (anyNA(p) || anyNA(d)) stop('NA not allowed in probabilities or densities')
if ((n <- length(p)) != length(d)) stop('p and d must match in length')
fs <- tcrossprod(d, d)
p_c <- array(p, dim = c(n,n))
p_r <- t.default(p_c)
p_min <- pmin.int(p_r, p_c)
p_max <- pmax.int(p_r, p_c)
vv <- p_min * (1 - p_max) / fs
return(vv)
}
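# quantile_vcov() above implements the asymptotic covariance of sample
# quantiles, vv[i, j] = min(p_i, p_j) * (1 - max(p_i, p_j)) / (f(q_i) * f(q_j)),
# with f the density at the corresponding quantiles. Worked example
# (illustrative), standard normal at the quartiles and median:
#   p <- c(.25, .5, .75)
#   quantile_vcov(p = p, d = dnorm(qnorm(p)))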
qfmx_gr <- function(
dist,
p = stop('must provide `p`'),
distname = dist@distname, parM = dist@parM, K = dim(parM)[1L], w = dist@w,
interval = qfmx_interval(distname = distname, parM = parM, K = K, w = w, p = c(1e-5, 1-1e-5)),
...
) {
x_skeleton <- fmx2dbl(distname = distname, parM = parM, K = K, w = w)
has_constr <- (length(id_constr <- fmx_constraint(distname = distname, K = K, parM = parM)) > 0L)
x_dbl <- if (has_constr) x_skeleton[-id_constr] else x_skeleton
x_nm <- names(x_dbl)
if (any(is.infinite(x_dbl))) stop('infinite internal parameter; this should not occur once constraints are applied')
if (!is.numeric(interval) || length(interval) != 2L || !all(is.finite(interval))) stop('`interval` must not contain NA nor Inf')
x_sbl <- lapply(x_nm, FUN = as.symbol)
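# build, via metaprogramming, a function qfmx_fn(<free parameters>) that plugs
# the free parameters back into the full skeleton `x_skeleton` and evaluates
# the mixture quantile function; numericDeriv() below then differentiates it
# numerically with respect to each free parameter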
qfmx_fn <- as.function.default(c(setNames(rep(x = alist(. = ), times = length(x_nm)), nm = x_nm), as.call(c(
quote(`{`),
quote(x <- x_skeleton),
if (has_constr) {
as.call(c(quote(`<-`), quote(x[-id_constr]), as.call(c(quote(c), x_sbl))))
} else as.call(c(quote(`<-`), quote(x), as.call(c(quote(c), x_sbl)))),
quote(fx <- dbl2fmx(x = x, distname = distname, K = K, argnm = NULL)),
quote(qfmx(p = p, parM = fx$parM, w = fx$w, interval = interval, distname = distname, K = K))
))))
ret <- tryCatch(expr = {
attr(numericDeriv(
expr = as.call(c(quote(qfmx_fn), x_sbl)),
theta = x_nm,
central = TRUE,
rho = list2env(as.list.default(x_dbl), parent = environment())), which = 'gradient', exact = TRUE)
}, error = function(e) {
cat('stats::numericDeriv in `qfmx_gr` error.\n')
array(NA_real_, dim = c(length(p), length(x_nm)))
})
dimnames(ret)[[2L]] <- x_nm
return(ret)
}
vcov.fmx_QLMDe <- function(object, parm = c('user', 'internal'), ...) {
parm <- match.arg(parm)
distname <- object@distname
parM <- object@parM
K <- dim(parM)[1L]
w <- object@w
x <- object@data
n <- length(x)
interval <- c(min(x), max(x))
p <- object@p
q <- qfmx(p, distname = distname, K = K, parM = parM, w = w, interval = interval, lower.tail = TRUE, log.p = FALSE)
d <- dfmx(q, distname = distname, K = K, parM = parM, w = w, log = FALSE)
tol <- sqrt(sqrt(.Machine$double.eps))
if (all(d0 <- (abs(d) < tol))) {
cat('malformed estimates with all-0 densities\n')
return(invisible())
}
if (any(d0)) {
p <- p[!d0]
q <- q[!d0]
d <- d[!d0]
}
.meat <- quantile_vcov(p = p, d = d)
q_gr <- qfmx_gr(p = p, distname = distname, K = K, parM = parM, w = w)
int_nm <- dimnames(q_gr)[[2L]]
if (all(is.na(q_gr))) {
int_dim <- dim(q_gr)[2L]
int_vv <- array(NA_real_, dim = c(int_dim, int_dim), dimnames = list(int_nm, int_nm))
} else {
.bread <- crossprod_inv(q_gr) %*% t.default(q_gr)
int_vv <- .bread %*% tcrossprod(.meat, .bread) / n
if (anyNA(int_vv)) {
stop('`int_vv` must not contain NA')
}
if (any(diag(int_vv) < 0)) stop('diagonal terms of the variance-covariance matrix must be non-negative')
dimnames(int_vv) <- list(int_nm, int_nm)
}
if (parm == 'internal') return(int_vv)
int_p <- fmx2dbl(object)
anm <- dist_anm(distname)
n_anm <- length(anm)
user_nm <- c(t.default(outer(c(anm, if (K > 1L) 'w'), 1:K, FUN = paste0)))
jacob <- array(0, dim = c(length(int_p), length(user_nm)), dimnames = list(names(int_p), user_nm))
jacob[1L, 1:K] <- 1
if (K > 1L) {
for (k in 2:K) jacob[k, k:K] <- exp(int_p[k])
id_pi <- (n_anm*K+1L):((n_anm+1L)*K-1L)
e_pi <- exp(int_p[id_pi])
sum_pi <- sum(1 + e_pi)^2
jacob[id_pi, n_anm*K+1L] <- - e_pi / sum_pi
jacob[id_pi, id_pi+1L] <- - tcrossprod(e_pi) / sum_pi
}
switch(distname, norm = {
id_exp <- (K+1L):(2*K)
id_identity <- id_constr <- NULL
}, GH = {
id_exp <- c((K+1L):(2*K), (3*K+1L):(4*K))
id_identity <- (2*K+1L):(3*K)
id_constr <- fmx_constraint(object)
})
if (length(id_exp)) jacob[cbind(id_exp, id_exp)] <- exp(int_p[id_exp])
if (length(id_identity)) jacob[cbind(id_identity, id_identity)] <- 1
jacob_free <- if (length(id_constr)) jacob[int_nm, -id_constr] else jacob
return(t.default(jacob_free) %*% int_vv %*% jacob_free)
}
coef.fmx <- function(object, parm = c('user', 'internal'), ...) {
anm <- dist_anm(object@distname)
K <- dim(object@parM)[1L]
cf0 <- switch(match.arg(parm), internal = fmx2dbl(object), user = {
if (K > 1L) {
setNames(c(object@parM, object@w), nm = c(t.default(outer(c(anm, 'w'), 1:K, FUN = paste0))))
} else setNames(c(object@parM), nm = c(t.default(outer(anm, 1:K, FUN = paste0))))
})
if (!length(id_constr <- fmx_constraint(object))) return(cf0)
return(cf0[-id_constr])
}
confint.fmx_QLMDe <- function(object, parm = c('internal', 'user'), level = .95, ...) {
parm <- match.arg(parm)
cf <- coef.fmx(object, parm = parm)
if (!length(vv <- vcov.fmx_QLMDe(object, parm = parm))) return(invisible())
ses <- sqrt(diag(vv))
p1 <- (1 - level) / 2
p <- c(p1, 1 - p1)
ret <- cf + ses %*% t.default(qnorm(p))
dimnames(ret) <- list(names(cf), sprintf('%.1f%%', 1e2*p))
return(ret)
}
logLik.fmx_QLMDe <- function(object, ...) {
if (nobjF <- length(objF <- attr(object, which = 'objF', exact = TRUE))) {
if (inherits(objF[[nobjF]], what = 'logLik')) return(objF[[nobjF]])
}
logd <- dfmx(x = object@data, dist = object, log = TRUE, ...)
# non-finite log-densities, if any, propagate into the sum below
out <- sum(logd)
attr(out, 'logl') <- logd
attr(out, 'nobs') <- length(object@data)
attr(out, 'npar') <- npar_fmx(object)
attr(out, 'df') <- attr(out, which = 'nobs', exact = TRUE) - attr(out, which = 'npar', exact = TRUE)
class(out) <- 'logLik'
return(out)
}
nobs.fmx_QLMDe <- function(object, ...) length(object@data) |
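# A minimal usage sketch for footeValues() below (illustrative): five
# intervals with constant per-interval origination (p), extinction (q) and
# sampling (r) rates, all treated as continuous-time rates:
#   footeValues(p = rep(0.1, 5), q = rep(0.1, 5), r = rep(0.5, 5))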
footeValues <- function(
p, q, r,
PA_n = 0,
PB_1 = 0,
p_cont = TRUE,
q_cont = TRUE,
Nb = 1
){
if (length(p) != length(q)){stop("p is not the same length as q!")}
if (length(p) != length(r)){stop("p is not the same length as r!")}
if (length(r) != length(q)){stop("q is not the same length as r!")}
n <- length(p)
if(q_cont){
Nbt <- Nb*exp(-q)
}else{
Nbt <- Nb*(1-q)
}
if(q_cont){
NbL <- Nb*(1-exp(-q))
}else{
NbL <- Nb*q
}
if(p_cont){
if(q_cont){
NFt <- Nb*exp(p-q)*(1-exp(-p))
}else{
NFt <- Nb*exp(p)*(1-q)*(1-exp(-p))
}
}else{
if(q_cont){
NFt <- Nb*p*exp(-q)
}else{
NFt <- Nb*p*(1-q)
}
}
NFL <- numeric()
if(p_cont){
if(q_cont){
for(i in 1:n){
if(p[i] == q[i]){
NFL[i] <- Nb*(exp(-q[i])+p[i]-1)
}else{
NFL[i] <- Nb*(((q[i]*exp(p[i]-q[i]))+((p[i]-q[i])*exp(-q[i]))-p[i])/(p[i]-q[i]))
}
}
}else{
NFL <- Nb*q*(exp(p)-1)
}
}else{
if(q_cont){
NFL <- Nb*p*(1-exp(-q))
}else{
NFL <- Nb*p*q
}
}
PD_bt <- 1-exp(-r)
if(q_cont){
PD_bL <- (((r+q*exp(-(q+r)))/(q+r))-exp(-q))/(1-exp(-q))
}else{
PD_bL <- 1-exp(-r)
}
if(p_cont){
PD_Ft <- (((r+p*exp(-(p+r)))/(p+r))-exp(-p))/(1-exp(-p))
}else{
PD_Ft <- 1-exp(-r)
}
if(p_cont){
if(q_cont){
PD_FL <- numeric()
for(i in 1:n){
if(p[i] == q[i]){
PD_FL[i] <- (Nb*p[i]/NFL[i])*((r[i]/(p[i]+r[i]))-((1-exp(-p[i]))/p[i])
+(p[i]*(1-exp(-(p[i]+r[i])))/((p[i]+r[i])^2)))
}else{
PD_FL[i] <- (Nb/NFL[i])*(((p[i]*r[i]*(exp(p[i]-q[i])-1))/((q[i]+r[i])*(p[i]-q[i])))
+((p[i]*q[i]*exp(-(q[i]+r[i]))*(exp(p[i]+r[i])-1))/((p[i]+r[i])*(q[i]+r[i])))
-(exp(-q[i])*(exp(p[i])-1)))
}
}
}else{
PD_FL <- (-((p*(-exp(-r))-(exp(p)*r)+p+r)/((exp(p)-1)*(p+r))))
}
}else{
if(q_cont){
PD_FL <- (-((q*(-exp(-r))-(exp(q)*r)+q+r)/((exp(q)-1)*(q+r))))
}else{
PD_FL <- 1-exp(-r)
}
}
PA <- numeric()
PA[n] <- PA_n
for(i in 1:(n-1)){
if(q_cont){
PA_i <- 0
for(k in (i+1):n){
if((i+1) <= (k-1)){m <- (i+1):(k-1)}else{m <- NA}
PA_i <- PA_i+ifelse(is.na(m[1]),1,exp(-sum(q[m])))*(1-exp(-q[k]))*(1-(
ifelse(is.na(m[1]),1,exp(-sum(r[m])))*(1-PD_bL[k])))
}
PA[i] <- PA_i+(exp(-sum(q[(i+1):n])))*(1-exp(-sum(r[(i+1):n]))*(1-PA[n]))
}else{
PA_i <- 0
for(k in (i+1):n){
if((i+1) <= (k-1)){m <- (i+1):(k-1)}else{m <- NA}
PA_i <- PA_i+ifelse(is.na(m[1]),1,prod(1-q[m]))*q[k]*(1-(
ifelse(is.na(m[1]),1,exp(-sum(r[m])))*(1-PD_bL[k])))
}
PA[i] <- PA_i+(prod(1-q[(i+1):n])*(1-exp(-sum(r[(i+1):n]))*(1-PA[n])))
}
}
PB <- numeric()
PB[1] <- PB_1
for(i in 2:n){
if(p_cont){
PB_i <- 0
for(k in 1:(i-1)){
if((k+1) <= (i-1)){m <- (k+1):(i-1)}else{m <- NA}
PB_i <- PB_i+((ifelse(is.na(m[1]),1,exp(-sum(p[m])))*(1-exp(-p[k])))*(1-
ifelse(is.na(m[1]),1,exp(-sum(r[m])))*(1-PD_Ft[k])))
}
PB[i] <- PB_i+(exp(-sum(p[1:(i-1)])))*(1-exp(-sum(r[1:(i-1)]))*(1-PB[1]))
}else{
PB_i <- 0
for(k in 1:(i-1)){
if((k+1) <= (i-1)){m <- (k+1):(i-1)}else{m <- NA}
PB_i <- PB_i+((ifelse(is.na(m[1]),1,prod(1/(1+p[m])))*(p[k]/(1+p[k])))*(1-
ifelse(is.na(m[1]),1,exp(-sum(r[m])))*(1-PD_Ft[k])))
}
PB[i] <- PB_i+(prod(1/(1+p[1:(i-1)]))*(1-exp(-sum(r[1:(i-1)]))*(1-PB[1])))
}
}
Xbt <- Nbt*PB*PA
XbL <- NbL*PB*PD_bL+Nbt*PB*PD_bt*(1-PA)
XFt <- NFt*PA*PD_Ft+Nbt*PA*PD_bt*(1-PB)
XFL <- (NFL*PD_FL)+(NbL*(1-PB)*PD_bL)+(NFt*(1-PA)*PD_Ft)+(Nbt*(1-PB)*PD_bt*(1-PA))
res <- data.frame(cbind(Nb = rep(Nb,n),Nbt,NbL,NFt,NFL,PD_bt,PD_bL,PD_Ft,PD_FL,PA,PB,Xbt,XbL,XFt,XFL))
return(res)
} |
ind_clus_kml<- function(AID, cn, locs, cs, centroid_calc= "mean", overwrite= TRUE, dir= NULL){
store_dir<-getwd()
on.exit(setwd(store_dir))
if(AID %in% cs$AID == FALSE){stop(paste("AID", AID, "not found", sep=" "))}
if(length(cn)!=1){stop("'ind_clus_kml()' only accepts individual clusters")}
if(cn %in% cs[which(cs$AID == AID), "clus_ID"] == FALSE){stop(paste("Cluster", cn, "does not exist for", AID, sep=" "))}
if((!is.na(centroid_calc) && !is.null(centroid_calc) && (centroid_calc == "mean" | centroid_calc == "median"))==FALSE){stop("'centroid_calc' argument must = 'median' or 'mean'")}
if((!is.na(overwrite) && !is.null(overwrite) && (overwrite == TRUE | overwrite == FALSE))==FALSE){stop("'overwrite' argument must = 1/T/TRUE or 0/F/FALSE")}
ind_clus<-locs[which(locs$AID == AID & locs$clus_ID == cn),]
ind_clus<-ind_clus[which(!is.na(ind_clus$Lat)),]
ind_clus$AID2<-""
ind_clus<-ind_clus[,c("AID2", "AID", "clus_ID", "TelemDate", "Long", "Lat")]
clus_g_c<-cs[which(cs$AID == AID & cs$clus_ID==cn),]
if(centroid_calc=="median"){
spgeo<-sp::SpatialPointsDataFrame(matrix(c(clus_g_c$g_med_Long, clus_g_c$g_med_Lat), ncol=2), clus_g_c, proj4string=sp::CRS("+proj=longlat +datum=WGS84"))
} else {
spgeo<-sp::SpatialPointsDataFrame(matrix(c(clus_g_c$g_c_Long, clus_g_c$g_c_Lat), ncol=2), clus_g_c, proj4string=sp::CRS("+proj=longlat +datum=WGS84"))
}
aa<-ind_clus[1,]
aa$AID2<-"Centroid"
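# push the centroid's timestamp 25200 s (7 h) past the last location,
# presumably so the centroid sorts last in the KML time sequence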
aa$TelemDate<-ind_clus$TelemDate[nrow(ind_clus)]+25200
aa$Lat<-spgeo$coords.x2
aa$Long<-spgeo$coords.x1
ind_clus<-rbind(ind_clus, aa)
if(overwrite==TRUE){
f_name<-"ind.kml"
setwd(tempdir())
} else {
f_name<-paste(AID, "_", cn, ".kml", sep="")
setwd(dir)
}
sp<- sp::SpatialPoints(ind_clus[,c("Long","Lat")])
sp::proj4string(sp) <- sp::CRS("+proj=longlat +datum=WGS84")
ST<- spacetime::STIDF(sp=sp,time=ind_clus$TelemDate, data=ind_clus)
ind_clus[which(ind_clus$AID2 == "Centroid"), "TelemDate"]<- NA
plotKML::plotKML(obj=ST, folder.name=paste(AID, "_", cn, sep="") , file.name=f_name,
size= .45,
points_names= paste(ind_clus$AID2,ind_clus$AID,ind_clus$clus_ID,ind_clus$TelemDate,sep=" "),
metadata=NULL,
open.kml=TRUE,
LabelScale=0.6)
rm(spgeo, sp, ST, aa, ind_clus, f_name, clus_g_c)
} |
GB <- 1024 * 1024 * 1024
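# A usage sketch for solve_m_mem_lim() below (illustrative): solve for the
# number of rows `m_chunk` of a single m-by-n working matrix that fits in
# roughly 2 GiB:
#   ans <- solve_m_mem_lim(n = 1000, mat_m_n = 1, mem = 2)
#   ans$m_chunk   # rows per chunk
#   ans$mem_chunk # predicted memory use of one chunk, in bytes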
solve_m_mem_lim <- function(
n,
m = NA,
mat_m_n = 0,
mat_n_n = 0,
vec_m = 0,
vec_n = 0,
mem = NA,
mem_factor = 0.7
) {
if (missing(n))
stop('`n` is required!')
if (n <= 0)
stop('`n` must be positive! Passed ', n)
if (!is.na(m) && m <= 0)
stop('`m` must be positive! Passed ', m)
if (mat_m_n < 0)
stop('`mat_m_n` must be non-negative! Passed ', mat_m_n)
if (mat_n_n < 0)
stop('`mat_n_n` must be non-negative! Passed ', mat_n_n)
if (vec_m < 0)
stop('`vec_m` must be non-negative! Passed ', vec_m)
if (vec_n < 0)
stop('`vec_n` must be non-negative! Passed ', vec_n)
if (is.na(mem)) {
mem <- get_mem_lim(factor = mem_factor)
} else {
if (mem <= 0)
stop('`mem` must be positive! Passed ', mem)
mem <- mem * GB
}
if (mat_m_n == 0 && vec_m == 0)
stop('At least one of `mat_m_n` or `vec_m` must be non-zero! (there is no `m` to solve for otherwise)')
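# `mo8` and `ao8` appear to be fixed R object overheads expressed in 8-byte
# units (roughly 216 bytes per matrix and 48 bytes per atomic vector); this
# interpretation is an assumption, not documented in the code itself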
mo8 <- 27
ao8 <- 6
n <- n + 0.0
m_chunk <- (
+ mem / 8
- mat_m_n * mo8
- vec_m * ao8
- mat_n_n * (n * n + mo8)
- vec_n * (n + ao8)
) / (
+ mat_m_n * n
+ vec_m
)
if (m_chunk < 0)
stop('The resulting `m_chunk` was negative! This is because either `mat_n_n` or `vec_n` are non-zero and `n` alone is too large for the available memory (even for `m_chunk == 0`). The solution is to free more memory (ideal) or to reduce `n` if possible.')
if (!is.na(m)) {
if (m < m_chunk) {
m_chunk <- m
} else {
m_chunk <- ceiling( m / ceiling( m / m_chunk ) )
}
} else {
m_chunk <- floor( m_chunk )
}
mem_chunk <- (
+ mat_m_n * (m_chunk * n + mo8)
+ mat_n_n * (n * n + mo8)
+ vec_m * (m_chunk + ao8)
+ vec_n * (n + ao8)
) * 8
return(
list(
m_chunk = m_chunk,
mem_chunk = mem_chunk,
mem_lim = mem
)
)
} |
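# Example call for get_captions() below (illustrative; requires prior YouTube
# OAuth, e.g. via tuber::yt_oauth(), and a caption-track id, not a video id):
#   caps <- get_captions(id = "<caption-track-id>", as_raw = FALSE)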
get_captions <- function (id = NULL, lang = "en",
format = "sbv", as_raw = TRUE, ...) {
if ( !is.character(id)) {
stop("Must specify a valid id.")
}
querylist <- list(tlang = lang, tfmt = format)
raw_res <- tuber_GET(paste0("captions", "/", id), query = querylist, ...)
if (!as_raw) {
raw_res <- rawToChar(raw_res)
raw_res <- strsplit(raw_res, split = "\n")[[1]]
}
raw_res
} |
# NOTE: the heading marker strings are inferred from the function names
# (assumed to be "\n# " through "\n###### ")
rmd_b_heading_1 <- function() {
row_ind <- rs_get_ind_first_selected_row()
rs_insert_at_row_start(row_ind, "\n# ")
}
rmd_b_heading_2 <- function() {
row_ind <- rs_get_ind_first_selected_row()
rs_insert_at_row_start(row_ind, "\n## ")
}
rmd_b_heading_3 <- function() {
row_ind <- rs_get_ind_first_selected_row()
rs_insert_at_row_start(row_ind, "\n### ")
}
rmd_b_heading_4 <- function() {
row_ind <- rs_get_ind_first_selected_row()
rs_insert_at_row_start(row_ind, "\n#### ")
}
rmd_b_heading_5 <- function() {
row_ind <- rs_get_ind_first_selected_row()
rs_insert_at_row_start(row_ind, "\n##### ")
}
rmd_b_heading_6 <- function() {
row_ind <- rs_get_ind_first_selected_row()
rs_insert_at_row_start(row_ind, "\n###### ")
}
rmd_b_heading_1_title <- function() {
text <- repeat_symbol("=", 60)
rs_enclose_first_row_with_lines("\n", below = text)
}
rmd_b_heading_2_subtitle <- function() {
text <- repeat_symbol("-", 60)
rs_enclose_first_row_with_lines("\n", below = text)
} |
context("Estimate Win Probability")
test_that("estimate_win_prob returns tibble with correct values", {
input_df <- tibble::tibble(
option_name = c("A", "B", "C"),
sum_clicks = c(1000, 1000, 1000),
sum_conversions = c(100, 120, 110)
)
output <- estimate_win_prob(input_df, "conversion_rate")
all_option_names <- input_df$option_name
expected_column_names <- c("option_name", "win_prob_raw", "win_prob")
expect_true(is.data.frame(output))
expect_true(all(expected_column_names %in% colnames(output)))
expect_true(all(output$option_name %in% all_option_names))
expect_true(is.double(output$win_prob_raw))
expect_true(is.character(output$win_prob))
expect_equal(nrow(output), length(all_option_names))
})
test_that("estimate_win_prob returns tibble with correct values when win prob is 0", {
input_df <- tibble::tibble(
option_name = c("A", "B", "C"),
sum_clicks = c(1000, 1000, 1000),
sum_conversions = c(1, 120, 2)
)
output <- estimate_win_prob(input_df, "conversion_rate")
all_option_names <- input_df$option_name
expected_column_names <- c("option_name", "win_prob_raw", "win_prob")
expect_true(is.data.frame(output))
expect_true(all(expected_column_names %in% colnames(output)))
expect_true(all(output$option_name %in% all_option_names))
expect_true(is.double(output$win_prob_raw))
expect_true(is.character(output$win_prob))
expect_equal(nrow(output), length(all_option_names))
}) |
scriptLocation <-
function()
system.file("chapters", package = "AppliedPredictiveModeling") |
useArgonDash <- function() {
if (!requireNamespace(package = "argonDash", quietly = TRUE) || !requireNamespace(package = "argonR", quietly = TRUE))
message("Packages 'argonDash' and 'argonR' are required to run this function")
deps <- findDependencies(argonDash::argonDashPage(
navbar = argonDash::argonDashNavbar(),
sidebar = argonDash::argonDashSidebar(id = "mysidebar"),
header = argonDash::argonDashHeader(),
body = argonDash::argonDashBody()
))
attachDependencies(tags$div(), value = deps)
} |
test_that("make_docvars() works", {
docvar1 <- quanteda:::make_docvars(0L, docname = character())
docvar2 <- quanteda:::make_docvars(3L, docname = c("A", "B", "C"))
docvar3 <- quanteda:::make_docvars(3L, docname = 1:3)
docvar4 <- quanteda:::make_docvars(10L)
docvar5 <- quanteda:::make_docvars(3L, docname = c("A", "B", "B"))
expect_equal(dim(docvar1), c(0, 3))
expect_equal(dim(docvar2), c(3, 3))
expect_equal(dim(docvar3), c(3, 3))
expect_equal(colnames(docvar1), c("docname_", "docid_", "segid_"))
expect_equal(colnames(docvar2), c("docname_", "docid_", "segid_"))
expect_equal(colnames(docvar3), c("docname_", "docid_", "segid_"))
expect_equal(colnames(docvar4), c("docname_", "docid_", "segid_"))
expect_equal(docvar4[["docname_"]], paste0("text", 1:10))
expect_error(quanteda:::make_docvars(n = 2, docname = c("A", "B", "C")))
expect_equal(docvar5[["docname_"]], c("A.1", "B.1", "B.2"))
expect_error(quanteda:::make_docvars(n = "3"))
expect_error(quanteda:::make_docvars(n = 1.4))
docvar4 <- quanteda:::make_docvars(5L, c("A", "A", "B", "B", "C"))
expect_equal(docvar4[["docname_"]], c("A.1", "A.2", "B.1", "B.2", "C.1"))
docvar5 <- quanteda:::make_docvars(5L, c("A", "B", "B", "A", "C"))
expect_equal(docvar5[["docname_"]], c("A.1", "B.1", "B.2", "A.2", "C.1"))
docvar6 <- quanteda:::make_docvars(5L, c("A", "A", "B", "B", "C"), unique = FALSE)
expect_equal(docvar6[["docname_"]], c("A", "A", "B", "B", "C"))
expect_equal(docvar6[["segid_"]], c(1, 1, 1, 1, 1))
})
test_that("reshape_docvars() works", {
docvar1 <- data.frame("docname_" = c("doc1", "doc2"),
"docid_" = factor(c("doc1", "doc2")),
"segid_" = c(1, 1), stringsAsFactors = FALSE)
expect_identical(
quanteda:::reshape_docvars(docvar1, c(1, 2))[["docname_"]],
c("doc1", "doc2")
)
expect_identical(
quanteda:::reshape_docvars(docvar1, c(1, 1, 2, 2))[["docname_"]],
c("doc1.1", "doc1.2", "doc2.1", "doc2.2")
)
docvar2 <- data.frame("docname_" = c("doc1.1", "doc1.2"),
"docid_" = factor(c("doc1", "doc1")),
"segid_" = c(1, 1))
expect_identical(
quanteda:::reshape_docvars(docvar2, c(1, 2))[["docname_"]],
c("doc1.1", "doc1.2")
)
expect_identical(
quanteda:::reshape_docvars(docvar2, c(1, 1, 2, 2))[["docname_"]],
c("doc1.1", "doc1.2", "doc1.3", "doc1.4")
)
expect_identical(
quanteda:::reshape_docvars(docvar2, c(1, 1, 1, 1))[["docname_"]],
c("doc1.1", "doc1.2", "doc1.3", "doc1.4")
)
})
test_that("upgrade_docvars() works", {
docvar1 <- data.frame()
docvar2 <- data.frame("var1" = c(100, 200, 300),
"var2" = c(TRUE, TRUE, FALSE))
docvar2$lis <- list(1:3, -5, 3:4)
docvar3 <- data.frame("var1" = c(100, 200, 300),
"var2" = c(TRUE, TRUE, FALSE),
row.names = c("A", "B", "C"))
docvar3$lis <- list(1:3, -5, 3:4)
docvar4 <- data.frame("docname_" = c("A", "B", "C"),
"docid_" = factor(c("A", "B", "C")),
"segid_" = rep(1L, 3),
"var1" = c(100, 200, 300),
"var2" = c(TRUE, TRUE, FALSE),
stringsAsFactors = FALSE)
docvar4$lis <- list(1:3, -5, 3:4)
expect_identical(
quanteda:::upgrade_docvars(docvar1, c("A", "B", "C")),
docvar4[, 1:3]
)
expect_identical(
quanteda:::upgrade_docvars(docvar3),
docvar4
)
expect_identical(
quanteda:::upgrade_docvars(docvar2, c("A", "B", "C")),
docvar4
)
expect_identical(
quanteda:::upgrade_docvars(docvar2, c("A", "B", "C")),
docvar4
)
})
test_that("get_docvars() works", {
data <- data.frame("docname_" = c("A", "B", "C"),
"docid_" = factor(c("A", "B", "C")),
"segid_" = rep(1L, 3),
"var1" = c(100, 200, 300),
"var2" = c(TRUE, TRUE, FALSE),
stringsAsFactors = FALSE)
expect_identical(
quanteda:::select_docvars(data, user = FALSE, system = TRUE),
data.frame("docname_" = c("A", "B", "C"),
"docid_" = factor(c("A", "B", "C")),
"segid_" = rep(1L, 3),
stringsAsFactors = FALSE)
)
expect_error(quanteda:::select_docvars(data, "docid_", user = FALSE, system = FALSE))
expect_identical(
quanteda:::select_docvars(data, "docid_", user = FALSE, system = TRUE),
data.frame("docid_" = factor(c("A", "B", "C")),
stringsAsFactors = FALSE)
)
expect_identical(
quanteda:::select_docvars(data, "docid_", user = FALSE, system = TRUE, drop = TRUE),
factor(c("A", "B", "C"))
)
expect_identical(
quanteda:::select_docvars(data),
data.frame("var1" = c(100, 200, 300),
"var2" = c(TRUE, TRUE, FALSE),
stringsAsFactors = FALSE)
)
expect_identical(
quanteda:::select_docvars(data, "var1"),
data.frame("var1" = c(100, 200, 300), stringsAsFactors = FALSE)
)
expect_identical(
quanteda:::select_docvars(data, "var1", drop = TRUE),
c(100, 200, 300)
)
})
test_that("set_docvars() works", {
data <- data.frame("docname_" = c("A", "B", "C"),
"docid_" = c("A", "B", "C"),
"docnum_" = 1L:3L,
"segid_" = rep(1L, 3),
"var1" = c(100, 200, 300),
"var2" = c(TRUE, TRUE, FALSE),
stringsAsFactors = FALSE)
quanteda:::set_docvars(data, "var2") <- c(10, 20, 30)
expect_identical(data[["var2"]], c(10, 20, 30))
quanteda:::set_docvars(data, "var3") <- c(1000, 2000, 3000)
expect_identical(data[["var3"]], c(1000, 2000, 3000))
quanteda:::set_docvars(data) <- data.frame("var1" = c(100, 200, 300),
"var2" = c(TRUE, TRUE, TRUE))
expect_identical(data[["var1"]], c(100, 200, 300))
expect_identical(data[["var2"]], c(TRUE, TRUE, TRUE))
expect_identical(names(data), c("docname_", "docid_", "segid_", "var1", "var2"))
quanteda:::set_docvars(data) <- NULL
expect_identical(names(data), c("docname_", "docid_", "segid_"))
})
test_that("docvars of corpus is a data.frame", {
expect_equal(
class(docvars(data_corpus_inaugural)),
"data.frame"
)
})
test_that("docvars with non-existent field names generate correct error messages", {
expect_error(
docvars(data_corpus_inaugural, c("President", "nonexistent")),
"field\\(s\\) nonexistent not found"
)
toks <- tokens(data_corpus_inaugural, include_docvars = TRUE)
expect_error(
docvars(toks, c("President", "nonexistent")),
"field\\(s\\) nonexistent not found"
)
})
test_that("docvars is working with tokens", {
corp <- data_corpus_inaugural[1:58]
toks <- tokens(corp, include_docvars = TRUE)
expect_equal(docvars(toks), docvars(corp))
expect_equal(docvars(toks, "President"), docvars(corp, "President"))
toks2 <- toks[docvars(toks, "Year") > 2000]
expect_equal(ndoc(toks2), nrow(docvars(toks2)))
expect_equal(
docvars(quanteda:::"docvars<-"(toks2, "Type", "Speech"), "Type"),
rep("Speech", 5)
)
expect_output(
print(docvars(quanteda:::"docvars<-"(toks, field = NULL, NULL))),
"data frame with 0 columns and 58 rows"
)
expect_equal(
docvars(quanteda:::"docvars<-"(toks, field = "ID", 1:58), "ID"),
1:58
)
})
test_that("docvars is working with dfm", {
corp <- data_corpus_inaugural
toks <- tokens(corp, include_docvars = TRUE)
thedfm <- dfm(toks)
expect_equal(docvars(toks), docvars(thedfm))
expect_equal(docvars(toks, "Party"), docvars(corp, "Party"))
thedfm2 <- dfm(tokens(corp))
expect_equal(docvars(corp), docvars(thedfm2))
expect_equal(docvars(corp, "Party"), docvars(thedfm2, "Party"))
corp2 <- corpus_subset(corp, Party == "Democratic")
thedfm3 <- dfm(tokens(corp2))
expect_equal(docvars(corp2), docvars(thedfm3))
})
test_that("$ returns docvars", {
corp <- data_corpus_inaugural
toks <- tokens(corp, include_docvars = TRUE)
dfmat <- dfm(toks)
expect_equal(docvars(corp, "Party"), corp$Party)
expect_equal(docvars(toks, "Party"), toks$Party)
expect_equal(docvars(dfmat, "Party"), dfmat$Party)
})
test_that("creating tokens and dfms with empty docvars", {
expect_equal(
length(docvars(tokens(data_corpus_inaugural, include_docvars = FALSE))), 0
)
expect_equal(
length(docvars(suppressWarnings(dfm(tokens(data_corpus_inaugural), include_docvars = FALSE)))), 0
)
})
test_that("tokens works works with one docvar", {
docv1 <- data.frame(dvar1 = c("A", "B"))
mycorpus1 <- corpus(c(d1 = "This is sample document one.",
d2 = "Here is the second sample document."),
docvars = docv1)
toks1 <- tokens(mycorpus1, include_docvars = TRUE)
expect_equivalent(docvars(toks1), docv1)
})
test_that("tokens works works with two docvars", {
docv2 <- data.frame(dvar1 = c("A", "B"),
dvar2 = c(1, 2))
mycorpus2 <- corpus(c(d1 = "This is sample document one.",
d2 = "Here is the second sample document."),
docvars = docv2)
toks2 <- tokens(mycorpus2, include_docvars = TRUE)
expect_equivalent(docvars(toks2), docv2)
})
test_that("dfm works works with one docvar", {
docv1 <- data.frame(dvar1 = c("A", "B"))
mycorpus1 <- corpus(c(d1 = "This is sample document one.",
d2 = "Here is the second sample document."),
docvars = docv1)
dfm1 <- suppressWarnings(dfm(tokens(mycorpus1), include_docvars = TRUE))
expect_equivalent(docvars(dfm1), docv1)
})
test_that("dfm works works with two docvars", {
docv2 <- data.frame(dvar1 = c("A", "B"),
dvar2 = c(1, 2))
mycorpus2 <- corpus(c(d1 = "This is sample document one.",
d2 = "Here is the second sample document."),
docvars = docv2)
dfm2 <- suppressWarnings(dfm(tokens(mycorpus2), include_docvars = TRUE))
expect_equivalent(docvars(dfm2), docv2)
})
test_that("object always have docvars in the same rows as documents", {
txt <- data_char_ukimmig2010
corp1 <- corpus(txt)
expect_true(nrow(docvars(corp1)) == ndoc(corp1))
expect_true(all(row.names(docvars(corp1)) == seq_len(ndoc(corp1))))
corp2 <- corpus_segment(corp1, "\\p{P}", valuetype = "regex")
expect_true(nrow(docvars(corp2)) == ndoc(corp2))
expect_true(all(row.names(docvars(corp2)) == seq_len(ndoc(corp2))))
corp3 <- corpus_reshape(corp1, to = "sentences")
expect_true(nrow(docvars(corp3)) == ndoc(corp3))
expect_true(all(row.names(docvars(corp3)) == seq_len(ndoc(corp3))))
corp4 <- corpus_sample(corp1, size = 5)
expect_true(nrow(docvars(corp4)) == ndoc(corp4))
expect_true(all(row.names(docvars(corp4)) == seq_len(ndoc(corp4))))
toks1 <- tokens(txt)
expect_true(nrow(docvars(toks1)) == ndoc(toks1))
expect_true(all(row.names(docvars(toks1)) == seq_len(ndoc(toks1))))
toks2 <- tokens(corpus(txt))
expect_true(nrow(docvars(toks2)) == ndoc(toks2))
expect_true(all(row.names(docvars(toks2)) == seq_len(ndoc(toks2))))
toks3 <- quanteda:::tokens_group(toks1, rep(c(1, 2, 3), 3))
expect_true(nrow(docvars(toks3)) == ndoc(toks3))
expect_true(all(row.names(docvars(toks3)) == seq_len(ndoc(toks3))))
toks4 <- tokens_select(toks1, stopwords())
expect_true(nrow(docvars(toks4)) == ndoc(toks4))
expect_true(all(row.names(docvars(toks4)) == seq_len(ndoc(toks4))))
dfm1 <- dfm(tokens(txt))
expect_true(nrow(docvars(dfm1)) == ndoc(dfm1))
expect_true(all(row.names(docvars(dfm1)) == seq_len(ndoc(dfm1))))
dfm2 <- dfm(tokens(txt))
expect_true(nrow(docvars(dfm2)) == ndoc(dfm2))
expect_true(all(row.names(docvars(dfm2)) == seq_len(ndoc(dfm2))))
dfm3 <- dfm(tokens(corpus(txt)))
expect_true(nrow(docvars(dfm3)) == ndoc(dfm3))
expect_true(all(row.names(docvars(dfm3)) == seq_len(ndoc(dfm3))))
dfm4 <- dfm_group(dfm1, rep(c(1, 2, 3), 3))
expect_true(nrow(docvars(dfm4)) == ndoc(dfm4))
expect_true(all(row.names(docvars(dfm4)) == seq_len(ndoc(dfm4))))
dfm5 <- suppressWarnings(dfm(dfm1, groups = rep(c(1, 2, 3), 3)))
expect_true(nrow(docvars(dfm5)) == ndoc(dfm5))
expect_true(all(row.names(docvars(dfm5)) == seq_len(ndoc(dfm5))))
dfm6 <- dfm_subset(dfm1, rep(c(TRUE, TRUE, FALSE), 3))
expect_true(nrow(docvars(dfm6)) == ndoc(dfm6))
expect_true(all(row.names(docvars(dfm6)) == seq_len(ndoc(dfm6))))
dfm7 <- rbind(dfm1, dfm1)
expect_true(nrow(docvars(dfm7)) == ndoc(dfm7))
expect_true(all(row.names(docvars(dfm7)) == seq_len(ndoc(dfm7))))
dfm8 <- suppressWarnings(cbind(dfm1, dfm1))
expect_true(nrow(docvars(dfm8)) == ndoc(dfm8))
expect_true(all(row.names(docvars(dfm8)) == seq_len(ndoc(dfm8))))
})
test_that("error when nrow and ndoc mismatch", {
toks <- tokens(c("a b c", "b c d", "c d e"))
expect_error(docvars(toks) <- data.frame(var = c(1, 5)))
expect_silent(docvars(toks) <- data.frame(var = c(1, 5, 6)))
expect_error(docvars(toks) <- data.frame(var = c(1, 5, 6, 3)))
mt <- dfm(toks)
expect_error(docvars(mt) <- data.frame(var = c(1, 5)))
expect_silent(docvars(mt) <- data.frame(var = c(1, 5, 6)))
expect_error(docvars(mt) <- data.frame(var = c(1, 5, 6, 3)))
})
test_that("assignment of NULL only drop columns", {
toks <- tokens(data_corpus_inaugural[1:14])
docvars(toks) <- NULL
expect_identical(dim(docvars(toks)), c(14L, 0L))
mt <- dfm(tokens(data_corpus_inaugural[1:14]))
docvars(mt) <- NULL
expect_identical(dim(docvars(mt)), c(14L, 0L))
})
test_that("can assign docvars when value is a dfm (
mycorp <- corpus(data_char_ukimmig2010)
thedfm <- dfm(tokens(mycorp))[, "the"]
docvars(mycorp) <- thedfm
expect_identical(
docvars(mycorp),
data.frame(the = as.vector(thedfm))
)
anddfm <- dfm(tokens(mycorp))[, "and"]
docvars(anddfm) <- anddfm
expect_identical(
docvars(anddfm),
data.frame(and = as.vector(anddfm))
)
toks <- tokens(mycorp)
docvars(toks) <- anddfm
expect_identical(
docvars(toks),
data.frame(and = as.vector(anddfm))
)
})
test_that("docvar can be renamed (
corp <- data_corpus_inaugural
names(docvars(corp))[c(1, 3)] <- c("year", "forename")
expect_identical(names(docvars(corp)),
c("year", "President", "forename", "Party"))
toks <- tokens(data_corpus_inaugural)
names(docvars(toks))[c(1, 3)] <- c("year", "forename")
expect_identical(names(docvars(toks)),
c("year", "President", "forename", "Party"))
dfmat <- dfm(tokens(data_corpus_inaugural))
names(docvars(dfmat))[c(1, 3)] <- c("year", "forename")
expect_identical(names(docvars(dfmat)),
c("year", "President", "forename", "Party"))
})
test_that("docvar assignment is fully robust including to renaming (
corp <- corpus(c("A b c d.", "A a b. B c."))
docvars(corp) <- data.frame(testdv = 10:11)
expect_identical(
docvars(corp),
data.frame(testdv = 10:11)
)
corp <- corpus(c("A b c d.", "A a b. B c."))
expect_error(
docvars(corp) <- c("x", "y"),
"you must supply field name(s)", fixed = TRUE
)
docvars(corp) <- matrix(c("x", "y"), ncol = 1)
expect_identical(
docvars(corp),
data.frame(V1 = c("x", "y"), stringsAsFactors = FALSE)
)
df <- data.frame(c("x", "y"), c("a", "b"), 11:12, stringsAsFactors = FALSE)
names(df) <- NULL
names(df)[2] <- "name2"
expect_error(docvars(corp) <- df,
"data.frame must have column names")
})
test_that("docvars<-.corpus and name uniqueness", {
corp <- corpus(c("A b c d.", "A a b. B c."))
docvars(corp) <- data.frame(docvar1 = 1:2)
docvars(corp)[2] <- 11:12
docvars(corp)[3] <- c("a", "b")
expect_identical(
docvars(corp),
data.frame(docvar1 = 1:2, V2 = 11:12, V3 = c("a", "b"), stringsAsFactors = FALSE)
)
})
test_that("docvars<- NULL removes docvars", {
corp1 <- data_corpus_inaugural
docvars(corp1)[c(1, 3)] <- NULL
expect_identical(names(docvars(corp1)), c("President", "Party"))
corp2 <- data_corpus_inaugural
docvars(corp2)[c("President", "Party")] <- NULL
expect_identical(names(docvars(corp2)), c("Year", "FirstName"))
corp3 <- data_corpus_inaugural
docvars(corp3, c("President", "Party")) <- NULL
expect_identical(names(docvars(corp3)), c("Year", "FirstName"))
toks <- tokens(data_corpus_inaugural)
toks1 <- toks
docvars(toks1)[c(1, 3)] <- NULL
expect_identical(names(docvars(toks1)), c("President", "Party"))
toks2 <- toks
docvars(toks2)[c("President", "Party")] <- NULL
expect_identical(names(docvars(toks2)), c("Year", "FirstName"))
toks3 <- toks
docvars(toks3, c("President", "Party")) <- NULL
expect_identical(names(docvars(toks3)), c("Year", "FirstName"))
dfmat <- dfm(toks)
dfmat1 <- dfmat
docvars(dfmat1)[c(1, 3)] <- NULL
expect_identical(names(docvars(dfmat1)), c("President", "Party"))
dfmat2 <- dfmat
docvars(dfmat2)[c("President", "Party")] <- NULL
expect_identical(names(docvars(dfmat2)), c("Year", "FirstName"))
dfmat3 <- dfmat
docvars(dfmat3, c("President", "Party")) <- NULL
expect_identical(names(docvars(dfmat3)), c("Year", "FirstName"))
})
test_that("works correctly in edge cases", {
corp <- corpus(c("A b c d.", "A a b. B c.", "D f. e g.", "H i j."))
expect_error(docvars(corp) <- 1:4,
quanteda:::message_error("docvar_noname"))
expect_silent(docvars(corp, "var1") <- 1)
expect_equal(docvars(corp, "var1"), rep(1, 4))
expect_silent(docvars(corp, "var2") <- 1:4)
expect_equal(docvars(corp, "var2"), 1:4)
expect_silent(docvars(corp, "var3") <- 1:2)
expect_equal(docvars(corp, "var3"), c(1, 2, 1, 2))
expect_error(docvars(corp, "var4") <- 1:3)
})
test_that("group_docvars() works", {
docvar <- data.frame("docname_" = c("A", "B", "C"),
"docid_" = factor(c("A", "B", "C")),
"segid_" = rep(1L, 3),
"var1" = c(100, 100, 200),
"var2" = c(TRUE, TRUE, FALSE),
stringsAsFactors = FALSE)
docvar1 <- quanteda:::group_docvars(docvar, factor(c("X", "X", "Y")))
expect_equal(
names(docvar1),
c("docname_", "docid_", "segid_", "var1", "var2")
)
expect_equal(
docvar1$var1,
c(100, 200)
)
expect_equal(
docvar1$var2,
c(TRUE, FALSE)
)
docvar2 <- quanteda:::group_docvars(docvar,
factor(c("X", "X", "Y"), levels = c("X", "Y", "Z")),
field = "var3")
expect_equal(
names(docvar2),
c("docname_", "docid_", "segid_", "var1", "var2", "var3")
)
expect_equal(
docvar2$var1,
c(100, 200, NA)
)
expect_equal(
docvar2$var2,
c(TRUE, FALSE, NA)
)
expect_equal(
docvar2$var3,
factor(c("X", "Y", "Z"), levels = c("X", "Y", "Z"))
)
docvar3 <- quanteda:::group_docvars(docvar,
factor(c("X", "X", "Y"), levels = c("Z", "Y", "X")),
field = "var3")
expect_equal(
names(docvar3),
c("docname_", "docid_", "segid_", "var1", "var2", "var3")
)
expect_equal(
docvar3$var1,
c(NA, 200, 100)
)
expect_equal(
docvar3$var2,
c(NA, FALSE, TRUE)
)
expect_equal(
docvar3$var3,
factor(c("Z", "Y", "X"), levels = c("Z", "Y", "X"))
)
})
test_that("docid works", {
corp <- corpus(c(textone = "This is a sentence. Another sentence. Yet another.",
textwo = "Sentence 1. Sentence 2."))
corpsent <- corp %>%
corpus_reshape(to = "sentences")
expect_identical(
docid(corpsent),
factor(c("textone", "textone", "textone", "textwo", "textwo"))
)
expect_identical(
docid(tokens(corpsent)),
docid(corpsent)
)
expect_identical(
docid(dfm(tokens(corpsent))),
docid(corpsent)
)
expect_identical(
docid(corpus_group(corpsent)),
factor(docnames(corp))
)
expect_identical(
docid(tokens_group(tokens(corpsent))),
factor(docnames(corp))
)
expect_identical(
docid(dfm_group(dfm(tokens(corpsent)))),
factor(docnames(corp))
)
expect_identical(
docid(corpus_group(corpsent, groups = docid(corpsent))),
factor(docnames(corp))
)
expect_identical(
docid(tokens_group(tokens(corpsent), groups = docid(corpsent))),
factor(docnames(corp))
)
expect_identical(
docid(dfm_group(dfm(tokens(corpsent)), groups = docid(corpsent))),
factor(docnames(corp))
)
})
test_that("docvars are combined along with the main objects", {
docvar <- data.frame("var1" = c(100, 100, 200, NA, NA),
"var2" = c(NA, NA, NA, TRUE, FALSE))
corp1 <- corpus(data.frame(text = c(d1 = "aa", d2 = "bb", d3 = "cc"),
var1 = c(100, 100, 200)))
corp2 <- corpus(data.frame(text = c(d4 = "dd", d5 = "ee"),
var2 = c(TRUE, FALSE)))
expect_equal(docvars(c(corp1, corp2)), docvar)
toks1 <- tokens(corp1)
toks2 <- tokens(corp2)
expect_equal(docvars(c(toks1, toks2)), docvar)
dfmat1 <- dfm(tokens(corp1))
dfmat2 <- dfm(tokens(corp2))
expect_equal(docvars(rbind(dfmat1, dfmat2)), docvar)
}) |
library(bayesplot)
context("available_mcmc and available_ppc")
test_that("available_mcmc works", {
a <- available_mcmc()
expect_s3_class(a, "bayesplot_function_list")
expect_s3_class(a, "character")
expect_identical(
as.character(a),
sort(grep("^mcmc_", getNamespaceExports("bayesplot"), value = TRUE))
)
b <- available_mcmc("trace|dens")
expect_s3_class(b, "bayesplot_function_list")
expect_identical(
as.character(b),
sort(grep("^mcmc_dens|^mcmc_trace", getNamespaceExports("bayesplot"), value = TRUE))
)
expect_length(available_mcmc(pattern = "99999"), 0)
})
test_that("available_ppc works", {
a <- available_ppc()
expect_s3_class(a, "bayesplot_function_list")
expect_s3_class(a, "character")
expect_identical(
as.character(a),
sort(grep("^ppc_", getNamespaceExports("bayesplot"), value = TRUE))
)
b <- available_ppc("grouped")
expect_s3_class(b, "bayesplot_function_list")
expect_identical(
as.character(b),
sort(grep("_grouped$", getNamespaceExports("bayesplot"), value = TRUE))
)
c <- available_ppc("grouped", invert = TRUE)
expect_false(any(grepl("grouped", c)))
expect_length(available_ppc(pattern = "99999"), 0)
})
test_that("print.bayesplot_function_list works", {
expect_output(print(available_ppc()), "bayesplot PPC module:")
expect_output(print(available_mcmc()), "bayesplot MCMC module:")
expect_output(print(available_ppc("ribbon")), "(matching pattern 'ribbon')")
expect_output(print(available_mcmc("trace")), "trace_highlight")
expect_output(print(available_ppc("grouped", invert = TRUE)),
"excluding pattern 'grouped'")
}) |
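# A usage sketch for matrix96() below (illustrative; `df_long` is assumed to
# be a long-format plate data frame with `row` and `col` columns):
#   plate <- matrix96(dataframe = df_long, column = "value", rm = TRUE)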
matrix96 <- function(dataframe,column,rm=FALSE){
if(is.numeric(column)){data<-matrix(dataframe[,column],ncol=length(unique(dataframe[,2])), byrow = TRUE)}
if(!is.numeric(column)){
name<- as.character(column)
col<- which(colnames(dataframe)==name)
data<-matrix(dataframe[,col],ncol=length(unique(dataframe[,2])), byrow = TRUE)
}
rownames(data)<-unique(dataframe$row)
colnames(data)<-unique(dataframe$col)
if(rm==TRUE){
data[is.na(data)] <- 0
data[(data<0)]<-0
}
return(data)
} |
miaSpectra2D <- function(spectra) {
.chkArgs(mode = 21L)
chkSpectra(spectra)
if (!requireNamespace("ThreeWay", quietly = TRUE)) {
stop("You must install package ThreeWay to use this function")
}
n <- length(spectra$F1)
m <- length(spectra$F2)
p <- length(spectra$names)
X <- matrix(NA_real_, nrow = n, ncol = m * p)
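# unfold the p spectra (each an n x m matrix) side by side into a single
# n x (m * p) matrix for ThreeWay::pcasup1()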
for (i in 1:p) {
st <- 1 + (i - 1) * m
end <- i * m
X[, st:end] <- spectra$data[[i]]
}
t1 <- ThreeWay::pcasup1(X, n, m, p, 3)
t1$method <- "MIA"
class(t1) <- "mia"
return(t1)
} |
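# Examples (illustrative):
#   duplicates(c("a", "b", "b", "c"))         # FALSE TRUE TRUE FALSE
#   duplicatei(c("a", "b", "b", "c"))         # 1 2 2 4
#   duplicatei(c("a", "b", "b", "c"), FALSE)  # 0 0 2 0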
duplicates <- function(x) {
duplicated(x) | duplicated(x, fromLast=TRUE)
}
duplicatei <- function (x, first = TRUE) {
if (first)
match(x,x)
else
ifelse(duplicated(x), match(x,x), 0)
} |
context("Checking dob")
test_that("dob ...",{
}) |
gauss_adapt_C <-
function (d, h)
.Call("gauss_adapt_C", d, h, PACKAGE = "mgwrsar") |
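# jacobi.p.inner.products() below uses the standard Jacobi orthogonality
# relation, with weight (1-x)^alpha * (1+x)^beta on [-1, 1]:
#   h_k = 2^(alpha+beta+1) * Gamma(k+alpha+1) * Gamma(k+beta+1) /
#         ((2k+alpha+beta+1) * k! * Gamma(k+alpha+beta+1))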
jacobi.p.inner.products <- function( n, alpha, beta )
{
if ( n < 0 )
stop( "negative highest polynomial order" )
if ( n != round( n ) )
stop( "highest polynomial order is not integer" )
if ( alpha <= -1 )
stop( "alpha less than or equal to -1" )
if ( beta <= -1 )
stop( "beta less than or equal to -1" )
if ( ( abs( alpha ) < 1e-6 ) & ( abs( beta ) < 1e-6 ) )
return( legendre.inner.products( n ) )
if ( abs( alpha - beta ) < 1e-6 ) {
alpha.prime <- alpha + 0.5
return( gegenbauer.inner.products( n, alpha.prime ) )
}
ab <- alpha + beta
abp1 <- alpha + beta + 1
ap1 <- alpha + 1
bp1 <- beta + 1
coef <- 2 ^ abp1
inner.products <- rep( 1, n + 1 )
j <- 1
for ( k in 0:n ) {
num <- coef * gamma( k + ap1 ) * gamma( k + bp1 )
den <- ( 2 * k + abp1 ) * factorial( k ) * gamma( k + abp1 )
inner.products[j] <- num / den
j <- j + 1
}
return( inner.products )
} |
tableVisual <- function(v, vnam, doEval = TRUE) {
x <- table(v, useNA = "always")
x <- t(rbind(x, paste(round(x/length(v), 4)*100, "%", sep = "")))
x <- cbind(dimnames(x)[[1]], x)
rownames(x) <- NULL
dimnames(x)[[2]] <- c("value", "count", "percentage")
thisCall <- call("pander", x = x, keep.trailing.zeros = TRUE)
if (!doEval) return(deparse(thisCall))
else return(eval(thisCall))
}
tableVisual <- visualFunction(tableVisual, "Distribution tables",
classes = c("character", "factor", "labelled", "haven_labelled")) |
context("test-print_importance_plot")
test_that("printing importance works", {
gtest <- lm(mpg ~ cyl*wt*hp, data = mtcars)
gtest0 <- lm(mpg ~ 1, data = mtcars)
imp <- importance(gtest, gtest0)
expect_output(print(imp), regexp = "Importance")
}) |
context("Tracking - Experiments")
teardown({
mlflow_clear_test_dir("mlruns")
})
test_that("mlflow_create/get_experiment() basic functionality (fluent)", {
mlflow_clear_test_dir("mlruns")
experiment_1_id <- mlflow_create_experiment("exp_name", "art_loc")
experiment_1a <- mlflow_get_experiment(experiment_id = experiment_1_id)
experiment_1b <- mlflow_get_experiment(name = "exp_name")
expect_identical(experiment_1a, experiment_1b)
expect_identical(experiment_1a$artifact_location, "art_loc")
expect_identical(experiment_1a$name, "exp_name")
})
test_that("mlflow_create/get_experiment() basic functionality (client)", {
mlflow_clear_test_dir("mlruns")
client <- mlflow_client()
experiment_1_id <- mlflow_create_experiment(
client = client,
name = "exp_name",
artifact_location = "art_loc",
tags = list(foo = "bar", foz = "baz", fiz = "biz")
)
experiment_1a <- mlflow_get_experiment(client = client, experiment_id = experiment_1_id)
experiment_1b <- mlflow_get_experiment(client = client, name = "exp_name")
expect_identical(experiment_1a, experiment_1b)
expect_identical(experiment_1a$artifact_location, "art_loc")
expect_identical(experiment_1a$name, "exp_name")
expect_true(
all(purrr::transpose(experiment_1b$tags[[1]]) %in%
list(
list(key = "foz", value = "baz"),
list(key = "foo", value = "bar"),
list(key = "fiz", value = "biz")
)
)
)
})
test_that("mlflow_get_experiment() not found error", {
mlflow_clear_test_dir("mlruns")
expect_error(
mlflow_get_experiment(experiment_id = "42"),
"Could not find experiment with ID 42"
)
})
test_that("mlflow_list_experiments() works properly", {
mlflow_clear_test_dir("mlruns")
client <- mlflow_client()
ex1 <- mlflow_create_experiment(client = client, "foo1", "art_loc1")
ex2 <- mlflow_create_experiment(client = client, "foo2", "art_loc2")
experiments_list <- mlflow_list_experiments(client = client)
expect_setequal(experiments_list$experiment_id, c("0", "1", "2"))
expect_setequal(experiments_list$name, c("Default", "foo1", "foo2"))
default_artifact_loc <- file.path(getwd(), "mlruns", "0", fsep = "/")
expect_setequal(experiments_list$artifact_location, c(default_artifact_loc,
"art_loc1",
"art_loc2"))
experiments_list <- mlflow_list_experiments()
expect_setequal(experiments_list$experiment_id, c("0", "1", "2"))
expect_setequal(experiments_list$name, c("Default", "foo1", "foo2"))
default_artifact_loc <- file.path(getwd(), "mlruns", "0", fsep = "/")
expect_setequal(experiments_list$artifact_location, c(default_artifact_loc,
"art_loc1",
"art_loc2"))
expect_null(mlflow_list_experiments("DELETED_ONLY"))
mlflow_set_experiment_tag("key2", "value2", experiment_id = ex2)
experiments <- mlflow_list_experiments()
expect_true("tags" %in% names(experiments))
expect_setequal(
experiments$tags, list(NA, NA, tibble::tibble(key = "key2", value = "value2"))
)
mlflow_set_experiment_tag("key1", "value1", experiment_id = ex1)
mlflow_set_experiment_tag("key0", "value0", experiment_id = "0")
experiments <- mlflow_list_experiments()
expect_true("tags" %in% names(experiments))
expect_setequal(experiments$tags, list(
tibble::tibble(key = "key0", value = "value0"),
tibble::tibble(key = "key1", value = "value1"),
tibble::tibble(key = "key2", value = "value2")
))
mlflow_set_experiment_tag("key1.2", "value1.2", experiment_id = ex1)
experiments <- mlflow_list_experiments()
tags <- experiments$tags[experiments$experiment_id %in% ex1][[1]]
tags <- tags[order(tags$key),]
expect_equal(
tags,
tibble::tibble(key = c("key1", "key1.2"), value = c('value1', 'value1.2'))
)
mlflow_delete_experiment(experiment_id = "1")
deleted_experiments <- mlflow_list_experiments("DELETED_ONLY")
expect_identical(deleted_experiments$name, "foo1")
})
test_that("mlflow_set_experiment_tag() works correctly", {
mlflow_clear_test_dir("mlruns")
client <- mlflow_client()
experiment_id <- mlflow_create_experiment(client = client, "setExperimentTagTestExperiment", "art_exptag_loc")
mlflow_set_experiment_tag("dataset", "imagenet1K", experiment_id, client = client)
experiment <- mlflow_get_experiment(experiment_id = experiment_id, client = client)
tags <- experiment$tags[[1]]
expect_identical(tags, tibble::tibble(key = 'dataset', value = 'imagenet1K'))
expect_identical("imagenet1K", tags$value[tags$key == "dataset"])
mlflow_set_experiment_tag("dataset", "birdbike", experiment_id, client = client)
experiment <- mlflow_get_experiment(experiment_id = experiment_id, client = client)
expect_equal(experiment$tags, list(tibble::tibble(key = 'dataset', value = 'birdbike')))
experiment_id_2 <- mlflow_create_experiment(client = client, "setExperimentTagTestExperiment2", "art_exptag_loc2")
experiment_2 <- mlflow_get_experiment(experiment_id = experiment_id_2, client = client)
expect_equal(experiment_2$tags, NA)
mlflow_set_experiment_tag("dataset", "birds200", experiment_id_2, client = client)
experiment <- mlflow_get_experiment(experiment_id = experiment_id, client = client)
tags <- experiment$tags[[1]]
experiment_2 <- mlflow_get_experiment(experiment_id = experiment_id_2, client = client)
tags_2 <- experiment_2$tags[[1]]
expect_equal(tags, tibble::tibble(key = 'dataset', value = 'birdbike'))
expect_equal(tags_2, tibble::tibble(key = 'dataset', value = 'birds200'))
mlflow_set_experiment_tag("multiline tag", "value2\nvalue2\nvalue2", experiment_id, client = client)
experiment <- mlflow_get_experiment(experiment_id = experiment_id, client = client)
expect_identical(
tibble::tibble(
key = c('dataset', 'multiline tag'),
value= c("birdbike", "value2\nvalue2\nvalue2")
),
experiment$tags[[1]][order(experiment$tags[[1]]$key),]
)
})
test_that("mlflow_get_experiment_by_name() works properly", {
mlflow_clear_test_dir("mlruns")
client <- mlflow_client()
expect_error(
mlflow_get_experiment(client = client, name = "exp"),
"Could not find experiment with name 'exp'"
)
experiment_id <- mlflow_create_experiment(client = client, "exp", "art")
experiment <- mlflow_get_experiment(client = client, name = "exp")
expect_identical(experiment_id, experiment$experiment_id)
expect_identical(experiment$name, "exp")
expect_identical(experiment$artifact_location, "art")
})
test_that("infer experiment id works properly", {
mlflow_clear_test_dir("mlruns")
experiment_id <- mlflow_create_experiment("test")
Sys.setenv(MLFLOW_EXPERIMENT_NAME = "test")
expect_true(experiment_id == mlflow_infer_experiment_id())
Sys.unsetenv("MLFLOW_EXPERIMENT_NAME")
Sys.setenv(MLFLOW_EXPERIMENT_ID = experiment_id)
expect_true(experiment_id == mlflow_infer_experiment_id())
Sys.unsetenv("MLFLOW_EXPERIMENT_ID")
mlflow_set_experiment("test")
expect_true(experiment_id == mlflow_infer_experiment_id())
})
test_that("experiment setting works", {
mlflow_clear_test_dir("mlruns")
exp1_id <- mlflow_create_experiment("exp1")
exp2_id <- mlflow_create_experiment("exp2")
mlflow_set_experiment(experiment_name = "exp1")
expect_identical(exp1_id, mlflow_get_active_experiment_id())
expect_identical(mlflow_get_experiment(exp1_id), mlflow_get_experiment())
mlflow_set_experiment(experiment_id = exp2_id)
expect_identical(exp2_id, mlflow_get_active_experiment_id())
expect_identical(mlflow_get_experiment(exp2_id), mlflow_get_experiment())
})
test_that("mlflow_set_experiment() creates experiments", {
mlflow_clear_test_dir("mlruns")
mlflow_set_experiment(experiment_name = "foo", artifact_location = "artifact/location")
experiment <- mlflow_get_experiment()
expect_identical(experiment$artifact_location, "artifact/location")
expect_identical(experiment$name, "foo")
}) |
sum(1:10) |
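# A minimal usage sketch for createGmatFromMat() below (illustrative;
# `gene.gr` is assumed to be a GRanges object carrying a `name` column):
#   x.sp <- createGmatFromMat(obj = x.sp, input.mat = "bmat",
#                             genes = gene.gr, do.par = FALSE, num.cores = 1)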
createGmatFromMat <- function(obj, input.mat, genes, do.par, num.cores) {
UseMethod("createGmatFromMat", obj);
}
createGmatFromMat.default <- function(
obj,
input.mat=c("bmat", "pmat"),
genes,
do.par=FALSE,
num.cores=1
){
if(missing(obj)){
stop("obj is missing");
}else{
if(!is(obj, "snap")){
stop("obj is not a snap file");
}
}
if(missing(genes)){
stop("genes is missing");
}else{
if(!is(genes, "GRanges")){
stop("genes is not a GRanges object");
}
if(is.null(genes$name)){
stop("genes does not contain gene names");
}
}
if(!is(do.par, "logical")){
stop("do.par is not a logical object");
}
if(!is(num.cores, "numeric")){
stop("num.cores is not a numeric object");
}
input.mat = match.arg(input.mat);
if(input.mat == "bmat"){
data.use = obj@bmat;
peaks.use = obj@feature;
}else if(input.mat == "pmat"){
data.use = obj@pmat;
peaks.use = obj@peak;
}else{
stop("input.mat does not exist in obj")
}
if(any(Matrix::rowSums(data.use) == 0)){
stop("input matrix contains empty rows, remove empty rows first")
}
num.total.cells = nrow(data.use);
if(do.par){
if(num.cores > 1){
if (num.cores > parallel::detectCores()) {
num.cores <- parallel::detectCores() - 1
warning(paste0("num.cores set greater than number of available cores (", parallel::detectCores(), "). Setting num.cores to ", num.cores, "."))
}
} else if (num.cores != 1) {
num.cores <- 1
}
}
ovs = as.data.frame(GenomicRanges::findOverlaps(peaks.use, genes));
ovs.ls = split(ovs, ovs$subjectHits);
if(do.par){
count.ls <- parallel::mclapply(ovs.ls, function(idx){
idx.bins.i = idx$queryHits;
if(length(idx.bins.i) == 1L){
count.i = data.use[,idx.bins.i,drop=TRUE];
if(any(count.i > 0)){
data.frame(i=which(count.i > 0), j=idx$subjectHits[1], val=count.i[count.i > 0])
}else{
data.frame()
}
}else{
count.i = Matrix::rowSums(data.use[,idx.bins.i,drop=TRUE]);
if(any(count.i > 0)){
data.frame(i=which(count.i > 0), j=idx$subjectHits[1], val=count.i[count.i > 0])
}else{
data.frame()
}
}
}, mc.cores=num.cores);
}else{
count.ls <- lapply(ovs.ls, function(idx){
idx.bins.i = idx$queryHits;
if(length(idx.bins.i) == 1L){
count.i = data.use[,idx.bins.i,drop=TRUE];
if(any(count.i > 0)){
data.frame(i=which(count.i > 0), j=idx$subjectHits[1], val=count.i[count.i > 0])
}else{
data.frame()
}
}else{
count.i = Matrix::rowSums(data.use[,idx.bins.i,drop=TRUE]);
if(any(count.i > 0)){
data.frame(i=which(count.i > 0), j=idx$subjectHits[1], val=count.i[count.i > 0])
}else{
data.frame()
}
}
});
}
count.df = do.call(rbind, count.ls);
dn = list(obj@barcode, as.character(genes$name))
obj@gmat = Matrix::sparseMatrix(
i=count.df[,1],
j=count.df[,2],
x=count.df[,3],
dims=c(nrow(obj), length(genes)),
dimnames = dn
);
rm(count.df);
rm(dn);
rm(data.use);
rm(ovs);
rm(ovs.ls)
gc();
return(obj);
} |
GENMETA.summary <- function(object, signi_digits = 3)
{
x <- object
GMeta_opt_estimate <- as.vector(x[[1]])
GMeta_opt_std_error <- sqrt(as.vector(diag(x[[2]])))
Var_opt_GMeta <- diag(GMeta_opt_std_error)
z_stat_opt_GMeta <- GMeta_opt_estimate/GMeta_opt_std_error
  p_val_opt_GMeta <- 2 * (1 - pnorm(abs(z_stat_opt_GMeta)))  # two-sided, matching the Pr(>|z|) label
summary_data_frame_opt <- data.frame(cbind(GMeta_opt_estimate, GMeta_opt_std_error, z_stat_opt_GMeta, p_val_opt_GMeta))
colnames(summary_data_frame_opt) <- c("Estimate", "Std.Error", "z value", "Pr(>|z|)")
rownames(summary_data_frame_opt) <- names(x[[1]])
summary_data_frame_opt <- signif(summary_data_frame_opt, signi_digits)
signif_column <- factor(noquote(sapply(summary_data_frame_opt[, 4], sign.star)))
summary_data_frame_opt <- cbind(summary_data_frame_opt, signif_column)
colnames(summary_data_frame_opt)[5] <- paste0(' ')
cat("Call:\n")
print(x[[5]])
cat("\n")
cat("Coefficients: \n")
print.data.frame(summary_data_frame_opt, print.gap = 2)
cat("\n---\n")
cat("Significant codes:\n")
cat("0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 \n")
cat("\n")
cat("Total number of iterations: ")
cat(x[[4]])
}
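# Hedged usage sketch (illustrative only): `GENMETA.fit` stands in for a
# hypothetical fitted object returned by the GENMETA estimation routine,
# with components ordered as indexed above (estimates, covariance matrix,
# ..., iteration count, call).
# GENMETA.summary(GENMETA.fit, signi_digits = 4)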
library(dplyr)
slice(mtcars, 1L)
slice(mtcars, n())
slice(mtcars, 5:n())
slice(mtcars, c(2,4,5,10))
mtcars
(by_cyl <- group_by(mtcars, cyl))
nrow(by_cyl)
slice(by_cyl, 1:2)
binMAEE = function(y, X, id, Z, maxiter, epsilon, printrange, alpadj, shrink, makevone) {
BEGINEND = function(n) {
last = cumsum(n)
first = last - n + 1
return(cbind(first, last))
}
is_pos_def = function(A) {
return(min(eigen(A)$values) > 1e-13)
}
GETROWB = function(mu, gamma, j, k, X, y) {
row = (gamma/2) * ((1 - 2 * mu[j]) * X[j, ] + (1 - 2 * mu[k]) *
X[k, ] + 2 * mu[j] * (1 - mu[j]) * X[j, ]/(y[j] - mu[j]) +
2 * mu[k] * (1 - mu[k]) * X[k, ]/(y[k] - mu[k]))
return(row)
}
CREATEA = function(mu, y) {
return(y - mu)
}
CREATEC = function(X, mu) {
return(X * (mu * (1 - mu)))
}
CREATEB = function(mu, gamma, n) {
B = diag(mu * (1 - mu))
l = 1
for (j in 1:(n - 1)) {
for (k in (j + 1):n) {
B[j, k] = sqrt(mu[j] * mu[k] * (1 - mu[j]) * (1 -
mu[k])) * gamma[l]
l = l + 1
}
}
B[lower.tri(B)] = t(B)[lower.tri(B)]
return(B)
}
SCORE = function(Ustarold, beta, alpha, y, X, Z, n, p, q, flag,
rangeflag, VEEFLAG, NPSDFLAG, NPSDADJFLAG) {
U = rep(0, p + q)
UUtran = Ustar = matrix(0, p + q, p + q)
naiveold = ginv(Ustarold[1:p, 1:p])
locx = BEGINEND(n)
locz = BEGINEND(choose(n, 2))
for (i in 1:length(n)) {
X_c = X[locx[i, 1]:locx[i, 2], , drop = FALSE]
y_c = y[locx[i, 1]:locx[i, 2]]
Z_c = Z[locz[i, 1]:locz[i, 2], , drop = FALSE]
U_c = rep(0, p + q)
Ustar_c = matrix(0, p + q, p + q)
mu_c = 1/(1 + exp(c(-X_c %*% beta)))
gamma_c = c(Z_c %*% alpha)
VEE = R = rep(0, choose(n[i], 2))
DB = matrix(0, choose(n[i], 2), p)
C = CREATEC(X_c, mu_c)
B = CREATEB(mu_c, gamma_c, n[i])
A = CREATEA(mu_c, y_c)
INVB = ginv(B)
CtinvB = t(C) %*% INVB
Hi1 = C %*% naiveold %*% CtinvB
if (alpadj) {
SQVARFUN = sqrt(mu_c * (1 - mu_c))
INVSQVAR = 1/SQVARFUN
CT = t(C)
omega = C %*% naiveold %*% CT
vminomega = B - omega
psd_vmin = is_pos_def(vminomega)
mineig = min(eigen(vminomega)$values)
if (psd_vmin == 1) {
Ci = diag(INVSQVAR) %*% (B %*% ginv(vminomega)) %*%
diag(SQVARFUN)
RX = (y_c - mu_c) * INVSQVAR
Gi = tcrossprod(RX)
} else {
NPSDADJFLAG = 1
stop("(V - Omega) is not positive definite")
}
} else {
SQVARFUN = sqrt(mu_c * (1 - mu_c))
INVSQVAR = 1/SQVARFUN
RX = (y_c - mu_c) * INVSQVAR
Gi = tcrossprod(RX)
}
l = 1
for (j in 1:(n[i] - 1)) {
for (k in (j + 1):n[i]) {
if ((gamma_c[l] >= min(sqrt((mu_c[j] * (1 - mu_c[k]))/(mu_c[k] *
(1 - mu_c[j]))), sqrt((mu_c[k] * (1 - mu_c[j]))/(mu_c[j] *
(1 - mu_c[k]))))) | (gamma_c[l] <= max(-sqrt((mu_c[j] *
mu_c[k])/((1 - mu_c[j]) * (1 - mu_c[k]))), -sqrt(((1 -
mu_c[j]) * (1 - mu_c[k]))/(mu_c[j] * mu_c[k])))) &
(flag == 0)) {
rangeflag = 1
                    if (printrange) {
                        warning(paste("Range violation detected for cluster",
                            i, "and pair", j, k))
                    }
break
}
if ((gamma_c[l] >= min(sqrt((mu_c[j] * (1 - mu_c[k]))/(mu_c[k] *
(1 - mu_c[j]))), sqrt((mu_c[k] * (1 - mu_c[j]))/(mu_c[j] *
(1 - mu_c[k]))))) | (gamma_c[l] <= max(-sqrt((mu_c[j] *
mu_c[k])/((1 - mu_c[j]) * (1 - mu_c[k]))), -sqrt(((1 -
mu_c[j]) * (1 - mu_c[k]))/(mu_c[j] * mu_c[k])))) &
(flag == 1)) {
warning(cat("Last Update Pushes Parameters Out of Range.",
"\n"))
warning(cat("Range Violation Detected for Cluster",
i, "and Pair", j, k, "\n"))
}
VEE[l] = 1 + ((1 - 2 * mu_c[j]) * (1 - 2 * mu_c[k]) *
gamma_c[l])/sqrt(mu_c[j] * (1 - mu_c[j]) * mu_c[k] *
(1 - mu_c[k])) - gamma_c[l]^2
if (VEE[l] <= 0) {
VEEFLAG = 1
stop("Variance of correlation parameter is negative")
}
if (alpadj) {
R[l] = Ci[j, ] %*% Gi[, k] - gamma_c[l]
} else {
R[l] = Gi[j, k] - gamma_c[l]
}
l = l + 1
}
}
if (makevone) {
VEE = rep(1, choose(n[i], 2))
}
if (min(eigen(B)$values) <= 0) {
NPSDFLAG = 1
stop(paste("Var(Y) of Cluster", i, "is not Positive-Definite;",
"Joint Distribution Does Not Exist and Program terminates"))
}
U_c[1:p] = t(C) %*% INVB %*% A
U_c[(p + 1):(p + q)] = t(Z_c) %*% (R/VEE)
UUtran_c = tcrossprod(U_c)
Ustar_c[1:p, 1:p] = t(C) %*% INVB %*% C
Ustar_c[(p + 1):(p + q), (p + 1):(p + q)] = t(Z_c) %*%
(Z_c/VEE)
U = U + U_c
UUtran = UUtran + UUtran_c
Ustar = Ustar + Ustar_c
}
rangeflag = 0
return(list(U = U, UUtran = UUtran, Ustar = Ustar, flag = flag,
rangeflag = rangeflag, VEEFLAG = VEEFLAG, NPSDFLAG = NPSDFLAG,
NPSDADJFLAG = NPSDADJFLAG))
}
INITBETA = function(y, X, n) {
z = y + y - 1
beta = solve(t(X) %*% X, t(X) %*% z)
for (i in 1:2) {
u = c(X %*% beta)
u = 1/(1 + exp(-u))
v = u * (1 - u)
z = t(X) %*% (y - u)
Ustar = t(X) %*% (X * v)
d = solve(Ustar, z)
beta = beta + d
}
return(list(beta = c(beta), Ustar = Ustar))
}
INVBIG = function(ainvc, ainvm, m, c, start, end) {
for (i in start:end) {
b = ainvm[, i]
bt = t(b)
btm = bt %*% m
btmi = btm[, i]
gam = 1 - btmi
bg = b/gam
ainvc = ainvc + bg %*% (bt %*% c)
if (i < end) {
ainvm = ainvm + bg %*% btm
}
}
return(ainvc)
}
MAKEVAR = function(Ustarold, beta, alpha, y, X, Z, n, p, q,
VEEFLAG, ROBFLAG, NPSDADJFLAG) {
SCORE_RES = SCORE(Ustarold, beta, alpha, y, X, Z, n, p,
q, flag = 1, rangeflag = 0, VEEFLAG, NPSDFLAG, NPSDADJFLAG)
U = SCORE_RES$U
UUtran = SCORE_RES$UUtran
Ustar = SCORE_RES$Ustar
flag = SCORE_RES$flag
rangeflag = SCORE_RES$rangeflag
VEEFLAG = SCORE_RES$VEEFLAG
NPSDFLAG = SCORE_RES$NPSDFLAG
NPSDADJFLAG = SCORE_RES$NPSDADJFLAG
naive = ginv(Ustar[1:p, 1:p])
naivealp = ginv(Ustar[(p + 1):(p + q), (p + 1):(p + q)])
eigenRES1 = eigen(naive)
evals1 = eigenRES1$values
evecs1 = eigenRES1$vectors
sqrevals1 = sqrt(evals1)
sqe1 = evecs1 %*% diag(sqrevals1)
eigenRES2 = eigen(naivealp)
evals2 = eigenRES2$values
evecs2 = eigenRES2$vectors
sqrevals2 = sqrt(evals2)
sqe2 = evecs2 %*% diag(sqrevals2)
Ustar_c_array = UUtran_c_array = array(0, c(p + q, p + q,
length(n)))
UUtran = UUbc = UUbc2 = UUbc3 = Ustar = inustar = matrix(0,
p + q, p + q)
locx = BEGINEND(n)
locz = BEGINEND(choose(n, 2))
for (i in 1:length(n)) {
X_c = X[locx[i, 1]:locx[i, 2], , drop = FALSE]
y_c = y[locx[i, 1]:locx[i, 2]]
mu_c = 1/(1 + exp(c(-X_c %*% beta)))
U_i = U_c = rep(0, p + q)
Ustar_c = matrix(0, p + q, p + q)
Z_c = Z[locz[i, 1]:locz[i, 2], , drop = FALSE]
gamma_c = c(Z_c %*% alpha)
C = CREATEC(X_c, mu_c)
B = CREATEB(mu_c, gamma_c, n[i])
A = CREATEA(mu_c, y_c)
INVB = ginv(B)
U_i[1:p] = t(C) %*% INVB %*% A
CtinvB = t(C) %*% INVB
Hi1 = C %*% naive %*% CtinvB
ai1 = INVB
mm1 = C %*% sqe1
ai1A = ai1 %*% A
ai1m1 = ai1 %*% mm1
ai1A = INVBIG(ai1A, ai1m1, mm1, A, 1, p)
U_c[1:p] = t(C) %*% ai1A
VEE = R = rep(0, choose(n[i], 2))
DB = matrix(0, choose(n[i], 2), p)
RX = rep(0, n[i])
if (alpadj) {
SQVARFUN = sqrt(mu_c * (1 - mu_c))
INVSQVAR = 1/SQVARFUN
CT = t(C)
omega = C %*% naive %*% CT
vminomega = B - omega
psd_vmin = is_pos_def(vminomega)
mineig = min(eigen(vminomega)$values)
if (psd_vmin == 1) {
Ci = diag(INVSQVAR) %*% (B %*% ginv(vminomega)) %*%
diag(SQVARFUN)
RX = (y_c - mu_c) * INVSQVAR
Gi = tcrossprod(RX)
} else {
NPSDADJFLAG = 1
stop("(V - Omega) is not positive definite")
}
} else {
SQVARFUN = sqrt(mu_c * (1 - mu_c))
INVSQVAR = 1/SQVARFUN
RX = (y_c - mu_c) * INVSQVAR
Gi = tcrossprod(RX)
}
l = 1
for (j in 1:(n[i] - 1)) {
for (k in (j + 1):n[i]) {
VEE[l] = 1 + ((1 - 2 * mu_c[j]) * (1 - 2 * mu_c[k]) *
gamma_c[l])/sqrt(mu_c[j] * (1 - mu_c[j]) * mu_c[k] *
(1 - mu_c[k])) - gamma_c[l]^2
DB[l, ] = GETROWB(mu_c, gamma_c[l], j, k, X_c,
y_c)
if (alpadj) {
R[l] = Ci[j, ] %*% Gi[, k] - gamma_c[l]
} else {
R[l] = Gi[j, k] - gamma_c[l]
}
l = l + 1
}
}
if (makevone) {
VEE = rep(1, choose(n[i], 2))
}
U_i[(p + 1):(p + q)] = t(Z_c) %*% (R/VEE)
mm2 = Z_c %*% sqe2
ai2R = R/VEE
ai2m2 = mm2/VEE
ai2R = INVBIG(ai2R, ai2m2, mm2, R, 1, q)
U_c[(p + 1):(p + q)] = t(Z_c) %*% ai2R
Ustar_c[1:p, 1:p] = t(C) %*% INVB %*% C
Ustar_c[(p + 1):(p + q), 1:p] = t(Z_c) %*% (DB/VEE)
Ustar_c[(p + 1):(p + q), (p + 1):(p + q)] = t(Z_c) %*%
(Z_c/VEE)
Ustar = Ustar + Ustar_c
UUtran_c = tcrossprod(U_i)
UUtran = UUtran + UUtran_c
UUbc_c = tcrossprod(U_c)
UUbc = UUbc + UUbc_c
UUbc_ic = tcrossprod(U_c, U_i)
UUbc2 = UUbc2 + UUbc_ic
Ustar_c_array[, , i] = Ustar_c
UUtran_c_array[, , i] = UUtran_c
}
inustar[1:p, 1:p] = ginv(Ustar[1:p, 1:p])
inustar[(p + 1):(p + q), (p + 1):(p + q)] = ginv(Ustar[(p +
1):(p + q), (p + 1):(p + q)])
inustar[(p + 1):(p + q), 1:p] = -inustar[(p + 1):(p + q),
(p + 1):(p + q)] %*% Ustar[(p + 1):(p + q), 1:p] %*%
inustar[1:p, 1:p]
inustartr = t(inustar)
for (i in 1:length(n)) {
Hi = diag(1/sqrt(1 - pmin(0.75, c(diag(Ustar_c_array[,
, i] %*% inustar)))))
UUbc3 = UUbc3 + Hi %*% UUtran_c_array[, , i] %*% Hi
}
robust = inustar %*% UUtran %*% inustartr
varKC = inustar %*% (UUbc2 + t(UUbc2)) %*% inustartr/2
varMD = inustar %*% UUbc %*% inustartr
varFG = inustar %*% UUbc3 %*% inustartr
naive = inustar[1:p, 1:p]
if (min(diag(robust)) <= 0) {
ROBFLAG = 1
}
if (min(diag(varMD)) <= 0) {
ROBFLAG = 1
}
if (min(diag(varKC)) <= 0) {
ROBFLAG = 1
}
if (min(diag(varFG)) <= 0) {
ROBFLAG = 1
}
return(list(robust = robust, naive = naive, varMD = varMD,
varKC = varKC, varFG = varFG, VEEFLAG = VEEFLAG, ROBFLAG = ROBFLAG,
NPSDADJFLAG = NPSDADJFLAG))
}
FITPRENTICE = function(y, X, Z, n, maxiter, epsilon, VEEFLAG,
SINGFLAG, ROBFLAG, ALPFLAG, NPSDFLAG, NPSDADJFLAG) {
p = ncol(X)
q = ncol(Z)
delta = rep(2 * epsilon, p + q)
max_modi = 20
converge = 0
rangeflag = 0
alpha = rep(0.01, q)
INITRES = INITBETA(y, X, n)
beta = INITRES$beta
Ustar = INITRES$Ustar
niter = 1
while ((niter <= maxiter) & (max(abs(delta)) > epsilon)) {
n_modi = 0
SINGFLAG = 0
ALPFLAG = 0
repeat {
Ustarold = Ustar
NPSDFLAG = 0
NPSDADJFLAG = 0
SCORE_RES = SCORE(Ustarold, beta, alpha, y, X, Z,
n, p, q, flag = 0, rangeflag, VEEFLAG, NPSDFLAG,
NPSDADJFLAG)
U = SCORE_RES$U
UUtran = SCORE_RES$UUtran
Ustar = SCORE_RES$Ustar
rangeflag = SCORE_RES$rangeflag
VEEFLAG = SCORE_RES$VEEFLAG
if (VEEFLAG == 1) {
stop("Program terminated due to division by zero in variance")
}
if (rangeflag == 1) {
if (shrink == "THETA") {
if (niter == 1) {
alpha = rep(0, q)
} else {
theta = theta - (0.5)^(n_modi + 1) * delta
beta = theta[1:p]
alpha = theta[(p + 1):(p + q)]
}
} else if (shrink == "ALPHA") {
if (niter == 1) {
alpha = rep(0, q)
} else {
alpha = 0.95 * alpha
}
}
n_modi = n_modi + 1
                if (printrange) {
                    warning(paste("Iteration", niter, "and shrink number",
                        n_modi))
                }
}
if ((n_modi > max_modi) | (rangeflag == 0)) {
break
}
}
if (n_modi > max_modi) {
if (printrange) {
warning(cat("n_modi too great, more than 20 shrinks"))
}
ALPFLAG = 1
}
theta = c(beta, alpha)
psdustar = is_pos_def(Ustar)
mineig = min(eigen(Ustar)$values)
if (psdustar) {
delta = solve(Ustar, U)
theta = theta + delta
beta = theta[1:p]
alpha = theta[(p + 1):(p + q)]
converge = (max(abs(delta)) <= epsilon)
} else {
SINGFLAG = 1
}
niter = niter + 1
}
Ustarold = Ustar
MAKEVAR_RES = MAKEVAR(Ustarold, beta, alpha, y, X, Z, n,
p, q, VEEFLAG, ROBFLAG, NPSDADJFLAG)
robust = MAKEVAR_RES$robust
naive = MAKEVAR_RES$naive
varMD = MAKEVAR_RES$varMD
varKC = MAKEVAR_RES$varKC
varFG = MAKEVAR_RES$varFG
VEEFLAG = MAKEVAR_RES$VEEFLAG
ROBFLAG = MAKEVAR_RES$ROBFLAG
NPSDADJFLAG = MAKEVAR_RES$NPSDADJFLAG
return(list(beta = beta, alpha = alpha, robust = robust,
naive = naive, varMD = varMD, varKC = varKC, varFG = varFG,
niter = niter, converge = converge, VEEFLAG = VEEFLAG,
SINGFLAG = SINGFLAG, ROBFLAG = ROBFLAG, ALPFLAG = ALPFLAG,
NPSDFLAG = NPSDFLAG, NPSDADJFLAG = NPSDADJFLAG))
}
RESULTS = function(beta, alpha, robust, naive, varMD, varKC,
varFG, niter, n) {
p = length(beta)
q = length(alpha)
K = length(n)
df = K - p
        beta_numbers = as.matrix(seq_len(p)) - 1
bSE = sqrt(diag(naive))
bSEBC0 = sqrt(diag(robust[1:p, 1:p]))
bSEBC1 = sqrt(diag(varKC[1:p, 1:p]))
bSEBC2 = sqrt(diag(varMD[1:p, 1:p]))
bSEBC3 = sqrt(diag(varFG[1:p, 1:p]))
        alpha_numbers = as.matrix(seq_len(q)) - 1
aSEBC0 = sqrt(diag(robust[(p + 1):(p + q), (p + 1):(p +
q)]))
aSEBC1 = sqrt(diag(varKC[(p + 1):(p + q), (p + 1):(p + q)]))
aSEBC2 = sqrt(diag(varMD[(p + 1):(p + q), (p + 1):(p + q)]))
aSEBC3 = sqrt(diag(varFG[(p + 1):(p + q), (p + 1):(p + q)]))
outbeta = cbind(beta_numbers, beta, bSE, bSEBC0, bSEBC1,
bSEBC2, bSEBC3)
outalpha = cbind(alpha_numbers, alpha, aSEBC0, aSEBC1, aSEBC2,
aSEBC3)
colnames(outbeta) = c("Beta", "Estimate", "MB-stderr", "BC0-stderr",
"BC1-stderr", "BC2-stderr", "BC3-stderr")
colnames(outalpha) = c("Alpha", "Estimate", "BC0-stderr",
"BC1-stderr", "BC2-stderr", "BC3-stderr")
return(list(outbeta = outbeta, outalpha = outalpha))
}
VEEFLAG = 0
SINGFLAG = 0
CONVFLAG = 0
ROBFLAG = 0
ALPFLAG = 0
NPSDFLAG = 0
NPSDADJFLAG = 0
id1 = id[order(id)]
y = y[order(id)]
X = X[order(id), ]
id = id1
n = as.vector(table(id))
PRENTICE_RES = FITPRENTICE(y, X, Z, n, maxiter, epsilon, VEEFLAG,
SINGFLAG, ROBFLAG, ALPFLAG, NPSDFLAG, NPSDADJFLAG)
beta = PRENTICE_RES$beta
alpha = PRENTICE_RES$alpha
robust = PRENTICE_RES$robust
naive = PRENTICE_RES$naive
varMD = PRENTICE_RES$varMD
varKC = PRENTICE_RES$varKC
varFG = PRENTICE_RES$varFG
niter = PRENTICE_RES$niter
converge = PRENTICE_RES$converge
VEEFLAG = PRENTICE_RES$VEEFLAG
SINGFLAG = PRENTICE_RES$SINGFLAG
ROBFLAG = PRENTICE_RES$ROBFLAG
ALPFLAG = PRENTICE_RES$ALPFLAG
NPSDFLAG = PRENTICE_RES$NPSDFLAG
NPSDADJFLAG = PRENTICE_RES$NPSDADJFLAG
if (SINGFLAG == 1) {
stop("Derivative matrix for beta is singular during updates")
}
if (ROBFLAG == 1) {
stop("Sandwich variance is not positive definite")
}
if (converge == 0 & SINGFLAG == 0) {
stop("The algorithm did not converge")
}
if (converge == 1 & ROBFLAG == 0) {
result = RESULTS(beta, alpha, robust, naive, varMD, varKC,
varFG, niter, n)
outList = list(outbeta = result$outbeta, outalpha = result$outalpha,
beta = beta, alpha = alpha, MB = naive, BC0 = robust,
BC1 = varKC, BC2 = varMD, BC3 = varFG, niter = niter)
class(outList) = "geemaee"
return(outList)
}
}
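# Hedged usage sketch (illustrative only; all names are hypothetical):
# y is a binary outcome sorted by cluster id, X the marginal-mean design
# matrix, and Z the pairwise-correlation design matrix with choose(n_i, 2)
# rows per cluster i.
# fit <- binMAEE(y = y, X = X, id = id, Z = Z, maxiter = 50, epsilon = 1e-4,
#                printrange = TRUE, alpadj = TRUE, shrink = "ALPHA",
#                makevone = FALSE)
# fit$outbeta
# fit$outalpha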
wblr.fit <- function(x, modify.by.t0=FALSE,...){
    if(!inherits(x, "wblr")){
        stop("\"x\" argument is not of class \"wblr\".")
    }
arg <- list(...)
if(length(arg) > 0) {
        if(inherits(arg[[1]], "list")) {
arg<-arg[[1]]
}
}
if(!is.null(c(arg$log,arg$canvas))) stop("cannot set log or canvas option in wblr.fit")
if(!is.null(c(arg$ties.handler, arg$ties))) warning("handling of ties is only performed on object creation in function wblr.")
if(!is.null(arg$method.fit)) {
if(length(arg$method.fit)>1) {
warning("concatinated rr fit methods are depreciated, use 'rr', 'rr-xony', or 'rr-yonx' ")
if(tolower(arg$method.fit[2])=="xony") arg$method.fit<-"rr-xony"
if(tolower(arg$method.fit[2])=="yonx") arg$method.fit<-"rr-yonx"
}
if(tolower(arg$method.fit) == "weibayes" && is.null(arg$weibayes.beta)) {
warning("weibayes.beta not provided, using default")
}
}
if(!is.null(x$data$dlines)) {
if(!is.null(arg$method.fit)) {
if("rr" %in% tolower(arg$method.fit)){
stop("rank regression is not performed on interval data, use an mle fit")
}
}
}
if(!is.null(arg$dist)) {
if(arg$dist=="lnorm") arg$dist<-"lognormal"
}
opadata <- x$options
opafit <- modifyList(opadata,arg)
if(!is.null(x$data$dlines)) {
if("rr" %in% substr(tolower(opafit$method.fit),1,2)){
warning("rank regression is not performed on interval data method.fit has been set to mle")
opafit$method.fit<-"mle"
}
}
supported_dist <- c(
"weibull","weibull2p","weibull3p",
"lognormal","lognormal2p","lognormal3p")
supported_fit <- c("rr", "rr-xony", "rr-yonx", "mle","mle-rba","mle-unbias", "weibayes")
if(is.null(opafit$dist)){
opafit$dist <- "weibull2p"
}
if(!any(tolower(opafit$dist) %in% supported_dist)){
stop(paste0(opafit$dist," is not a supported fit distribution."))
}
if(!any(tolower(opafit$method.fit) %in% supported_fit)){
stop(paste0(opafit$method.fit," is not a supported fit method."))
}
if(tolower(opafit$dist) %in% c("weibull3p", "lognormal3p")){
if(!is.null(x$data$dlines)) {
if(any(x$data$dlines$t1==0)) {
stop("3p modification not permitted on data with discoveries (left==0)")
}
}
}
if(tolower(opafit$method.fit)== "weibayes" && !any(tolower(opafit$dist) %in% c("weibull","weibull2p"))) {
stop("weibayes fitting only applies to 2 parameter weibull")
}
if(modify.by.t0==TRUE) {
if(tolower(opafit$dist) %in% c("weibull3p", "lognormal3p")){
x$fit<-NULL
}else{
modify.by.t0<-FALSE
warning("modify.by.t0 ignored for non-3p fitting")
}
}
atleastonefit <- FALSE
if(is.null(x$fit)){
i <- 1
x$fit <- list()
}else{
i <- length(x$fit)+1
}
x$fit[[i]] <- list()
op <- unique(c(names(x$options),names(opafit)))
if(length(li <- opafit[sapply(op,function(y){
!identical(x$options[[y]], opafit[[y]])})]) > 0){
x$fit[[i]]$options <- li
}
if(is.null(x$fit[[i]]$options)) {
x$fit[[i]]$options<-list()
}
x$fit[[i]]$options$dist<-opafit$dist
if(!is.null(x$data$dlines) && any(c("rr","rr2") %in% tolower(opafit$method.fit))){
        opafit <- modifyList(opafit, list(method.fit="mle"))
}
x$fit[[i]]$options$method.fit<-opafit$method.fit
x$fit[[i]]$n <- x$n
x$fit[[i]]$fail <- x$fail
x$fit[[i]]$cens <- x$cens
x$fit[[i]]$discovery <- x$discovery
x$fit[[i]]$interval <- x$interval
if(tolower(opafit$dist) %in% c("weibull","weibull2p","weibull3p")){
fit_dist<-"weibull"
}else{
if(tolower(opafit$dist) %in% c("lnorm","lognormal","lognormal2p", "lognormal3p")){
fit_dist<-"lnorm"
}else{
stop(paste0("dist option ", opafit$dist, "is not recognized for distribution fitting"))
}
}
npar<-2
if(tolower(opafit$dist) %in% c("weibull3p", "lognormal3p")){
npar<-3
}
if(any(c("rr") %in% substr(tolower(opafit$method.fit),1,2))){
regression_order<-"XonY"
if(nchar(opafit$method.fit)>2){
if(substr(tolower(opafit$method.fit),4,7)=="yonx") {
regression_order<-"YonX"
}
}
if(any(x$data$dpoints$weight!=1)) {
lrdiv<-x$data$lrq_frame$left/x$data$lrq_frame$right
lrqframe2<-data.frame(x$data$lrq_frame, lrdiv)
fail_df<-lrqframe2[lrqframe2$lrdiv==1,c(1,4,3)]
susp_df<-lrqframe2[lrqframe2$lrdiv<0,c(1,4,3)]
susp_df$lrdiv<-rep(0, nrow(susp_df))
teqframe<-rbind(fail_df, susp_df)
names(teqframe)<-c("time", "event", "qty")
fit_vec<-lslr(getPPP(teqframe), dist=fit_dist, npar=npar, reg_method=regression_order)
}else{
fit_vec<-lslr(x$data$dpoints, dist=fit_dist, npar=npar, reg_method=regression_order)
}
if(!is.null(fit_vec)){
atleastonefit<-TRUE
x$fit[[i]]$fit_vec <- fit_vec
if(fit_dist=="weibull") {
x$fit[[i]]$beta <- fit_vec[2]
x$fit[[i]]$eta <- fit_vec[1]
}
if(fit_dist=="lnorm") {
x$fit[[i]]$meanlog<-fit_vec[1]
x$fit[[i]]$sdlog<-fit_vec[2]
}
x$fit[[i]]$gof <- list()
if(npar==2){
x$fit[[i]]$gof$r2 <- fit_vec[[3]]
x$fit[[i]]$gof$prr <- fit_vec[[4]]
}else{
x$fit[[i]]$t0 <- fit_vec[3]
x$fit[[i]]$gof$r2 <- fit_vec[[4]]
}
}else{
            x$fit[i] <- list(NULL)
}
}
if(any(c("mle","mle-rba","mle-unbias") %in% tolower(opafit$method.fit))){
debias<-"none"
if(tolower(opafit$method.fit) == "mle-rba") debias <- "rba"
if(tolower(opafit$method.fit) == "mle-unbias") {
if(fit_dist == "weibull") {
debias <- "hrbu"
}else{
debias <- "rba"
}
}
fit_vec<-mlefit(x$data$lrq_frame, fit_dist, npar, debias)
if(!is.null(fit_vec)){
atleastonefit<-TRUE
x$fit[[i]]$fit_vec <- fit_vec
if(fit_dist=="weibull") {
x$fit[[i]]$beta <- fit_vec[2]
x$fit[[i]]$eta <- fit_vec[1]
}
if(fit_dist=="lnorm") {
x$fit[[i]]$meanlog<-fit_vec[1]
x$fit[[i]]$sdlog<-fit_vec[2]
}
x$fit[[i]]$gof <- list()
if(npar==2){
x$fit[[i]]$gof$loglik <- fit_vec[[3]]
}else{
x$fit[[i]]$t0 <- fit_vec[3]
x$fit[[i]]$gof$loglik <- fit_vec[[4]]
}
}
}
if(tolower(opafit$method.fit) == "weibayes") {
        if(!is.null(x$data$dlines)) stop("weibayes processing not implemented for interval data")
lrdiv<-x$data$lrq_frame$left/x$data$lrq_frame$right
lrqframe2<-data.frame(x$data$lrq_frame, lrdiv)
fail_df<-lrqframe2[lrqframe2$lrdiv==1,c(1,4,3)]
susp_df<-lrqframe2[lrqframe2$lrdiv<0,c(1,4,3)]
susp_df$lrdiv<-rep(0, nrow(susp_df))
teqframe<-rbind(fail_df, susp_df)
names(teqframe)<-c("time", "event", "qty")
weibayes_eta<-weibayes(teqframe, beta=opafit$weibayes.beta)
atleastonefit<-TRUE
fit_vec<-c(weibayes_eta, opafit$weibayes.beta)
names(fit_vec)<-c("Eta", "Beta")
x$fit[[i]]$fit_vec <- fit_vec
x$fit[[i]]$beta <- opafit$weibayes.beta
x$fit[[i]]$eta <- weibayes_eta
}
if(!atleastonefit){
warning("*** calculateSingleFit: Nothing has been fitted. ***\n",
'*** Does \"method.fit\" include sensible options? ***')
}
x$fit[[i]]$modified<-FALSE
if(modify.by.t0==TRUE) {
if(!is.null(x$fit[[i]]$t0) ){
for(da_line in 1:nrow(x$data$lrq_frame)) {
if(x$data$lrq_frame$left[da_line]<x$fit[[i]]$t0 && x$data$lrq_frame$right[da_line]<0) {
x$data$lrq_frame$left[da_line]<-x$fit[[i]]$t0+1e-5
}
}
            if(x$fit[[i]]$t0 >= min(x$data$lrq_frame$left)) {
stop("t0 too large for data modification")
}
x$data$lrq_frame$left<-x$data$lrq_frame$left - x$fit[[i]]$t0
x$data$lrq_frame$right[x$data$lrq_frame$right!=-1]<-x$data$lrq_frame$right[x$data$lrq_frame$right!=-1] - x$fit[[i]]$t0
if(!is.null(x$data$dpoints$time)) {
x$data$dpoints$time<-x$data$dpoints$time - x$fit[[i]]$t0
}
if(!is.null(x$data$dlines$t1)) {
x$data$dlines$t1<-x$data$dlines$t1 - x$fit[[i]]$t0
x$data$dlines$t2<-x$data$dlines$t2 - x$fit[[i]]$t0
}
x$fit[[i]]$modified<-TRUE
}else{
warning("t0 not found for data modification")
}
}
x
}
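# Hedged usage sketch (illustrative only): assumes a `wblr` object created
# from failure times by a constructor such as wblr(); the constructor
# arguments are assumptions, not taken from this source.
# obj <- wblr(c(30, 49, 82, 90, 96))
# obj <- wblr.fit(obj, method.fit = "mle", dist = "weibull2p")
# obj$fit[[1]]$beta
# obj$fit[[1]]$eta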
tfpwmk <- function(x) {
  options(scipen = 999)
z = NULL
pval = NULL
S = 0
var.S = NULL
Tau = NULL
if (is.vector(x) == FALSE) {
stop("Input data must be a vector")
}
  if (any(!is.finite(x))) {
    x <- x[is.finite(x)]
    warning("The input vector contains non-finite values; they were removed before analysis")
  }
n<-length(x)
if (n < 3) {
stop("Input vector must contain at least three values")
}
rep(NA, n * (n - 1)/2) -> V
k = 0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
k = k+1
V[k] = (x[j]-x[i])/(j-i)
}
}
median(V,na.rm=TRUE)->slp
t=1:length(x)
xt<-(x[1:n])-((slp)*(t))
acf(xt, lag.max=1, plot=FALSE)$acf[-1] -> ro
a=1:(length(xt)-1)
b=2:(length(xt))
xp<-(xt[b]-(xt[a]*ro))
l<-length(xp)
q=1:l
y<-(xp[1:l]+((slp)*(q)))
n1<-length(y)
for (i in 1:(n1-1)) {
for (j in (i+1):n1) {
S = S + sign(y[j]-y[i])
}
}
var.S = n1*(n1-1)*(2*n1+5)*(1/18)
if(length(unique(y)) < n1) {
unique(y) -> aux
for (i in 1:length(aux)) {
length(which(y == aux[i])) -> tie
if (tie > 1) {
var.S = var.S - tie*(tie-1)*(2*tie+5)*(1/18)
}
}
}
if (S == 0) {
z = 0
}else
if (S > 0) {
z = (S-1)/sqrt(var.S)
} else {
z = (S+1)/sqrt(var.S)
}
pval = 2*pnorm(-abs(z))
Tau = S/(.5*n1*(n1-1))
rep(NA, n1 * (n1 - 1)/2) -> W
m = 0
for (i in 1:(n1-1)) {
for (j in (i+1):n1) {
m = m+1
W[m] = (y[j]-y[i])/(j-i)
}
}
median(W,na.rm=TRUE)->slp1
return(c("Z-Value" = z,
"Sen's Slope"= slp1,
"Old Sen's Slope"= slp,
"P-value" = pval,
"S" = S,
"Var(S)" = var.S,
"Tau"=Tau))
}
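# Hedged usage example (illustrative only): a short synthetic series with a
# mild upward trend; the named vector returned matches the fields above.
# set.seed(42)
# tfpwmk(cumsum(rnorm(30, mean = 0.2)))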
data("hawkins")
pairs(hawkins)
mod.ols <- lm(y ~ ., data=hawkins)
qqnorm(mod.ols$residuals, ylab="LS residuals")
mod.lms <- lmsreg(y ~ ., data=hawkins)
qqnorm(mod.lms$residuals, ylab="LMS residuals")
mod <- fwdlm(y ~ ., data=hawkins)
summary(mod)
plot(mod)
plot(mod, 1, squared=TRUE, xlim=c(0,132))
par(mfrow=c(1,2))
plot(mod, 5, scaled=FALSE)
plot(mod, 6, ylim=c(-40,40))
par(mfrow=c(1,1))
plot(mod, 8, ylim=c(0,20))
par(mfrow=c(2,1))
plot(mod, 3, ylim=c(0,10))
plot(mod, 4, ylim=c(0,20))
par(mfrow=c(1,1))
plot(mod, 9)
n <- nrow(hawkins)
inc <- mod$included
pch <- rep(1,n)
pch[inc$"m=86"] <- 3
pch[setdiff(inc$"m=128", inc$"m=122")] <- 15
pch[setdiff(inc$"m=122", inc$"m=110")] <- 17
par(mfrow=c(1,1))
plot(y ~ x8, data=hawkins, pch=pch)
pairs(hawkins, col=as.numeric(factor(pch)), pch=pch)
data(stackloss)
pairs(stackloss)
mod1.ols <- lm(Loss ~ . ,data=stackloss)
summary(mod1.ols)
plot(mod1.ols)
mod1 <- fwdlm(Loss ~ ., data=stackloss, nsamp="exact")
summary(mod1)
plot(mod1)
plot(mod1, 1, squared=TRUE)
plot(mod1, 2)
plot(mod1, 6, ylim=c(-40,40))
mod2 <- fwdlm(Loss ~ Air + Temp, data=stackloss)
summary(mod2)
plot(mod2, 2)
plot(mod2, 10, ylim=c(0.9,1))
mod3.ols <- lm(Loss ~ Air*Temp + I(Air^2), data=stackloss)
summary(mod3.ols)
mod4 <- fwdlm(Loss ~ Air*Temp + I(Air^2), data=stackloss, nsamp="exact")
plot(mod4, 1)
plot(mod4, 2)
mod5 <- fwdsco(Loss ~ Air*Temp + I(Air^2), data=stackloss, lambda = 1, nsamp="exact")
summary(mod5)
plot(mod5)
mod6 <- fwdlm(log(Loss) ~ Air*Temp + I(Air^2), data=stackloss, nsamp="exact")
summary(mod6)
plot(mod6, 1)
plot(mod6, 6)
plot(mod6, 8)
mod7 <- fwdlm(log(Loss) ~ Air + Temp, data=stackloss, nsamp="exact")
plot(mod7, 10, ylim=c(0.9,1))
plot(mod7, 1, ylim=c(-4,4))
mod8 <- fwdsco(Loss ~ Air + Temp, data=stackloss, lambda = 0, nsamp="exact")
summary(mod8)
plot(mod8, ylim=c(-4,4))
mod9 <- fwdsco(Loss ~ Air + Temp, data=stackloss, lambda = 0.5, nsamp="exact")
plot(mod9, ylim=c(-4,4))
mod10 <- fwdlm(sqrt(Loss) ~ Air + Temp, data=stackloss, nsamp="exact")
summary(mod10)
plot(mod10, 6, ylim=c(-40,40))
plot(mod10, 10, ylim=c(0.9,1))
plot(mod10, 1, ylim=c(-4,4))
data(salinity)
mod1 <- fwdlm(salinity ~ lagsalinity + trend + waterflow, data=salinity, nsamp="exact")
plot(mod1)
plot(mod1, 1)
plot(mod1, 2)
mark <- rep(16, nrow(salinity)); mark[16] <- 1
pairs(salinity, pch=mark)
salinity$waterflow[16] <- 23.443
mod2 <- fwdlm(salinity ~ lagsalinity + trend + waterflow, data=salinity, nsamp="exact")
plot(mod2, 1, th.Res=1.5)
par(mfrow=c(1,2))
plot(mod2, 6, ylim=c(-40,40))
mark <- rep(1, nrow(salinity)); mark[c(9, 15, 17)] <- 16
plot(salinity ~ lagsalinity, data=salinity, pch=mark, ylab="Salinity", xlab="Lagged salinity")
text(salinity$lagsalinity[mark==16], salinity$salinity[mark==16], which(mark==16), pos=4)
par(mfrow=c(1,1))
mod3 <- fwdlm(salinity ~ lagsalinity + waterflow, data=salinity, nsamp="exact")
plot(mod3, 1)
plot(mod3, 6, ylim=c(-40,40))
col <- pch <- rep(1,nrow(salinity))
col[c(6,14,15)] <- pch[c(6,14,15)] <- 3
col[c(9,15,17)] <- pch[c(9,15,17)] <- 4
pairs(salinity[,c(4,1,3)], col=col, pch=pch,
upper.panel = function(x,y,...) text(x,y,1:length(x), ...) )
data(ozone)
mod1 <- fwdlm(y ~ ., data=ozone)
plot(mod1, 1)
plot(mod1, 6, ylim=c(-10,10))
plot(mod1, 8, ylim=c(0,6))
mod1.sco <- fwdsco(y ~ ., data=ozone, lambda=1)
plot(mod1.sco)
mod2 <- lm(log(y) ~ ., data=ozone)
Time <- 1:nrow(ozone)
library(MASS)
plot(Time, studres(mod2), type="l")
abline(0, 0, lty=2)
plot(fwdsco(y ~ ., data=ozone, lambda=0), ylim=c(-6,6))
ozone$Time <- Time
mod3 <- lm(log(y) ~ ., data=ozone)
summary(mod3)
mod4 <- lm(log(y) ~ Time + x2 + x5 + x6 + x8, data=ozone)
summary(mod4)
mod5 <- fwdlm(log(y) ~ Time + x2 + x5 + x6 + x8, data=ozone)
plot(mod5, 1, ylim=c(-4.5, 2.5))
plot(mod5, 2)
mod5.sco <- fwdsco(y ~ Time + x2 + x5 + x6 + x8, data=ozone, lambda=0)
par(mfrow=c(1,2))
plot(mod5.sco, ylim=c(-6,6), plot.mle=FALSE)
plot(mod5, 6, ylim=c(-40,40))
`getLevels` <- function(data) {
data <- as.data.frame(data)
pN <- unlist(lapply(data, possibleNumeric))
noflevels <- rep(NA, ncol(data))
ulevels <- rep(NA, ncol(data))
noflevels[pN] <- apply(
data[, pN, drop = FALSE],
2,
max
) + 1
ulevels <- apply(
data,
2,
function(x) {
return(length(unique(x)))
}
)
noflevels[is.na(noflevels)] <- ulevels[is.na(noflevels)]
factor <- unlist(lapply(data, is.factor))
declared <- unlist(lapply(data, function(x) inherits(x, "declared")))
noflevels[pN][
apply(
data[, pN, drop = FALSE],
2,
function(x) any(x %% 1 > 0)
)
] <- 2
if (any(factor | declared)) {
noflevels[factor | declared] <- pmin(noflevels[factor | declared], ulevels[factor | declared])
}
noflevels[noflevels == 1] <- 2
return(noflevels)
}
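# Hedged usage example (illustrative only): assumes the helper
# possibleNumeric() (e.g. from the admisc package) is available, as the
# function body requires. Expected result: 2 levels for the binary column,
# 3 for the factor, and 2 for the fuzzy (non-integer) column.
# getLevels(data.frame(a = c(0, 1, 1, 0),
#                      b = factor(c("x", "y", "z", "x")),
#                      c = c(0, 0.5, 1, 0.5)))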
formatCode <- function(code, width = 500L, formatter = styleText, ...) {
if (!inherits(code, "shinyMetaDeparsed")) {
code <- deparseCode(code, width = width)
}
code <- do.call(formatter, c(list(code), list(...)))
prefix_class(code, "shinyMetaFormatted")
}
styleText <- function(code, ...) {
code <- rebreak(code)
styler::style_text(code, ...)
}
deparseCode <- function(code, width = 500L) {
code <- walk_ast(code, quo_to_expr, preorder = TRUE)
code <- comment_flags_to_enclosings(code)
code <- walk_ast(code, remove_meta_classes)
code_text <- deparse_flatten(code, width = width)
code_text <- comment_remove_enclosing(code_text)
oldClass(code_text) <- "shinyMetaDeparsed"
code_text
}
quo_to_expr <- function(expr) {
if (rlang::is_quosure(expr)) {
rlang::quo_get_expr(expr)
} else {
expr
}
}
remove_meta_classes <- function(expr) {
remove_class(expr, c("shinyMetaString", "shinyMetaExpr"))
}
deparse_flatten <- function(expr, width = 500L) {
if (rlang::is_call(expr, "{")) {
paste0(vapply(expr[-1], deparse_flatten, character(1)), collapse = "\n")
} else {
paste0(deparse(expr, width.cutoff = width), collapse = "\n")
}
}
rebreak <- function(str) {
str <- paste(str, collapse = "\n")
tokens <- sourcetools::tokenize_string(str)
tokens$value <- paste0(
tokens$value,
ifelse(
tokens$type == "operator" & tokens$value == "%>%",
"\n",
""
)
)
operator_newline <- grepl("\n", tokens$value) &
tokens$type == "whitespace" &
c(FALSE, head(tokens$type %in% c("comma", "operator"), -1))
tokens$value[operator_newline] <- " "
new_str <- paste(tokens$value, collapse = "")
gsub("\\s*\\r?\\n\\s*", "\n", new_str)
}
comment_remove_enclosing <- function(x) {
if (!is.character(x) || length(x) > 1) {
stop("Expected a string (character vector of length 1).")
}
txt <- strsplit(x, "\n")[[1]]
comment_index <- grep(paste0('^\\s*"', comment_start), txt)
if (!length(comment_index)) return(txt)
txt[comment_index] <- sub(paste0('^(\\s*)"', comment_start), "\\1", txt[comment_index])
txt[comment_index] <- sub(paste0(comment_end, '"$'), "", txt[comment_index])
txt[comment_index] <- gsub("\\\"", "\"", txt[comment_index], fixed = TRUE)
paste(txt, collapse = "\n")
}
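# Hedged usage sketch (illustrative only): formatCode() deparses a quoted
# expression and restyles it; walk_ast(), comment_start and comment_end are
# package internals assumed to be defined elsewhere.
# formatCode(quote({
#   x <- 1:10
#   mean(x)
# }))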
finiteSampleCorrection <- function(r, n, model = "locsc"){
if(model == "locsc" & r >= 1.74) return(r)
if(model %in% c("loc", "sc") & r >= 3.0) return(r)
if(n == 1) return(Inf)
if(n == 2) return(Inf)
eps <- r/sqrt(n)
ns <- c(3:50, seq(55, 100, by = 5), seq(110, 200, by = 10),
seq(250, 500, by = 50))
epss <- c(seq(0.001, 0.01, by = 0.001), seq(0.02, to = 0.5, by = 0.01))
if(n %in% ns){
ind <- ns == n
}else{
ind <- which.min(abs(ns-n))
}
if(model == "locsc")
return(max(r, approx(x = epss, y = .finiteSampleRadius.locsc[,ind], xout = eps, rule = 2)$y))
if(model == "loc")
return(max(r, approx(x = epss, y = .finiteSampleRadius.loc[,ind], xout = eps, rule = 2)$y))
if(model == "sc")
return(max(r, approx(x = epss, y = .finiteSampleRadius.sc[,ind], xout = eps, rule = 2)$y))
else
stop("argument 'model' has to be 'locsc', 'loc' or 'sc'")
}
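# Hedged usage sketch (illustrative only): depends on the internal lookup
# tables .finiteSampleRadius.locsc/.loc/.sc (e.g. shipped with the RobLox
# package), so it is shown commented out.
# finiteSampleCorrection(r = 0.5, n = 20, model = "locsc")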
cleanup_data = function(data, ...) {
UseMethod("cleanup_data")
}
cleanup_data.data.frame = function(data, ... ){
nc = ncol(data)
group = pdr = patient_id = minute = NULL
assert_that(nc >= 2)
data = tibble::set_tidy_names(data)
data = dplyr::distinct(data)
if (nc == 2) {
names(data) = c("minute", "pdr")
if (max(table(data$minute)) > 1)
stop("Only two columns, and multiple PDR values for given time. Forgot column pat_id?")
}
if (!(is.numeric(data$minute) && (is.numeric(data$pdr))))
stop("Columns minute and pdr must be numeric")
if (nc == 3) {
if (!all(names(data) == c("patient_id", "minute", "pdr"))) {
stop("Three columns must be named patient_id, minute, pdr")
}
if (max(table(data$minute, data$patient_id)) > 1)
stop("Multiple data for one patient and minute. Forgot column group?")
}
if (nc == 4) {
if (!all(names(data) == c("patient_id", "group", "minute", "pdr")))
stop("Four columns must be named patient_id, group, minute, pdr")
if (max(table(data$minute, data$patient_id, data$group)) > 1)
warning("Multiple data for one patient, minute and group. Included the same patient's data twice?")
data$group = as.character(data$group)
}
comment = comment(data)
data = data %>%
filter(minute >= 0 ) %>%
mutate(
minute = pmax(minute, 0.01),
pdr = as.vector(pdr)
)
has_patient_id = "patient_id" %in% names(data)
if (!has_patient_id) {
data$patient_id = "pat_a"
}
data$patient_id = as.character(data$patient_id)
data$patient_id = str_replace_all(str_trim(data$patient_id), " ", "_")
has_group = "group" %in% names(data)
if (!has_group) {
if (!all(with(data, table(patient_id, minute)) %in% 0:1))
stop("Multiple values for the same patient at the same minute require a <<group>> column")
data$group = "A"
} else {
data$group = str_replace_all(str_trim(data$group), " ", "_")
}
data = data %>%
select(patient_id, group, minute, pdr)
if (!is.null(comment))
comment(data) = comment
data
}
cleanup_data.matrix = function(data, ... ){
if (ncol(data) > 2)
stop("A matrix can only be used as data input when two columns <minute> and <pdr> are passed. Use a data frame otherwise")
cleanup_data(as_tibble(data), ...)
}
cleanup_data.breathtest_data_list = function(data, ... ){
cleanup_data.list(data, ...)
}
cleanup_data.list = function(data, ... ){
if (is.null(data)) return(NULL)
ret = data.frame()
comment = list()
for (igroup in seq_along(data)) {
d1 = data[[igroup]]
if (is(d1,"simulated_breathtest_data"))
d1 = d1$data
is_breathtest_data = inherits(d1, "breathtest_data")
needs_group =
!("group" %in% names(d1)) &&
("patient_id" %in% names(d1))
group = names(data)[igroup]
if (is.null(group)) group = LETTERS[igroup]
if (needs_group && !is_breathtest_data) {
d1$group = group
d1 = d1[c("patient_id", "group", "minute", "pdr")]
}
dd = cleanup_data(d1, ...)
if (is_breathtest_data) {
if (dot_lgl("use_filename_as_patient_id", ...))
dd$patient_id = str_sub(d1["file_name"], 1, -5)
dd$group = group
}
comment[[igroup]] = comment(d1)
ret = rbind(ret, dd )
}
if (max(table(ret$minute, ret$patient_id, ret$group)) > 1)
warning("Multiple data for one patient, minute and group. Included the same patient's data twice?")
ret = tibble::as_tibble(ret[,c("patient_id", "group", "minute", "pdr")])
comment = unique(comment)
comment[map_lgl(comment, is.null)] = NULL
if (length(comment) > 0)
comment(ret) = paste(comment, collapse = "\n")
ret
}
cleanup_data.breathtest_data = function(data, ... ){
id = data$patient_id
if (is.null(id) ||
id == "0" ||
id == "" ||
dot_lgl("use_filename_as_patient_id", ...))
id = str_sub(data["file_name"], 1, -5)
d = cbind(patient_id = id, data$data[,c("minute", "pdr")])
cleanup_data(d, ...)
}
cleanup_data.simulated_breathtest_data = function(data, ... ){
cleanup_data(data$data, ...)
}
dot_lgl = function(label, ...){
!(is.null(list(...)[[label]]))
}
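# Hedged usage example (illustrative only): a minimal two-column input is
# renamed to minute/pdr and padded with default patient_id and group.
# cleanup_data(data.frame(minute = c(0, 15, 30, 60),
#                         pdr = c(24.8, 30.9, 26.1, 18.2)))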
context("sdf collect")
sc <- testthat_spark_connection()
test_that("sdf_collect() works properly", {
mtcars_tbl <- testthat_tbl("mtcars")
mtcars_data <- sdf_collect(mtcars_tbl)
expect_equivalent(mtcars, mtcars_data)
})
test_that("sdf_collect() can collect the first n rows of a Spark dataframe", {
mtcars_tbl <- testthat_tbl("mtcars", repartition = 5)
mtcars_data <- sdf_collect(mtcars_tbl, n = 10)
expect_equivalent(mtcars[1:10,], mtcars_data)
})
test_that("sdf_collect() works properly with impl = \"row-wise-iter\"", {
mtcars_tbl <- testthat_tbl("mtcars")
mtcars_data <- sdf_collect(mtcars_tbl, impl = "row-wise-iter")
expect_equivalent(mtcars, mtcars_data)
})
test_that("sdf_collect() works properly with impl = \"column-wise\"", {
mtcars_tbl <- testthat_tbl("mtcars")
mtcars_data <- sdf_collect(mtcars_tbl, impl = "column-wise")
expect_equivalent(mtcars, mtcars_data)
})
test_that("sdf_collect() works with nested lists", {
if (spark_version(sc) < "2.4") {
skip("serializing nested list into Spark StructType is only supported in Spark 2.4+")
}
df <- tibble::tibble(
a = list(c(1, 2, 3), c(4, 5), c(6)),
b = list(c("foo"), c("bar", "foobar"), c("a", "b", "c"))
)
sdf <- sdf_copy_to(sc, df, overwrite = TRUE)
res <- sdf_collect(sdf)
expect_equivalent(df$a, res$a)
expect_equivalent(df$b, sapply(res$b, function(x) do.call(c, as.list(x))))
})
test_that("sdf_collect() works with nested named lists", {
if (spark_version(sc) < "2.4") {
skip("serializing nested named list into Spark StructType is only supported in Spark 2.4+")
}
df <- tibble::tibble(
x = list(c(a = 1, b = 2), c(a = 3, b = 4), c(a = 5, b = 6)),
y = list(c(a = "foo", b = "bar"), c(a = "a", b = "b"), c(a = "", b = "")),
z = list(list(a = list(c = "foo", d = "bar", e = list("e")), b = "b"))
)
sdf <- sdf_copy_to(sc, df, overwrite = TRUE)
res <- sdf_collect(sdf)
for (col in colnames(df)) {
expect_equivalent(lapply(df[[col]], as.list), res[[col]])
}
})
test_that("sdf_collect() works with boolean array column", {
skip_on_arrow()
sdf <- dplyr::tbl(
sc,
dplyr::sql("
SELECT
*
FROM
VALUES (ARRAY(FALSE)),
(ARRAY(TRUE, FALSE)),
NULL,
(ARRAY(TRUE, NULL, FALSE))
AS TAB(`arr`)
")
)
expect_equivalent(
sdf %>% sdf_collect(),
tibble::tibble(arr = list(FALSE, c(TRUE, FALSE), NA, c(TRUE, NA, FALSE)))
)
})
test_that("sdf_collect() works with byte array column", {
skip_on_arrow()
sdf <- dplyr::tbl(
sc,
dplyr::sql("
SELECT
*
FROM
VALUES ARRAY(CAST(97 AS BYTE), CAST(98 AS BYTE)),
ARRAY(CAST(98 AS BYTE), CAST(99 AS BYTE), CAST(0 AS BYTE), CAST(100 AS BYTE)),
NULL,
ARRAY(CAST(101 AS BYTE))
AS TAB(`arr`)
")
)
df <- sdf %>% collect()
expect_equal(
df$arr,
list(
charToRaw("ab"),
c(charToRaw("bc"), as.raw(0), charToRaw("d")),
NA,
charToRaw("e")
)
)
})
test_that("sdf_collect() works with integral array column", {
skip_on_arrow()
for (type in c("SHORT", "INT")) {
sdf <- dplyr::tbl(
sc,
dplyr::sql(
glue::glue(
"
SELECT
*
FROM
VALUES ARRAY(CAST(1 AS {type})),
ARRAY(CAST(2 AS {type}), CAST(3 AS {type})),
NULL,
ARRAY(CAST(4 AS {type}), NULL, CAST(5 AS {type}))
AS TAB(`arr`)
",
type = type
)
)
)
expect_equivalent(
sdf %>% sdf_collect(),
tibble::tibble(arr = list(1L, c(2L, 3L), NA, c(4L, NA, 5L)))
)
}
})
test_that("sdf_collect() works with numeric array column", {
skip_on_arrow()
for (type in c("FLOAT", "LONG", "DOUBLE")) {
sdf <- dplyr::tbl(
sc,
dplyr::sql(
glue::glue(
"
SELECT
*
FROM
VALUES ARRAY(CAST(1 AS {type})),
ARRAY(CAST(2 AS {type}), CAST(3 AS {type})),
NULL,
ARRAY(CAST(4 AS {type}), NULL, CAST('NaN' AS {type}), CAST(5 AS {type}))
AS TAB(`arr`)
",
type = type
)
)
)
expect_equivalent(
sdf %>% sdf_collect(),
tibble::tibble(arr = list(1, c(2, 3), NA, c(4, NA, NaN, 5)))
)
}
})
test_that("sdf_collect() works with string array column", {
skip_on_arrow()
sdf <- dplyr::tbl(
sc,
dplyr::sql("
SELECT
*
FROM
VALUES ARRAY('ab'), ARRAY('bcd', 'e'), NULL, ARRAY('fghi', NULL, 'jk')
AS TAB(`arr`)
")
)
expect_equivalent(
sdf %>% sdf_collect(),
tibble::tibble(arr = list("ab", c("bcd", "e"), NA, c("fghi", NA, "jk")))
)
})
test_that("sdf_collect() works with temporal array column", {
skip_on_arrow()
for (type in c("DATE", "TIMESTAMP")) {
sdf <- dplyr::tbl(
sc,
dplyr::sql(
glue::glue(
"
SELECT
*
FROM
VALUES ARRAY(CAST('1970-01-01' AS {type})),
ARRAY(CAST('1970-01-02' AS {type}), CAST('1970-01-03' AS {type})),
NULL,
ARRAY(CAST('1970-01-04' AS {type}), NULL, CAST('1970-01-05' AS {type}))
AS TAB(`arr`)
",
type = type
)
)
)
cast_fn <- if (type == "DATE") as.Date else as.POSIXct
expect_equivalent(
sdf %>% dplyr::pull(arr),
list(
cast_fn("1970-01-01", tz = ""),
cast_fn(c("1970-01-02", "1970-01-03"), tz = ""),
NA,
cast_fn(c("1970-01-04", NA, "1970-01-05"), tz = "")
)
)
}
})
test_that("sdf_collect() works with struct array column", {
if (spark_version(sc) < "2.3") {
skip("deserializing Spark StructType into named list is only supported in Spark 2.3+")
}
jsonFilePath <- get_test_data_path("struct-inside-arrays.json")
sentences <- spark_read_json(sc, name = "sentences", path = jsonFilePath, overwrite = TRUE)
sentences_local <- sdf_collect(sentences)
expect_equal(sentences_local$text, c("t e x t"))
expected <- list(
list(
begin = 0L,
end = 58L,
metadata = list(embeddings = c(1L, 2L, 3L), sentence = 1L),
result = "French",
type = "document1"
),
list(
begin = 59L,
end = 118L,
metadata = list(embeddings = c(4L, 5L, 6L), sentence = 2L),
result = "English",
type = "document2"
)
)
expect_equal(sentences_local$sentences, list(expected))
})
test_that("sdf_collect() works with structs inside nested arrays", {
if (spark_version(sc) < "2.3") {
skip("deserializing Spark StructType into named list is only supported in Spark 2.3+")
}
if (spark_version(sc) < "2.4") {
skip("to_json on nested arrays is only supported in Spark 2.4+")
}
jsonFilePath <- get_test_data_path("struct-inside-nested-arrays.json")
sentences <- spark_read_json(sc, name = "sentences", path = jsonFilePath, overwrite = TRUE)
sentences_local <- sdf_collect(sentences)
expect_equal(sentences_local$text, c("t e x t"))
expected <- list(
list(list(
begin = 0,
end = 58,
metadata = list(embeddings = c(1, 2, 3), sentence = 1),
result = "French",
type = "document1"
)),
list(list(
begin = 59,
end = 118,
metadata = list(embeddings = c(4, 5, 6), sentence = 2),
result = "English",
type = "document2"
))
)
expect_equal(sentences_local$sentences, list(expected))
})
test_that("sdf_collect() supports callback", {
if (spark_version(sc) < "2.0") skip("batch collection requires Spark 2.0")
batch_count <- 0
row_count <- 0
df <- tibble(id = seq(1, 10), val = lapply(seq(1, 10), function(x) list(a = x, b = as.character(x))))
sdf <- sdf_copy_to(sc, df, repartition = 2, overwrite = TRUE)
collected <- list()
sdf %>%
sdf_collect(callback = function(batch_df) {
batch_count <<- batch_count + 1
row_count <<- row_count + nrow(batch_df)
collected <<- append(collected, batch_df$val)
})
expect_equal(
batch_count,
1
)
expect_equal(
row_count,
10
)
if (spark_version(sc) >= "2.4") {
expect_equal(
collected,
df$val
)
}
if (spark_version(sc) >= "2.4") {
collected <- list()
sdf %>%
sdf_collect(callback = function(batch_df, idx) {
collected <<- append(collected, batch_df$val)
})
expect_equal(
collected,
df$val
)
}
sdf_len_batch_count <- 0
sdf_len(sc, 10, repartition = 2) %>%
sdf_collect(callback = function(df) {
sdf_len_batch_count <<- sdf_len_batch_count + 1
})
expect_equal(
sdf_len_batch_count,
ifelse("arrow" %in% .packages(), 2, 1)
)
sdf_len_last_idx <- 0
sdf_len(sc, 10, repartition = 2) %>%
sdf_collect(callback = function(df, idx) {
sdf_len_last_idx <<- idx
})
expect_equal(
sdf_len_last_idx,
ifelse("arrow" %in% .packages(), 2, 1)
)
})
test_that("sdf_collect() supports callback expression", {
if (spark_version(sc) < "2.0") skip("batch collection requires Spark 2.0")
row_count <- 0
sdf_len(sc, 10, repartition = 2) %>%
collect(callback = ~ (row_count <<- row_count + nrow(.x)))
expect_equal(
10,
row_count
)
})
test_that("sdf_collect() preserves NA_real_", {
df <- tibble::tibble(x = c(NA_real_, 3.14, 0.142857))
sdf <- sdf_copy_to(sc, df, overwrite = TRUE)
expect_equal(sdf %>% collect(), df)
})
library(ordinal)
fm <- clm(rating ~ temp, nominal=~contact, data=wine)
fm$Theta
fm$alpha.mat
fm <- clm(rating ~ temp, nominal=~contact, data=wine,
threshold="symmetric")
fm$Theta
fm$alpha.mat
fm <- clm(rating ~ temp, nominal=~contact, data=wine,
threshold="equidistant")
fm$Theta
fm$alpha.mat
fm <- clm(rating ~ contact, nominal=~temp, data=wine)
fm$alpha.mat
fm$Theta
set.seed(123)
x <- rnorm(nrow(wine), sd=1)
fm <- clm(rating ~ temp, nominal=~ x, data=wine)
fm$alpha.mat
fm$Theta
fm <- clm(rating ~ temp, nominal=~ poly(x, 2), data=wine)
fm$alpha.mat
fm$Theta
set.seed(123)
x <- rnorm(nrow(wine), sd=1)
fm <- clm(rating ~ temp, nominal=~contact + x, data=wine)
fm$alpha.mat
fm$Theta
fm <- clm(rating ~ temp, nominal=~contact + x, data=wine,
threshold="symmetric")
fm$alpha.mat
fm$Theta
with(fm, t(apply(alpha.mat, 1, function(th) tJac %*% th)))
fm <- clm(rating ~ temp, nominal=~contact:x, data=wine)
fm$alpha.mat
fm$Theta
fm <- clm(rating ~ temp, nominal=~contact+x+contact:x, data=wine)
fm$alpha.mat
fm$Theta
fm <- clm(rating ~ temp, nominal=~contact*x, data=wine)
fm$alpha.mat
fm$Theta
fm <- clm(rating ~ temp, nominal=~contact + poly(x, 2), data=wine)
fm$alpha.mat
fm$Theta
wine$Con <- as.character(wine$contact) == "yes"
fm <- clm(rating ~ temp, nominal=~Con, data=wine)
fm$Theta
fm$alpha.mat
wine$Con.num <- 1 * wine$Con
fm <- clm(rating ~ temp, nominal=~Con.num, data=wine)
fm$Theta
fm$alpha.mat
set.seed(321)
y <- rnorm(nrow(wine), sd=1)
fm1 <- clm(rating ~ temp, nominal=~y + x, data=wine)
fm1$alpha.mat
fm1$Theta
fm1 <- clm(rating ~ temp, nominal=~y + contact + x, data=wine)
fm1$alpha.mat
fm1$Theta
fm1 <- clm(rating ~ temp, nominal=~contact + x + contact:x + y,
data=wine)
summary(fm1)
fm1$Theta
fm1$alpha.mat
fm1 <- clm(rating ~ temp, nominal=~contact*x + y, data=wine)
fm1$Theta
fm1$alpha.mat
t(fm1$alpha.mat)
fm1
data(soup, package="ordinal")
fm2 <- clm(SURENESS ~ 1, nominal=~PRODID + DAY, data=soup)
fm2$Theta
fm2$alpha.mat
prodid <- factor(soup$PRODID, ordered=TRUE)
fm2 <- clm(SURENESS ~ 1, nominal=~prodid + DAY, data=soup)
fm2$alpha.mat
fm2$Theta
fm2 <- clm(SURENESS ~ 1, nominal=~prodid, data=soup)
fm2$alpha.mat
fm2$Theta
soup2 <- soup
levels(soup2$DAY)
levels(soup2$GENDER)
xx <- with(soup2, DAY == "2" & GENDER == "Female")
fm8 <- clm(SURENESS ~ PRODID, nominal= ~ DAY + GENDER, data=soup2, subset=!xx)
fm8$alpha.mat
fm8$Theta
fm9 <- clm(SURENESS ~ PRODID, nominal= ~ DAY * GENDER, data=soup2, subset=!xx)
fm9$alpha.mat
fm9$Theta
stopEqual <- function(x, y, ca=FALSE)
stopifnot(isTRUE(all.equal(x, y, check.attributes=ca)))
stopEqual(fm8$alpha.mat, fm9$alpha.mat[1:3, ])
stopEqual(fm8$Theta, fm9$Theta)
stopEqual(logLik(fm8), logLik(fm9))
set.seed(12345)
wts <- runif(nrow(soup))
fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY, data=soup, weights=wts)
fm2$Theta
fm2 <- try(clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY + offset(wts),
data=soup), silent=TRUE)
stopifnot(inherits(fm2, "try-error"))
fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY, data=soup)
fm2$Theta
fm2
fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE * DAY, data=soup)
fm2$Theta
fm2
fm2$alpha.mat
fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE * DAY, data=soup,
threshold="symmetric")
fm2$Theta
fm2$alpha.mat
fm1 <- clm(rating ~ temp, nominal=~contact-1, data=wine)
fm2 <- clm(rating ~ temp, nominal=~contact, data=wine)
stopifnot(isTRUE(all.equal(fm1$Theta, fm2$Theta)))
stopifnot(isTRUE(all.equal(fm1$logLik, fm2$logLik)))
wine2 <- wine
wine2$contact <- relevel(wine2$contact, "yes")
fm3 <- clm(rating ~ temp, nominal=~contact, data=wine2)
stopifnot(isTRUE(all.equal(coef(fm1, na.rm=TRUE), coef(fm3))))
fmodel2pp <-
function(zeta, y, apar, bpar, prior = dnorm, ...) {
m <- length(bpar)
return(fmodel4pp(zeta, y, apar = apar, bpar = bpar,
cpar = rep(0,m), dpar = rep(1,m), prior = prior, ...))
}
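# Hedged usage sketch (illustrative only): fmodel4pp() is assumed to be
# defined elsewhere in the package; zeta is the latent trait, y a vector of
# 0/1 item responses, and apar/bpar the discrimination/difficulty parameters.
# fmodel2pp(zeta = 0, y = c(1, 0, 1), apar = c(1, 1.2, 0.8),
#           bpar = c(-0.5, 0, 0.5))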
mng.tree.removal <- function(tr,
fl,
common.vars,
this.period,
next.period,...){
per.vol.harv <- list(...)$per.vol.harv
  final.felling <- substr(fl$management[, next.period], 1, 1) == '1'
  thinning <- substr(fl$management[, next.period], 2, 2) == '1'
i.harvestable <- tr$data$plot.id %in% fl$plot.id [final.felling]
vols <- common.vars$vol.wo.tr.m3.ha[i.harvestable]
uid <- tr$data$plot.id[i.harvestable]
sum.vols <- aggregate(vols ~ uid, FUN = sum)
vols <- data.frame(
treid = tr$data$treeid[i.harvestable],
vols = vols,
sum.vols = sum.vols$vols[match(uid, sum.vols$uid)])
vols$prob <- with(vols, vols/sum(vols))
vols$prob.vols <- with(vols, prob * vols)
ff <- by (data = vols, INDICES = uid, FUN = function(x) {
n <- sum(x$vols)/ sum(x$prob * x$vols)
data.frame(harvested = ifelse( x$prob >= runif(nrow(x), 0, 1/n) , TRUE, FALSE),
treeid = x$treid)
  })
  ff <- do.call(rbind, ff)  # combine per-plot results, mirroring the thinning branch below
  tr.removed <- rep(FALSE, length(tr$data$treeid))
  tr.removed[match(ff$treeid, tr$data$treeid)] <- ff$harvested
i.harvestable <- tr$data$plot.id %in% fl$plot.id [thinning]
vols <- common.vars$vol.wo.tr.m3.ha[i.harvestable]
uid <- tr$data$plot.id[i.harvestable]
sum.vols <- aggregate(vols ~ uid, FUN = sum)
vols <- data.frame(
treid = tr$data$treeid[i.harvestable],
vols = vols,
sum.vols = sum.vols$vols[match(uid, sum.vols$uid)])
vols$prob <- with(vols, vols/sum(vols))
vols$prob.vols <- with(vols, prob * vols)
ff <- by (data = vols, INDICES = uid, FUN = function(x) {
n <- sum(x$vols)/ sum(x$prob * x$vols)
data.frame(harvested = ifelse( x$prob >= runif(nrow(x), 0, 1/n) , TRUE, FALSE),
treeid = x$treid)
}
)
ff <- do.call(rbind,ff)
tr.removed[match(ff$treeid, tr$data$treeid )] <- ff$harvested
return(tr.removed)
}
tidypredict_fit.lm <- function(model) {
parsedmodel <- parse_model(model)
build_fit_formula(parsedmodel)
}
tidypredict_fit.glm <- function(model) {
parsedmodel <- parse_model(model)
build_fit_formula(parsedmodel)
}
build_fit_formula <- function(parsedmodel) {
parsed_f <- map(
parsedmodel$terms,
~ {
if (.x$is_intercept == 0) {
cols <- map(
.x$fields,
~ {
f <- NULL
if (.x$type == "ordinary") {
f <- expr(!!sym(.x$col))
}
if (.x$type == "conditional") {
f <- expr(ifelse(!!sym(.x$col) == !!.x$val, 1, 0))
}
if (.x$type == "operation") {
if (.x$op == "morethan") {
f <- expr(ifelse(!!sym(.x$col) > !!.x$val, !!sym(.x$col) - !!.x$val, 0))
}
if (.x$op == "lessthan") {
f <- expr(ifelse(!!sym(.x$col) < !!.x$val, !!.x$val - !!sym(.x$col), 0))
}
}
f
}
)
cols <- reduce(cols, function(l, r) expr(!!l * !!r))
expr((!!cols * !!.x$coef))
} else {
expr(!!.x$coef)
}
}
)
f <- reduce(parsed_f, function(l, r) expr(!!l + !!r))
if (!is.null(parsedmodel$general$offset)) {
f <- expr(!!f + !!parsedmodel$general$offset)
}
if (parsedmodel$general$is_glm == 1) {
link <- parsedmodel$general$link
assigned <- 0
if (link == "identity") {
assigned <- 1
}
if (link == "logit") {
assigned <- 1
f <- expr(1 - 1 / (1 + exp(!!f)))
}
if (link == "log") {
assigned <- 1
f <- expr(exp(!!f))
}
if (assigned == 0) {
stop("Combination of family and link are not supported")
}
}
f
}
parse_model.lm <- function(model) parse_model_lm(model)
parse_model.glm <- function(model) parse_model_lm(model)
parse_model_lm <- function(model) {
acceptable_formula(model)
coefs <- as.numeric(model$coefficients)
labels <- names(model$coefficients)
vars <- names(attr(model$terms, "dataClasses"))
qr <- NULL
if (!is.null(model$qr)) qr <- qr.solve(qr.R(model$qr))
pm <- list()
pm$general$model <- class(model)[[1]]
pm$general$version <- 2
pm$general$type <- "regression"
pm$general$residual <- model$df.residual
if (length(summary(model)$sigma^2) > 0) {
pm$general$sigma2 <- summary(model)$sigma^2
}
if (!is.null(model$family$family)) {
pm$general$family <- model$family$family
}
if (!is.null(model$family$link)) {
pm$general$link <- model$family$link
}
if (!is.null(model$call$offset)) {
pm$general$offset <- model$call$offset
}
pm$general$is_glm <- 0
if (class(model)[[1]] == "glm") {
pm$general$is_glm <- 1
}
terms <- map(
seq_len(length(labels)),
~ {
list(
label = labels[.x],
coef = coefs[.x],
is_intercept = ifelse(labels[.x] == "(Intercept)", 1, 0),
fields = parse_label_lm(labels[.x], vars),
qr = parse_qr_lm(labels[.x], qr)
)
}
)
pm$terms <- terms
as_parsed_model(pm)
}
parse_label_lm <- function(label, vars) {
all_items <- NULL
items <- strsplit(label, "\\:")[[1]]
for (i in seq_len(length(items))) {
item <- list(
type = "ordinary",
col = items[i]
)
cat_match <- map_lgl(vars, ~ .x == substr(items[i], 1, nchar(.x)))
if (any(cat_match) && any(vars[cat_match] != items[i]) && !(items[i] %in% vars)) {
cat_match_vars <- vars[cat_match]
sole_cat_match <- cat_match_vars[rank(-nchar(cat_match_vars))][[1]]
item <- list(
type = "conditional",
col = sole_cat_match,
val = substr(items[i], nchar(sole_cat_match) + 1, nchar(items[i])),
op = "equal"
)
}
all_items <- c(all_items, list(item))
}
all_items
}
parse_qr_lm <- function(label, qr) {
  qrs <- qr[label == rownames(qr)]
  set_names(
    as.list(qrs),
    paste0("qr_", seq_along(qrs))
  )
}
tidypredict_interval.lm <- function(model, interval = 0.95) {
parsedmodel <- parse_model(model)
te_interval_lm(parsedmodel, interval)
}
tidypredict_interval.glm <- function(model, interval = 0.95) {
parsedmodel <- parse_model(model)
te_interval_glm(parsedmodel, interval)
}
get_qr_lm <- function(qr_name, parsedmodel) {
q <- map(
parsedmodel$terms,
~ {
cqr <- .x$qr[qr_name][[1]]
if (.x$is_intercept == 0) {
cols <- map(
.x$fields,
~ {
f <- NULL
if (.x$type == "ordinary") {
f <- expr(!!sym(.x$col))
}
if (.x$type == "conditional") {
f <- expr(ifelse(!!sym(.x$col) == !!.x$val, 1, 0))
}
if (.x$type == "operation") {
if (.x$op == "morethan") {
f <- expr(ifelse(!!sym(.x$col) > !!.x$val, !!sym(.x$col) - !!.x$val, 0))
}
if (.x$op == "lessthan") {
f <- expr(ifelse(!!sym(.x$col) < !!.x$val, !!.x$val - !!sym(.x$col), 0))
}
}
f
}
)
cols <- reduce(cols, function(l, r) expr(!!l * !!r))
if (cqr != 0) expr(!!cols * !!cqr)
} else {
expr(!!cqr)
}
}
)
f <- reduce(
q[!map_lgl(q, is.null)],
function(x, y) expr(!!x + !!y)
)
expr((!!f) * (!!f) * !!parsedmodel$general$sigma2)
}
te_interval_lm <- function(parsedmodel, interval = 0.95) {
qr_names <- names(parsedmodel$terms[[1]]$qr)
qrs <- map(
qr_names,
~ get_qr_lm(.x, parsedmodel)
)
qrs <- reduce(qrs, function(x, y) expr(!!x + (!!y)))
  tfrac <- qt(1 - (1 - interval) / 2, parsedmodel$general$residual)
expr(!!tfrac * sqrt((!!qrs) + (!!parsedmodel$general$sigma2)))
}
te_interval_glm <- function(parsedmodel, interval = 0.95) {
intervals <- te_interval_lm(parsedmodel, interval)
family <- parsedmodel$general$family
link <- parsedmodel$general$link
assigned <- 0
if (family == "gaussian" && link == "identity") {
assigned <- 1
}
if (assigned == 0) {
stop("Combination of family and link are not supported for prediction intervals")
}
intervals
}
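# Hedged usage example (illustrative only): assumes the S3 generics
# tidypredict_fit() and tidypredict_interval() dispatch to the methods above.
# m <- lm(mpg ~ wt + am, data = mtcars)
# tidypredict_fit(m)       # prediction formula as an R expression
# tidypredict_interval(m, interval = 0.9)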
context("waitForJobs")
test_that("waitForJobs", {
f = function(x) { if (x == 5) stop("x == 5") else x }
reg = makeTestRegistry()
batchMap(reg, f, 1:5)
submitJobs(reg)
waitForJobs(reg)
expect_equal(waitForJobs(reg, 1:5, stop.on.error = FALSE), FALSE)
expect_warning(waitForJobs(reg, 1:5, stop.on.error = TRUE))
expect_equal(suppressWarnings(waitForJobs(reg, 1:5, stop.on.error = TRUE)), FALSE)
expect_equal(waitForJobs(reg, 1:4, stop.on.error=FALSE), TRUE)
expect_equal(waitForJobs(reg, 1:4, stop.on.error=TRUE), TRUE)
})
install.packages("MCDA")
install.packages("cplexAPI",configure.args = "--with-cplex-dir=/opt/ibm/ILOG/CPLEX_Studio128/cplex")
library(MCDA)
criteria <- c("c1","c2","c3","c4","c5")
criteriaMinMax <- c("max","max","max","max","max")
names(criteriaMinMax) <- criteria
categories <- c("Good","Neutral","Bad")
categoriesRanks <- c(1,2,3)
names(categoriesRanks) <- categories
f <- system.file("datasets", "dataFLOSS.csv", package = "MCDA")
pT <- read.csv(f, header = TRUE, sep = ",", quote = "\"", dec = ".")
pT <- data.matrix(pT)
rownames(pT) <- 1:dim(pT)[1]
colnames(pT) <- criteria
assig <-c("Bad", "Neutral", "Bad", "Bad", "Neutral","Good",
"Neutral", "Good", "Bad", "Bad", "Good", "Neutral",
"Bad", "Bad", "Bad", "Bad", "Bad", "Neutral",
"Bad", "Neutral", "Bad", "Bad", "Neutral", "Bad",
"Bad")
names(assig) <- 1:25
m1 <- MRSortInferenceExact(pT, assig, categoriesRanks,
criteriaMinMax, veto = FALSE,
readableWeights = TRUE,
readableProfiles = TRUE,
solver = "cplex",
alternativesIDs = names(assig))
print(m1)
incomp <- MRSortIdentifyIncompatibleAssignments(pT, assig,
categoriesRanks,
criteriaMinMax,
veto = FALSE,
solver = "cplex",
alternativesIDs = names(assig))
for(incAlt in incomp$incompatibleSets)
{
m1pr <- MRSortInferenceExact(pT, assig, categoriesRanks,
criteriaMinMax, veto = FALSE,
readableWeights = TRUE,
readableProfiles = TRUE,
alternativesIDs = names(assig)[!names(assig) %in% incAlt],
solver = "cplex")
print(MRSort(pT, m1pr$profilesPerformances, categoriesRanks, m1pr$weights, criteriaMinMax,
m1pr$lambda, alternativesIDs = incAlt))
}
m1 <- MRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
veto = TRUE, readableWeights = TRUE,
readableProfiles = TRUE, solver = "cplex",
alternativesIDs = names(assig))
print(m1)
criteriaLBs=c(-2,-2,-2,-2,-2)
names(criteriaLBs) <- criteria
criteriaUBs=c(2,2,2,2,2)
names(criteriaUBs) <- criteria
plotMRSortSortingProblem(pT, m1$profilesPerformances, categoriesRanks, assig,
criteriaMinMax, criteriaUBs, criteriaLBs,
NULL, m1$vetoPerformances, 'V',
m1$weights, m1$lambda,
alternativesIDs = "no alternatives")
assig <-c(assig, "Bad", "Bad", "Neutral", "Bad", "Bad",
"Bad", "Bad", "Neutral", "Good", "Neutral")
names(assig) <- 1:35
m2 <- MRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
veto = TRUE, readableWeights = TRUE,
readableProfiles = TRUE, solver = "cplex",
alternativesIDs = names(assig))
print(m2)
incomp <- MRSortIdentifyIncompatibleAssignments(pT, assig, categoriesRanks,
criteriaMinMax, veto = TRUE,
solver = "cplex",
alternativesIDs = names(assig))
for(incAlt in incomp$incompatibleSets)
{
m2pr <- MRSortInferenceExact(pT, assig, categoriesRanks,
criteriaMinMax, veto = TRUE,
readableWeights = TRUE,
readableProfiles = TRUE,
alternativesIDs = names(assig)[!names(assig) %in% incAlt],
solver = "cplex")
print(MRSort(pT, m2pr$profilesPerformances, categoriesRanks, m2pr$weights, criteriaMinMax,
m2pr$lambda, m2pr$vetoPerformances, alternativesIDs = incAlt))
}
assig[2] <- "Bad"
assig[35] <- "Bad"
m2 <- MRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
veto = TRUE, readableWeights = TRUE,
readableProfiles = TRUE, solver = "cplex",
alternativesIDs = names(assig))
plotMRSortSortingProblem(pT, m2$profilesPerformances, categoriesRanks, assig,
criteriaMinMax, criteriaUBs, criteriaLBs,
NULL, m2$vetoPerformances, 'V',
m2$weights, m2$lambda,
alternativesIDs = "no alternatives")
assig <-c(assig, "Bad", "Neutral", "Bad", "Neutral",
"Good", "Bad", "Bad", "Good", "Bad", "Bad")
names(assig) <- 1:45
m3 <- MRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
veto = TRUE, readableWeights = TRUE,
readableProfiles = TRUE, solver = "cplex",
alternativesIDs = names(assig))
print(m3)
incomp <- MRSortIdentifyIncompatibleAssignments(pT, assig, categoriesRanks,
criteriaMinMax, veto = TRUE,
solver = "cplex",
alternativesIDs = names(assig))
for(incAlt in incomp$incompatibleSets)
{
m3pr <- MRSortInferenceExact(pT, assig, categoriesRanks,
criteriaMinMax, veto = TRUE,
readableWeights = TRUE,
readableProfiles = TRUE,
alternativesIDs = names(assig)[!names(assig) %in% incAlt],
solver = "cplex")
print(MRSort(pT, m3pr$profilesPerformances, categoriesRanks, m3pr$weights, criteriaMinMax,
m3pr$lambda, m3pr$vetoPerformances, alternativesIDs = incAlt))
}
m3 <- LPDMRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
majorityRule = "D", readableWeights = TRUE,
readableProfiles = TRUE, minmaxLPD = TRUE,
solver = "cplex", alternativesIDs = names(assig))
print(m3)
m3 <- LPDMRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
majorityRule = "dV", readableWeights = TRUE,
readableProfiles = TRUE, minmaxLPD = TRUE,
solver = "cplex", alternativesIDs = names(assig))
print(m3)
m3 <- LPDMRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
majorityRule = "Dv", readableWeights = TRUE,
readableProfiles = TRUE, minmaxLPD = TRUE,
solver = "cplex", alternativesIDs = names(assig))
print(m3)
m3 <- LPDMRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
majorityRule = "v", readableWeights = TRUE,
readableProfiles = TRUE, minmaxLPD = TRUE,
solver = "cplex", alternativesIDs = names(assig))
print(m3)
m3 <- LPDMRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
majorityRule = "d", readableWeights = TRUE,
readableProfiles = TRUE, minmaxLPD = TRUE,
solver = "cplex", alternativesIDs = names(assig))
print(m3)
m3 <- LPDMRSortInferenceExact(pT, assig, categoriesRanks, criteriaMinMax,
majorityRule = "dv", readableWeights = TRUE,
readableProfiles = TRUE, minmaxLPD = TRUE,
solver = "cplex", alternativesIDs = names(assig))
print(m3)
plotMRSortSortingProblem(pT, m3$profilesPerformances, categoriesRanks, assig,
criteriaMinMax, criteriaUBs, criteriaLBs,
m3$dictatorPerformances, m3$vetoPerformances, 'dv',
m3$weights, m3$lambda,
alternativesIDs = "no alternatives") |
test_that("testing memoryUse", {
skip_on_os("windows")
if (!interactive())
skip("This memoryUse is still very experimental")
if (!requireNamespace("future", quietly = TRUE)) {
skip("future package required")
}
testInitOut <- testInit(c("raster", "future.callr", "future"),
opts = list("spades.moduleCodeChecks" = FALSE,
"spades.memoryUseInterval" = 0.2,
"spades.futurePlan" = "callr"))
oldPlan <- future::plan()
on.exit({
testOnExit(testInitOut)
if (!identical(future::plan(), oldPlan)) {
future::plan(oldPlan)
}
}, add = TRUE)
system.time(future::plan(future.callr::callr))
times <- list(start = 0.0, end = if (isWindows()) 60 else 30, timeunit = "year")
params <- list(
.globals = list(burnStats = "npixelsburned", stackName = "landscape"),
randomLandscapes = list(.plotInitialTime = NA, .plotInterval = NA),
caribouMovement = list(.plotInitialTime = NA, .plotInterval = NA, torus = TRUE),
fireSpread = list(.plotInitialTime = NA, .plotInterval = NA)
)
modules <- list("randomLandscapes", "caribouMovement", "fireSpread")
paths <- list(modulePath = system.file("sampleModules", package = "SpaDES.core"))
mySim2 <- simInit(times = times, params = params,
modules = modules, objects = list(), paths = paths)
mySim3 <- spades(mySim2, debug = TRUE)
suppressWarnings({
memUse <- memoryUse(mySim3)
})
expect_true(is(memUse, "data.table"))
expect_true(is.numeric(memUse$maxMemory))
expect_true(sum(!is.na(memUse$maxMemory)) > 0)
suppressWarnings({
memUse <- memoryUse(mySim3, max = FALSE)
})
}) |
NULL
print.FitSecondaryGrowth <- function(x, ...) {
cat("Secondary model estimated from data\n\n")
env <- names(x$secondary_model)
cat(paste("Environmental factors included:", paste(env, collapse = ", "), "\n\n"))
cat(paste("mu_opt:", x$mu_opt_fit, "\n\n"))
for (i in 1:length(x$secondary_model)) {
cat(paste("Secondary model for ", names(x$secondary_model)[i], ":\n", sep = ""))
print(unlist(x$secondary_model[[i]]))
cat("\n")
}
}
plot.FitSecondaryGrowth <- function(x, y=NULL, ..., which = 1, add_trend = FALSE) {
obs_data <- x$data
obs_data$res <- residuals(x)
obs_data <- if (x$transformation == "sq") {
obs_data %>%
select(-"log_mu", -"mu") %>%
rename(observed = "sq_mu")
} else if (x$transformation == "none") {
obs_data %>%
select(-"log_mu", -"sq_mu") %>%
rename(observed = "mu")
  } else if (x$transformation == "log") {
    obs_data %>%
      select(-"sq_mu", -"mu") %>%
      rename(observed = "log_mu")
  }
if (which == 1) {
label_end <- switch(x$transformation,
sq = "square root of the growth rate",
log = "logarithm of the growth rate",
none = "growth rate"
)
p1 <- obs_data %>%
mutate(predicted = .data$observed + .data$res) %>%
ggplot(aes(x = .data$observed, y = .data$predicted)) +
geom_point() +
geom_abline(slope = 1, intercept = 0, linetype = 2) +
geom_smooth(method = "lm", se = FALSE, colour = "grey") +
xlab(paste("Observed", label_end)) +
ylab(paste("Fitted", label_end)) +
theme_cowplot()
} else if(which == 2) {
ylabel <- switch(x$transformation,
sq = "Square root of the growth rate",
log = "Logarithm of the growth rate",
none = "Growth rate"
)
p1 <- obs_data %>%
mutate(predicted = .data$observed + .data$res) %>%
select(-"res") %>%
pivot_longer(-c("observed", "predicted"),
names_to = "env_factor", values_to = "value") %>%
pivot_longer(-c("env_factor", "value"),
names_to = "point_type", values_to = "growth") %>%
ggplot(aes(x = .data$value)) +
geom_point(aes(y = .data$growth, colour = .data$point_type)) +
facet_wrap("env_factor", scales = "free_x") +
ylab(ylabel) + xlab("") +
theme_bw() +
theme(legend.title = element_blank())
if (isTRUE(add_trend)) {
p1 <- p1 + geom_smooth(aes(y = .data$growth, colour = .data$point_type))
}
}
p1
}
summary.FitSecondaryGrowth <- function(object, ...) {
summary(object$fit_results)
}
residuals.FitSecondaryGrowth <- function(object, ...) {
residuals(object$fit_results)
}
coef.FitSecondaryGrowth <- function(object, ...) {
coef(object$fit_results)
}
vcov.FitSecondaryGrowth <- function(object, ...) {
covar <- try(solve(0.5*object$fit_results$hessian), silent = TRUE)
if (!is.numeric(covar)) {
warning("Cannot estimate covariance; system is singular")
param <- object$par
p <- length(param)
covar <- matrix(data = NA, nrow = p, ncol = p)
}
covar
}
deviance.FitSecondaryGrowth <- function(object, ...) {
deviance(object$fit_results)
}
fitted.FitSecondaryGrowth <- function(object, ...) {
observed <- switch(object$transformation,
sq = object$data$sq_mu,
log = object$data$log_mu,
none = object$data$mu
)
observed + residuals(object)
}
predict.FitSecondaryGrowth <- function(object, newdata=NULL, ...) {
if (is.null(newdata)) {
newdata <- object$data
}
sec_model_names <- object$secondary_model %>%
map_chr(~ .$model)
gammas <- calculate_gammas_secondary(sec_model_names,
newdata,
object$secondary_model)
gammas*object$mu_opt_fit
} |
library(devtools)
library(magrittr)
setwd("~/gh/radiant.data")
document(roclets = c('rd', 'collate', 'namespace'))
system("git add --all .")
system("git commit -m 'Update [ci skip]'")
system("git push")
devtools::install_github("radiant-rstats/radiant")
devtools::install_github("radiant-rstats/radiant.data")
devtools::install_github("radiant-rstats/radiant.design")
devtools::install_github("radiant-rstats/radiant.basics")
devtools::install_github("radiant-rstats/radiant.model")
devtools::install_github("radiant-rstats/radiant.multivariate")
devtools::install_github("rstudio/shinyapps")
library(shinyapps)
fpath <- "~/gh/radiant/inst/app"
setwd(fpath)
shinyapps::deployApp(account = "vnijs", launch.browser = FALSE, lint = FALSE)
shinyapps::deployApp(account = "vnijs")
shinyapps::showLogs(entries=1000) |
utils::globalVariables(c('time',
'value',
'level_data',
'..cols_breaks',
'.',
'weekday',
'Date',
'level',
'heart_rate',
'hr_variability',
'level_start',
'percent',
'dateTime_end',
'level_end',
'dateTime_start',
'GROUP',
'..cols_accum',
'.SD'))
inner_elapsed_time = function(secs, estimated = FALSE) {
tmp_hours = as.integer((secs / 60) / 60)
tmp_hours_minutes = (secs / 60) %% 60
tmp_seconds = secs %% 60
est_verb = ifelse(estimated, "Estimated time: ", "Elapsed time: ")
res_out = paste(c(est_verb, tmp_hours, " hours and ", as.integer(tmp_hours_minutes), " minutes and ", as.integer(tmp_seconds), " seconds."), collapse = "")
return(res_out)
}
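## Usage sketch (not run): format 7384 elapsed seconds
## inner_elapsed_time(7384)
## #> "Elapsed time: 2 hours and 3 minutes and 4 seconds."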
compute_elapsed_time = function(time_start) {
t_end = proc.time()
time_total = as.numeric((t_end - time_start)['elapsed'])
time_ = inner_elapsed_time(time_total)
cat(time_, "\n")
}
split_year_in_weeks = function(year) {
weeks_data = seq(from = as.Date(glue::glue('{year}-01-01')), to = as.Date(glue::glue('{year}-12-31')), by = '1 week')
return(weeks_data)
}
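## Usage sketch (not run): weekly break-points covering 2021
## split_year_in_weeks(2021)   # 53 Dates from '2021-01-01' to '2021-12-31'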
base_url_request = function(url,
oauth_token,
show_nchar_case_error = 135) {
auth_code = paste("Bearer", oauth_token)
query_response = httr::GET(url = url, httr::add_headers(Authorization = auth_code))
if (query_response$status_code != 200) {
content_list_obj = httr::content(query_response, "parsed")
stop(glue::glue("The request gave an error code of '{query_response$status_code}' with the following first '{show_nchar_case_error}' characters as error message: '{substr(content_list_obj, 1, show_nchar_case_error)}'"), call. = F)
}
else {
content_list_obj = httr::content(query_response, as = "text")
content_list_obj = jsonlite::fromJSON(content_list_obj, simplifyVector = TRUE)
}
return(content_list_obj)
}
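## Usage sketch (not run; 'USER_ID' and 'TOKEN' are hypothetical Fitbit credentials):
## url <- glue::glue('https://api.fitbit.com/1/user/{USER_ID}/activities/heart/date/2021-03-09/2021-03-09/1min.json')
## res <- base_url_request(url = url, oauth_token = TOKEN)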
ggplot_each_date = function(date_intraday, date) {
date_intraday$time = as.POSIXct(date_intraday$time, tz = 'UTC')
MAX_heart_rate = max(date_intraday$value)
idx_max = which(date_intraday$value == MAX_heart_rate)
max_time = date_intraday$time[idx_max]
plt = ggplot2::ggplot(date_intraday, ggplot2::aes(x = time, y = value, group = 1)) +
ggplot2::geom_line(color = "green") +
ggplot2::ggtitle(glue::glue("Heart-Rate value at {date} ( '{as.character(lubridate::wday(date, label = TRUE))}' )")) +
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, face = "bold")) +
ggplot2::geom_hline(yintercept = MAX_heart_rate, linetype = "dotted", colour = 'blue') +
ggplot2::scale_y_continuous(breaks = sort(c(seq(min(date_intraday$value), MAX_heart_rate, length.out = 5), MAX_heart_rate))) +
ggplot2::scale_x_datetime(date_breaks = "3 hours", date_labels = "%H:%M") +
ggplot2::geom_vline(xintercept = max_time, linetype = "dotted", colour = 'blue')
return(plt)
}
heart_rate_time_series = function(user_id,
token,
date_start,
date_end,
time_start = '00:00',
time_end = '23:59',
detail_level = '1min',
ggplot_intraday = FALSE,
ggplot_ncol = NULL,
ggplot_nrow = NULL,
verbose = FALSE,
show_nchar_case_error = 135) {
if (verbose) t_start = proc.time()
seq_dates = as.character(seq(from = as.Date(date_start), to = as.Date(date_end), by = 1))
heart_rate = heart_rate_intraday = list()
for (idx in 1:length(seq_dates)) {
if (verbose) cat(glue::glue("Day: '{seq_dates[idx]}' will be processed ..."), '\n')
URL = glue::glue('https://api.fitbit.com/1/user/{user_id}/activities/heart/date/{seq_dates[idx]}/{seq_dates[idx]}/{detail_level}/time/{time_start}/{time_end}.json')
process_data = base_url_request(url = URL,
oauth_token = token,
show_nchar_case_error = show_nchar_case_error)
heart_rate[[idx]] = process_data$`activities-heart`
tmp_intraday = process_data$`activities-heart-intraday`$dataset
if (!inherits(tmp_intraday, 'list') && length(tmp_intraday) > 0) {
tmp_intraday$time = as.character(glue::glue("{seq_dates[idx]} {tmp_intraday$time}"))
heart_rate_intraday[[seq_dates[idx]]] = data.table::data.table(tmp_intraday)
}
else {
if (verbose) message(glue::glue("Data for Day: '{seq_dates[idx]}' does not exist!"))
}
}
if (length(heart_rate) > 0) {
heart_rate = do.call(rbind, heart_rate)
}
all_dat = list(heart_rate = heart_rate, heart_rate_intraday = heart_rate_intraday)
if (ggplot_intraday) {
plt = NULL
if (length(heart_rate_intraday) > 0) {
NAMS_dates = names(heart_rate_intraday)
lst_plt = list()
for (idx_plt in 1:length(heart_rate_intraday)) {
dat_iter = heart_rate_intraday[[NAMS_dates[idx_plt]]]
plt_iter = ggplot_each_date(date_intraday = dat_iter, date = NAMS_dates[idx_plt])
lst_plt[[idx_plt]] = plt_iter
}
plt_all = patchwork::wrap_plots(lst_plt,
ncol = ggplot_ncol,
nrow = ggplot_nrow)
all_dat[['plt']] = plt_all
}
}
all_dat[['detail_level']] = detail_level
if (verbose) compute_elapsed_time(t_start)
return(all_dat)
}
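## Usage sketch (not run; 'USER_ID' and 'TOKEN' are hypothetical credentials):
## hrt <- heart_rate_time_series(user_id = USER_ID, token = TOKEN,
##                               date_start = '2021-03-09', date_end = '2021-03-16',
##                               detail_level = '1min', ggplot_intraday = TRUE)
## hrt$heart_rate_intraday     # named list of per-day data.tables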
heart_rate_heatmap = function(heart_rate_intraday_data,
angle_x_axis = 0) {
data_heart = lapply(1:length(heart_rate_intraday_data), function(x) {
iter_date = names(heart_rate_intraday_data)[x]
wday = as.character(lubridate::wday(iter_date, label = TRUE))
min_med_max = c('min', 'median', 'max')
iter = data.table::setDT(list(level = factor(min_med_max, levels = rev(sort(min_med_max))),
heart_rate = c(min(heart_rate_intraday_data[[x]]$value),
stats::median(heart_rate_intraday_data[[x]]$value),
max(heart_rate_intraday_data[[x]]$value)),
Date = as.Date(rep(iter_date, 3)),
weekday = rep(wday, 3)))
iter
})
data_heart = data.table::rbindlist(data_heart)
cols_breaks = c('Date', 'weekday')
dat_lev_breaks = data_heart[, ..cols_breaks]
dat_lev_breaks = dat_lev_breaks[, .(weekday = unique(weekday)), by = 'Date']
dat_lev_breaks = dat_lev_breaks[order(dat_lev_breaks$Date, decreasing = F), ]
plt = ggplot2::ggplot(data = data_heart, ggplot2::aes(x = Date, y = level)) +
ggplot2::geom_tile(ggplot2::aes(fill = heart_rate)) +
ggplot2::coord_equal(ratio = 1) +
viridis::scale_fill_viridis(option = "magma", limits = c(40, 220), breaks = round(seq(40, 220, by = 20))) +
ggthemes::theme_tufte(base_family = "Helvetica") +
ggplot2::geom_text(ggplot2::aes(label = heart_rate, fontface = 2), color = "yellow", size = 4) +
ggplot2::scale_x_date(breaks = dat_lev_breaks$Date, labels = scales::date_format("%Y-%m-%d"), sec.axis = ggplot2::dup_axis(labels = dat_lev_breaks$weekday)) +
ggplot2::ylab("Level") +
ggplot2::ggtitle("Heart Rate Level Heatmap") +
ggplot2::theme(strip.placement = 'outside',
plot.title = ggplot2::element_text(size = "16", hjust = 0.5, face = "bold", colour = "blue"),
axis.title.x = ggplot2::element_text(size = 12, face = "bold", colour = "blue"),
axis.title.y = ggplot2::element_text(size = 12, face = "bold", colour = "blue"),
axis.text.x = ggplot2::element_text(size = 12, face = "bold", colour = "black", angle = angle_x_axis, vjust = 1.0, hjust = 1.0),
axis.text.y = ggplot2::element_text(size = 12, face = "bold", colour = "black"))
return(plt)
}
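## Usage sketch (not run; assumes 'hrt' from heart_rate_time_series() above):
## heart_rate_heatmap(heart_rate_intraday_data = hrt$heart_rate_intraday, angle_x_axis = 45)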
heart_rate_variability_sleep_time = function(heart_rate_data,
sleep_begin = "00H 40M 0S",
sleep_end = "08H 00M 0S",
ggplot_hr_var = TRUE,
angle_x_axis = 45) {
if (heart_rate_data$detail_level != '1min') stop("You have to run the 'heart_rate_time_series' function first with 'detail_level' set to '1min'!", call. = F)
heart_rate_intraday = heart_rate_data$heart_rate_intraday
LEN = length(heart_rate_intraday)
nams = names(heart_rate_intraday)
hr_var = rep(NA_real_, LEN)
for (i in 1:LEN) {
iter_dat = heart_rate_intraday[[i]]
iter_time = as.vector(unlist(lapply(strsplit(iter_dat$time, ' '), function(x) x[2])))
iter_time = lubridate::hms(iter_time)
idx_night = which(iter_time >= lubridate::hms(sleep_begin) & iter_time < lubridate::hms(sleep_end))
hr_var[i] = varian::rmssd(x = iter_dat$value[idx_night])
}
lst_out = list()
hr_var = data.table::setDT(list(Date = as.Date(nams), hr_variability = hr_var))
lst_out[['hr_var_data']] = hr_var
if (ggplot_hr_var) {
plt = ggplot2::ggplot(hr_var, ggplot2::aes(x = Date, y = hr_variability)) +
ggplot2::geom_line(linetype = 'dashed', size = 1, color = 'purple') +
ggplot2::coord_cartesian(ylim = c(min(hr_var$hr_variability) - 1, max(hr_var$hr_variability) + 1.0)) +
ggplot2::scale_x_date(date_breaks = "1 day") +
ggplot2::ylab('heart rate var.') +
ggplot2::xlab("Date") +
ggplot2::ggtitle("Heart Rate Variability (root mean square of successive differences) per minute") +
ggplot2::labs(color='Heart Rate Variability (during sleep)') +
ggplot2::geom_point(color = 'green', size = 3) +
ggplot2::geom_text(ggplot2::aes(label = round(hr_var$hr_variability, 3), fontface = 2), color = "maroon", size = 4, vjust = -2) +
ggplot2::theme(plot.title = ggplot2::element_text(size = "16", hjust = 0.5, face = "bold", colour = "blue"),
axis.title.x = ggplot2::element_text(size = 12, face = "bold", colour = "blue"),
axis.title.y = ggplot2::element_text(size = 12, face = "bold", colour = "blue"),
axis.text.x = ggplot2::element_text(size = 12, face = "bold", colour = "black", angle = angle_x_axis, vjust = 1, hjust=1),
axis.text.y = ggplot2::element_text(size = 12, face = "bold", colour = "black"))
lst_out[['hr_var_plot']] = plt
}
return(lst_out)
}
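## Usage sketch (not run; 'hrt' must come from heart_rate_time_series() with detail_level = '1min'):
## hrv <- heart_rate_variability_sleep_time(heart_rate_data = hrt,
##                                          sleep_begin = "00H 40M 0S",
##                                          sleep_end = "08H 00M 0S")
## hrv$hr_var_data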
sleep_single_day = function(user_id,
token,
date = '2021-03-09',
ggplot_color_palette = 'ggsci::blue_material',
show_nchar_case_error = 135,
verbose = FALSE) {
if (verbose) {
t_start = proc.time()
cat("Authentication will be performed ...\n")
}
URL = glue::glue('https://api.fitbit.com/1.2/user/{user_id}/sleep/date/{date}/{date}.json')
auth_code = paste("Bearer", token)
query_response = httr::GET(url = URL, httr::add_headers(Authorization = auth_code))
if (query_response$status_code != 200) {
content_list_obj = httr::content(query_response, "parsed")
stop(glue::glue("The request gave an error code of '{query_response$status_code}' with the following first '{show_nchar_case_error}' characters as error message: '{substr(content_list_obj, 1, show_nchar_case_error)}'"), call. = F)
}
if (verbose) cat("The sleep content will be red ...\n")
content_list_obj = httr::content(query_response, "parsed")
sleep = content_list_obj$sleep[[1]]$levels$data
sleep = lapply(sleep, unlist)
sleep = data.frame(do.call(rbind, sleep), stringsAsFactors = F)
sleep$dateTime = lubridate::ymd_hms(sleep$dateTime)
sleep$seconds = as.numeric(sleep$seconds)
if (verbose) cat("The sleep-time will be transformed ...\n")
lst_dtbl = list()
for (ROW in 1:nrow(sleep)) {
iter_row = sleep[ROW, , drop = F]
iter_row_begin = iter_row$dateTime + lubridate::seconds(iter_row$seconds)
iter_dat = data.table::setDT(list(dateTime_start = iter_row$dateTime,
level_start = iter_row$level,
dateTime_end = iter_row_begin,
level_end = iter_row$level,
seconds = lubridate::seconds(iter_row$seconds)))
lst_dtbl[[ROW]] = iter_dat
}
lst_dtbl = data.table::rbindlist(lst_dtbl)
if (verbose) cat("Groups for the sleep-time will be created ...\n")
lst_dtbl_group = list()
for (ROW in 1:(nrow(sleep)-1)) {
iter_row = sleep[ROW, , drop = F]
iter_row$dateTime_end = iter_row$dateTime + lubridate::seconds(iter_row$seconds)
iter_row$level_end = iter_row$level
COLNAMS = colnames(iter_row)
iter_row_next = sleep[ROW + 1, , drop = F]
iter_row_next$dateTime_end = iter_row_next$dateTime + lubridate::seconds(iter_row_next$seconds)
iter_row_next$level_end = iter_row_next$level
colnames(iter_row_next) = c("dateTime_end", "level", "seconds", "dateTime", "level_end")
iter_row_next = iter_row_next[, COLNAMS]
iter_row$seconds = NULL
iter_row$GROUP = as.character(ROW)
iter_row_next$seconds = NULL
iter_row_next$GROUP = as.character(ROW)
dat_iter = data.table::rbindlist(list(iter_row, iter_row_next))
colnames(dat_iter) = c('dateTime_start', 'level_start', 'dateTime_end', 'level_end', 'GROUP')
lst_dtbl_group[[ROW]] = dat_iter
}
lst_dtbl_group = data.table::rbindlist(lst_dtbl_group)
if (verbose) cat(glue::glue("The 'ggplot' for day '{date}' will be created ..."), '\n')
plt = ggplot2::ggplot(lst_dtbl, ggplot2::aes(x = dateTime_end,
y = level_end,
xend = dateTime_start,
yend = level_start,
group = level_start,
colour = level_start)) +
ggplot2::geom_segment(size = 2, ggplot2::aes(colour = level_end)) +
ggplot2::geom_line(data = lst_dtbl_group, ggplot2::aes(group = GROUP), size = 2) +
paletteer::scale_color_paletteer_d(ggplot_color_palette, direction = -1) +
ggplot2::ggtitle(glue::glue("Sleep value at {date} ( '{as.character(lubridate::wday(date, label = TRUE))}' )")) +
ggplot2::scale_x_datetime(date_breaks = "30 min", date_labels = "%H:%M") +
ggplot2::ylab(glue::glue("Sleep Level")) +
ggplot2::xlab("Time") +
ggplot2::labs(color='Sleep Level') +
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, face = "bold", colour = "dark green"),
axis.title.x = ggplot2::element_text(size = 10, face = "bold", colour = "dark green"),
axis.title.y = ggplot2::element_text(size = 10, face = "bold", colour = "dark green"),
axis.text.x = ggplot2::element_text(size = 10, face = "bold", colour = "black", angle = 30, vjust = 1.0, hjust = 1.0),
axis.text.y = ggplot2::element_text(size = 10, face = "bold", colour = "black"))
cols_accum = c('level_start', 'seconds')
level_accum = lst_dtbl[, ..cols_accum]
level_accum = level_accum[, lapply(.SD, sum), by = 'level_start']
level_accum$percent = round((level_accum$seconds / sum(level_accum$seconds)) * 100.0, 2)
level_accum$Date = as.Date(rep(date, nrow(level_accum)))
level_accum$weekday = lubridate::wday(level_accum$Date, label = TRUE, abbr = TRUE)
lst_dat = list(init_data = lst_dtbl,
grouped_data = lst_dtbl_group,
level_accum = level_accum,
plt = plt)
if (verbose) compute_elapsed_time(t_start)
return(lst_dat)
}
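## Usage sketch (not run; hypothetical credentials):
## slp <- sleep_single_day(user_id = USER_ID, token = TOKEN, date = '2021-03-09')
## slp$plt           # segment plot of the night's sleep levels
## slp$level_accum   # seconds and percentage per sleep level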
sleep_heatmap = function(level_data,
angle_x_axis = 0) {
cols_breaks = c('Date', 'weekday')
dat_lev_breaks = level_data[, ..cols_breaks]
dat_lev_breaks = dat_lev_breaks[, .(weekday = unique(weekday)), by = 'Date']
dat_lev_breaks = dat_lev_breaks[order(dat_lev_breaks$Date, decreasing = F), ]
plt = ggplot2::ggplot(data = level_data, ggplot2::aes(x = Date, y = level_start)) +
ggplot2::geom_tile(ggplot2::aes(fill = percent)) +
ggplot2::coord_equal(ratio = 1) +
viridis::scale_fill_viridis(option = "magma", limits = c(0.0, 100.0), breaks = round(seq(0.0, 100.0, by = 10))) +
ggthemes::theme_tufte(base_family = "Helvetica") +
ggplot2::geom_text(ggplot2::aes(label = glue::glue("min: {round(as.integer(level_data$seconds) / 60.0, 1)} \n ({round(as.numeric(level_data$percent), 2)}%)"), fontface = 2), color = "yellow", size = 4) +
ggplot2::scale_x_date(breaks = dat_lev_breaks$Date, labels = scales::date_format("%Y-%m-%d"), sec.axis = ggplot2::dup_axis(labels = dat_lev_breaks$weekday)) +
ggplot2::ylab("Level") +
ggplot2::ggtitle("Sleep Level Heatmap (Minutes & Percentage of sleep)") +
ggplot2::theme(strip.placement = 'outside',
plot.title = ggplot2::element_text(size = "16", hjust = 0.5, face = "bold", colour = "blue"),
axis.title.x = ggplot2::element_text(size = 12, face = "bold", colour = "blue"),
axis.title.y = ggplot2::element_text(size = 12, face = "bold", colour = "blue"),
axis.text.x = ggplot2::element_text(size = 12, face = "bold", colour = "black", angle = angle_x_axis, vjust = 1.0, hjust = 1.0),
axis.text.y = ggplot2::element_text(size = 12, face = "bold", colour = level_data$colour_y_axis))
return(plt)
}
sleep_time_series = function(user_id,
token,
date_start,
date_end,
ggplot_color_palette = 'ggsci::blue_material',
ggplot_ncol = NULL,
ggplot_nrow = NULL,
show_nchar_case_error = 135,
verbose = FALSE) {
if (verbose) t_start = proc.time()
seq_dates = as.character(seq(from = as.Date(date_start), to = as.Date(date_end), by = 1))
sleep_intraday_plt = level_accum_lst = list()
for (idx in 1:length(seq_dates)) {
if (verbose) {
cat('-----------------------------------------\n')
cat(glue::glue("Day: '{seq_dates[idx]}' will be processed ..."), '\n')
}
sleep_ggplt = sleep_single_day(user_id = user_id,
token = token,
date = seq_dates[idx],
ggplot_color_palette = ggplot_color_palette,
show_nchar_case_error = show_nchar_case_error,
verbose = FALSE)
sleep_intraday_plt[[idx]] = sleep_ggplt$plt
level_accum_lst[[idx]] = sleep_ggplt$level_accum
}
if (verbose) cat("The sleep data heatmap will be added ...\n")
level_accum_lst = data.table::rbindlist(level_accum_lst)
if (verbose) cat("Assign color to the y-axis labels ..\n")
unq_labs = sort(unique(level_accum_lst$level_start))
pal_y_axis = paletteer::paletteer_c(palette = "grDevices::Blues 3", n = length(unq_labs) + 2, direction = 1)
pal_y_axis = as.character(pal_y_axis)[3:length(pal_y_axis)]
level_accum_lst$colour_y_axis = as.factor(pal_y_axis[ match(level_accum_lst$level_start, unq_labs) ])
if (verbose) cat("Create the sleep heatmap ..\n")
heat_map = sleep_heatmap(level_data = level_accum_lst, angle_x_axis = 0)
if (verbose) cat("Wrap all plots into a single one multiplot ...\n")
plt_all = patchwork::wrap_plots(sleep_intraday_plt,
ncol = ggplot_ncol,
nrow = ggplot_nrow)
if (verbose) compute_elapsed_time(t_start)
return(list(plt_lev_segments = plt_all,
plt_lev_heatmap = heat_map,
heatmap_data = level_accum_lst))
}
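## Usage sketch (not run; hypothetical credentials):
## slp_ts <- sleep_time_series(user_id = USER_ID, token = TOKEN,
##                             date_start = '2021-03-09', date_end = '2021-03-16',
##                             ggplot_ncol = 2, ggplot_nrow = 4)
## slp_ts$plt_lev_heatmap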
extract_LOG_ID = function(user_id,
token,
after_Date = '2021-03-13',
limit = 10,
sort = 'asc',
verbose = FALSE) {
if (verbose) t_start = proc.time()
URL = glue::glue('https://api.fitbit.com/1/user/{user_id}/activities/list.json?afterDate={after_Date}&offset=0&limit={limit}&sort={sort}')
auth_code = paste("Bearer", token)
query_response = httr::GET(url = URL, httr::add_headers(Authorization = auth_code))
content_list_obj = httr::content(query_response, "parsed")
res_activities = content_list_obj$activities
if (length(res_activities) == 0) stop(glue::glue("There are no activities after (and including) Date '{after_Date}'!"), call. = F)
LOG_ID = res_activities[[1]]$logId
if (verbose) {
cat(glue::glue("The activity after (and including) Date '{after_Date}' has 'id' {LOG_ID}"), '\n')
compute_elapsed_time(t_start)
}
return(LOG_ID)
}
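## Usage sketch (not run; hypothetical credentials):
## log_id <- extract_LOG_ID(user_id = USER_ID, token = TOKEN,
##                          after_Date = '2021-03-13', limit = 10, sort = 'asc')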
GPS_TCX_data = function(log_id,
user_id,
token,
time_zone = 'Europe/Athens',
verbose = FALSE) {
if (verbose) {
t_start = proc.time()
cat("Extract the activity data ...\n")
}
URL = glue::glue('https://api.fitbit.com/1/user/{user_id}/activities/{log_id}.tcx')
auth_code = paste("Bearer", token)
query_response = httr::GET(url = URL, httr::add_headers(Authorization = auth_code))
if (verbose) cat("Use XML to traverse the data ...\n")
tcx_dat = XML::xmlParse(query_response)
tcx_lst = XML::xmlToList(tcx_dat)
if (length(tcx_lst) == 0) stop("The 'XML::xmlToList()' function returned an empty list!", call. = F)
dat_tcx = tcx_lst$Activities$Activity$Lap$Track
if (is.null(dat_tcx)) {
return(NULL)
}
dat_tcx_df = lapply(dat_tcx, unlist)
dat_tcx_df = data.table::data.table(do.call(rbind, dat_tcx_df), stringsAsFactors = F)
if (verbose) cat(glue::glue("The created data.table includes {nrow(dat_tcx_df)} rows and {ncol(dat_tcx_df)} columns"), '\n')
if (verbose) cat("The data will be formated and the columns will be renamed ...\n")
dat_tcx_df$Position.LongitudeDegrees = as.numeric(dat_tcx_df$Position.LongitudeDegrees)
dat_tcx_df$Position.LatitudeDegrees = as.numeric(dat_tcx_df$Position.LatitudeDegrees)
dat_tcx_df$AltitudeMeters = as.numeric(dat_tcx_df$AltitudeMeters)
dat_tcx_df$DistanceMeters = as.numeric(dat_tcx_df$DistanceMeters)
dat_tcx_df$HeartRateBpm.Value = as.numeric(dat_tcx_df$HeartRateBpm.Value)
colnames(dat_tcx_df) = c('Time', 'latitude', 'longitude', 'AltitudeMeters', 'DistanceMeters', 'HeartRate_Bpm')
date_time = suppressMessages(lubridate::ymd_hms(dat_tcx_df$Time, tz = time_zone))
vec_date = as.Date(date_time)
vec_time = hms::as_hms(date_time)
dat_tcx_df$Time = NULL
dat_tcx_df$Date = vec_date
dat_tcx_df$Time = vec_time
if (verbose) compute_elapsed_time(t_start)
return(dat_tcx_df)
}
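## Usage sketch (not run; 'log_id' as returned by extract_LOG_ID() above):
## gps <- GPS_TCX_data(log_id = log_id, user_id = USER_ID, token = TOKEN,
##                     time_zone = 'Europe/Athens')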
leafGL_point_coords = function(dat_gps_tcx,
color_points_column = 'AltitudeMeters',
provider = leaflet::providers$Esri.WorldImagery,
option_viewer = rstudioapi::viewer,
CRS = 4326) {
init_options <- options()
on.exit(options(init_options))
options(viewer = option_viewer)
dat_gps_tcx = sf::st_as_sf(dat_gps_tcx, coords = c("longitude", "latitude"))
dat_gps_tcx = sf::st_set_crs(dat_gps_tcx, CRS)
bbox_vec = sf::st_bbox(dat_gps_tcx)
bbox_vec = as.vector(bbox_vec)
lft = leaflet::leaflet()
lft = leaflet::addProviderTiles(map = lft, provider = provider)
heat_pal = leaflet::colorNumeric(palette = as.character(grDevices::heat.colors(n = 9, alpha = 1, rev = TRUE)), domain = dat_gps_tcx[[color_points_column]])
COLOR = heat_pal(dat_gps_tcx[[color_points_column]])
popup_info = sprintf(glue::glue("<b>Time: </b>%s<br><b> Altitude: </b>%g<br><b> Distance: </b>%g<br><b> HeartRate_Bpm: </b>%g"),
dat_gps_tcx$Time,
round(dat_gps_tcx$AltitudeMeters, 2),
round(dat_gps_tcx$DistanceMeters, 2),
dat_gps_tcx$HeartRate_Bpm)
lft = leafgl::addGlPoints(map = lft,
data = dat_gps_tcx,
opacity = 1.0,
fillColor = COLOR,
popup = popup_info)
def_lft = leaflet::fitBounds(map = lft,
lng1 = bbox_vec[1],
lat1 = bbox_vec[2],
lng2 = bbox_vec[3],
lat2 = bbox_vec[4])
def_lft = leaflet::addLegend(map = def_lft,
pal = heat_pal,
values = dat_gps_tcx[[color_points_column]],
opacity = 0.7,
position = "bottomright",
title = color_points_column)
return(def_lft)
}
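## Usage sketch (not run; 'gps' as returned by GPS_TCX_data() above):
## leafGL_point_coords(dat_gps_tcx = gps, color_points_column = 'AltitudeMeters')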
extend_AOI_buffer = function(dat_gps_tcx,
buffer_in_meters = 1000,
CRS = 4326,
verbose = FALSE) {
if (verbose) {
t_start = proc.time()
cat("Convert the data.table to an 'sf' object ...\n")
}
dat_gps_tcx = sf::st_as_sf(dat_gps_tcx, coords = c("longitude", "latitude"), crs = CRS)
dat_gps_tcx = sf::st_bbox(dat_gps_tcx)
dat_gps_tcx = sf::st_as_sfc(dat_gps_tcx)
if (verbose) cat(glue::glue("Transform the projection of the 'sf' object from {CRS} to 7801 ..."), '\n')
dat_gps_tcx = sf::st_transform(dat_gps_tcx, crs = 7801)
if (verbose) cat(glue::glue("Create a buffer of {buffer_in_meters} meters using as input the initial sf object ..."), '\n')
buffer = sf::st_buffer(dat_gps_tcx,
dist = buffer_in_meters,
endCapStyle = "SQUARE")
if (verbose) cat("Back-tranformation of the projection and computation of the bounding box ...\n")
buffer_trans = sf::st_transform(buffer, crs = CRS)
buf_bbx = sf::st_bbox(buffer_trans)
dat_buf_bbx = sf::st_as_sfc(buf_bbx, crs = CRS)
if (verbose) cat("Use the bounding box to extract the raster extent ...\n")
rst_ext = raster::extent(x = buf_bbx)
if (verbose) cat("Compute the centroid of the sf-buffer object ...\n")
buf_centr = suppressWarnings(sf::st_centroid(dat_buf_bbx))
buf_centr = sf::st_coordinates(buf_centr)
buf_centr = data.table::data.table(buf_centr)
colnames(buf_centr) = c('longitude', 'latitude')
lst_out = list(buffer_bbox = buf_bbx,
sfc_obj = dat_buf_bbx,
raster_obj_extent = rst_ext,
centroid_buffer = buf_centr)
if (verbose) compute_elapsed_time(t_start)
return(lst_out)
}
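## Usage sketch (not run; 'gps' as returned by GPS_TCX_data() above):
## buf <- extend_AOI_buffer(dat_gps_tcx = gps, buffer_in_meters = 1000)
## buf$centroid_buffer   # longitude/latitude of the buffer centroid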
crop_DEM = function(tif_or_vrt_dem_file,
sf_buffer_obj,
CRS = 4326,
digits = 6,
verbose = FALSE) {
if (!file.exists(tif_or_vrt_dem_file)) stop(glue::glue("The '{tif_or_vrt_dem_file}' file does not exist or is not a valid one!"), call. = F)
if (verbose) cat("The raster will be red ...\n")
rst_elev = raster::raster(tif_or_vrt_dem_file)
if (verbose) cat("The AOI will be extracted from the raster DEM ...\n")
extr_buf = tryCatch(exactextractr::exact_extract(x = rst_elev,
y = sf_buffer_obj,
fun = function(value, cov_frac) value,
progress = F,
include_xy = TRUE),
error = function(e) e)
crs = sp::CRS(glue::glue("+init=epsg:{CRS}"))
if (verbose) cat("A data.table will be created from the x,y,z vectors ...\n")
mt_upd = data.table::setDT(list(x = extr_buf$x,
y = extr_buf$y,
z = extr_buf$value))
mt_upd = as.matrix(mt_upd)
dimnames(mt_upd) = NULL
if (verbose) cat("A raster will be created from the x,y,z data.table ...\n")
rst_upd = raster::rasterFromXYZ(xyz = mt_upd, res = raster::res(rst_elev), crs = crs, digits = digits)
return(rst_upd)
}
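## Usage sketch (not run; 'dem.tif' is a hypothetical elevation raster covering the AOI):
## rst <- crop_DEM(tif_or_vrt_dem_file = 'dem.tif', sf_buffer_obj = buf$sfc_obj)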
gps_lat_lon_to_LINESTRING = function(dat_gps_tcx,
CRS = 4326,
verbose = FALSE,
time_split_asc_desc = NULL) {
dat_gps_tcx = dat_gps_tcx[order(dat_gps_tcx$Time, decreasing = F), ]
if (is.null(time_split_asc_desc)) {
idx_max = which.max(dat_gps_tcx$AltitudeMeters)
}
else {
idx_max = which(dat_gps_tcx$Time >= time_split_asc_desc)
idx_max = idx_max[1]
}
if (verbose) {
min_t = as.character(dat_gps_tcx$Time[which.min(dat_gps_tcx$Time)])
max_t = as.character(dat_gps_tcx$Time[which.max(dat_gps_tcx$Time)])
spl_t = as.character(dat_gps_tcx$Time[idx_max])
cat(glue::glue("The time '{spl_t}' was picked as a split point [ with minimum '{min_t}' and maximum '{max_t}' time ]"), '\n')
}
color_line = rep(NA_character_, nrow(dat_gps_tcx))
color_line[1:idx_max] = 'blue'
# guard against idx_max being the last row, where (idx_max + 1):length() would wrap
if (idx_max < length(color_line)) color_line[(idx_max + 1):length(color_line)] = 'red'
dat_gps_tcx$color_line = color_line
dat_gps_tcx = split(dat_gps_tcx, by = 'color_line')
dat_line_str_ASC = sf::st_as_sf(dat_gps_tcx$blue, coords = c("longitude", "latitude"), crs = CRS) %>% sf::st_combine() %>% sf::st_cast("LINESTRING")
dat_line_str_DESC = sf::st_as_sf(dat_gps_tcx$red, coords = c("longitude", "latitude"), crs = CRS) %>% sf::st_combine() %>% sf::st_cast("LINESTRING")
return(list(line_ASC = dat_line_str_ASC,
line_DESC = dat_line_str_DESC))
}
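## Usage sketch (not run; by default the track is split at the maximum altitude):
## lines_asc_desc <- gps_lat_lon_to_LINESTRING(dat_gps_tcx = gps)
## lines_asc_desc$line_ASC    # ascending ('blue') LINESTRING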
meshgrids_XY_LatLon = function(longitude,
latitude,
buffer_raster,
buffer_bbox,
distance_metric = "vincenty",
digits = 8) {
init_options <- options()
on.exit(options(init_options))
options(digits = digits)
DIMS = dim(buffer_raster)[1:2]
meshgrid_x_openim = utils::getFromNamespace(x = "meshgrid_x", ns = "OpenImageR")
meshgrid_y_openim = utils::getFromNamespace(x = "meshgrid_y", ns = "OpenImageR")
x = meshgrid_x_openim(rows = DIMS[1], cols = DIMS[2]) + 1
y = meshgrid_y_openim(rows = DIMS[1], cols = DIMS[2]) + 1
lon_seq = seq(from = buffer_bbox['xmin'], to = buffer_bbox['xmax'], length.out = DIMS[2])
lon_seq = lapply(1:DIMS[1], function(x) {
lon_seq
})
lon_seq = do.call(rbind, lon_seq)
lat_seq = seq(from = buffer_bbox['ymin'], to = buffer_bbox['ymax'], length.out = DIMS[1])
lat_seq = lapply(1:DIMS[2], function(x) {
lat_seq
})
lat_seq = do.call(cbind, lat_seq)
seq_mt_coords = data.table::setDT(list(x = as.vector(x),
y = as.vector(y),
longitude = as.vector(lon_seq),
latitude = as.vector(lat_seq)))
mt_input_coords = matrix(c(longitude, latitude), nrow = 1, ncol = 2)
dist_coords = suppressMessages(geodist::geodist(x = seq_mt_coords[, 3:4],
y = mt_input_coords,
paired = FALSE,
sequential = FALSE,
pad = TRUE,
measure = distance_metric))
idx_min = which.min(dist_coords[, 1])[1]
return(list(coords_row = seq_mt_coords[idx_min, ],
dist_meters = dist_coords[idx_min, 1]))
}
rayshader_3d_DEM = function(rst_buf,
rst_ext,
rst_bbx,
linestring_ASC_DESC = NULL,
elevation_sample_points = NULL,
zoom = 0.5,
windowsize = c(1600, 1000),
add_shadow_rescale_original = FALSE,
verbose = FALSE) {
elevation_aoi = rayshader::raster_to_matrix(raster = rst_buf, verbose = verbose)
rayshade_3d = rayshader::sphere_shade(heightmap = elevation_aoi, zscale = 0.95, texture = "desert", progbar = verbose)
rayshade_3d = rayshader::add_water(hillshade = rayshade_3d, watermap = rayshader::detect_water(elevation_aoi), color = "desert")
rayshade_3d = rayshader::add_shadow(hillshade = rayshade_3d, shadowmap = rayshader::ray_shade(elevation_aoi, zscale = 3, maxsearch = 65), max_darken = 0.5, rescale_original = add_shadow_rescale_original)
if (!is.null(linestring_ASC_DESC)) {
if (inherits(linestring_ASC_DESC, 'list')) {
if (!all(names(linestring_ASC_DESC) %in% c("line_ASC", "line_DESC"))) stop("The named list must include the 'line_ASC', 'line_DESC' sublists!", call. = F)
rayshade_3d = rayshader::add_overlay(hillshade = rayshade_3d, overlay = rayshader::generate_line_overlay(linestring_ASC_DESC$line_ASC,
linewidth=3,
color="blue",
extent = rst_ext,
heightmap = elevation_aoi),
alphalayer=0.8)
rayshade_3d = rayshader::add_overlay(hillshade = rayshade_3d, overlay = rayshader::generate_line_overlay(linestring_ASC_DESC$line_DESC,
linewidth=3,
color="red",
extent = rst_ext,
heightmap = elevation_aoi),
alphalayer=0.8)
}
else if (inherits(linestring_ASC_DESC, c('sfc', 'sf'))) {
rayshade_3d = rayshader::add_overlay(hillshade = rayshade_3d, overlay = rayshader::generate_line_overlay(linestring_ASC_DESC,
linewidth=3,
color="red",
extent = rst_ext,
heightmap = elevation_aoi),
alphalayer=0.8)
}
else {
stop("The 'linestring_ASC_DESC' parameter can be either a named list ('line_ASC', 'line_DESC') or an object of 'sf' or 'sfc'!", call. = F)
}
}
rayshade_3d = tryCatch(rayshader::plot_3d(heightmap = elevation_aoi,
hillshade = rayshade_3d,
zoom = zoom,
zscale = 10,
windowsize = windowsize,
water = TRUE,
waterdepth = 0,
wateralpha = 0.5,
watercolor = "dodgerblue",
waterlinecolor = "white",
waterlinealpha = 0.3,
verbose = verbose), error = function(e) e)
if (!is.null(elevation_sample_points)) {
for (ROW in 1:nrow(elevation_sample_points)) {
label = meshgrids_XY_LatLon(longitude = elevation_sample_points$longitude[ROW],
latitude = elevation_sample_points$latitude[ROW],
buffer_raster = rst_buf,
buffer_bbox = rst_bbx,
distance_metric = "vincenty",
digits = 8)
rayshader::render_label(heightmap = elevation_aoi,
x = label$coords_row$x,
y = label$coords_row$y,
z = as.integer(elevation_sample_points$AltitudeMeters[ROW]),
zscale = 15,
text = as.character(glue::glue("Elevation: {round(elevation_sample_points$AltitudeMeters[ROW], 2)}")),
textcolor = "darkred",
linecolor = "darkred",
textsize = 1.3,
linewidth = 5)
Sys.sleep(0.5)
}
}
} |
getMuModelName <-
function(mutype,ng=1) {
getMuModelInfo(mutype,ng)$name
} |
calibrate_standards_water <- function(cal_df,
ref_df,
r2_thres = 0.95) {
# initialize calibrated-mean columns as NA; filled in below when the regression passes QC
ref_df$dlta18OH2o$mean_cal <- NA_real_
ref_df$dlta2HH2o$mean_cal <- NA_real_
ref_df$dlta18OH2o$timeBgn <- convert_NEONhdf5_to_POSIXct_time(ref_df$dlta18OH2o$timeBgn)
ref_df$dlta2HH2o$timeBgn <- convert_NEONhdf5_to_POSIXct_time(ref_df$dlta2HH2o$timeBgn)
if (nrow(ref_df$dlta18OH2o) > 1) {
for (i in 1:nrow(ref_df$dlta18OH2o)) {
int <- lubridate::interval(cal_df$start, cal_df$end)
cal_id <- which(ref_df$dlta18OH2o$timeBgn[i] %within% int)
if (!is.na(ref_df$dlta18OH2o$mean[i]) &
!is.na(ref_df$dlta18OH2oRefe$mean[i]) &
ref_df$dlta18OH2o$numSamp[i] >= 30 &
abs(ref_df$dlta18OH2o$mean[i] -
ref_df$dlta18OH2oRefe$mean[i]) < 10 &
ref_df$dlta18OH2o$vari[i] < 5) {
if (!length(cal_id) == 0) {
if (!is.na(cal_df$o_r2[cal_id]) &
cal_df$o_r2[cal_id] > r2_thres) {
ref_df$dlta18OH2o$mean_cal[i] <- cal_df$o_intercept[cal_id] +
cal_df$o_slope[cal_id] * ref_df$dlta18OH2o$mean[i]
} else {
ref_df$dlta18OH2o$mean_cal[i] <- NA
}
} else {
ref_df$dlta18OH2o$mean_cal[i] <- NA
}
} else {
ref_df$dlta18OH2o$mean_cal[i] <- NA
}
}
}
if (nrow(ref_df$dlta2HH2o) > 1) {
for (i in 1:nrow(ref_df$dlta2HH2o)) {
int <- lubridate::interval(cal_df$start, cal_df$end)
cal_id <- which(ref_df$dlta2HH2o$timeBgn[i] %within% int)
if (!is.na(ref_df$dlta2HH2o$mean[i]) &
!is.na(ref_df$dlta2HH2oRefe$mean[i]) &
ref_df$dlta2HH2o$numSamp[i] >= 30 &
abs(ref_df$dlta2HH2o$mean[i] -
ref_df$dlta2HH2oRefe$mean[i]) < 40 &
ref_df$dlta2HH2o$vari[i] < 20) {
if (!length(cal_id) == 0) {
if (!is.na(cal_df$h_r2[cal_id]) &
cal_df$h_r2[cal_id] > r2_thres) {
ref_df$dlta2HH2o$mean_cal[i] <- cal_df$h_intercept[cal_id] +
cal_df$h_slope[cal_id] * ref_df$dlta2HH2o$mean[i]
} else {
ref_df$dlta2HH2o$mean_cal[i] <- NA
}
} else {
ref_df$dlta2HH2o$mean_cal[i] <- NA
}
} else {
ref_df$dlta2HH2o$mean_cal[i] <- NA
}
}
}
ref_df$dlta18OH2o$timeBgn <- convert_POSIXct_to_NEONhdf5_time(ref_df$dlta18OH2o$timeBgn)
ref_df$dlta2HH2o$timeBgn <- convert_POSIXct_to_NEONhdf5_time(ref_df$dlta2HH2o$timeBgn)
return(ref_df)
} |
nnf_normalize <- function(input, p = 2, dim = 2, eps = 1e-12, out = NULL) {
if (is.null(out)) {
denom <- input$norm(p, dim, keepdim = TRUE)$clamp_min(eps)$expand_as(input)
return(input/denom)
} else {
denom <- input$norm(p, dim, keepdim=TRUE)$clamp_min_(eps)$expand_as(input)
return(torch_div_out(out, input, denom))
}
}
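## Usage sketch (not run; assumes the 'torch' package is installed):
## x <- torch::torch_randn(4, 8)
## nnf_normalize(x, p = 2, dim = 2)   # each row rescaled to unit L2 norm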
nnf_layer_norm <- function(input, normalized_shape, weight = NULL, bias = NULL,
eps = 1e-5) {
torch_layer_norm(
input = input,
normalized_shape = normalized_shape,
weight = weight,
bias = bias,
eps = eps,
cudnn_enable = FALSE
)
}
nnf_local_response_norm <- function(input, size, alpha = 1e-4, beta = 0.75, k = 1) {
dim <- input$dim()
div <- input$mul(input)$unsqueeze(1)
if (dim == 3) {
div <- nnf_pad(div, c(0, 0, as.integer(size/2), as.integer((size - 1)/2)))
div <- nnf_avg_pool2d(div, c(size, 1), stride = 1)$squeeze(1)
} else {
sizes <- input$size()
div <- div$view(sizes[1], 1, sizes[2], sizes[3], -1)
div <- nnf_pad(div, c(0,0,0,0, as.integer(size/2), as.integer((size - 1)/2)))
div <- nnf_avg_pool3d(div, c(size, 1, 1), stride = 1)$squeeze(1)
div <- div$view(sizes)
}
div <- div$mul(alpha)$add(k)$pow(beta)
input/div
}
nnf_group_norm <- function(input, num_groups, weight = NULL, bias = NULL,
eps = 1e-5) {
torch_group_norm(input, num_groups = num_groups, weight = weight,
bias = bias, eps = eps
)
} |
TO_TM <- function(a = 47, longlat_df, d, e, f, g, digits = 4){
b <- as.numeric(longlat_df[,2])
c <- as.numeric(longlat_df[,3])
N <- as.numeric(Ellipsoids[a,3])/sqrt(1-as.numeric(Ellipsoids[a,7])*sin(c*pi/180)^2)
DELTA_LAMBA <- as.numeric((b-d)*3600)
a1 <- as.numeric(Ellipsoids[a,15])*c
b1 <- as.numeric(Ellipsoids[a,16])*sin(2*(c*pi/180))
c1 <- as.numeric(Ellipsoids[a,17])*sin(4*(c*pi/180))
d1 <- as.numeric(Ellipsoids[a,18])*sin(6*(c*pi/180))
e1 <- as.numeric(Ellipsoids[a,19])*sin(8*(c*pi/180))
f1 <- as.numeric(Ellipsoids[a,20])*sin(10*(c*pi/180))
Be <- as.numeric(a1-b1+c1-d1+e1-f1)
t <- as.numeric(tan(c*pi/180))
n <- as.numeric(sqrt(as.numeric(Ellipsoids[a,8]))*cos(c*pi/180))
N1 <- as.numeric(1/2*DELTA_LAMBA^2*N*sin(c*pi/180)*cos(c*pi/180)*(Sin_1^2))
N2 <- as.numeric(1/24*DELTA_LAMBA^4*N*sin(c*pi/180)*cos(c*pi/180)^3*(Sin_1^4)*(5-t^2+9*n^2+4*n^4))
N3 <- as.numeric(1/720*DELTA_LAMBA^6*N*sin(c*pi/180)*cos(c*pi/180)^5*(Sin_1^6)*(61-58*t^2+720*n^2-350*t^2*n^2))
Y <- as.numeric(e*(Be+N1+N2+N3))
North <- as.numeric(Y+g)
E1 <- as.numeric(DELTA_LAMBA*N*cos(c*pi/180)*Sin_1)
E2 <- as.numeric(1/6*DELTA_LAMBA^3*N*cos(c*pi/180)^3*Sin_1^3*(1-t^2+n^2))
E3 <- as.numeric(1/120*DELTA_LAMBA^5*N*cos(c*pi/180)^5*Sin_1^5*(5-18*t^2+t^4+14*n^2-58*t^2*n^2))
X <- as.numeric(e*(E1+E2+E3))
East <- as.numeric(X+f)
values <- tibble::as_tibble(as.data.frame(cbind(round(East, digits), round(North, digits), round(X, digits), round(Y, digits))))
names(values) <- c("East", "North", "X", "Y")
return(values)
} |
glmi <- function (formula, family = gaussian, data,vcov = NULL, weights, subset,
na.action, start = NULL, etastart, mustart, offset, control = list(...),
model = TRUE, method = "glm.fit", x = FALSE, y = TRUE, contrasts = NULL,
...)
{
call <- match.call()
if (is.character(family))
family <- get(family, mode = "function", envir = parent.frame())
if (is.function(family))
family <- family()
if (is.null(family$family)) {
print(family)
stop("'family' not recognized")
}
if (missing(data))
data <- environment(formula)
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "weights", "na.action",
"etastart", "mustart", "offset"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
if (identical(method, "model.frame"))
return(mf)
if (!is.character(method) && !is.function(method))
stop("invalid 'method' argument")
if (identical(method, "glm.fit"))
control <- do.call("glm.control", control)
mt <- attr(mf, "terms")
Y <- model.response(mf, "any")
if (length(dim(Y)) == 1L) {
nm <- rownames(Y)
dim(Y) <- NULL
if (!is.null(nm))
names(Y) <- nm
}
X <- if (!is.empty.model(mt))
model.matrix(mt, mf, contrasts)
else matrix(, NROW(Y), 0L)
weights <- as.vector(model.weights(mf))
if (!is.null(weights) && !is.numeric(weights))
stop("'weights' must be a numeric vector")
if (!is.null(weights) && any(weights < 0))
stop("negative weights not allowed")
offset <- as.vector(model.offset(mf))
if (!is.null(offset)) {
if (length(offset) != NROW(Y))
stop(gettextf("number of offsets is %d should equal %d (number of observations)",
length(offset), NROW(Y)), domain = NA)
}
mustart <- model.extract(mf, "mustart")
etastart <- model.extract(mf, "etastart")
fit <- eval(call(if (is.function(method)) "method" else method,
x = X, y = Y, weights = weights, start = start, etastart = etastart,
mustart = mustart, offset = offset, family = family,
control = control, intercept = attr(mt, "intercept") >
0L))
if (length(offset) && attr(mt, "intercept") > 0L) {
fit2 <- eval(call(if (is.function(method)) "method" else method,
x = X[, "(Intercept)", drop = FALSE], y = Y, weights = weights,
offset = offset, family = family, control = control,
intercept = TRUE))
if (!fit2$converged)
warning("fitting to calculate the null deviance did not converge -- increase 'maxit'?")
fit$null.deviance <- fit2$deviance
}
if (model)
fit$model <- mf
fit$na.action <- attr(mf, "na.action")
if (x)
fit$x <- X
if (!y)
fit$y <- NULL
fit <- c(fit, list(call = call, formula = formula, terms = mt,
data = data, offset = offset, control = control, method = method,
contrasts = attr(X, "contrasts"), xlevels = .getXlevels(mt,
mf)))
class(fit) <- c(fit$class, c("glm", "lm"))
if (is.null(vcov)) {
se <- vcov(fit)
} else {
if (is.function(vcov))
se <- vcov(fit)
else
se <- vcov
}
fit = list(fit,vHaC = se)
fit
} |
ability = function(resp, ip, method="WLE", mu=0, sigma=1, n=5) {
switch(method,
"MLE" = {mlebme(resp=resp, ip=ip$est, method="ML")},
"BME" = {mlebme(resp=resp, ip=ip$est, mu=mu, sigma=sigma, method="BM")},
"WLE" = {wle(resp=resp, ip=ip$est)},
"EAP" = {eap(resp=resp, ip=ip$est, normal.qu())},
"PV" = {dpv(resp=resp, ip=ip$est, mu=mu, sigma=sigma, n=n)},
"QRS" = {qrs(resp=resp)},
"SUM" = {resp[is.na(resp)]=0; as.matrix(rowSums(resp))})
}
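## Usage sketch (not run): 'resp' is a hypothetical 0/1 response matrix and 'ip' a
## list whose $est slot holds the 3PL item-parameter matrix (a, b, c columns).
## theta_wle <- ability(resp = resp, ip = ip, method = "WLE")
## theta_eap <- ability(resp = resp, ip = ip, method = "EAP")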
llf = function(x,r,p,mu,sigma,method) {
pr = p[,3] + (1.0 - p[,3])/(1.0 + exp(p[,1]*(p[,2] - x)))
pr = pmax(pr, .00001); pr = pmin(pr, .99999)
ll = r*log(pr) + (1-r)*log(1.0-pr)
lf = sum(ll)
if (method != "ML") lf = lf + log(dnorm(x,mu,sigma))
return(lf)
}
mle.one = function(resp, ip, mu=mu, sigma=sigma, method=method) {
cc = !is.na(resp)
resp = resp[cc]
ip = ip[cc, , drop=FALSE]
n = length(resp)
if (n < 1) return(c(NA, NA, 0))
est = optimize(llf, lower = -4, upper = 4, maximum = TRUE,
r = resp, p = ip, mu = mu, sigma = sigma, method = method)$maximum
ti = tif(ip, est)$f
if (method != "ML") ti = ti + 1/(sigma * sigma)
sem = sqrt(1/ti)
return(c(est, sem, n))
}
normal.qu = function(n=15,lower=-4,upper=4,mu=0,sigma=1,scaling="points"){
if (upper <= lower || sigma <= 0 || n < 3) stop("bad argument: need upper > lower, sigma > 0 and n >= 3")
qp=seq(lower,upper,length.out=n)
if(scaling=="points") {
qw=dnorm(qp,0,1)
qw=qw/sum(qw)
qp=qp*sigma+mu
} else {
qw=dnorm(qp,mu,sigma)
qw=qw/sum(qw)
}
return(list(quad.points=qp, quad.weights=qw))
}
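## Usage sketch: 15 equally spaced quadrature points on [-4, 4] with normalized
## normal weights (the weights sum to 1).
## qu <- normal.qu(n = 15, mu = 0, sigma = 1)
## sum(qu$quad.weights)   # 1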
mlebme = function(resp, ip, mu=0, sigma=1, method="ML") {
if (is.list(ip)) ip = ip$est
if (is.null(dim(resp))) dim(resp) = c(1,length(resp))
if (is.null(dim(ip))) stop("item parameters not a matrix")
if (nrow(ip) != ncol(resp)) stop("responses - item parameters mismatch")
np = nrow(resp)
o = sapply(1:np, function(i) mle.one(resp=resp[i,],
ip=ip, mu=mu, sigma=sigma, method=method))
rownames(o) = c("est","sem","n")
return(t(o))
}
bce.one = function(resp, ip) {
cc = !is.na(resp)
resp = resp[cc]
ip = ip[cc, , drop=FALSE]
n = length(resp)
if (n < 1) return(c(NA, NA, 0))
est = uniroot(scf, re=resp, p=ip, lower=-10, upper=10)$root
ev = bcv(est, resp, ip)
return(c(est, sqrt(ev), n))
}
bcv = function(x,r,p) {
i = iif(ip=p, x=x)$f
p[,3] = 0
q = irf(ip=p, x=x)$f
isum = sum(i)
jsum = sum(i * p[, 1] * (1 - 2 * q))
return(1/isum + jsum^2/(4 * isum^4))
}
scf = function(x,re,p) {
three = any(p[,3] > 0)
lgt = exp(p[,1] * (x - p[,2]))
pr = lgt / (1 + lgt)
z = re - pr
if (three) z = z - p[,3]*re / (p[,3] + lgt)
sm = sum(p[,1]*z)
if (three) {
pr3 = p[,3] + (1 - p[,3])*pr
ii = p[,1]^2 / pr3 * (1 - pr3) * pr^2
} else {
ii = p[,1]^2 * pr * (1 - pr)
}
isum = sum(ii)
jsum = sum(ii * p[,1] * (1 - 2*pr))
return(sm + jsum / (isum*2))
}
wle = function(resp, ip) {
if (is.list(ip)) ip = ip$est
if (is.null(dim(resp))) dim(resp) = c(1,length(resp))
if (is.null(dim(ip))) stop("item parameters not a matrix")
if (nrow(ip) != ncol(resp)) stop("responses - item parameters mismatch")
np = nrow(resp)
o = sapply(1:np, function(i) bce.one(resp=resp[i,], ip=ip))
rownames(o) = c("est","sem","n")
return(t(o))
}
eap.one = function(r, p, qp, qw) {
cc = !is.na(r)
r = r[cc]
p = p[cc,,drop=FALSE]
n = length(r)
if (n < 1) return(c(NA, NA, 0))
ll = sapply(qp, llf, r=r, p=p, mu=NULL, sigma=NULL, method="ML")
wl = exp(ll)*qw
swl = sum(wl)
x = sum(wl*qp)/swl
dev = qp - x
sem = sqrt(sum(wl*dev*dev)/swl)
return(c(x,sem,n))
}
eap = function(resp, ip, qu) {
if (is.list(ip)) ip = ip$est
if (is.null(dim(resp))) dim(resp) = c(1,length(resp))
if (is.null(dim(ip))) stop("item parameters not a matrix")
if (nrow(ip) != ncol(resp)) stop("responses - item parameters mismatch")
np = nrow(resp)
qp = qu$quad.points
qw = qu$quad.weights
o = sapply(1:np, function(i) eap.one(r=resp[i,], p=ip, qp, qw),USE.NAMES=FALSE)
rownames(o) = c("est","sem","n")
return(t(o))
}
like = function(x, r, p, mu=0, s=1, log=FALSE, post=TRUE) {
pr = irf(ip=p, x=x)$f
pr = pmax(pr, .00001); pr = pmin(pr, .99999)
ll = log(pr) %*% r + log(1 - pr) %*% (1-r)
if (post)
if (log) ll=ll+dnorm(x,mu,s,log=TRUE) else ll=exp(ll)*dnorm(x,mu,s)
else if (!log) ll=exp(ll)
return(ll)
}
ddf = function(x,r,p,d,mu,s)
log(like(x,r,p,mu=mu,s=s,post=TRUE)/d) - dt(x,df=3,log=TRUE)
dpv.one = function(resp, ip, n=5, mu, s) {
cc = !is.na(resp)
resp = resp[cc]
ip = ip[cc, , drop=FALSE]
if (length(resp) < 1) return(rep(NA,n))
d = integrate(like, lower=-6, upper=6, p=ip, r=resp, mu=mu, s=s, post=TRUE)$value
dd = optimize(f=ddf, c(-6,6), r=resp, p=ip, d=d, mu=mu, s=s, maximum=TRUE)$objective
pv = rep(0,n)
k = 0
repeat {
th = rt(1, df=3)
lf = log(like(th, r=resp, p=ip, mu=mu, s=s, post=TRUE) / d)
lg = dt(th, df=3, log=TRUE)
prob = exp(lf - lg - dd)
if (runif(1) < prob) {k = k+1; pv[k] = th}
if (k==n) break
}
return(pv)
}
dpv = function(resp, ip, mu=0, sigma=1, n=5) {
if (is.list(ip)) ip = ip$est
if (is.null(dim(resp))) dim(resp) = c(1,length(resp))
if (is.null(dim(ip))) stop("item parameters not a matrix")
if (nrow(ip) != ncol(resp)) stop("responses - item parameters mismatch")
np = nrow(resp)
o = sapply(1:np, function(i) dpv.one(resp=resp[i,], ip=ip, mu=mu, s=sigma, n=n))
return(t(o))
} |
context("sanity checks")
check_dev <- function(a, b, mean=0, sd=1) {
prefix <- sprintf("DEV: a=%f, b=%f, mean=%f, sd=%f", a, b, mean, sd)
e <- etruncnorm(a, b, mean, sd)
v <- vtruncnorm(a, b, mean, sd)
id <- integrate(function(x) dtruncnorm(x, a, b, mean, sd), a, b)$value
ee <- integrate(function(x) x * dtruncnorm(x, a, b, mean, sd), a, b)$value
ev <- integrate(function(x) (x-ee)^2 * dtruncnorm(x, a, b, mean, sd), a, b)$value
test_that(prefix, {
expect_equal(id, 1.0, tolerance=0.00005)
expect_equal(e, ee, tolerance=0.00005)
expect_equal(v, ev, tolerance=0.00005)
})
}
check_dev(-3, Inf, 0, 1)
check_dev(-2, Inf, 1, 1)
check_dev( 2, Inf, 0, 1)
check_dev( 3, Inf, 1, 1)
check_dev(-3, Inf, 0, 2)
check_dev(-2, Inf, 1, 2)
check_dev( 2, Inf, 0, 2)
check_dev( 3, Inf, 1, 2)
check_dev(-3.0, -2.5, 0, 1)
check_dev(-3.0, -1.5, 0, 1)
check_dev(-3.0, -0.5, 0, 1)
check_dev(-3.0, 0.5, 0, 1)
check_dev(0.0, 0.5, 0, 1)
check_dev(0.0, 1.5, 0, 1)
check_dev(0.0, 2.5, 0, 1)
check_dev(0.0, 3.5, 0, 1)
check_dev( 0.0, 1.0, 0.0, 10.0)
check_dev( 0.0, 1.0, 5.0, 1.0)
check_dev(-1.0, 0.0, 0.0, 10.0)
check_dev( 0.0, 1.0, -5.0, 1.0)
check_dev( 0.0, 1.0, 5.0, 0.1)
check_dev(0L, 1L, 0L, 10L)
check_r <- function(a, b, mean, sd, n=10000) {
prefix <- sprintf("R: a=%f, b=%f, mean=%f, sd=%f", a, b, mean, sd)
x <- rtruncnorm(n, a, b, mean, sd)
e.x <- mean(x)
e <- etruncnorm(a, b, mean, sd)
true_sd <- sqrt(vtruncnorm(a, b, mean, sd))
test_that(prefix, {
expect_true(all(x > a))
expect_true(all(x < b))
expect_equal(mean(x), e, tolerance=0.05, scale=sd)
expect_equal(sd(x), true_sd, tolerance=0.05, scale=sd)
})
}
check_r(-Inf, Inf, 0, 1)
check_r(-1, 1, 0, 1)
check_r(-1, 1, 1, 1)
check_r(-1, 1, 0, 2)
check_r(1, 2, 0, 1)
check_r(1, 2, 1, 1)
check_r(1, 2, 0, 2)
check_r(-2, -1, 0, 1)
check_r(-2, -1, 1, 1)
check_r(-2, -1, 0, 2)
check_r(-2, Inf, 0, 1)
check_r(-2, Inf, 1, 1)
check_r(-2, Inf, 0, 2)
check_r( 0, Inf, 0, 1)
check_r( 0, Inf, 1, 1)
check_r( 0, Inf, 0, 2)
check_r( 2, Inf, 0, 1)
check_r( 2, Inf, 1, 1)
check_r( 2, Inf, 0, 2)
check_r(-0.2, Inf, 0, 1)
check_r(-0.2, Inf, 1, 1)
check_r(-0.2, Inf, 0, 2)
check_r( 0.0, Inf, 0, 1)
check_r( 0.0, Inf, 1, 1)
check_r( 0.0, Inf, 0, 2)
check_r( 0.2, Inf, 0, 1)
check_r( 0.2, Inf, 1, 1)
check_r( 0.2, Inf, 0, 2)
check_r(-Inf, -2, 0, 1)
check_r(-Inf, -2, 1, 1)
check_r(-Inf, -2, 0, 2)
check_r(-Inf, 0, 0, 1)
check_r(-Inf, 0, 1, 1)
check_r(-Inf, 0, 0, 2)
check_r(-Inf, 2, 0, 1)
check_r(-Inf, 2, 1, 1)
check_r(-Inf, 2, 0, 2)
check_r(-Inf, -0.2, 0, 1)
check_r(-Inf, -0.2, 1, 1)
check_r(-Inf, -0.2, 0, 2)
check_r(-Inf, 0.0, 0, 1)
check_r(-Inf, 0.0, 1, 1)
check_r(-Inf, 0.0, 0, 2)
check_r(-Inf, 0.2, 0, 1)
check_r(-Inf, 0.2, 1, 1)
check_r(-Inf, 0.2, 0, 2)
check_r(-5, -4, 0, 1)
check_r(-5L, -4L, 0L, 1L)
check_pq <- function(a, b, mean, sd) {
prefix <- sprintf("PQ: a=%f, b=%f, mean=%f, sd=%f", a, b, mean, sd)
test_that(prefix, {
for (p in runif(500)) {
q <- qtruncnorm(p, a, b, mean, sd)
pp <- ptruncnorm(q, a, b, mean, sd)
expect_equal(pp, p, tolerance=0.00001)
}
})
}
check_pq(-1, 0, 0, 1)
check_pq(-1, 1, 0, 1)
check_pq( 1, 2, 0, 1)
check_pq(-1, 0, 4, 1)
check_pq(-1, 1, 4, 1)
check_pq( 1, 2, 4, 1)
check_pq(-1, 0, 0, 3)
check_pq(-1, 1, 0, 3)
check_pq( 1, 2, 0, 3)
check_pq(-1, Inf, 0, 1)
check_pq(-1, Inf, 4, 1)
check_pq(-1, Inf, 0, 3)
check_pq(-Inf, 1, 0, 1)
check_pq(-Inf, 1, 4, 1)
check_pq(-Inf, 1, 0, 3)
check_pq(1L, 2L, 0L, 3L) |
source("ESEUR_config.r")
library("plyr")
pal_col=rainbow(4)
plot_Hedge=function(hedge_str, col_num)
{
about=subset(exp1, Hedge == hedge_str)
lines(sort(about$MinValue/about$ActValue), col=pal_col[col_num])
lines(sort(about$ActValue/about$MaxValue), lty=2, col=pal_col[col_num])
}
plot_uncertainty=function(hedge_str, col_num)
{
resp=subset(exp1, Hedge == hedge_str)
rel_unc=count((resp$MinValue-resp$ActValue)/resp$MinValue)
lines(rel_unc$x, cumsum(rel_unc$freq)/nrow(resp), col=pal_col[col_num])
rel_unc=count((resp$MaxValue-resp$ActValue)/resp$MaxValue)
lines(rel_unc$x, cumsum(rel_unc$freq)/nrow(resp), col=pal_col[col_num], lty=2)
}
exp1=read.csv(paste0(ESEUR_dir, "developers/hedges_Data-Exp2.csv.xz"), as.is=TRUE)
sw=which(exp1$MinValue > exp1$MaxValue)
t=exp1$MinValue[sw]
exp1$MinValue[sw]=exp1$MaxValue[sw]
exp1$MaxValue[sw]=t
exp1=subset(exp1, ActValue/MinValue < 10)
exp1=subset(exp1, ActValue/MaxValue > 0.1)
plot(0, type="n",
xaxs="i", yaxs="i",
xlim=c(-1, 1), ylim=c(0, 1),
xlab="Relative uncertainty", ylab="Cumulative probability\n")
plot_uncertainty("almost", 1)
plot_uncertainty("below", 2)
plot_uncertainty("at_most", 3)
plot_uncertainty("no_more_than", 4)
legend(x="left", legend=c("almost", "below", "at most", "no more than"), bty="n", fill=pal_col, cex=1.2)
plot_uncertainty("above", 1)
plot_uncertainty("over", 2)
plot_uncertainty("at_least", 3)
plot_uncertainty("no_less_than", 4)
legend(x="right", legend=c("above", "over", "at least", "no less than"), bty="n", fill=pal_col, cex=1.2) |
resize.lp <- function(lprec, nrow, ncol)
{
.Call(RlpSolve_resize_lp, lprec, as.integer(nrow), as.integer(ncol))
invisible()
} |
rowCtr <- function(obj, dim) {
I <- dim(obj$rowcoord)[1]
K <- min(obj$nd, ncol(obj$rowcoord))
svF <- matrix(rep(obj$sv[1:K], I), I, K, byrow=TRUE)
rpc <- obj$rowcoord[,1:K] * svF
obj$rowmass * rowSums(rpc[,dim, drop=FALSE]^2) / sum(obj$sv[dim]^2)
}
colCtr <- function(obj, dim) {
J <- dim(obj$colcoord)[1]
K <- min(obj$nd, ncol(obj$colcoord))
svG <- matrix(rep(obj$sv[1:K], J), J, K, byrow=TRUE)
cpc <- obj$colcoord[,1:K] * svG
obj$colmass * rowSums(cpc[,dim, drop=FALSE]^2) / sum(obj$sv[dim]^2)
}
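## Usage sketch (not run; 'res' is assumed to be a ca::ca() result, which carries
## the $rowcoord, $colcoord, $sv, $nd and mass slots used above):
## rowCtr(res, dim = 1)    # row contributions to axis 1
## colCtr(res, dim = 1:2)  # column contributions to the first two axes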
rowSubsetCa <- function(obj, indices) {
ret <- obj
for (i in 4:7) {
ret[[i]] <- list()
ret[[i]] <- obj[[i]][indices]
}
del <- which(!(1:nrow(obj$rowcoord) %in% indices))
ret$rowsup <- as.numeric(sapply(obj$rowsup[obj$rowsup %in% indices],
function(x) x - sum(del < x)))
ret$rowcoord <- matrix()
ret$rowcoord <- obj$rowcoord[indices,,drop=FALSE]
ret$rownames <- obj$rownames[indices]
ret
}
colSubsetCa <- function(obj, indices) {
ret <- obj
for (i in 9:12) {
ret[[i]] <- list()
ret[[i]] <- obj[[i]][indices]
}
  del <- which(!(1:nrow(obj$colcoord) %in% indices))
  ret$colsup <- as.numeric(sapply(obj$colsup[obj$colsup %in% indices],
                                  function(x) x - sum(del < x)))
ret$colcoord <- matrix()
ret$colcoord <- obj$colcoord[indices,,drop=FALSE]
ret$colnames <- obj$colnames[indices]
ret
}
showCorpusCa <- function(corpusCa, dim=1, ndocs=10, nterms=10) {
objects <- .getCorpusWindow()
window <- objects$window
txt <- objects$txt
listbox <- objects$listbox
tkwm.title(window, .gettext("Correspondence Analysis"))
actDocs <- length(corpusCa$rowvars) == length(corpusCa$rowsup)
if(actDocs) {
tndocs <- nrow(corpusCa$rowcoord) - length(corpusCa$rowsup)
tnterms <- nrow(corpusCa$colcoord)
tnvars <- length(unique(names(corpusCa$rowvars)))
tkinsert(txt, "end", sprintf(.gettext("Correspondence analysis of %i documents, %i terms and %i supplementary variables."),
tndocs, tnterms, tnvars),
"body")
}
else {
tnactvars <- length(unique(names(corpusCa$rowvars)[!corpusCa$rowvars %in% corpusCa$rowsup]))
tndocs <- nrow(corpusCa$rowcoord) - length(corpusCa$rowvars)
tnterms <- nrow(corpusCa$colcoord)
tnsupvars <- length(unique(names(corpusCa$rowvars)[corpusCa$rowvars %in% corpusCa$rowsup]))
tkinsert(txt, "end", sprintf(.gettext("Correspondence analysis of %i active variable(s) (aggregating %i documents), %i terms and %i supplementary variable(s)."),
tnactvars, tndocs, tnterms, tnsupvars),
"body")
}
tkinsert(txt, "end", "\n\n", "heading")
mark <- 0
titles <- c(.gettext("Position"), .gettext("Contribution (%)"), .gettext("Quality (%)"))
tkinsert(txt, "end", paste(.gettext("Axes summary:"), "\n", sep=""), "heading")
tkmark.set(txt, paste("mark", mark, sep=""), tkindex(txt, "insert-1c"))
tkinsert(listbox, "end", .gettext("Axes summary"))
mark <- mark + 1
nd <- min(length(corpusCa$sv), corpusCa$nd)
values <- 100 * (corpusCa$sv[1:nd]^2)/sum(corpusCa$sv^2)
values2 <- cumsum(values)
val <- rbind(values, values2)
rownames(val) <- c(.gettext("Inertia (%)"), .gettext("Cumulated inertia (%)"))
colnames(val) <- seq.int(ncol(val))
names(dimnames(val)) <- c("", .gettext("Axis"))
tkinsert(txt, "end", paste(capture.output(val), collapse="\n"), "fixed")
if(actDocs) {
rows <- order(rowCtr(corpusCa, dim), decreasing=TRUE)[1:ndocs]
rows <- rows[!rows %in% corpusCa$rowsup]
}
else {
rows <- corpusCa$rowvars[!corpusCa$rowvars %in% corpusCa$rowsup]
}
cols <- order(colCtr(corpusCa, dim), decreasing=TRUE)[1:nterms]
cols <- cols[!cols %in% corpusCa$colsup]
for(j in 1:length(dim)) {
rowsCtr <- rowCtr(corpusCa, dim[j])
rows <- rows[order(rowsCtr[rows], decreasing=TRUE)]
colsCtr <- colCtr(corpusCa, dim[j])
cols <- cols[order(colsCtr[cols], decreasing=TRUE)]
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Most contributive terms on negative side of axis %i:"), dim[j]), "\n", sep=""),
"heading")
tkmark.set(txt, paste("mark", mark, sep=""), tkindex(txt, "insert-1c"))
tkinsert(listbox, "end", sprintf(.gettext("Axis %i - Negative Side:"), dim[j]))
tkitemconfigure(listbox, mark, background="grey")
mark <- mark + 1
negcols <- cols[corpusCa$colcoord[cols, dim[j]] < 0]
if(length(negcols) == 0) {
tkinsert(txt, "end",
sprintf(.gettext("None among the %i most contributive terms."), nterms), "body")
}
else {
df <- data.frame(row.names=corpusCa$colnames[negcols],
corpusCa$colcoord[negcols, dim[j]] * corpusCa$sv[dim[j]],
colsCtr[negcols] * 100,
(corpusCa$colcoord[negcols, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$coldist[negcols])^2 * 100)
colnames(df) <- titles
tkinsert(txt, "end", paste(capture.output(format(df)), collapse="\n"), "fixed")
}
if(!actDocs) {
negrows <- rows[corpusCa$rowcoord[rows, dim[j]] <= 0]
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Active levels on negative side of axis %i:"), dim[j]), "\n", sep=""),
"heading")
df <- data.frame(row.names=corpusCa$rownames[negrows],
corpusCa$rowcoord[negrows, dim[j]] * corpusCa$sv[dim[j]],
rowsCtr[negrows] * 100,
(corpusCa$rowcoord[negrows, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$rowdist[negrows])^2 * 100)
colnames(df) <- titles
tkinsert(txt, "end", paste(capture.output(format(df)), collapse="\n"), "fixed")
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Most extreme documents on negative side of axis %i:"), dim[j]), "\n", sep=""),
"heading")
int <- setdiff(1:nrow(corpusCa$rowcoord), corpusCa$rowvars)
negrows <- int[corpusCa$rowcoord[int, dim[j]] < 0]
negrows <- negrows[order(corpusCa$rowcoord[negrows, dim[j]])[1:ndocs]]
negrows <- negrows[!is.na(negrows)]
df <- data.frame(row.names=corpusCa$rownames[negrows],
corpusCa$rowcoord[negrows, dim[j]] * corpusCa$sv[dim[j]],
(corpusCa$rowcoord[negrows, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$rowdist[negrows])^2 * 100)
colnames(df) <- titles[-2]
}
else {
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Most contributive documents on negative side of axis %i:"), dim[j]), "\n", sep=""),
"heading")
negrows <- rows[corpusCa$rowcoord[rows, dim[j]] < 0]
df <- data.frame(row.names=corpusCa$rownames[negrows],
corpusCa$rowcoord[negrows, dim[j]] * corpusCa$sv[dim[j]],
rowsCtr[negrows] * 100,
(corpusCa$rowcoord[negrows, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$rowdist[negrows])^2 * 100)
colnames(df) <- titles
}
ids <- names(corpus)
if(length(negrows) == 0) {
tkinsert(txt, "end",
sprintf(.gettext("None among the %i most contributive documents."), ndocs), "body")
}
else {
tkinsert(txt, "end", paste(capture.output(format(df)), collapse="\n"), "fixed")
tkinsert(txt, "end", "\n", "heading")
for(i in negrows) {
id <- corpusCa$rownames[i]
tkinsert(txt, "end", paste("\n", id, "\n", sep=""),
"articlehead")
tkmark.set(txt, paste("mark", mark, sep=""), tkindex(txt, "insert-1c"))
mark <- mark + 1
tkinsert(listbox, "end", id)
doc <- corpus[[match(id, ids)]]
origin <- meta(doc, "origin")
date <- meta(doc, "datetimestamp")
if(length(origin) > 0 && length(date) > 0)
tkinsert(txt, "end", paste(origin, " - ", date, "\n", sep=""), "details")
else if(length(origin) > 0)
tkinsert(txt, "end", paste(origin, "\n", sep=""), "details")
else if(length(date) > 0)
tkinsert(txt, "end", paste(date, "\n", sep=""), "details")
if(length(origin) > 0 || length(date) > 0)
tkinsert(txt, "end", "\n", "small")
tkinsert(txt, "end", paste(paste(doc, collapse="\n"), "\n"), "body")
}
}
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Most contributive terms on positive side of axis %i:"),
dim[j]), "\n", sep=""),
"heading")
tkinsert(listbox, "end", sprintf(.gettext("Axis %i - Positive Side:"), dim[j]))
tkitemconfigure(listbox, mark, background="grey")
tkmark.set(txt, paste("mark", mark, sep=""), tkindex(txt, "insert-1c"))
mark <- mark + 1
poscols <- cols[corpusCa$colcoord[cols, dim[j]] >= 0]
if(length(poscols) == 0) {
tkinsert(txt, "end",
sprintf(.gettext("None among the %i most contributive terms."), nterms), "body")
}
else {
df <- data.frame(row.names=corpusCa$colnames[poscols],
corpusCa$colcoord[poscols, dim[j]] * corpusCa$sv[dim[j]],
colsCtr[poscols] * 100,
(corpusCa$colcoord[poscols, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$coldist[poscols])^2 * 100)
colnames(df) <- titles
tkinsert(txt, "end", paste(capture.output(format(df)), collapse="\n"), "fixed")
}
if(!actDocs) {
posrows <- rows[corpusCa$rowcoord[rows, dim[j]] > 0]
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Active levels on positive side of axis %i:"), dim[j]), "\n", sep=""),
"heading")
df <- data.frame(row.names=corpusCa$rownames[posrows],
corpusCa$rowcoord[posrows, dim[j]] * corpusCa$sv[dim[j]],
rowsCtr[posrows] * 100,
(corpusCa$rowcoord[posrows, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$rowdist[posrows])^2 * 100)
colnames(df) <- titles
tkinsert(txt, "end", paste(capture.output(format(df)), collapse="\n"), "fixed")
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Most extreme documents on positive side of axis %i:"), dim[j]), "\n", sep=""),
"heading")
int <- setdiff(1:nrow(corpusCa$rowcoord), corpusCa$rowvars)
posrows <- int[corpusCa$rowcoord[int, dim[j]] > 0]
posrows <- posrows[order(corpusCa$rowcoord[posrows, dim[j]], decreasing=TRUE)[1:ndocs]]
posrows <- posrows[!is.na(posrows)]
df <- data.frame(row.names=corpusCa$rownames[posrows],
corpusCa$rowcoord[posrows, dim[j]] * corpusCa$sv[dim[j]],
(corpusCa$rowcoord[posrows, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$rowdist[posrows])^2 * 100)
colnames(df) <- titles[-2]
}
else {
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Most contributive documents on positive side of axis %i:"), dim[j]), "\n", sep=""),
"heading")
posrows <- rows[corpusCa$rowcoord[rows, dim[j]] > 0]
df <- data.frame(row.names=corpusCa$rownames[posrows],
corpusCa$rowcoord[posrows, dim[j]] * corpusCa$sv[dim[j]],
rowsCtr[posrows] * 100,
(corpusCa$rowcoord[posrows, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$rowdist[posrows])^2 * 100)
colnames(df) <- titles
}
if(length(posrows) == 0) {
tkinsert(txt, "end",
sprintf(.gettext("None among the %i most contributive documents."), ndocs), "body")
}
else {
tkinsert(txt, "end", paste(capture.output(format(df)), collapse="\n"), "fixed")
tkinsert(txt, "end", "\n", "heading")
for(i in posrows) {
id <- corpusCa$rownames[i]
tkinsert(txt, "end", paste("\n", id, "\n", sep=""),
"articlehead")
tkmark.set(txt, paste("mark", mark, sep=""), tkindex(txt, "insert-1c"))
mark <- mark + 1
tkinsert(listbox, "end", id)
doc <- corpus[[match(id, ids)]]
origin <- meta(doc, "origin")
date <- meta(doc, "datetimestamp")
if(length(origin) > 0 && length(date) > 0)
tkinsert(txt, "end", paste(origin, " - ", date, "\n", sep=""), "details")
else if(length(origin) > 0)
tkinsert(txt, "end", paste(origin, "\n", sep=""), "details")
else if(length(date) > 0)
tkinsert(txt, "end", paste(date, "\n", sep=""), "details")
if(length(origin) > 0 || length(date) > 0)
tkinsert(txt, "end", "\n", "small")
tkinsert(txt, "end", paste(paste(doc, collapse="\n"), "\n"), "body")
}
}
suprows <- intersect(corpusCa$rowvars, corpusCa$rowsup)
if(length(suprows) > 0) {
tkinsert(txt, "end",
paste("\n\n", sprintf(.gettext("Situation of supplementary variables on axis %i:"), dim[j]), "\n", sep=""),
"heading")
tkinsert(listbox, "end", sprintf(.gettext("Axis %i - Variables"), dim[j]))
tkitemconfigure(listbox, mark, background="grey")
tkmark.set(txt, paste("mark", mark, sep=""), tkindex(txt, "insert-1c"))
mark <- mark + 1
df <- data.frame(row.names=corpusCa$rownames[suprows],
corpusCa$rowcoord[suprows, dim[j]] * corpusCa$sv[dim[j]],
(corpusCa$rowcoord[suprows, dim[j]] * corpusCa$sv[dim[j]] / corpusCa$rowdist[suprows])^2 * 100)
colnames(df) <- titles[-2]
tkinsert(txt, "end", paste(capture.output(format(df)), collapse="\n"), "fixed")
}
}
tkraise(window)
}
showCorpusCaDlg <- function() {
if(!exists("corpusCa") || !class(corpusCa) == "ca") {
.Message(message=.gettext("Please run a correspondence analysis on the corpus first."),
type="error")
return()
}
initializeDialog(title=.gettext("Show Correspondence Analysis"))
actDocs<-length(corpusCa$rowvars) == length(corpusCa$rowsup)
dimFrame <- tkframe(top)
tkgrid(.titleLabel(dimFrame, text=.gettext("Dimensions to plot:")),
sticky="s")
tclXDim <- tclVar(1)
tclYDim <- tclVar(2)
nd <- min(length(corpusCa$sv), corpusCa$nd)
xSlider <- tkscale(dimFrame, from=1, to=nd,
showvalue=TRUE, variable=tclXDim,
resolution=1, orient="horizontal")
ySlider <- tkscale(dimFrame, from=1, to=nd,
showvalue=TRUE, variable=tclYDim,
resolution=1, orient="horizontal")
tnterms <- nrow(corpusCa$colcoord) - length(corpusCa$colsup)
if(actDocs)
tndocs <- nrow(corpusCa$rowcoord) - length(corpusCa$rowsup)
else
tndocs <- nrow(corpusCa$rowcoord) - length(corpusCa$rowvars)
if(!actDocs) {
checkBoxes(frame="labelsFrame",
boxes=c("varLabels", "termLabels"),
initialValues=c(1, 1),
labels=c(.gettext("Variables"), .gettext("Terms")),
title=.gettext("Draw labels for:"))
checkBoxes(frame="pointsFrame",
boxes=c("varPoints", "termPoints"),
initialValues=c(1, 1),
labels=c(.gettext("Variables"), .gettext("Terms")),
title=.gettext("Draw point symbols for:"))
}
else if(length(corpusCa$rowsup) == 0) {
checkBoxes(frame="labelsFrame",
boxes=c("docLabels", "termLabels"),
initialValues=c(0, 1),
labels=c(.gettext("Documents"), .gettext("Terms")),
title=.gettext("Draw labels for:"))
checkBoxes(frame="pointsFrame",
boxes=c("docPoints", "termPoints"),
initialValues=c(0, 1),
labels=c(.gettext("Documents"), .gettext("Terms")),
title=.gettext("Draw point symbols for:"))
}
else {
checkBoxes(frame="labelsFrame",
boxes=c("varLabels", "docLabels", "termLabels"),
initialValues=c(0, 0, 1),
labels=c(.gettext("Variables"), .gettext("Documents"), .gettext("Terms")),
title=.gettext("Draw labels for:"))
checkBoxes(frame="pointsFrame",
boxes=c("varPoints", "docPoints", "termPoints"),
initialValues=c(0, 0, 1),
labels=c(.gettext("Variables"), .gettext("Documents"), .gettext("Terms")),
title=.gettext("Draw point symbols for:"))
}
vars <- colnames(meta(corpus))
if(actDocs)
selection <- (1:length(vars)) - 1
else
selection <- match(unique(names(corpusCa$rowvars[!corpusCa$rowvars %in% corpusCa$rowsup])), vars) - 1
varBox <- variableListBox(top, vars,
selectmode="multiple",
title=.gettext("Variables to plot:"),
initialSelection=selection)
tkbind(varBox$listbox, "<<ListboxSelect>>", function(...) tclvalue(varLabelsVariable) <- 1)
nFrame <- tkframe(top)
tkgrid(.titleLabel(nFrame, text=.gettext("Number of items to show:")),
sticky="s")
tclNDocs <- tclVar(25)
tclNTerms <- tclVar(25)
spinDocs <- tkwidget(top, type="spinbox", from=1, to=tndocs,
inc=1, textvariable=tclNDocs,
validate="all", validatecommand=.validate.uint)
spinTerms <- tkwidget(top, type="spinbox", from=1, to=tnterms,
inc=1, textvariable=tclNTerms,
validate="all", validatecommand=.validate.uint)
ctrDimVariable <- tclVar("xyDim")
ctrDimFrame <- tkframe(top)
ctrDim1 <- ttkradiobutton(ctrDimFrame, variable=ctrDimVariable, value="xyDim", text=.gettext("Both axes"))
ctrDim2 <- ttkradiobutton(ctrDimFrame, variable=ctrDimVariable, value="xDim", text=.gettext("Horizontal axis"))
ctrDim3 <- ttkradiobutton(ctrDimFrame, variable=ctrDimVariable, value="yDim", text=.gettext("Vertical axis"))
onCustom <- function() {
x <- tclvalue(tclXDim)
y <- tclvalue(tclYDim)
docLabels <- if(!actDocs) FALSE else tclvalue(docLabelsVariable) == 1
termLabels <- tclvalue(termLabelsVariable) == 1
varLabels <- if(actDocs && length(corpusCa$rowsup) == 0) FALSE else tclvalue(varLabelsVariable) == 1
docPoints <- if(!actDocs) FALSE else tclvalue(docPointsVariable) == 1
termPoints <- tclvalue(termPointsVariable) == 1
varPoints <- if(actDocs && length(corpusCa$rowsup) == 0) FALSE else tclvalue(varPointsVariable) == 1
vars <- getSelection(varBox)
nDocs <- as.integer(tclvalue(tclNDocs))
nTerms <- as.integer(tclvalue(tclNTerms))
ctrDim <- switch(tclvalue(ctrDimVariable), xyDim=paste("c(", x, ", ", y, ")", sep=""), xDim=x, yDim=y)
if(nDocs > tndocs)
nDocs <- tndocs
if(nTerms > tnterms)
nTerms <- tnterms
if(!(docLabels || termLabels || varLabels || docPoints || termPoints || varPoints)) {
.Message(.gettext("Please select something to plot."), "error", parent=top)
return()
}
setBusyCursor()
on.exit(setIdleCursor())
doItAndPrint(sprintf("showCorpusCa(corpusCa, %s, %s, %s)", ctrDim, nDocs, nTerms))
if(actDocs) {
if((docLabels || docPoints) && (varLabels || varPoints)) {
rowWhat <- "all"
varIndexes <- corpusCa$rowvars[names(corpusCa$rowvars) %in% vars]
doItAndPrint(paste("plottingCa <- rowSubsetCa(corpusCa, c(order(rowCtr(corpusCa, ", ctrDim,
"), decreasing=TRUE)[1:", nDocs, "], ", paste(varIndexes, collapse=", "), "))", sep=""))
}
else if(docLabels || docPoints) {
rowWhat <- "active"
doItAndPrint(paste("plottingCa <- rowSubsetCa(corpusCa, order(rowCtr(corpusCa, ", ctrDim,
"), decreasing=TRUE)[1:", nDocs, "])", sep=""))
}
else if(varLabels || varPoints) {
rowWhat <- "passive"
varIndexes <- corpusCa$rowvars[names(corpusCa$rowvars) %in% vars]
doItAndPrint(paste("plottingCa <- rowSubsetCa(corpusCa, c(",
paste(varIndexes, collapse=", "), "))", sep=""))
}
else {
rowWhat <- "none"
}
if(((docPoints || docLabels) && (varPoints || varLabels)) && docLabels != varLabels)
Message(.gettext("Plotting documents and variables at the same time currently forces labels to be drawn for both or none."),
"note")
rowActivePoints<-if(docPoints) 16 else NA
rowSupPoints<-if(varPoints) 1 else NA
rowActiveColor<-"black"
rowSupColor<-"blue"
rowActiveFont<-3
rowSupFont<-4
}
else {
if(varLabels || varPoints) {
rowWhat <- "all"
varIndexes <- corpusCa$rowvars[names(corpusCa$rowvars) %in% vars]
doItAndPrint(paste("plottingCa <- rowSubsetCa(corpusCa, c(",
paste(varIndexes, collapse=", "), "))", sep=""))
}
else {
rowWhat <- "none"
}
rowActivePoints<-if(varPoints) 1 else NA
rowSupPoints<-if(varPoints) 16 else NA
rowActiveColor<-rowSupColor<-"blue"
rowActiveFont<-rowSupFont<-4
}
if(termLabels || termPoints) {
colWhat <- "all"
if(docLabels || docPoints || varLabels || varPoints)
doItAndPrint(paste("plottingCa <- colSubsetCa(plottingCa, order(colCtr(corpusCa, ", ctrDim,
"), decreasing=TRUE)[1:", nTerms, "])", sep=""))
else
doItAndPrint(paste("plottingCa <- colSubsetCa(corpusCa, order(colCtr(corpusCa, ", ctrDim,
"), decreasing=TRUE)[1:", nTerms, "])", sep=""))
}
else {
colWhat <- "none"
}
doItAndPrint(sprintf('plotCorpusCa(plottingCa, dim=c(%s, %s), what=c("%s", "%s"), labels=c(%i, %i), pch=c(%s, %s, %s, NA), col.text=c("%s", "%s", "black", "red"), font=c(%i, %i, 1, 2), mass=TRUE, xlab="%s", ylab="%s")',
x, y, rowWhat, colWhat,
if(docLabels || varLabels) 2 else 0, if(termLabels) 2 else 0,
rowActivePoints, rowSupPoints, if(termPoints) 17 else NA,
rowActiveColor, rowSupColor, rowActiveFont, rowSupFont,
sprintf(.gettext("Dimension %s (%.1f%%)"), x, 100 * corpusCa$sv[as.integer(x)]^2/sum(corpusCa$sv^2)),
sprintf(.gettext("Dimension %s (%.1f%%)"), y, 100 * corpusCa$sv[as.integer(y)]^2/sum(corpusCa$sv^2))))
activateMenus()
}
onClose <- NULL
.customCloseHelp(helpSubject="showCorpusCaDlg", custom.button=.gettext("Show"))
tkgrid(labelRcmdr(dimFrame, text=.gettext("Horizontal axis:")), xSlider, sticky="w")
tkgrid(labelRcmdr(dimFrame, text=.gettext("Vertical axis:")), ySlider, sticky="w")
tkgrid(dimFrame, sticky="w", pady=6, columnspan=2)
tkgrid(labelsFrame, pointsFrame, sticky="w", pady=6, padx=c(0, 6))
if(!actDocs || length(corpusCa$rowsup) > 0)
tkgrid(getFrame(varBox), columnspan=2, sticky="we", pady=6)
tkgrid(labelRcmdr(nFrame, text=.gettext("Documents:")), spinDocs, sticky="w")
tkgrid(labelRcmdr(nFrame, text=.gettext("Terms:")), spinTerms, sticky="w")
tkgrid(nFrame, sticky="w", pady=6, columnspan=2)
tkgrid(labelRcmdr(ctrDimFrame, text=.gettext("Most contributive to:")), sticky="w", columnspan=2, pady=6)
tkgrid(ctrDim1, ctrDim2, ctrDim3, sticky="w", pady=6)
tkgrid(ctrDimFrame, sticky="w", pady=6, columnspan=2)
tkgrid.columnconfigure(ctrDimFrame, "all", uniform="a")
tkgrid(buttonsFrame, sticky="ew", pady=6, columnspan=2)
.Tcl("update idletasks")
tkwm.resizable(top, 0, 0)
tkbind(top, "<Return>", onCustom)
tkbind(top, "<Escape>", onClose)
if (getRcmdr("double.click") && (!preventDoubleClick)) tkbind(window, "<Double-ButtonPress-1>", onCustom)
tkwm.deiconify(top)
tkfocus(top)
if (getRcmdr("crisp.dialogs")) tclServiceMode(on=TRUE)
} |
iconfianza.x <- function(datos, nivel = 0.95, ic = TRUE, colas = 2, use.t = FALSE) {
datos[datos == -Inf] <- NA
datos[datos == Inf] <- NA
n <- sum(!is.na(datos))
if (n != 0) {
if (use.t) pnor <- qt(1 - (1 - nivel) / colas, n - 1) else pnor <- qnorm(1 - (1 - nivel) / colas)
    med <- mean(datos, na.rm = TRUE)
    std <- sqrt(var(datos, na.rm = TRUE))
l.i <- med - pnor * std
l.s <- med + pnor * std
} else {
med <- NA
l.i <- NA
l.s <- NA
}
if (ic) iconfres <- c(l.i, med, l.s) else iconfres <- rep(med, 3)
iconfres
} |
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
quit('no') |
selectActiveDims = function(q=NULL,E,threshold,mu,Sigma,pn=NULL,method=1,verb=0,pmvnorm_usr=pmvnorm){
n<-length(mu)
  if(is.null(q)){
    return(selectQdims(E=E,threshold=threshold,mu=mu,Sigma=Sigma,pn=pn,method=method,reducedReturn=TRUE,verb=verb,pmvnorm_usr=pmvnorm_usr))
  }else if(length(q)==2){
    return(selectQdims(E=E,threshold=threshold,mu=mu,Sigma=Sigma,pn=pn,method=method,reducedReturn=TRUE,verb=verb,limits=q,pmvnorm_usr=pmvnorm_usr))
  }
if(method==0){
indQ<-as.integer(seq(from = 1,to = n,length.out = q))
}else if(method==1){
if(is.null(pn)){
pn<-pnorm((mu-threshold)/sqrt(diag(Sigma)))
}
indQ<-sample.int(n,q,prob = pn)
}else if(method==2){
if(is.null(pn)){
pn<-pnorm((mu-threshold)/sqrt(diag(Sigma)))
}
indQ<-sample.int(n,q,prob = pn*(1-pn))
}else if(method==3){
distances<-as.matrix(dist(E))
if(is.null(pn)){
pn<-pnorm((mu-threshold)/sqrt(diag(Sigma)))
}
indQ<-rep(-1,q)
indQ[1]<-sample.int(n,1,prob = pn)
dd<-1
for(i in (2:q)){
dd<-dd^0.8*distances[indQ[i-1],]
dd<-dd/diff(range(dd))
indQ[i]<-sample.int(n,1,prob = dd*pn)
}
}else if(method==4){
distances<-as.matrix(dist(E))
if(is.null(pn)){
pn<-pnorm((mu-threshold)/sqrt(diag(Sigma)))
}
indQ<-rep(-1,q)
indQ[1]<-sample.int(n,1,prob = pn*(1-pn))
dd<-1
for(i in (2:q)){
dd<-dd^0.8*distances[indQ[i-1],]
dd<-dd/diff(range(dd))
indQ[i]<-sample.int(n,1,prob = dd*pn*(1-pn))
}
}else if(method==5){
indQ<-sample.int(n,q)
}else {
indQ<-as.integer(seq(from = 1,to = n,length.out = q))
}
indQ<-sort(indQ)
return(indQ)
}
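# Illustrative call, assuming a small 1-D Gaussian-process discretisation.
# With q fixed and method = 0 the selection is deterministic and the
# pmvnorm default is never evaluated, so this sketch runs in base R.
E <- matrix(seq(0, 1, length.out = 30), ncol = 1)
Sigma <- exp(-as.matrix(dist(E))^2/0.1)
mu <- sin(2*pi*E[, 1])
selectActiveDims(q = 5L, E = E, threshold = 0, mu = mu, Sigma = Sigma, method = 0)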
selectQdims = function(E,threshold,mu,Sigma,pn=NULL,method=1,reducedReturn=T,verb=0,limits=NULL,pmvnorm_usr=pmvnorm){
if(verb>0)
cat("\n selectQdims: find good q and select active dimensions \n")
n<-length(mu)
if(!is.null(limits)){
q0=max(2,min(limits[1],n))
qMax=min(limits[2],n,300)
}else{
q0=min(10,n)
qMax=min(n,300)
}
if(verb>0)
cat("\n Initial q:",q0)
indQ<-selectActiveDims(q=q0,E=E,threshold=threshold,mu=mu,Sigma=Sigma,pn=pn,method=method,pmvnorm_usr=pmvnorm_usr)
Eq<-E[indQ,]
muEq<-mu[indQ]
KEq<-Sigma[indQ,indQ]
pPrime<- 1 - pmvnorm_usr(lower=rep(-Inf,length(indQ)),upper = rep(threshold,length(indQ)),mean = muEq,sigma = KEq)
err<-attr(pPrime,"error")
deltaP<-1
qIncrement<-min(10,ceiling(0.01*n))
if(verb>0)
cat(", Increments q:",qIncrement,"\n")
flag=0
q=q0
while(flag<2){
q<-min(q+qIncrement,qMax)
indQ<-selectActiveDims(q=q,E=E,threshold=threshold,mu=mu,Sigma=Sigma,pn=pn,method=method,pmvnorm_usr=pmvnorm_usr)
Eq<-E[indQ,]
muEq<-mu[indQ]
KEq<-Sigma[indQ,indQ]
if("algorithm" %in% names(formals(pmvnorm_usr))){
temp<-1 - pmvnorm_usr(lower=rep(-Inf,length(indQ)),upper = rep(threshold,length(indQ)),mean = muEq,sigma = KEq,algorithm = GenzBretz(abseps = 0.01))
}else{
temp<-1 - pmvnorm_usr(lower=rep(-Inf,length(indQ)),upper = rep(threshold,length(indQ)),mean = muEq,sigma = KEq)
}
err<-attr(temp,"error")
deltaP<-abs(temp-pPrime)/(temp+1)
if(deltaP<=err)
flag<-flag+1
if(q==qMax)
flag<-flag+2
pPrime<-temp
}
if(verb>0)
cat("Final q:",q,", deltaP: ",deltaP,", err:",err,"\n")
res<-indQ
if(!reducedReturn){
pq<-1 - pmvnorm_usr(lower=rep(-Inf,length(indQ)),upper = rep(threshold,length(indQ)),mean = muEq,sigma = KEq)
attr(pq,"error")<-attr(temp,"error")
res<-list(indQ=indQ,pq=pq,Eq=Eq,muEq=muEq,KEq=KEq)
}
return(res)
} |
datacheck4 <- function(pos_list, k){
  for (i in 1:length(pos_list)){
    if (length(table(pos_list[[i]])) != k[[i]]){
      stop("User-declared nominal size does not match observed nominal size. Please check vector of target positions.")
    }
  }
} |
ghkgcmr_R <- function(R, pdf, cdf, nrep){
.Call('ghkgcmr', PACKAGE = 'gcKrig', R, pdf, cdf, nrep)
}
ghkgcmr2_R <- function(R, pdf, cdf, nrep){
.Call('ghkgcmr2', PACKAGE = 'gcKrig', R, pdf, cdf, nrep)
}
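# ghkgcmr_R()/ghkgcmr2_R() are thin wrappers around the compiled GHK
# simulators shipped with gcKrig; the second variant is selected via
# reorder = TRUE in likGHK() below and apparently applies a variable
# reordering before simulating the likelihood.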
likGHK <- function(
pars, y, x = NULL, locs, marginal, corr, effort, longlat = FALSE, distscale = 1,
nrep = 1000, reorder = FALSE, seed = 12345
){
if(longlat == FALSE){
matD <- as.matrix(dist(locs, method = "euclidean", diag = TRUE, upper = TRUE))*distscale
}else if(longlat == TRUE){
if (requireNamespace("sp", quietly = TRUE)) {
matD <- sp::spDists(x = as.matrix(locs), longlat = TRUE)*distscale
}else{
stop("Please install {sp} first!")
}
}
npars <- length(pars)
nparcorr <- corr$npar.cor
  if(marginal$nod == 1 && pars[ncol(x)+1] < 0){
    pars[ncol(x)+1] <- 0
  }
  if(corr$nug == 0 && pars[npars] < 0){
pars[npars] <- 0
}else if(corr$nug == 1){
pars[npars] <- ifelse(pars[npars] > 1, 1, pars[npars])
pars[npars] <- ifelse(pars[npars] < 0, 0, pars[npars])
pars[npars-1] <- ifelse(pars[npars-1] < 0, 0, pars[npars-1])
}
R <- corr$corr(pars[(npars-nparcorr+1):npars], D = matD)
pdf = marginal$pdf(y = y, x = x, pars = pars, effort = effort)
cdf = marginal$cdf(y = y, x = x, pars = pars, effort = effort)
set.seed(seed)
if(reorder == FALSE){
loglik <- ghkgcmr_R(R = R, pdf = pdf, cdf = cdf, nrep = nrep)
} else{
loglik <- ghkgcmr2_R(R = R, pdf = pdf, cdf = cdf, nrep = nrep)
}
  if(is.nan(loglik) || loglik == -Inf) loglik <- -1e6
  if(loglik == Inf) loglik <- 1e6
return(-loglik)
}
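# Parameter layout assumed by likGHK(), matching the box constraints built
# in mleGHK() below: regression coefficients first (one per column of x,
# intercept included), then any overdispersion parameter of the marginal,
# then the correlation parameters, with the nugget (when present) last and
# clamped to [0, 1].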
profilelikGHK <- function(
theta, par.index, fitted, fixvalue = NULL, single = TRUE, alpha, nrep = 1000, seed = 12345
){
if(single == TRUE){
optpar <- append(fixvalue, theta, after = par.index-1)
llik <- likGHK(pars = optpar, y = fitted$args$y, x = fitted$args$x, locs = fitted$args$locs,
marginal = fitted$args$marginal, corr = fitted$args$corr, effort = fitted$args$effort,
longlat = fitted$args$longlat, distscale = fitted$args$distscale,
nrep = nrep, reorder = FALSE, seed = seed)
llikout <- (-llik - (fitted$log.lik-qchisq(1-alpha,1)/2) )^2
}else{
optpar <- append(theta, fixvalue, after = par.index-1)
llikout <- likGHK(pars = optpar, y = fitted$args$y, x = fitted$args$x, locs = fitted$args$locs,
marginal = fitted$args$marginal, corr = fitted$args$corr, effort = fitted$args$effort,
longlat = fitted$args$longlat, distscale = fitted$args$distscale,
nrep = nrep, reorder = FALSE, seed = seed)
}
return(llikout)
}
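# With single = TRUE, profilelikGHK() returns the squared distance between
# the profile log-likelihood and log.lik - qchisq(1 - alpha, 1)/2, so
# minimising it over the fixed parameter locates a profile-likelihood
# confidence bound.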
mleProfilelikGHK <- function(
fitted, par.index, fixvalue = NULL, single = TRUE, start, alpha, nrep = 1000, seed = 12345
){
if(single == TRUE){
est <- optim(start, profilelikGHK, par.index = par.index, fitted = fitted,fixvalue = fixvalue,
single = TRUE, alpha = alpha, nrep = nrep, seed = seed, method = 'L-BFGS-B',
lower = fitted$optlb[par.index], upper = fitted$optub[par.index])$par
}else{
est <- optim(start, profilelikGHK, par.index = par.index, fitted = fitted, fixvalue = fixvalue,
single = FALSE, alpha = alpha, nrep = nrep, seed = seed, method = 'L-BFGS-B',
lower = fitted$optlb[-par.index], upper = fitted$optub[-par.index])$par
}
return(est)
}
mleGHK <- function(
y, x = NULL, locs, marginal, corr, effort = 1, longlat = FALSE, distscale = 1, corrpar0 = NULL,
ghkoptions = list(nrep = c(100,1000), reorder = FALSE, seed = 12345)
){
if(longlat == FALSE){
D <- as.matrix(dist(locs, method = "euclidean", diag = TRUE, upper = TRUE))*distscale
}else if(longlat == TRUE){
if (requireNamespace("sp", quietly = TRUE)) {
D <- sp::spDists(x = as.matrix(locs), longlat = TRUE)*distscale
}else{
stop("Please install {sp} first!")
}
}
N <- length(y)
  if(length(effort) == 1 && effort[1] == 1) effort <- rep(1, N)
  if(!is.null(x)) x <- as.data.frame(x)
  x <- cbind(rep(1, N), x)
marg0 <- marginal$start(y = y, x = x, effort = effort)
n.nugget0 <- corr$nug
n.reg0 <- ncol(x)
n.od0 <- marginal$nod
if(is.null(corrpar0)){
corpar0 <- corr$start(D)
}else{
corpar0 <- corrpar0
names(corpar0) <- names(corr$start(D))
}
est <- c(marg0, corpar0)
optlb <- c(rep(-Inf,n.reg0), rep(0, n.od0), 0, rep(0, n.nugget0))
optub <- c(rep(Inf,n.reg0), rep(Inf, n.od0), Inf, rep(1, n.nugget0))
for(j in 1:length(ghkoptions$nrep)){
fit <- optim(par = est, fn = likGHK, y = y, x = x, locs = locs, marginal = marginal, corr = corr,
effort = effort, longlat = longlat, distscale = distscale, nrep = ghkoptions$nrep[j],
reorder = ghkoptions$reorder, seed = ghkoptions$seed, method = "L-BFGS-B",
lower = optlb, upper = optub)
est <- fit$par
}
kmarg <- length(marg0)
k <- length(est)
  if (is.null(fit$convergence) || fit$convergence != 0)
    warning("Maximum likelihood estimation failed. Algorithm does not converge or MLEs do not exist.")
result.list <- list(MLE = est,
x = x,
nug = n.nugget0,
nreg = n.reg0,
log.lik = -fit$value,
AIC = 2*k+2*fit$value,
AICc = 2*k+2*fit$value + 2*k*(k+1)/(N-k-1),
BIC = 2*fit$value + k*log(N),
kmarg = kmarg,
par.df = k,
N = N,
D = D,
optlb = optlb,
optub = optub,
args = mget(names(formals()),sys.frame(sys.nframe())))
return(result.list)
}
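# Hypothetical call shape (the marginal/corr constructors are gcKrig
# objects; the names below are illustrative only, not checked here):
# fit <- mleGHK(y = counts, x = covariates, locs = coords,
#               marginal = negbin.gc(link = "log"), corr = matern.gc(kappa = 0.5),
#               effort = 1, ghkoptions = list(nrep = c(100, 1000),
#                                             reorder = FALSE, seed = 12345))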
likGHKXX <- function(pars, y, XX, locs, marginal, corr, effort, longlat, distscale, nrep, seed)
{
likGHK(pars = pars, y = y, x = cbind(rep(1, length(y)), XX), locs = locs, marginal = marginal, corr = corr,
effort = effort, longlat = longlat, distscale = distscale, nrep = nrep, reorder = FALSE, seed = seed)
}
predGHK <- function(
obs.y, obs.x = NULL, obs.locs, pred.x = NULL, pred.locs, longlat = FALSE, distscale = 1,
marginal, corr, obs.effort = 1, pred.effort = 1, estpar = NULL,
corrpar0 = NULL, pred.interval = NULL,
ghkoptions = list(nrep = c(100, 1000), reorder = FALSE, seed = 12345)
){
x <- as.data.frame(cbind(rep(1,length(obs.y)), obs.x))
colnames(x)[1] <- c("Intercept")
  if(!is.matrix(pred.locs) && !is.data.frame(pred.locs))
    stop("Input 'pred.locs' must be a data frame or matrix!")
  if(!is.matrix(obs.locs) && !is.data.frame(obs.locs))
    stop("Input 'obs.locs' must be a data frame or matrix!")
  if(length(obs.effort) == 1) obs.effort <- rep(obs.effort, nrow(obs.locs))
  if(length(obs.effort) != nrow(obs.locs))
    stop("Length of sampling effort must equal the number of sampling locations!")
  if(length(pred.effort) == 1) pred.effort <- rep(pred.effort, nrow(pred.locs))
  if(length(pred.effort) != nrow(pred.locs))
    stop("Length of prediction effort must equal the number of prediction locations!")
if(is.null(estpar)){
MLE.est <- mleGHK(y = obs.y, x = x[,-1], locs = obs.locs, marginal = marginal, corr = corr, effort = obs.effort,
longlat = longlat, distscale = distscale, corrpar0 = corrpar0, ghkoptions = ghkoptions)
loglik <- MLE.est$log.lik; estpar <- MLE.est$MLE
}else{
loglik <- -likGHK(pars = estpar, y = obs.y, x = x, locs = obs.locs, marginal = marginal, corr = corr,
effort = obs.effort, longlat = longlat, distscale = distscale,
nrep = ghkoptions$nrep[length(ghkoptions$nrep)],
reorder = ghkoptions$reorder, seed = ghkoptions$seed)
}
if(is.null(pred.x)) {
pred.x <- matrix(1, nrow = nrow(pred.locs), ncol = 1)
}else {
pred.x <- cbind(rep(1, nrow(pred.locs)) , pred.x)
}
pred.x <- as.data.frame(pred.x)
names(pred.x) <- names(x)
if(nrow(pred.x)!= nrow(pred.locs))
stop("Number of prediction locations did not match rows of covariates")
if (requireNamespace("FNN", quietly = TRUE)) {
if(nrow(pred.locs) == 1){
indexloc <- which.min(FNN::get.knnx(pred.locs, obs.locs, 1)$nn.dist)
m0 <- n0 <- round(unique(pred.effort)*obs.y[indexloc]/obs.effort[indexloc])+1
}else{
m0 <- n0 <- round(pred.effort*apply(pred.locs, 1, function(x) obs.y[which.min(FNN::get.knnx(t(as.matrix(x)),
obs.locs, 1)$nn.dist)]/obs.effort[which.min(FNN::get.knnx(t(as.matrix(x)), obs.locs, 1)$nn.dist)]))+1
}
}else{
stop("Please install {FNN} first!")
}
NPL <- length(m0)
max.count <- ceiling(5*max(obs.y/obs.effort)*max(pred.effort))
  if(is.null(pred.interval)){
    ans <- matrix(NA, nrow = NPL, ncol = 2)
  } else if(pred.interval >= 0 && pred.interval <= 1) {
    ans <- matrix(NA, nrow = NPL, ncol = 6)
  } else stop("Input pred.interval must be a number between 0 and 1!")
nnrep <- length(ghkoptions$nrep)
for(j in 1:NPL){
tmpfun <- function(xtmp) {
exp(-likGHK(pars = estpar, y = c(obs.y, xtmp), x = rbind(as.matrix(x), pred.x[j,]),
locs = rbind(obs.locs, pred.locs[j,]), marginal = marginal, corr = corr,
effort = c(obs.effort, pred.effort[j]), longlat = longlat, distscale = distscale,
nrep = ghkoptions$nrep[nnrep], reorder = ghkoptions$reorder, seed = ghkoptions$seed) - loglik)
}
p.m0 <- p.n0 <- tmpfun(m0[j]); mu.m0 <- mu.n0 <- p.m0*m0[j]; mu2.m0 <- mu2.n0 <- p.m0*m0[j]^2
MM1 <- matrix(0, nrow = 2, ncol = 4); MM2 <- matrix(0, nrow = 2, ncol = 4)
MM1[1,] <- c(p.m0, mu.m0, mu2.m0, m0[j]); MM2[1,] <- c(p.n0, mu.n0, mu2.n0, n0[j])
p.m0 <- tmpfun(m0[j]-1); mu.m0 <- p.m0*(m0[j]-1)
mu2.m0 <- p.m0*(m0[j]-1)^2; MM1[2,] <- c(p.m0, mu.m0, mu2.m0, m0[j]-1)
while( (p.m0 > sqrt(.Machine$double.eps) || MM1[nrow(MM1), 2] > MM1[nrow(MM1)-1, 2]) && m0[j] > 1)
{
p.m0 <- tmpfun(m0[j]-2); mu.m0 <- p.m0*(m0[j]-2); mu2.m0 <- p.m0*(m0[j]-2)^2
MM1 <- rbind(MM1, c(p.m0, mu.m0, mu2.m0, m0[j]-2)); m0[j] <- m0[j]-1
}
p.n0 <- tmpfun(n0[j]+1); mu.n0 <- p.n0*(n0[j]+1); mu2.n0 <- p.n0*(n0[j]+1)^2
MM2[2, ] <- c(p.n0, mu.n0, mu2.n0, n0[j]+1)
while( (p.n0 > sqrt(.Machine$double.eps) || MM2[nrow(MM2), 2] > MM2[nrow(MM2)-1, 2]) && n0[j] < max.count)
{
p.n0 <- tmpfun(n0[j]+2); mu.n0 <- p.n0*(n0[j]+2); mu2.n0 <- p.n0*(n0[j]+2)^2
MM2 <- rbind(MM2, c(p.n0, mu.n0, mu2.n0, n0[j]+2)); n0[j] <- n0[j]+1
}
MM2 <- MM2[-1, ]; MM.all <- rbind(MM1, MM2); weight <- 1/sum(MM.all[,1])
if(!is.null(pred.interval)){
pd <- cbind(MM.all[,4], MM.all[,1]*weight )
pd1 <- pd[order(pd[,1]), ];pd1 <- cbind(pd1, cumsum(pd1[,2]))
id1 <- suppressWarnings(ifelse(max(which(pd1[,3] <= (1-pred.interval)/2 ))==-Inf, 0,
max(which(pd1[,3] <= (1-pred.interval)/2 ))))
id2 <- min(which(pd1[,3] >= 1-(1-pred.interval)/2 ))
L1 <- id1; U1 <- id2-1
pd2 <- pd[order(pd[,2], decreasing = TRUE),]
pd2 <- cbind(pd2, cumsum(pd2[,2]))
id3 <- which(pd2[,3] >= pred.interval)[1]
L2 <- min(pd2[1:id3,1]); U2 <- max(pd2[1:id3,1])
ans[j, ] <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2,
L1, U1, L2, U2)
}else{
ans[j, ] <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2)
}
}
if(!is.null(pred.interval)){
anslist <- (list(obs.locs = obs.locs,
obs.y = obs.y,
pred.locs = pred.locs,
predValue = ans[,1],
predCount = round(ans[,1]),
predVar = ans[,2],
ConfidenceLevel = pred.interval,
predInterval.EqualTail = ans[,3:4],
predInterval.Shortest = ans[,5:6]))
}else{
anslist <- (list(obs.locs = obs.locs,
obs.y = obs.y,
pred.locs = pred.locs,
predValue = ans[,1],
predCount = round(ans[,1]),
predVar = ans[,2]))
}
return(anslist)
}
predGHK.sf <- function(
obs.y, obs.x = NULL, obs.locs, pred.x = NULL, pred.locs, longlat = FALSE, distscale = 1,
marginal, corr, obs.effort = 1, pred.effort = 1, estpar = NULL,
corrpar0 = NULL, pred.interval = NULL,
n.cores = 2, cluster.type="SOCK", ghkoptions = list(nrep = c(100,1000), reorder = FALSE, seed = 12345)
){
x <- as.data.frame(cbind(rep(1,length(obs.y)), obs.x))
colnames(x)[1] <- c("Intercept")
if(is.null(ghkoptions[["nrep"]])) ghkoptions$nrep = c(100,1000)
if(is.null(ghkoptions[["reorder"]])) ghkoptions$reorder = FALSE
if(is.null(ghkoptions[["seed"]])) ghkoptions$seed = 12345
  if(!is.matrix(pred.locs) && !is.data.frame(pred.locs))
    stop("Input 'pred.locs' must be a data frame or matrix!")
  if(!is.matrix(obs.locs) && !is.data.frame(obs.locs))
    stop("Input 'obs.locs' must be a data frame or matrix!")
  if(length(obs.effort) == 1) obs.effort <- rep(obs.effort, nrow(obs.locs))
  if(length(obs.effort) != nrow(obs.locs))
    stop("Length of sampling effort must equal the number of sampling locations!")
  if(length(pred.effort) == 1) pred.effort <- rep(pred.effort, nrow(pred.locs))
  if(length(pred.effort) != nrow(pred.locs))
    stop("Length of prediction effort must equal the number of prediction locations!")
if(is.null(estpar)){
MLE.est <- mleGHK(y = obs.y, x = x[,-1], locs = obs.locs, marginal = marginal, corr = corr,
effort = obs.effort, longlat = longlat, distscale = distscale,
corrpar0 = corrpar0, ghkoptions = ghkoptions)
loglik <- MLE.est$log.lik; estpar <- MLE.est$MLE
}else{
loglik <- -likGHK(pars = estpar, y = obs.y, x = x, locs = obs.locs, marginal = marginal, corr = corr,
effort = obs.effort, longlat = longlat, distscale = distscale,
nrep = ghkoptions$nrep[length(ghkoptions$nrep)],
reorder = ghkoptions$reorder, seed = ghkoptions$seed)
}
if(is.null(pred.x)) {
pred.x <- matrix(1, nrow = nrow(pred.locs), ncol = 1)
}else {
pred.x <- cbind(rep(1, nrow(pred.x)) , pred.x)
}
pred.x <- as.data.frame(pred.x)
colnames(pred.x)[1] <- c("Intercept")
names(pred.x) <- names(x)
if(nrow(pred.x)!= nrow(pred.locs))
stop("Number of prediction locations did not match the number of covariates")
if (requireNamespace("FNN", quietly = TRUE)) {
if(nrow(pred.locs) == 1){
indexloc <- which.min(FNN::get.knnx(pred.locs, obs.locs, 1)$nn.dist)
m0 <- n0 <- round(unique(pred.effort)*obs.y[indexloc]/obs.effort[indexloc])+1
}else{
m0 <- n0 <- round(pred.effort*apply(pred.locs, 1, function(x) obs.y[which.min(FNN::get.knnx(t(as.matrix(x)),
obs.locs, 1)$nn.dist)]/obs.effort[which.min(FNN::get.knnx(t(as.matrix(x)), obs.locs, 1)$nn.dist)]))+1
}
}else {
stop("Please install {FNN} first!")
}
NPL <- length(m0)
max.count <- ceiling(5*max(obs.y/obs.effort)*max(pred.effort))
nnrep <- length(ghkoptions$nrep)
if (requireNamespace("snowfall", quietly = TRUE)) {
snowfall::sfInit(parallel =TRUE, cpus = n.cores, type = cluster.type)
suppressMessages(snowfall::sfExportAll(except = NULL, debug = FALSE))
suppressMessages(snowfall::sfLibrary("gcKrig", character.only= TRUE))
par.pred.inner <- function(j){
tmpfun <- function(xtmp) {
exp(-likGHK(pars = estpar, y = c(obs.y, xtmp), x = rbind(as.matrix(x), pred.x[j,]),
locs = rbind(obs.locs, pred.locs[j,]), marginal = marginal, corr = corr,
effort = c(obs.effort, pred.effort[j]), longlat = longlat, distscale = distscale,
nrep = ghkoptions$nrep[nnrep], reorder = ghkoptions$reorder, seed = ghkoptions$seed) - loglik)
}
p.m0 <- p.n0 <- tmpfun(m0[j]); mu.m0 <- mu.n0 <- p.m0*m0[j]; mu2.m0 <- mu2.n0 <- p.m0*m0[j]^2
MM1 <- matrix(0, nrow = 2, ncol = 4); MM2 <- matrix(0, nrow = 2, ncol = 4)
MM1[1,] <- c(p.m0, mu.m0, mu2.m0, m0[j]); MM2[1,] <- c(p.n0, mu.n0, mu2.n0, n0[j])
p.m0 <- tmpfun(m0[j]-1); mu.m0 <- p.m0*(m0[j]-1);
mu2.m0 <- p.m0*(m0[j]-1)^2; MM1[2,] <- c(p.m0, mu.m0, mu2.m0, m0[j]-1)
while( (p.m0 > sqrt(.Machine$double.eps) || MM1[nrow(MM1), 2] > MM1[nrow(MM1)-1, 2]) && m0[j] > 1)
{
p.m0 <- tmpfun(m0[j]-2); mu.m0 <- p.m0*(m0[j]-2); mu2.m0 <- p.m0*(m0[j]-2)^2
MM1 <- rbind(MM1, c(p.m0, mu.m0, mu2.m0, m0[j]-2)); m0[j] <- m0[j]-1
}
p.n0 <- tmpfun(n0[j]+1); mu.n0 <- p.n0*(n0[j]+1); mu2.n0 <- p.n0*(n0[j]+1)^2
MM2[2, ] <- c(p.n0, mu.n0, mu2.n0, n0[j]+1)
while( (p.n0 > sqrt(.Machine$double.eps) || MM2[nrow(MM2), 2] > MM2[nrow(MM2)-1, 2]) && n0[j] < max.count)
{
p.n0 <- tmpfun(n0[j]+2); mu.n0 <- p.n0*(n0[j]+2); mu2.n0 <- p.n0*(n0[j]+2)^2
MM2 <- rbind(MM2, c(p.n0, mu.n0, mu2.n0, n0[j]+2)); n0[j] <- n0[j]+1
}
MM2 <- MM2[-1, ]; MM.all <- rbind(MM1, MM2); weight <- 1/sum(MM.all[,1])
if(!is.null(pred.interval)){
pd <- cbind(MM.all[,4], MM.all[,1]*weight )
pd1 <- pd[order(pd[,1]), ];pd1 <- cbind(pd1, cumsum(pd1[,2]))
id1 <- suppressWarnings(ifelse(max(which(pd1[,3] <= (1-pred.interval)/2 ))==-Inf, 0,
max(which(pd1[,3] <= (1-pred.interval)/2 ))))
id2 <- min(which(pd1[,3] >= 1-(1-pred.interval)/2 )); L1 <- id1; U1 <- id2-1
pd2 <- pd[order(pd[,2], decreasing = TRUE),]
pd2 <- cbind(pd2, cumsum(pd2[,2]))
id3 <- which(pd2[,3] >= pred.interval)[1]
L2 <- min(pd2[1:id3,1]); U2 <- max(pd2[1:id3,1])
ans <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2,
L1, U1, L2, U2)
}else{
ans <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2)
}
return(ans)
}
out = snowfall::sfSapply(1:NPL, par.pred.inner)
snowfall::sfStop()
}else{
stop("Please install {snowfall} first before using this function!")
}
ans <- t(out)
if(!is.null(pred.interval)){
anslist <- (list(obs.locs = obs.locs,
obs.y = obs.y,
pred.locs = pred.locs,
predValue = ans[,1],
predCount = round(ans[,1]),
predVar = ans[,2],
ConfidenceLevel = pred.interval,
predInterval.EqualTail = ans[,3:4],
predInterval.Shortest = ans[,5:6]))
}else{
anslist <- (list(obs.locs = obs.locs,
obs.y = obs.y,
pred.locs = pred.locs,
predValue = ans[,1],
predCount = round(ans[,1]),
predVar = ans[,2]))
}
return(anslist)
} |
library(testthat)
library(gluedown)
library(stringr)
library(rvest)
library(glue)
test_that("md_quote creates a <blockquote> tag with other blocks (ex. 206)", {
lines <- md_quote(md_softline(md_heading("Foo"), "bar", "baz"))
lines %>%
md_convert() %>%
read_html() %>%
html_node("blockquote") %>%
expect_full()
lines %>%
md_convert() %>%
read_html() %>%
html_node("h1") %>%
html_text() %>%
expect_equal("Foo")
})
test_that("md_quote can create an empty block quote (ex. 217)", {
node <- md_quote("") %>%
md_convert() %>%
read_html() %>%
html_nodes("blockquote") %>%
html_text(trim = TRUE)
expect_nchar(node, 0)
})
test_that("md_quotes with soft lines create a single quote (ex. 221)", {
text <- c("foo", "bar")
text %>%
md_quote() %>%
md_convert() %>%
read_html() %>%
html_node("blockquote") %>%
html_text(trim = TRUE) %>%
expect_equal(expected = str_c(text, collapse = "\n"))
})
test_that("md_quotes with blank lines create paragraphs (ex. 222)", {
text <- c("foo", "", "bar")
node <- text %>%
md_quote() %>%
md_convert() %>%
read_html() %>%
html_node("blockquote") %>%
html_nodes("p") %>%
html_text(trim = TRUE)
expect_equal(node, text[which(text != "")])
})
test_that("md_quote can create nested block qutoes (ex. 228)", {
lines <- md_quote(md_quote(md_quote("foo")))
nodes <- lines %>%
md_convert() %>%
read_html() %>%
html_nodes("blockquote")
expect_length(nodes, 3)
}) |
grnn.optmiz_auc <- function(net, lower = 0, upper, nfolds = 4, seed = 1, method = 1) {
if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
if (!(method %in% c(1, 2))) stop("the method is not supported.", call. = F)
fd <- folds(seq(nrow(net$x)), n = nfolds, seed = seed)
cv <- function(s) {
cls <- parallel::makeCluster(min(nfolds, parallel::detectCores() - 1), type = "PSOCK")
obj <- c("fd", "net", "grnn.fit", "grnn.predone", "grnn.predict")
parallel::clusterExport(cls, obj, envir = environment())
rs <- Reduce(rbind,
parallel::parLapply(cls, fd,
function(f) data.frame(ya = net$y[f],
yp = grnn.predict(grnn.fit(net$x[-f, ], net$y[-f], sigma = s),
net$x[f, ]))))
parallel::stopCluster(cls)
return(MLmetrics::AUC(y_pred = rs$yp, y_true = rs$ya))
}
  if (method == 1) {
    rst <- optimize(f = cv, interval = c(lower, upper), maximum = TRUE)
  } else if (method == 2) {
    rst <- optim(par = mean(c(lower, upper)), fn = cv, lower = lower, upper = upper,
                 method = "Brent", control = list(fnscale = -1))
  }
}
return(data.frame(sigma = rst[[1]], auc = rst[[2]]))
} |
a <- matrix(rnorm(50 * 10), 50)
b <- svd(a, nu = 5, nv = 5)
str(b)
c <- svd(a)
str(c)
rowMeans(c$u)
a <- NA
`if`(b, 2, 3)
crossprod(c$u)
tcrossprod(c$u)[1:5, 1:5]
crossprod(c$v)
tcrossprod(c$v)[1:5, 1:5]
crossprod(b$u)[1:5, 1:5]
tcrossprod(b$u)[1:5, 1:5]
crossprod(b$v)[1:5, 1:5]
tcrossprod(b$v)[1:5, 1:5]
a <- matrix(rnorm(1e5 * 200), 1e5)
print(system.time(e <- qr.Q(qr(a))))
print(system.time(b <- svd(a, nv = 0)))
all.equal(crossprod(e), diag(ncol(e)))
all.equal(crossprod(b$u), diag(ncol(e))) |
buildNames <- function(dframe, numOfNames, minLength, maxLength)
{
  if(!missing(dframe))
  {
    alphaMatrixFirst <- genMatrix(dframe,"first")
    alphaMatrixAll <- genMatrix(dframe,"all")
  } else {
    stop("A training data frame must be supplied to build the transition matrices.")
  }
topAlphas <- rowSums(alphaMatrixFirst)
topAlphas <- topAlphas[order(-topAlphas)]
maxRange <- min(length(topAlphas), 13)
if(maxRange < 13)
{
warning("Training data is not large enough. Expect less than minimum length names and/or names that do not seem like training data")
}
n <- sample(topAlphas[1:maxRange],numOfNames, replace = TRUE)
custNames <- list()
for(i in seq_along(n))
{
firstAlpha <- names(n[i])
secondAlpha <- nextAlphaProb(alphaMatrix = alphaMatrixFirst, currentAlpha = firstAlpha, placement = "first")
custName <- list(firstAlpha,secondAlpha)
prevAlpha <- paste0(unlist(custName),collapse = "")
nameLength <- sample(minLength:maxLength, 1, replace = FALSE)
for(j in 3:nameLength)
{
nextAlpha <- nextAlphaProb(alphaMatrix = alphaMatrixAll, currentAlpha = prevAlpha, placement = "all")
if(nextAlpha == "doesNotExist")
{
prevAlphaEnd <- unlist(strsplit(prevAlpha, split = ""))[2]
nextAlpha <- nextAlphaProb(alphaMatrix = alphaMatrixFirst, currentAlpha = prevAlphaEnd, placement = "first")
next
}
custName <- list(custName,nextAlpha)
custName <- unlist(custName)
prevAlpha <- paste0(custName[length(custName)-1], custName[length(custName)], collapse = "")
}
custName <- paste0(custName, collapse = "")
custNames <- c(unlist(custNames), custName)
}
return(custNames)
} |
InitialPopulation <- function(x, populationSize, startGenes, EveryGeneInInitialPopulation=TRUE) {
cat(paste("\nGenerating the initial population...\n"))
genomeLength=length(featureNames(x));
cat("Generating random chromosomes...\n");
populationInit = matrix(0, nrow=populationSize, ncol=genomeLength);
if (EveryGeneInInitialPopulation==TRUE){
noDGenes=ceiling(genomeLength/populationSize);
index1<-sample(1:genomeLength, genomeLength, replace=FALSE)
if (genomeLength%%populationSize==0){
indexI<-matrix(index1, nrow=populationSize)
cat(paste("\nNo rest...\n"))
} else {
cat(paste("\nWith rest...\n"))
index2<-sample(setdiff(1:genomeLength,tail(index1, startGenes+1)), abs(populationSize*noDGenes-genomeLength), replace=FALSE)
indexI<-matrix(c(index1,index2), nrow=populationSize, byrow = TRUE)
}
for (i in 1:populationSize) {
for(j in 1:noDGenes){
populationInit[i, indexI[i, j]] = 1;
}
}
if (startGenes>noDGenes){
for (i in 1:populationSize) {
populationInit[i, sample(setdiff(1:genomeLength,indexI[i,]), startGenes-noDGenes, replace=FALSE)] = 1;
}
}
} else {
for (i in 1:populationSize) {
populationInit[i, sample(1:genomeLength, startGenes, replace=FALSE)] = 1;
}
}
colnames(populationInit)=featureNames(x);
rownames(populationInit)=1:populationSize;
return(populationInit);
}
Individuals<-function(population){
cat(paste("\tGenerating Individuals...\n"))
if(nrow(population)%%2==1){
population<-rbind(population, population[1,])
rownames(population)<-c(1:nrow(population))
}
noIndividuals<-nrow(population)
Id<-rep(1:(nrow(population)/2), each=2)
  population<-cbind(Id, population)
  return(population)
}
splitChromosomes <- function(x, noChr=22) {
  noGenes<-matrix(c( 1, 3000, 9.174312,   2, 2500, 7.645260,   3, 1900, 5.810398,
                     4, 1600, 4.892966,   5, 1700, 5.198777,   6, 1900, 5.810398,
                     7, 1800, 5.504587,   8, 1400, 4.281346,   9, 1400, 4.281346,
                    10, 1400, 4.281346,  11, 2000, 6.116208,  12, 1600, 4.892966,
                    13,  800, 2.446483,  14, 1200, 3.669725,  15, 1200, 3.669725,
                    16, 1300, 3.975535,  17, 1600, 4.892966,  18,  600, 1.834862,
                    19, 1700, 5.198777,  20,  900, 2.752294,  21,  400, 1.223242,
                    22,  800, 2.446483), nrow=22, ncol=3, byrow=TRUE)
colnames(noGenes)<-c("Chromosome", "NoOfGenes", "Percent")
toSplit<-dim(exprs(x))[1];
if (3*noChr>toSplit){
cat(paste("\nToo many chromosomes for the given genome. Please specify a lower number of chromosomes...\n"));
noChr<-toSplit%/%3
cat(paste("\nAutomatically changing the number of chromosomes to "), noChr, paste(" ...\n"));
}
noGenes2<-noGenes[1:noChr,3];
rm(noGenes);
newPercent<-noGenes2/sum(noGenes2)*100;
rm(noGenes2);
chr<-as.integer(newPercent*toSplit/100);
rm(newPercent);
chr[noChr]<-chr[noChr]+(toSplit-sum(chr));
rm(toSplit);
rm(noChr);
chrNumber<-0;
chrConfig<-c();
for (i in chr) {
chrNumber<-chrNumber+1
chrConfig<-c(chrConfig, rep(chrNumber,i));
}
rm(chrNumber);
rm(chr);
rm(i);
return(chrConfig);
}
RandomizePop<-function(population){
cat(paste("\tRandomizing the population...\n"))
newIndex<-sample(1:dim(population)[1], replace=FALSE)
newPopulation<-population[newIndex,]
rownames(newPopulation)<-c(1:dim(newPopulation)[1])
return(newPopulation)
}
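# Small sketch on a synthetic population: Individuals() pairs consecutive
# genotypes under a shared Id column and RandomizePop() shuffles the rows.
set.seed(7)
pop <- matrix(rbinom(60, 1, 0.5), nrow = 6,
              dimnames = list(1:6, paste0("g", 1:10)))
indiv <- Individuals(pop)
pop <- RandomizePop(pop)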
EvaluationFunction <- function(x, individuals, response, method, trainTest, nnetSize=NA, nnetDecay=NA, rdaAlpha=NA, rdaDelta=NA, ...){
cat(paste("\tEvaluating Fitnesses...\n"))
if(toString(trainTest)=="LOO"){
traintest<-xvalSpec("LOO")
} else if (toString(trainTest)=="LOG"){
traintest<-xvalSpec("LOG", 5, balKfold.xvspec(5))
} else {
traintest<-c(trainTest)
}
formula<-as.formula(paste(response, "~."))
population<-individuals[,2:dim(individuals)[2]]
populationSize<-nrow(population)
results<-c()
for (q in 1:populationSize) {
if (!is.na(nnetSize)) {
result = MLearn(formula, x[population[q,]==1,], .method = method, trainInd = traintest, size=nnetSize, decay=nnetDecay)
} else if (!is.na(rdaAlpha)) {
result = MLearn(formula, x[population[q,]==1,], .method = method, trainInd = traintest, alpha=rdaAlpha, delta=rdaDelta)
} else {
result = MLearn(formula, x[population[q,]==1,], .method = method, trainInd = traintest)
}
    cm <- confuMat(result)
    Accuracy <- (cm[1] + cm[4]) / (cm[1] + cm[2] + cm[3] + cm[4])
results<-rbind(results, Accuracy)
}
results<-cbind(individuals[,1], results)
rownames(results)=1:populationSize
avgs<-c()
for (j in results[,1]){
avgs<-rbind(avgs, mean(results[results[,1]==j,2]))
}
results<-cbind(results, avgs)
colnames(results)=c("Id", "Accuracy", "Average Acc")
return(results)
}
AnalyzeResults<-function(individuals, results, randomAssortment=TRUE, chrConf){
cat(paste("\tAnalyzing the results\n"))
keep<-matrix(0, nrow=nrow(individuals), ncol=1)
rownames(keep)<-rownames(results)
colnames(keep)<-"Keep"
crossOvers<-matrix(c(0), nrow=0, ncol=ncol(individuals))
colnames(crossOvers)<-colnames(individuals)
for (i in 1:nrow(results)){
if (results[i, 2] >= results[i, 3])
keep[i, 1]=1
}
cat(paste("\tApplying crossovers...\n"))
if (randomAssortment==TRUE){
for (j in 1:(nrow(individuals)/2)){
selChr<-individuals[results[,1]==j, -1]
repeat {
newChr<-RandomAssortment(Crossover(selChr[1,], selChr[2,], chrConf), chrConf)
if ((sum(newChr[1,])>3)&&(sum(newChr[2,])>3)){
break
}
cat(paste("\tFailed crossover & random assortment...Redone.\n"))
}
newChromosomes<-cbind(c(j,j), newChr)
crossOvers<-rbind(crossOvers, newChromosomes)
}
cat(paste("\tApplying Random Assortment...\n"))
rownames(crossOvers)<-1:nrow(crossOvers)
} else {
for (j in 1:(nrow(individuals)/2)){
selChr<-individuals[results[,1]==j, -1]
repeat {
newChr<-Crossover(selChr[1,], selChr[2,], chrConf)
if ((sum(newChr[1,])>3)&&(sum(newChr[2,])>3)){
break
}
cat(paste("\tFailed crossover...Redone.\n"))
}
newChromosomes<-cbind(c(j,j), newChr)
crossOvers<-rbind(crossOvers, newChromosomes)
}
rownames(crossOvers)<-1:nrow(crossOvers)
}
return(list(keep, crossOvers))
}
Crossover<-function(c1, c2, chrConf){
crossVector<-rep(0, length(c1))
for (i in 1:max(chrConf)) {
crossSlice<-crossVector[chrConf==i]
crossIndexes<-sort(sample(1:(length(crossSlice)),2, replace=TRUE))
crossSlice[crossIndexes[1]:crossIndexes[2]]=1
rm(crossIndexes)
crossVector[chrConf==i]<-crossSlice
}
c3<-rep(NA, length(c1))
c4<-rep(NA, length(c1))
maTemp<-rbind(chrConf, crossVector,c1, c2, c3, c4)
rm(c3)
rm(c4)
rm(crossVector)
maTemp[5,maTemp[2,]==0]<-maTemp[3,maTemp[2,]==0]
maTemp[5,maTemp[2,]==1]<-maTemp[4,maTemp[2,]==1]
maTemp[6,maTemp[2,]==0]<-maTemp[4,maTemp[2,]==0]
maTemp[6,maTemp[2,]==1]<-maTemp[3,maTemp[2,]==1]
newChrs<-maTemp[5:6,]
rm(maTemp)
return(newChrs)
}
RandomAssortment<-function(newChrs, chrConf){
AssortIndex<-sample(c(0,1), size=max(chrConf), replace = TRUE)
c3<-rep(NA, ncol(newChrs))
c4<-rep(NA, ncol(newChrs))
exchange<-c()
for (i in 1:max(chrConf)) {
exchange<-c(exchange, rep(AssortIndex[i], sum(as.numeric(chrConf==i))))
}
maTemp<-rbind(chrConf, exchange, newChrs[1,], newChrs[2,], c3, c4)
rm(c3)
rm(c4)
rm(AssortIndex)
rm(exchange)
maTemp[5,maTemp[2,]==0]<-maTemp[3,maTemp[2,]==0]
maTemp[5,maTemp[2,]==1]<-maTemp[4,maTemp[2,]==1]
maTemp[6,maTemp[2,]==0]<-maTemp[4,maTemp[2,]==0]
maTemp[6,maTemp[2,]==1]<-maTemp[3,maTemp[2,]==1]
splittedChrs<-maTemp[5:6,]
rm(maTemp)
return(splittedChrs)
}
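# Sketch of one mating between two 10-gene parents split into two
# chromosomes: Crossover() swaps a random slice within each chromosome and
# RandomAssortment() then exchanges whole chromosomes at random.
chrConf <- rep(1:2, each = 5)
parents <- Crossover(rep(c(1, 0), each = 5), rep(c(0, 1), each = 5), chrConf)
RandomAssortment(parents, chrConf)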
pointMutation<-function(individuals, mutationChance){
noMutations<-floor(mutationChance/100*length(individuals[,-1]))
cat(paste("\tApplying", noMutations, "Point Mutations...\n"))
individualsOrig<-individuals
indexes<-sample(1:length(individuals[,-1]),noMutations)
individuals[,-1][indexes]=as.numeric(!as.logical(individuals[,-1][indexes]))
for (q in 1:dim(individuals[,-1])[1]) {
if (sum(individuals[q,-1])<4){
individuals[q,]=individualsOrig[q,];
cat(paste("\tInvalid mutated genotype.Not inherited....\n"))
}
}
rm(q)
rm(individualsOrig)
return(individuals)
}
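# Illustration: flip roughly 10% of the gene bits in a 4 x 10 population;
# mutants that drop below four active genes are reverted by the check above.
indivDemo <- cbind(Id = rep(1:2, each = 2), matrix(1, nrow = 4, ncol = 10))
pointMutation(indivDemo, mutationChance = 10)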
nonSenseMutation<-function(individuals, chrConf, mutationChance){
noChr<-max(chrConf)
noMutations<-floor(mutationChance/100*dim(individuals)[1]*noChr)
cat(paste("\tApplying", noMutations, "NonSenseMutation mutations...\n"))
indexes<-sample(1:length(individuals[,-1]), noMutations)
indexChr<-rep(chrConf, dim(individuals)[1])
addIndexes<-c()
for (i in indexes) {
p<-i
repeat {
p<-p+1
if (!identical(indexChr[i], indexChr[p])||identical(i, length(individuals[,-1]))){
break;
}
addIndexes<-c(addIndexes, p)
}
}
rm(i)
allIndexes<-sort(c(indexes, addIndexes), decreasing = FALSE)
invIndividuals<-t(individuals)
for (i in allIndexes) {
invIndividuals[-1,][i]=0
}
for (q in 1:dim(invIndividuals[-1,])[2]) {
if (sum(invIndividuals[-1,q])<4){
invIndividuals[,q]=t(individuals)[,q];
cat(paste("\tInvalid mutated genotype.Not inherited....\n"))
}
}
individuals<-t(invIndividuals)
rm(invIndividuals)
rm(i)
rm(q)
rm(addIndexes)
rm(indexes)
rm(allIndexes)
return(individuals)
}
frameShiftMutation<-function(individuals, chrConf, mutationChance){
noChr<-max(chrConf)
noMutations<-floor(mutationChance/100*dim(individuals)[1]*noChr)
cat(paste("\tApplying", noMutations, "FrameShiftMutation mutations...\n"))
indexes<-sample(1:length(individuals[,-1]),noMutations)
indexChr<-rep(chrConf, dim(individuals)[1])
addIndexes<-c()
for (i in indexes) {
p<-i
repeat {
p<-p+1
if (!identical(indexChr[i], indexChr[p])||identical(i, length(individuals[,-1]))){
break;
}
addIndexes<-c(addIndexes, p)
}
}
rm(i)
allIndexes<-sort(c(indexes, addIndexes), decreasing = FALSE)
invIndividuals<-t(individuals)
for (j in 1:length(allIndexes)) {
if (identical(allIndexes[j]+1, allIndexes[j+1])){
invIndividuals[-1,][allIndexes[j]]=invIndividuals[-1,][allIndexes[j]+1]
}else{
invIndividuals[-1,][allIndexes[j]]=0
}
}
for (q in 1:dim(invIndividuals[-1,])[2]) {
if (sum(invIndividuals[-1,q])<4){
invIndividuals[,q]=t(individuals)[,q];
cat(paste("\tInvalid mutated genotype.Not inherited....\n"))
}
}
individuals<-t(invIndividuals)
rm(invIndividuals)
rm(j)
rm(q)
rm(addIndexes)
rm(indexes)
rm(allIndexes)
return(individuals)
}
largeSegmentDeletion<-function(individuals, chrConf, mutationChance){
noChr<-max(chrConf)
indexesChr<-sample(1:(dim(individuals)[1]*noChr),floor(mutationChance/100*dim(individuals)[1]*noChr))
indivIndex<-ceiling(indexesChr/noChr)
chrIndex<-indexesChr-(indivIndex-1)*noChr
noMutations<-length(indexesChr)
cat(paste("\tApplying", noMutations, "LargeSegmentDeletion mutations...\n"))
individualsOrig<-individuals
for (i in 1:length(indexesChr)) {
margins<-sort(sample(1:(length(individuals[,-1][indivIndex[i],chrConf==chrIndex[i]])),2, replace=FALSE), decreasing = FALSE)
individuals[,-1][indivIndex[i],chrConf==chrIndex[i]][margins[1]:margins[2]]=0
}
for (q in 1:dim(individuals[,-1])[1]) {
if (sum(individuals[q,-1])<4){
individuals[q,]=individualsOrig[q,];
cat(paste("\tInvalid mutated genotype.Not inherited....\n"))
}
}
rm(i)
rm(q)
rm(individualsOrig)
rm(indexesChr)
rm(indivIndex)
rm(chrIndex)
return(individuals)
}
wholeChromosomeDeletion<-function(individuals, chrConf, mutationChance){
noChr<-max(chrConf)
indexesChr<-sample(1:(dim(individuals)[1]*noChr),floor(mutationChance/100*dim(individuals)[1]*noChr))
indivIndex<-ceiling(indexesChr/noChr)
chrIndex<-indexesChr-(indivIndex-1)*noChr
noMutations<-length(indexesChr)
cat(paste("\tApplying", noMutations, "WholeChromosomeDeletion mutations...\n"))
individualsOrig<-individuals
for (i in 1:length(indexesChr)) {
individuals[,-1][indivIndex[i],chrConf==chrIndex[i]]=0
}
for (q in 1:dim(individuals[,-1])[1]) {
if (sum(individuals[q,-1])<4){
individuals[q,]=individualsOrig[q,];
cat(paste("\tInvalid mutated genotype.Not inherited....\n"))
}
}
rm(i)
rm(q)
rm(individualsOrig)
rm(indexesChr)
rm(indivIndex)
rm(chrIndex)
return(individuals)
}
transposon<-function(individuals, chrConf, mutationChance){
noChr<-max(chrConf)
indexesChr<-sample(1:(dim(individuals)[1]*noChr),floor(mutationChance/100*dim(individuals)[1]*noChr))
indivIndex<-ceiling(indexesChr/noChr)
chrIndex<-indexesChr-(indivIndex-1)*noChr
noMutations<-length(indexesChr)
cat(paste("\tApplying", noMutations, "Transposons mutations...\n"))
individualsOrig<-individuals
for (i in 1:length(indexesChr)) {
pickFrom<-as.numeric(which(individuals[,-1][indivIndex[i],chrConf==chrIndex[i]]==1))
if(length(pickFrom)!=0){
transposon<-sample(pickFrom, 1)
positions<-sample(c(-(sum(chrConf==chrIndex[i])-1):-1, 1:(sum(chrConf==chrIndex[i])-1)), 1)
newIndex<-transposon+positions
if (newIndex>sum(chrConf==chrIndex[i])){
newIndex<-newIndex-sum(chrConf==chrIndex[i])
}
if (newIndex<1){
newIndex<-sum(chrConf==chrIndex[i])+newIndex
}
individuals[,-1][indivIndex[i],chrConf==chrIndex[i]][transposon]=0
individuals[,-1][indivIndex[i],chrConf==chrIndex[i]][newIndex]=1
}
}
for (q in 1:dim(individuals[,-1])[1]) {
if (sum(individuals[q,-1])<4){
individuals[q,]=individualsOrig[q,];
cat(paste("\tInvalid mutated genotype.Not inherited....\n"))
}
}
rm(i)
rm(q)
rm(individualsOrig)
rm(indexesChr)
rm(indivIndex)
rm(chrIndex)
return(individuals)
}
Elitism<-function(results, elitism, ID){
cat(paste("\tApplying Elitism...Keeping the Best ", elitism, "%\n"))
if (ID=="ID2"){
cat("Elitistic individuals...\n")
elite<-3
} else {
cat("Elitistic genotypes...\n")
elite<-2
}
keep<-matrix(0, nrow=nrow(results), ncol=1)
rownames(keep)<-rownames(results)
colnames(keep)<-"Keep"
toKeep<-sort(results[,elite], decreasing = TRUE, index.return=TRUE)
toKeep<-toKeep$ix
newIndex<-floor(length(toKeep)*elitism/100)
rez<-results[toKeep,]
toKeep<-toKeep[1:newIndex]
keep[toKeep]<-1
return(list(keep, toKeep))
}
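# Sketch on a fake results matrix (Id, Accuracy, Average Acc): with
# elitism = 50 the better half of the genotypes is flagged for keeping.
resDemo <- cbind(rep(1:3, each = 2), runif(6), rep(runif(3), each = 2))
colnames(resDemo) <- c("Id", "Accuracy", "Average Acc")
Elitism(resDemo, elitism = 50, ID = "ID1")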
EmbryonicSelection<-function(population, results, embryonicSelection){
cat(paste("\tApplying Embryonic Selection for Fitness > ", embryonicSelection, "\n"))
keep<-matrix(0, nrow=nrow(population), ncol=1)
rownames(keep)<-rownames(results)
colnames(keep)<-"Keep"
keep[(results[,3] > embryonicSelection)==TRUE]=1
return(keep)
}
PlotGenAlg <- function(DGenes, dGenes, maxEval, meanEval){
dev.off()
dev.new()
setFr <- layout(matrix(c(1,2,3,3),2,2,byrow = TRUE), TRUE)
layout.show(setFr)
par(las=2)
index<-sort(DGenes, decreasing=TRUE, index.return=TRUE)
DGenes<-DGenes[index$ix]
plottedGenes<-length(DGenes)
plot(maxEval, type="o", col="red", xlab="iteration no.", main="Maximum Accuracy")
plot(meanEval, type="o", col="blue", xlab="iteration no.", main="Mean Accuracy")
barplot(DGenes[1:plottedGenes], main="Genes inheritance", xlab="Gene", col=c("darkblue"), beside=FALSE, add=FALSE)
}
dGAselID<-function(x, response, method=knn.cvI(k=3, l=2), trainTest="LOG", startGenes, populationSize, iterations, noChr=22, elitism=NA, ID="ID1", pMutationChance=0, nSMutationChance=0, fSMutationChance=0, lSDeletionChance=0, wChrDeletionChance=0, transposonChance=0, randomAssortment=TRUE, embryonicSelection=NA, EveryGeneInInitialPopulation=TRUE, nnetSize=NA, nnetDecay=NA, rdaAlpha=NA, rdaDelta=NA, ...){
if (typeof(x)!="S4") {
stop("The supplied data is not an ExpressionSet.");
}
if(randomAssortment==TRUE){
cat("The chromosomes will be randomly assigned...\n")
}
if (EveryGeneInInitialPopulation==TRUE){
cat("Every gene will be present in the initial population...\n")
}
# embryonicSelection = NA (the default) disables embryonic selection below
if (is.na(elitism)) {
elitism = 0
}
cat("Elitism =", elitism, "%\n")
cat("Point Mutations rate =", pMutationChance, "%\n")
cat("Non-sense Mutations rate =", nSMutationChance, "%\n")
cat("Frameshift Mutation rate =", fSMutationChance, "%\n")
cat("Large Segment Deletion rate =", lSDeletionChance, "%\n")
cat("Whole Chromosome Deletion rate =", wChrDeletionChance, "%\n")
cat("Transposon rate =", transposonChance, "%\n")
cat("Embryonic Selection for fitness > ", embryonicSelection, "\n")
cat("Fitness evaluation function =", method@mlFunName, "\n")
cat("Cross-validation =", trainTest, "\n")
cat("\nInitial population...\n")
initialPopulation<-InitialPopulation(x, populationSize, startGenes, EveryGeneInInitialPopulation)
individuals<-Individuals(initialPopulation)
cat(paste("Splitting the genotype in", noChr, "chromosomes\n"))
chrConf<-splitChromosomes(x, noChr)
kDGenes<-matrix(c(0), nrow=1, ncol=ncol(initialPopulation))
colnames(kDGenes)<-colnames(initialPopulation)
rownames(kDGenes)<-"DGenes"
kdGenes<-matrix(c(0), nrow=1, ncol=ncol(initialPopulation))
colnames(kdGenes)<-colnames(initialPopulation)
rownames(kdGenes)<-"dGenes"
MaxAcc<-c()
MeanAcc<-c()
MinAcc<-c()
bestIndividual<-matrix(0, nrow=1, ncol=ncol(initialPopulation))
iteration<-0
dev.new()
repeat{
cat(paste("Starting iteration no.", iteration, "\n"))
results<-EvaluationFunction(x, individuals, response, method, trainTest, nnetSize, nnetDecay, rdaAlpha, rdaDelta)
iterMinAccuracy <- range(results[,2])[1]
MinAcc<-c(MinAcc, iterMinAccuracy)
cat(paste("\tMinimum Fitness in iteration no.", iteration, "equals", iterMinAccuracy*100, "%\n"))
iterMeanAccuracy <- mean(results[,2])
MeanAcc<-c(MeanAcc, iterMeanAccuracy)
cat(paste("\tMean Fitness in iteration no.", iteration, "equals", iterMeanAccuracy*100, "%\n"))
iterMaxAccuracy <- range(results[,2])[2]
MaxAcc<-c(MaxAcc, iterMaxAccuracy)
cat(paste("\tMaximum Fitness in iteration no.", iteration, "equals", iterMaxAccuracy*100, "%\n"))
lostInEmbryonic<-0
if (!is.na(embryonicSelection)) {
keptEmbr<-EmbryonicSelection(individuals, results, embryonicSelection)
keptIndividuals<-individuals[keptEmbr==1,]
keptIndividuals[,1]<-rep(1:(nrow(keptIndividuals)/2), each=2)
rownames(keptIndividuals)<-1:nrow(keptIndividuals)
keptResults<-results[keptEmbr==1,]
keptResults[,1]<-rep(1:(nrow(keptIndividuals)/2), each=2)
rownames(keptResults)<-1:nrow(keptResults)
discardedIndividuals<-individuals[keptEmbr==0,]
lostInEmbryonic<-nrow(discardedIndividuals)
} else {
keptIndividuals<-individuals
keptResults<-results
discardedIndividuals<-individuals[0,]
}
iterRes<-AnalyzeResults(keptIndividuals, keptResults, randomAssortment, chrConf)
keptIndividualsFromChildren<-iterRes[[2]]
discardedIndividuals<-rbind(discardedIndividuals, keptIndividuals[!(iterRes[[1]]==1),])
keptIndividualsFromParents<-keptIndividuals[(iterRes[[1]]==1),]
rownames(keptIndividualsFromParents)<-1:nrow(keptIndividualsFromParents)
keptResults<-keptResults[iterRes[[1]]==1,]
rownames(keptResults)<-1:nrow(keptResults)
keptInElitism<-0
if (floor(nrow(keptResults)*elitism/100)==0) {
tempResults<-keptResults
tempIndividualsFromParents<-keptIndividualsFromParents
keptIndividualsFromParents<-keptIndividualsFromParents[0,]
keptResults<-keptResults[0,]
keptElit<-Elitism(tempResults, 50, ID)
forBest<-tempIndividualsFromParents[keptElit[[2]],]
forBest<-forBest[1,]
rm(tempResults)
rm(tempIndividualsFromParents)
} else {
keptElit<-Elitism(keptResults, elitism, ID)
keptIndividualsFromParents<-keptIndividualsFromParents[keptElit[[2]],]
forBest<-rbind(keptIndividualsFromParents)[1,]
keptResults<-keptResults[keptElit[[2]],]
keptInElitism<-nrow(rbind(keptResults))
}
best<-t(as.matrix(forBest))[,-1]
bestIndividual<-rbind(bestIndividual, best)
rm(forBest)
if(lostInEmbryonic<keptInElitism){
adjust<-keptInElitism-lostInEmbryonic
toRemove<-sample(1:nrow(keptIndividualsFromChildren), adjust, replace=FALSE)
keptIndividualsFromChildren<-keptIndividualsFromChildren[-toRemove,]
}
keptIndividuals<-rbind(keptIndividualsFromParents,keptIndividualsFromChildren)
rownames(keptIndividuals)<-1:nrow(keptIndividuals)
tempMat<-rbind(keptIndividualsFromParents)
kDGenes[1,]<-kDGenes[1,] + colSums(tempMat)[-1]
rm(tempMat)
if((nrow(discardedIndividuals)>0)&&(ncol(discardedIndividuals)>0)){
tempMat<-rbind(discardedIndividuals) # count genes of discarded genotypes; the original summed keptIndividualsFromParents again (copy-paste slip)
kdGenes[1,]<-kdGenes[1,] + colSums(tempMat)[-1]
rm(tempMat)
}
if(pMutationChance!=0){
keptIndividuals<-pointMutation(keptIndividuals, pMutationChance)
}
if(nSMutationChance!=0){
keptIndividuals<-nonSenseMutation(keptIndividuals, chrConf, nSMutationChance)
}
if(fSMutationChance!=0){
keptIndividuals<-frameShiftMutation(keptIndividuals, chrConf, fSMutationChance)
}
if(lSDeletionChance!=0){
keptIndividuals<-largeSegmentDeletion(keptIndividuals, chrConf, lSDeletionChance)
}
if(wChrDeletionChance!=0){
keptIndividuals<-wholeChromosomeDeletion(keptIndividuals, chrConf, wChrDeletionChance)
}
if(transposonChance!=0){
keptIndividuals<-transposon(keptIndividuals, chrConf, transposonChance)
}
for (i in 1:dim(keptIndividuals)[1]) {
if((sum(keptIndividuals[i,2:dim(keptIndividuals)[2]]))<3){
index<-keptIndividuals[i,]==0
interval<-1:dim(keptIndividuals)[2]
interval<-interval[index]
index<-sample(interval,1, replace=FALSE)
keptIndividuals[i,index]<-1
rm(index)
rm(interval)
}
}
keptIndividuals<-RandomizePop(keptIndividuals)
keptIndividuals<-Individuals(keptIndividuals[,-1])
cat(paste("\tPopulation Size in iteration no. ", iteration," = ", nrow(keptIndividuals), "\n"))
PlotGenAlg(kDGenes, kdGenes, MaxAcc, MeanAcc)
flush.console()
individuals<-keptIndividuals
iteration <- iteration+1
if(iteration>=iterations) break
}
rezultate<-list(DGenes=kDGenes, dGenes=kdGenes, MaximumAccuracy=MaxAcc, MeanAccuracy=MeanAcc, MinAccuracy=MinAcc, BestIndividuals=bestIndividual[-1,])
return(rezultate)
} |
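# End-to-end sketch of dGAselID() (all names hypothetical; 'eset' must be a
# Biobase ExpressionSet and the fitness method an MLInterfaces-style learner
# such as the knn.cvI() default above):
# library(Biobase)
# res <- dGAselID(eset, response = "Disease", startGenes = 12,
#                 populationSize = 200, iterations = 50, noChr = 5,
#                 elitism = 25, pMutationChance = 0.05)
# res$MaximumAccuracy   # best fitness per iteration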
context("Prepare text")
test_that("textProcessor and prepDocuments correctly subset metadata ", {
data("gadarian")
txtOut <- textProcessor(documents = gadarian$open.ended.response,
metadata = data.frame(MetaID = gadarian$MetaID),
sparselevel = .8)
expect_equal(nrow(txtOut$meta), length(txtOut$documents))
prepped <- prepDocuments(txtOut$documents, txtOut$vocab,
txtOut$meta, upper.thresh = 100)
expect_equal(nrow(prepped$meta), length(prepped$documents))
}) |
print.spam.chol.NgPeyton <- function(x,...) {
nrow <- x@dimension[1]
nnzR <- x@rowpointers[nrow+1]-1
cat("(Upper) Cholesky factor of dimension ", nrow,
"x", nrow, " with ",nnzR," (row-wise) nonzero elements (",printSize(x),").", sep = "", fill=TRUE)
cat(" (The object is supposed to be used with: 'as.spam', 'backsolve', 'forwardsolve', etc.)\n",
fill=TRUE)
cat("Class 'spam.chol.NgPeyton'\n")
invisible(NULL)
}
setMethod("show","spam.chol.NgPeyton", function(object) {
nrow <- object@dimension[1]
nnzR <- object@rowpointers[nrow+1]-1
cat("(Upper) Cholesky factor of dimension ", nrow,
"x", nrow, " with ",nnzR," (row-wise) nonzero elements (",printSize(object),").", sep = "", fill=TRUE)
cat(" (The object is supposed to be used with: 'as.spam', 'backsolve', 'forwardsolve', etc.)\n",
fill=TRUE)
cat("Class 'spam.chol.NgPeyton'\n")
invisible(NULL)
})
"diag.of.spam.chol.NgPeyton" <- function(x, nrow, ncol)
return( x@entries[x@rowpointers[-(x@dimension[1]+1)]])
setMethod("diag", "spam.chol.NgPeyton", diag.of.spam.chol.NgPeyton)
setMethod("diag<-", "spam.chol.NgPeyton", function(x) stop("diagonal cannot be changed on 'spam.chol.NgPeyton' object"))
setMethod("print", "spam.chol.NgPeyton", print.spam.chol.NgPeyton)
setMethod("length", "spam.chol.NgPeyton",function(x) x@rowpointers[x@dimension[1]+1]-1)
setMethod("length<-","spam.chol.NgPeyton",function(x,value) stop("length cannot be changed on 'spam.chol.NgPeyton' object") )
setMethod("dim", "spam.chol.NgPeyton",function(x) x@dimension)
setMethod("dim<-", "spam.chol.NgPeyton",function(x,value) stop("dimension cannot be altered on 'spam.chol.NgPeyton' object") )
setMethod("c","spam.chol.NgPeyton", function(x,...){
nrow <- x@dimension[1]
nnzR <- x@rowpointers[nrow+1]-1
nsuper <- as.integer( length(x@supernodes)-1)
if( getOption("spam.force64") || .format.spam(x)$package != "spam")
SS <- .format64()
else
SS <- .format32
xcolindices <- .C64('calcja',
SIGNATURE = rep(SS$signature, 7),
nrow,
nsuper,
x@supernodes,
x@colindices,
x@colpointers,
x@rowpointers,
xja = vector_dc( SS$type, nnzR),
INTENT=c("r", "r", "r", "r",
"r", "r", "w"),
NAOK = getOption("spam.NAOK"),
PACKAGE = SS$package)$xja
cx <- .C64("spamcsrdns",
SIGNATURE = c(SS$signature, "double" , SS$signature, SS$signature, "double"),
nrow = nrow,
entries = x@entries,
colindices = xcolindices,
rowpointers = x@rowpointers,
res = vector_dc( "double", nrow*nrow),
INTENT = c("r", "r", "r", "r",
"w"),
NAOK=getOption("spam.NAOK"),
PACKAGE = SS$package)$res
if (length( list(...)) < 1)
return( cx)
else
c( cx,c(...))
})
as.spam.chol.NgPeyton <- function(x, eps = getOption("spam.eps")) {
if( getOption("spam.force64") || .format.spam(x)$package != "spam")
SS <- .format64()
else
SS <- .format32
if (eps<.Machine$double.eps) stop("'eps' should not be smaller than machine precision",call.=FALSE)
nrow <- x@dimension[1]
nnzR <- x@rowpointers[nrow+1]-1
nsuper <- length(x@supernodes)-1
colindices <- .C64('calcja',
SIGNATURE=rep(SS$signature, 7),
nrow,
nsuper,
x@supernodes,
x@colindices,
x@colpointers,
x@rowpointers,
xja=vector(SS$type, nnzR),
INTENT=c("r", "r", "r", "r", "r", "r", "w"),
NAOK = getOption("spam.NAOK"),
PACKAGE = SS$package)$xja
return(.newSpam(
entries=x@entries,
colindices=colindices,
rowpointers=x@rowpointers,
dimension=x@dimension
))
}
"as.matrix.spam.chol.NgPeyton" <- function(x,...){
nrow <- x@dimension[1]
nnzR <- x@rowpointers[nrow+1]-1L
nsuper <- length(x@supernodes)-1L
if( getOption("spam.force64") || .format.spam(x)$package != "spam" )
SS <- .format64()
else
SS <- .format32
xcolindices <- .C64('calcja',
SIGNATURE = c(rep(SS$signature,7)),
nrow,
nsuper,
x@supernodes,
x@colindices,
x@colpointers,
x@rowpointers,
xja = vector_dc( SS$type, nnzR),
INTENT=c("r", "r", "r", "r",
"r", "r", "w"),
NAOK = getOption("spam.NAOK"),
PACKAGE = SS$package)$xja
return(array(.C64("spamcsrdns",
SIGNATURE = c(SS$signature, "double" , SS$signature, SS$signature,
"double"),
nrow = nrow,
entries = x@entries,
colindices = xcolindices,
rowpointers = x@rowpointers,
res = vector_dc( "double", nrow*nrow),
INTENT = c("r", "r", "r", "r",
"w"),
NAOK=getOption("spam.NAOK"),
PACKAGE = SS$package)$res,
c(nrow,nrow))
)
}
setMethod("as.spam","spam.chol.NgPeyton", as.spam.chol.NgPeyton)
setMethod("as.matrix","spam.chol.NgPeyton",as.matrix.spam.chol.NgPeyton)
setMethod("as.vector","spam.chol.NgPeyton",
function(x){
as.vector.spam(as.spam.chol.NgPeyton(x))
})
setGeneric("backsolve", def = function(r, x, ...) standardGeneric("backsolve"),
useAsDefault= function(r, x, ...) base::backsolve(r, x, ...))
setGeneric("forwardsolve", def = function(l, x, ...) standardGeneric("forwardsolve"),
useAsDefault= function(l, x, ...) base::forwardsolve(l, x, ...))
setMethod("chol","spam", chol.spam)
setMethod("solve","spam",solve.spam)
setMethod("chol2inv","spam", chol2inv.spam)
setMethod("chol2inv","spam.chol.NgPeyton", chol2inv.spam)
setMethod("backsolve","spam",
backsolve.spam)
setMethod("backsolve","spam.chol.NgPeyton",
backsolve.spam, sealed=TRUE)
setMethod("forwardsolve","spam", forwardsolve.spam)
setMethod("forwardsolve","spam.chol.NgPeyton", forwardsolve.spam)
"ordering.default" <- function(x, inv=FALSE) stop('Operation not defined form this class')
setGeneric("ordering", function(x, inv=FALSE) standardGeneric("ordering"))
setMethod("ordering","spam.chol.NgPeyton",function(x,inv=FALSE)
{
if (inv) return(x@invpivot) else return(x@pivot) })
setMethod("ordering","matrix",function(x,inv=FALSE)
{
if (dim(x)[1]!=dim(x)[2])
stop("ordering is defined for square matrices only")
if(inv)return(dim(x)[1]:1) else return(1:dim(x)[1]) })
setMethod("ordering","spam",function(x,inv=FALSE)
{
if (dim(x)[1]!=dim(x)[2])
stop("ordering is defined for square matrices only")
if(inv) return(dim(x)[1]:1) else return(1:dim(x)[1]) })
setMethod("image","spam.chol.NgPeyton",
function(x,cex=NULL,...){
image.spam(as.spam.chol.NgPeyton(x),cex=cex,...)
})
setMethod("display","spam.chol.NgPeyton",
function(x,...){
display.spam(as.spam.chol.NgPeyton(x),...)
})
setMethod("t","spam.chol.NgPeyton",
function(x){
t.spam(as.spam.chol.NgPeyton(x))
})
setMethod("chol","spam.chol.NgPeyton",
function(x){
x
}) |
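# Usage sketch for the 'spam.chol.NgPeyton' methods above (spam package):
# library(spam)
# A <- as.spam(matrix(c(4, 1, 0, 1, 4, 1, 0, 1, 4), 3))  # s.p.d. sparse matrix
# ch <- chol(A)            # 'spam.chol.NgPeyton' factor; printed by show() above
# ordering(ch)             # pivot permutation used by the factorization
# R <- as.spam(ch)         # upper-triangular factor as a plain 'spam' matrix
# solve(A, c(1, 2, 3))     # uses the factor via backsolve/forwardsolve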
setClass("fDISTFIT",
representation(
call = "call",
model = "character",
data = "data.frame",
fit = "list",
title = "character",
description = "character"
)
)
setMethod("show", "fDISTFIT",
function(object)
{
cat("\nTitle:\n ")
cat(object@title, "\n")
cat("\nCall:\n ")
cat(paste(deparse(object@call), sep = "\n", collapse = "\n"),
"\n", sep = "")
cat("\nModel:\n ", object@model, "\n", sep = "")
cat("\nEstimated Parameter(s):\n")
print(object@fit$estimate)
cat("\nDescription:\n ")
cat(object@description, "\n\n")
invisible()
}) |
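# 'fDISTFIT' objects are returned by the distribution fitting functions of
# this package family (e.g. nFit()/tFit() in fBasics); the show() method
# above prints them:
# library(fBasics)
# fit <- nFit(rnorm(500))   # fit a normal distribution by MLE
# fit                       # dispatches to the show() method defined above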
fitYP4 <- function(Y, d, Z, beta1=1, beta2= -1, maxiter=60){
temp1 <- YP4(y=Y, d=d, Z=Z, b1=beta1, b2=beta2, k=maxiter)
ELval <- ELcomp(Haz=temp1$Hazw, Sur=temp1$Survival, gam=temp1$gam)
list(EmpLik=ELval, BaselineH=temp1$Hazw, alpha=temp1$alpha)
} |
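# Sketch with synthetic survival data (the YP4() and ELcomp() helpers from
# this package must be available):
# set.seed(1)
# Y <- rexp(50); d <- rbinom(50, 1, 0.8); Z <- rbinom(50, 1, 0.5)
# fit <- fitYP4(Y, d, Z, beta1 = 1, beta2 = -1)
# fit$EmpLik   # empirical likelihood value at (beta1, beta2)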
library(lumberjack)
tmpfile <- tempfile()
logger <- expression_logger$new(
mh = mean(height)
, mw = mean(weight)
, verbose = FALSE
)
women %L>%
start_log(logger) %L>%
identity() %L>%
{.$height <- 2*.$height; .} %L>%
dump_log(file=tmpfile)
lg <- read.csv(tmpfile)
expect_equal(lg$mh[1], mean(women$height))
expect_equal(lg$mw[1], mean(women$weight))
expect_equal(lg$mh[2], mean(2*women$height))
expect_equal(lg$mw[2], mean(women$weight))
expect_true("label" %in% ls(logger))
expect_true(all(is.na(lg$srcref))) |
IRT.predict.R <- function( object, dat, group=1 )
{
resp <- dat
irf1 <- IRT.irfprob( object )
irf1[ is.na(irf1) ] <- 0
N <- nrow(resp)
I <- ncol(resp)
TP <- dim(irf1)[3]
K <- dim(irf1)[2]
if ( length( dim(irf1) )==4 ){
irf1 <- irf1[,,,group]
}
pred <- array( 0, dim=c(N,TP,I) )
dimnames(pred)[[3]] <- colnames(resp)
var1 <- pred
pred.categ <- array( 0, dim=c(N,K,TP,I) )
dimnames(pred.categ)[[4]] <- colnames(resp)
for (ii in 1:I){
v1 <- rep(0,N)
kk <- 1
irf.ii <- matrix( irf1[ii,kk,], nrow=N, ncol=TP, byrow=TRUE )
pred.categ[,kk,,ii] <- irf.ii
for (kk in 2:K){
irf.ii <- matrix( irf1[ii,kk,], nrow=N, ncol=TP, byrow=TRUE )
p1 <- irf.ii
pred.categ[,kk,,ii] <- p1
v1 <- (kk-1) * p1 + v1
}
pred[,,ii] <- v1
ind.ii <- which( is.na(resp[,ii]) )
if ( length(ind.ii) > 0 ){
pred[ ind.ii,,ii ] <- NA
pred.categ[ ind.ii, 1:K,,ii ] <- NA
}
for (kk in 1:K){
var1[,,ii ] <- var1[,,ii] + pred.categ[, kk,, ii] * ( ( kk-1 ) - pred[,,ii] )^2
}
}
resp1 <- array( 0, dim=c(N,TP,I) )
for (tt in 1:TP){
resp1[,tt,] <- resp
}
resid1 <- resp1 - pred
sresid1 <- resid1 / sqrt( var1 )
res <- list( "expected"=pred, "probs.categ"=pred.categ,
"variance"=var1, "residuals"=resid1, "stand.resid"=sresid1 )
return(res)
} |
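# Usage sketch (hypothetical fit: any object with an IRT.irfprob() method,
# e.g. models fitted with the CDM / TAM / sirt packages):
# mod <- CDM::din(dat, q.matrix)   # assumed dichotomous data + Q-matrix
# prd <- IRT.predict.R(mod, dat = dat)
# str(prd$expected)     # N x TP x I array of expected item responses
# str(prd$stand.resid)  # standardized residuals on the same grid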
IC_RR_coxph=function(model,alpha=0.05,sided=2){
tab=matrix(nrow=nrow(summary(model)$coefficients),ncol=ncol(summary(model)$coefficients)+3,
dimnames = list(c(rownames(summary(model)$coefficients)),c(colnames(summary(model)$coefficients),"RR","IC.inf","IC.sup")))
tab[,1:ncol(summary(model)$coefficients)]=round(summary(model)$coefficients,digits=3)
tab[,"RR"]=round(exp(summary(model)$coefficients[,"coef"]),digits=3)
tab[,"IC.inf"]=round(exp(summary(model)$coefficients[,"coef"]-qnorm(1-alpha/2)*summary(model)$coefficients[,"se(coef)"]),digits=3)
tab[,"IC.sup"]=round(exp(summary(model)$coefficients[,"coef"]+qnorm(1-alpha/2)*summary(model)$coefficients[,"se(coef)"]),digits=3)
if (sided==1){
tab[,5]=tab[,5]/2
}
if (nrow(tab)==1){tab=t(as.matrix(tab[,-2]))} else{tab=tab[,-2]}
rownames(tab)=rownames(summary(model)$coefficients)
signif=ifelse(tab[,"Pr(>|z|)"]>=0.1,"",ifelse(tab[,"Pr(>|z|)"]>=0.05,".",ifelse(tab[,"Pr(>|z|)"]>=0.01,"*",ifelse(tab[,"Pr(>|z|)"]>=0.001,"**","***"))))
res=cbind(tab,signif)
colnames(res)=c(colnames(tab),"")
return(noquote(res))
} |
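# Example with the survival package (the 'lung' data ships with it):
# library(survival)
# fit <- coxph(Surv(time, status) ~ age + sex, data = lung)
# IC_RR_coxph(fit, alpha = 0.05, sided = 2)   # RR with 95% Wald limits + stars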
anecdotal <- function(RESOURCES = NULL,
LAND = NULL,
PARAS = NULL,
AGENTS = NULL,
res_type = 1,
samp_age = 1,
agent_type = 0,
type_cat = 1,
move_agents = FALSE,
model = "IBM"
){
check_model <- 0;
if(model == "IBM"){
if(!is.array(RESOURCES)){
stop("Warning: Resources need to be in an array");
}
if(!is.array(LAND)){
stop("Warning: Landscape need to be in an array");
}
if(!is.vector(PARAS) | !is.numeric(PARAS)){
stop("Warning: Parameters must be in a numeric vector");
}
if(!is.array(AGENTS)){
stop("Warning: Agents need to be in an array");
}
PARAS[8] <- agent_type;
PARAS[10] <- res_type;
PARAS[17] <- samp_age;
PARAS[18] <- type_cat;
PARAS[29] <- move_agents;
ANECDOTAL_OUT <- run_anecdotal_a( RESOURCE_c = RESOURCES,
LANDSCAPE_c = LAND,
PARAMETERS_c = PARAS,
AGENT_c = AGENTS
);
check_model <- 1;
}
if(check_model == 0){
stop("Invalid model selected (Must be 'IBM')");
}
return(ANECDOTAL_OUT);
}
run_anecdotal_a <- function(RESOURCE_c, LANDSCAPE_c, PARAMETERS_c, AGENT_c){
.Call("anecdotal", RESOURCE_c, LANDSCAPE_c, PARAMETERS_c, AGENT_c);
} |
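# Sketch only: anecdotal() forwards to compiled C code ("anecdotal"), so it
# needs the package's shared object plus correctly shaped inputs; all names
# below are placeholders:
# out <- anecdotal(RESOURCES = res_array, LAND = land_array,
#                  PARAS = paras_vec, AGENTS = agent_array,
#                  res_type = 1, samp_age = 1, model = "IBM")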
getSummaryStatisticsTable <- function(
data,
var = NULL, varFlag = NULL, varLab = NULL,
varLabInclude = length(var) > 1,
varInclude0 = FALSE,
varIgnore = NULL,
varGeneralLab = "Variable", varSubgroupLab = "Variable group",
varIncludeTotal = FALSE, varTotalInclude = FALSE,
varTotalInSepRow = FALSE,
rowVar = NULL, rowVarLab = NULL,
rowVarDataLevels = NULL,
rowOrder = "auto", rowOrderTotalFilterFct = NULL, rowOrderCatLast = NULL,
rowVarInSepCol = NULL,
rowVarFormat = NULL,
rowVarTotalInclude = NULL,
rowVarTotalByVar = NULL,
rowVarTotalInSepRow = NULL,
rowTotalLab = NULL,
rowInclude0 = FALSE,
rowAutoMerge = TRUE,
emptyValue = "-",
rowVarTotalPerc = NULL,
colVar = NULL,
colVarTotal = colVar, colVarTotalPerc = colVarTotal,
colInclude0 = FALSE,
colVarDataLevels = NULL,
colTotalInclude = FALSE, colTotalLab = "Total",
stats = NULL,
statsExtra = NULL,
statsVarBy = NULL,
statsPerc = c("statN", "statm"),
statsGeneralLab = "Statistic",
statsValueLab = "StatisticValue",
statsLabInclude = NULL,
subjectVar = "USUBJID",
filterFct = NULL,
dataTotal = NULL, dataTotalPerc = dataTotal,
dataTotalRow = NULL, dataTotalCol = NULL,
type = "auto",
byVar = NULL, byVarLab = NULL,
checkVarDiffBySubj = "error",
labelVars = NULL,
outputType = "flextable",
statsLayout = ifelse("DT" %in% outputType, "col", "row"),
landscape = (style == "presentation"), margin = 1, rowPadBase = 14.4,
title = NULL, footer = NULL,
file = NULL,
style = "report",
colorTable = getColorPaletteTable(style = style),
colHeaderTotalInclude = TRUE,
fontsize = switch(style, 'report' = 8, 'presentation' = 10),
fontname = switch(style, 'report' = "Times", 'presentation' = "Tahoma"),
vline = "none", hline = "auto",
pageDim = NULL,
expandVar = NULL, noEscapeVar = NULL, barVar = NULL,
...){
summaryTable <- computeSummaryStatisticsTable(
data = data,
var = var, varFlag = varFlag, varLab = varLab,
varLabInclude = varLabInclude, varInclude0 = varInclude0,
varIgnore = varIgnore,
varGeneralLab = varGeneralLab, varSubgroupLab = varSubgroupLab,
varIncludeTotal = varIncludeTotal,
varTotalInclude = varTotalInclude,
varTotalInSepRow = varTotalInSepRow,
colVar = colVar,
colVarTotal = colVarTotal, colVarTotalPerc = colVarTotalPerc,
colInclude0 = colInclude0,
colVarDataLevels = colVarDataLevels,
colTotalInclude = colTotalInclude,
colTotalLab = colTotalLab,
rowVar = rowVar, rowInclude0 = rowInclude0,
rowVarDataLevels = rowVarDataLevels,
rowVarLab = rowVarLab,
rowOrder = rowOrder,
rowOrderTotalFilterFct = rowOrderTotalFilterFct,
rowOrderCatLast = rowOrderCatLast,
rowVarTotalInclude = rowVarTotalInclude,
rowVarTotalByVar = rowVarTotalByVar,
rowVarTotalPerc = rowVarTotalPerc,
rowVarTotalInSepRow = rowVarTotalInSepRow,
type = type,
subjectVar = subjectVar,
stats = stats, statsExtra = statsExtra,
statsPerc = statsPerc,
statsVarBy = statsVarBy,
statsGeneralLab = statsGeneralLab,
filterFct = filterFct,
dataTotal = dataTotal, dataTotalPerc = dataTotalPerc,
dataTotalRow = dataTotalRow, dataTotalCol = dataTotalCol,
labelVars = labelVars,
byVar = byVar, byVarLab = byVarLab,
checkVarDiffBySubj = checkVarDiffBySubj
)
if(is.null(summaryTable)) return(invisible())
ft <- exportSummaryStatisticsTable(
summaryTable = summaryTable,
rowVarFormat = rowVarFormat,
rowTotalLab = rowTotalLab,
rowVarInSepCol = rowVarInSepCol,
rowAutoMerge = rowAutoMerge,
colHeaderTotalInclude = colHeaderTotalInclude,
statsValueLab = statsValueLab,
statsLabInclude = statsLabInclude,
title = title, footer = footer,
labelVars = labelVars,
emptyValue = emptyValue,
file = file, landscape = landscape,
margin = margin, rowPadBase = rowPadBase,
outputType = outputType,
statsLayout = statsLayout,
expandVar = expandVar, noEscapeVar = noEscapeVar, barVar = barVar,
style = style, colorTable = colorTable,
fontsize = fontsize,
fontname = fontname,
vline = vline, hline = hline,
pageDim = pageDim,
...
)
return(ft)
} |
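# Minimal sketch (inTextSummaryTable-style call; 'dataADSL' and the variable
# names are assumptions):
# library(inTextSummaryTable)
# tbl <- getSummaryStatisticsTable(
#     data = dataADSL,
#     var = c("AGE", "WEIGHT"),   # continuous variables to summarize
#     colVar = "TRT01P",          # one column per treatment arm
#     stats = getStats(c("n", "mean (se)")))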
knitr::opts_chunk$set(echo = TRUE)
knitr::include_graphics("grid.jpg")
library(portsort)
library(PerformanceAnalytics)
library(xts)
data(Factors)
R.Forward = Factors[[1]]; R.Lag = Factors[[2]]; V.Lag = Factors[[3]]
Fa = R.Lag; Fb = V.Lag
dimA = 0:3/3;dimB = 0:3/3;dimC = c(0,1)
sort.output.con = conditional.sort(Fa,Fb,Fc=NULL,R.Forward,dimA,dimB,dimC,type = 7)
sort.output.uncon = unconditional.sort(Fa,Fb,Fc=NULL,R.Forward,dimA,dimB,dimC, type = 7)
table.AnnualizedReturns(sort.output.con$returns, scale = 365, geometric = FALSE, digits = 3)
table.AnnualizedReturns(sort.output.uncon$returns, scale = 365, geometric = FALSE, digits = 3)
library(portsort)
library(PerformanceAnalytics)
library(xts)
data(Factors)
R.Forward = Factors[[1]]; R.Lag = Factors[[2]]; V.Lag = Factors[[3]]
Fa = R.Lag; Fb = V.Lag
dimA = 0:3/3;dimB = 0:3/3;dimC = c(0,1)
sort.output = conditional.sort(Fa,Fb,Fc=NULL,R.Forward,dimA,dimB,dimC)
turnover.output = portfolio.turnover(sort.output)
turnover.output$`Mean Turnover`
library(portsort)
library(PerformanceAnalytics)
library(xts)
data(Factors)
R.Forward = Factors[[1]]; R.Lag = Factors[[2]]; V.Lag = Factors[[3]]
Fa = R.Lag; Fb = V.Lag
dimA = 0:3/3;dimB = 0:3/3;dimC = c(0,1)
sort.output = conditional.sort(Fa,Fb,Fc=NULL,R.Forward,dimA,dimB,dimC)
portfolio.frequency(sort.output, rank = 1)
portfolio.frequency(sort.output, rank = 2)
library(portsort)
library(PerformanceAnalytics)
library(xts)
data(Factors)
R.Forward = Factors[[1]]; R.Lag = Factors[[2]]; V.Lag = Factors[[3]]
Fa = R.Lag; Fb = V.Lag
dimA = 0:3/3;dimB = 0:3/3;dimC = c(0,1)
sort.output.con = conditional.sort(Fa,Fb,Fc=NULL,R.Forward,dimA,dimB,dimC)
sort.output.uncon = unconditional.sort(Fa,Fb,Fc=NULL,R.Forward,dimA,dimB,dimC)
portfolio.mean.size(sort.output = sort.output.con)
portfolio.mean.size(sort.output = sort.output.uncon)
library(portsort)
library(PerformanceAnalytics)
library(xts)
data(Factors)
s = 21
k = 1
R.Forward = Factors[[1]]; R.Lag = Factors[[2]]; V.Lag = Factors[[3]]
XSMOM = R.Lag
XSMOM[1:nrow(XSMOM),1:ncol(XSMOM)] <- NA
for (i in 1:ncol(R.Lag)){
for (t in (s + 1):nrow(R.Lag)){
XSMOM[t,i] = sum(R.Lag[(t-s):(t-1-k),i])
}
}
XSMOM = na.omit(XSMOM)
R.Forward = R.Forward[(s + 1):nrow(R.Forward), ]
Fa = XSMOM; Fb = NULL; Fc = NULL
dimA = 0:5/5
XSMOM.output = conditional.sort(Fa=Fa,R.Forward=R.Forward,dimA=dimA)
table.AnnualizedReturns(XSMOM.output$returns,scale = 365, geometric = FALSE)
portfolio.turnover(XSMOM.output)$`Mean Turnover`
portfolio.frequency(XSMOM.output, rank = 1)
portfolio.mean.size(XSMOM.output)
LS.Portfolio = XSMOM.output$returns[,5] + (-1*XSMOM.output$returns[,1])
table.AnnualizedReturns(LS.Portfolio,scale = 365, geometric = FALSE)
chart.CumReturns(LS.Portfolio, geometric = FALSE, main = "XSMOM Long-Short Portfolio")
library(portsort)
library(PerformanceAnalytics)
library(xts)
data(Factors)
R.Forward = Factors[[1]]; R.Lag = Factors[[2]]; V.Lag = Factors[[3]]
Fa = R.Lag; Fb = V.Lag
dimA = 0:3/3
dimB = 0:3/3
sort.con = conditional.sort(Fa=Fa,Fb=Fb,R.Forward = R.Forward,dimA=dimA,dimB=dimB)
sort.uncon = unconditional.sort(Fa=Fa,Fb=Fb,R.Forward = R.Forward,dimA=dimA,dimB=dimB)
table.AnnualizedReturns(sort.con$returns,scale = 365, geometric = FALSE)
table.AnnualizedReturns(sort.uncon$returns,scale = 365, geometric = FALSE)
portfolio.turnover(sort.con)$`Mean Turnover`
portfolio.turnover(sort.uncon)$`Mean Turnover`
portfolio.frequency(sort.con, rank = 1)
portfolio.frequency(sort.uncon, rank = 1)
portfolio.mean.size(sort.con)
portfolio.mean.size(sort.uncon)
Conditional.LS.Portfolio = sort.con$returns[,1] + (-1*sort.con$returns[,9])
Unconditional.LS.Portfolio = sort.uncon$returns[,1] + (-1*sort.uncon$returns[,9])
Portfolios = cbind(Conditional.LS.Portfolio,Unconditional.LS.Portfolio)
colnames(Portfolios) = c("Conditional","Unconditional")
chart.CumReturns(Portfolios, geometric = FALSE, legend.loc = "topleft",
main = "Sorting Comparison")
table.AnnualizedReturns(Portfolios,scale = 365, geometric = FALSE) |
ramml <- function(X,y,p,e) {
X <- as.matrix(X)
n <- length(y)
m <- dim(X)[2]
q <- 2*p-3
if (m == 1) {
mx <- median(X)
}
else {
mx <-l1median(X)
}
Xmc <- as.matrix(scale(X, center = mx, scale = FALSE))
wx <- sqrt(apply(Xmc^2, 1, sum))
du <- wx/median(wx)
t <- e
betay <- 1/((1+((1/q)*(t^2)))^2)
betax <- 1/((1+((1/q)*(du^2)))^2)
beta <- betay*betax
alfa <- (1/q*t)/((1+((1/q)*(t^2)))^2)
alfa <- alfa*betax;
WB <- diag(as.vector(beta), n,n)
WA <- diag(as.vector(alfa), n,n)
K <- solve(t(X)%*%WB%*%X)%*%(t(X)%*%WB%*%y)
D <- solve(t(X)%*%WB%*%X)%*%(t(X)%*%WA%*%c(rep(1,n)))
B <- (2*p/q)*t(y-X%*%K)%*%WA%*%c(rep(1,n));
C <- (2*p/q)*t(y-X%*%K)%*%WB%*%(y-X%*%K);
sigmamml <- (B+sqrt(B^2+4*n*C))/(2*sqrt(n*(n-m)));
betamml <- K+D%*%sigmamml;
yfit <- X%*%betamml
residual <- y-yfit
list(coef = betamml, scale=as.numeric(sigmamml), fitted.values = yfit, residuals = residual)
} |
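# Sketch: 'e' is assumed to be the vector of (standardized) residuals used
# as t-values in the MML weights; for multivariate X, l1median() (e.g. from
# pcaPP) must be available:
# set.seed(1)
# X <- matrix(rnorm(30)); y <- 2 * X[, 1] + rt(30, df = 4)
# e <- scale(resid(lm(y ~ X[, 1])))
# ramml(X, y, p = 3, e = e)$coef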
spatmed.reg <- function(y, x, xnew = NULL, tol = 1e-07, ses = FALSE) {
x <- model.matrix(y ~ ., data.frame(x) )
p <- dim(x)[2]
d <- dim(y)[2]
medi <- function(be) {
be <- matrix(be, nrow = p)
est <- x %*% be
sum( sqrt( rowSums( (y - est)^2 ) ) )
}
tic <- proc.time()
mod <- Rfast::spatmed.reg(y, x[, -1], tol = tol)
be <- mod$be
seb <- NULL
if ( ses ) {
qa <- nlm(medi, as.vector(be), iterlim = 5000)
qa <- optim(qa$estimate, medi, control = list(maxit = 5000), hessian = TRUE)
seb <- sqrt( diag( solve(qa$hessian) ) )
seb <- matrix(seb, ncol = d)
if ( is.null(colnames(y)) ) {
colnames(seb) <- colnames(be) <- paste("Y", 1:d, sep = "")
} else colnames(seb) <- colnames(be) <- colnames(y)
}
est <- NULL
if ( !is.null(xnew) ) {
xnew <- model.matrix( ~ ., data.frame(xnew) )
est <- xnew %*% be
}
if ( is.null(colnames(y)) ) {
colnames(be) <- paste("Y", 1:d, sep = "")
} else colnames(be) <- colnames(y)
rownames(be) <- colnames(x)
if ( !is.null(seb) ) rownames(seb) <- colnames(x)
runtime <- proc.time() - tic
list(iter = mod$iters, runtime = runtime, be = be, seb = seb, est = est)
} |
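# Example (requires Rfast): spatial-median regression of two responses on
# two predictors:
# y <- as.matrix(iris[, 1:2])
# fit <- spatmed.reg(y, iris[, 3:4], ses = FALSE)
# fit$be   # coefficient matrix, one column per response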
selected_sites_DI <- function(PAM_subset, selection_type = "all",
method = "jaccard", verbose = TRUE, ...) {
if (missing(PAM_subset)) {
stop("Argument 'PAM_subset' must be defined.")
}
if (class(PAM_subset)[1] != "PAM_subset") {
stop("Object 'PAM_subset' must be of class 'PAM_subset'.")
}
if (!selection_type %in% c("all", "random", "E", "G", "EG")) {
stop("Argument 'selection_type' is not valid, options are: 'all'', 'random', 'E', 'G', or 'EG'.")
} else {
if (!selection_type == "all") {
selection_type <- paste0("selected_sites_", selection_type)
}
}
diss <- list()
dise <- list()
nnull <- which(!sapply(PAM_subset[3:6], is.null))[1] + 2
rsel <- PAM_subset[[nnull]]
fcol <- ncol(rsel[[1]])
icol <- which(colnames(rsel[[1]]) == "Latitude_PAM") + 1
if (selection_type == "all") {
selects <- names(PAM_subset)
selection_type <- grep("selected_sites", selects, value = TRUE)
} else {
selection_type <- paste0("PAM_", selection_type)
}
if (verbose == TRUE) {
message("Running analysis...")
}
if ("PAM_selected_sites_random" %in% selection_type &
!is.null(PAM_subset$PAM_selected_sites_random)) {
if (verbose == TRUE) {
message("Random selection")
}
rsel <- PAM_subset$PAM_selected_sites_random
diss$DI_selected_sites_random <- dis_loop(rsel, icol, fcol, method, verbose,
...)
diss$cluster_random <- lapply(diss$DI_selected_sites_random, function(x) {
stats::hclust(x)
})
sums <- lapply(rsel, function(x) {colSums(x[, icol:fcol]) > 0})
sums <- do.call(rbind, sums) * 1
rownames(sums) <- paste0("random_", rownames(sums))
dise$random <- sums
}
if ("PAM_selected_sites_G" %in% selection_type &
!is.null(PAM_subset$PAM_selected_sites_G)) {
if (verbose == TRUE) {
message("G selection")
}
rsel <- PAM_subset$PAM_selected_sites_G
diss$DI_selected_sites_G <- dis_loop(rsel, icol, fcol, method, verbose,
...)
diss$cluster_G <- lapply(diss$DI_selected_sites_G, function(x) {
stats::hclust(x)
})
sums <- lapply(rsel, function(x) {colSums(x[, icol:fcol]) > 0})
sums <- do.call(rbind, sums) * 1
rownames(sums) <- paste0("G_", rownames(sums))
dise$G <- sums
}
if ("PAM_selected_sites_E" %in% selection_type &
!is.null(PAM_subset$PAM_selected_sites_E)) {
if (verbose == TRUE) {
message("E selection")
}
rsel <- PAM_subset$PAM_selected_sites_E
diss$DI_selected_sites_E <- dis_loop(rsel, icol, fcol, method, verbose,
...)
diss$cluster_E <- lapply(diss$DI_selected_sites_E, function(x) {
stats::hclust(x)
})
sums <- lapply(rsel, function(x) {colSums(x[, icol:fcol]) > 0})
sums <- do.call(rbind, sums) * 1
rownames(sums) <- paste0("E_", rownames(sums))
dise$E <- sums
}
if ("PAM_selected_sites_EG" %in% selection_type &
!is.null(PAM_subset$PAM_selected_sites_EG)) {
if (verbose == TRUE) {
message("EG selection")
}
rsel <- PAM_subset$PAM_selected_sites_EG
diss$DI_selected_sites_EG <- dis_loop(rsel, icol, fcol, method, verbose,
...)
diss$cluster_EG <- lapply(diss$DI_selected_sites_EG, function(x) {
stats::hclust(x)
})
sums <- lapply(rsel, function(x) {colSums(x[, icol:fcol]) > 0})
sums <- do.call(rbind, sums) * 1
rownames(sums) <- paste0("EG_", rownames(sums))
dise$EG <- sums
}
if (verbose == TRUE) {
message("Summary of all selections")
}
diss$all_selections <- do.call(rbind, dise)
toex <- which(apply(diss$all_selections, 1, sum) == 0)
if (length(toex) > 0) {
if (verbose == TRUE) {
message("\tOne or more sites were excluded due to lack of species data:\n\t",
paste(names(toex), collapse = ", "))
}
diss$all_selections <- diss$all_selections[-toex, ]
}
diss$DI_selections <- vegan::vegdist(diss$all_selections, method = method,
...)
diss$cluster_selections <- stats::hclust(diss$DI_selections)
return(diss)
}
dis_loop <- function(site_spp_list, icol, fcol, method = "jaccard",
verbose = TRUE, ...) {
if (missing(site_spp_list)) {stop("Argument 'site_spp_list' is missing")}
lapply(site_spp_list, function(x) {
comat <- x[, icol:fcol]
rownames(comat) <- paste0("Site_" , 1:nrow(comat))
toex <- which(apply(comat, 1, sum) == 0)
if (length(toex) > 0) {
if (verbose == TRUE) {
message("\tOne or more sites were excluded due to lack of species data:\n\t",
paste(names(toex), collapse = ", "))
}
comat <- comat[-toex, ]
}
vegan::vegdist(comat, method = method, ...)
})
} |
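# Sketch (a 'PAM_subset' object comes from the package's site-selection
# workflow, e.g. biosurvey::subset_PAM(); object names are assumptions):
# di <- selected_sites_DI(my_pam_subset, selection_type = "all",
#                         method = "jaccard")
# di$all_selections           # combined presence-absence of selected sites
# plot(di$cluster_selections) # dendrogram of dissimilarities among selections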
FWT_TI <- function(x, L, qmf) {
n <- dyadlength(x)$x
J <- dyadlength(x)$y
D <- J - L
wp <- matrix(0, n, D + 1)
x <- ShapeAsRow(x)
wp[, 1] <- t(x)
for (d in 0:(D - 1)) {
for (b in 0:(2^d - 1)) {
s <- t(wp[packet(d, b, n), 1])
hsr <- DownDyadHi(s, qmf)
hsl <- DownDyadHi(rshift(s), qmf)
lsr <- DownDyadLo(s, qmf)
lsl <- DownDyadLo(rshift(s), qmf)
wp[packet(d + 1, 2 * b, n), d + 2] <- t(hsr)
wp[packet(d + 1, 2 * b + 1, n), d + 2] <- t(hsl)
wp[packet(d + 1, 2 * b, n), 1] <- t(lsr)
wp[packet(d + 1, 2 * b + 1, n), 1] <- t(lsl)
}
}
return(wp)
} |
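# Sketch of the translation-invariant wavelet transform above; it depends on
# the package's helpers (dyadlength, packet, rshift, DownDyadHi, DownDyadLo)
# and a quadrature mirror filter, e.g. from a MakeONFilter()-style helper:
# x <- sin(2 * pi * (1:64) / 64)   # length must be a power of two
# qmf <- MakeONFilter("Haar")      # assumed helper returning the Haar QMF
# wp <- FWT_TI(x, L = 3, qmf)      # 64 x 4 matrix: smooth + D = J - L detail levels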
if(!isGeneric("montePop"))
setGeneric('montePop',
function(popVals, ...) standardGeneric('montePop'),
signature = c('popVals')
)
if(!isGeneric("monteNTSample"))
setGeneric('monteNTSample',
function(population, ...) standardGeneric('monteNTSample'),
signature = c('population')
)
if(!isGeneric("monteBSSample"))
setGeneric('monteBSSample',
function(population, ...) standardGeneric('monteBSSample'),
signature = c('population')
)
if(!isGeneric("monte"))
setGeneric('monte',
function(object, ...) standardGeneric('monte'),
signature = c('object')
)
setMethod('montePop',
signature(popVals = 'numeric'),
function(popVals,
zeroTruncate = FALSE,
n = NA,
description = 'Monte Carlo Population Object',
...
)
{
if(length(popVals) <= 1)
stop('Need a population of size > 1!')
if(any(is.na(popVals))) {
popVals = popVals[!is.na(popVals)]
cat('\n Missing values (NA) have been removed from the population\n')
}
if(zeroTruncate)
popVals = popVals[popVals > 0]
N = length(popVals)
mean = mean(popVals)
var = var(popVals)
stDev = sqrt(var)
total = sum(popVals)
if(!all(is.na(n))) {
nn = length(n)
if(nn < 1 || !is.numeric(n))
stop('Please specify \"n\" as an integer vector!')
n = sort(round(n))
n.names = paste('n',n,sep='.')
names(n) = n.names
if(any(n >= N))
stop('Sample sizes \"n\" can not be larger than the population size \"N\"!')
fpc = (N - n)/N
names(fpc) = n.names
varMean = var/n * fpc
stErr = sqrt(varMean)
}
else {
n = NA_real_
fpc = NA_real_
varMean = fpc
stErr = fpc
}
pop = new('montePop',
mean = mean,
var = var,
stDev = stDev,
N = N,
total = total,
popVals = popVals,
description = description,
zeroTruncated = zeroTruncate,
n = n,
fpc = fpc,
varMean = varMean,
stErr = stErr
)
return(pop)
}
)
setMethod('montePop',
signature(popVals = 'sampSurf'),
function(popVals,
zeroTruncate = TRUE,
n = NA,
description = 'Monte Carlo Population Object: sampSurf',
...
)
{
vals = getValues(popVals@tract)
pop = montePop(vals, zeroTruncate = zeroTruncate, n = n, description = description, ...)
return(pop)
}
)
setMethod('monteNTSample',
signature(population = 'montePop'),
function(population,
n = c(10),
mcSamples = 100,
alpha = 0.05,
replace = TRUE,
startSeed = NA,
runQuiet = TRUE,
...
)
{
nn = length(n)
if(nn < 1 || !is.numeric(n))
stop('Please specify \"n\" as an integer vector!')
n = sort(round(n))
n.names = paste('n',n,sep='.')
names(n) = n.names
if(mcSamples < 2 || !is.numeric(mcSamples) || length(mcSamples)>1)
stop('Please specify \"mcSamples\" as an integer scalar >= 2!')
if(alpha <=0 || alpha >=1)
stop('Please specify 0<alpha<1!')
popVals = population@popVals
popMean = population@mean
N = population@N
if(any(n >= N))
stop('Sample sizes \"n\" can not be larger than the population size \"N\"!')
fpc = (N - n)/N
names(fpc) = n.names
if(!runQuiet)
cat('\nCalculating Normal theory intervals...')
ranSeed = initRandomSeed(startSeed)
alpha = alpha/2
t.values = qt(1-alpha, n-1)
names(t.values) = n.names
means = data.frame(matrix(NA, nrow=mcSamples, ncol=nn))
colnames(means) = n.names
vars = means
stDevs = means
varMeans = means
stErrs = means
lowerCIs = means
upperCIs = means
caught = means
caught[] = FALSE
for(i in seq_len(mcSamples)) {
for(j in seq_len(nn)) {
n.j = n[j]
samp = sample(popVals, n.j, replace=replace, ...)
means[i, j] = mean(samp)
vars[i, j] = var(samp)
stDevs[i, j] = sqrt(vars[i, j])
varMeans[i, j] = vars[i, j]/n.j * fpc[j]
stErrs[i, j] = sqrt(varMeans[i, j])
lowerCIs[i, j] = means[i, j] - t.values[j]*stErrs[i, j]
upperCIs[i, j] = means[i, j] + t.values[j]*stErrs[i, j]
if(lowerCIs[i, j] <= popMean && popMean <= upperCIs[i, j])
caught[i, j] = TRUE
}
}
caughtPct = colMeans(caught) * 100
names(caughtPct) = n.names
nt.means = colMeans(means)
nt.vars = colMeans(vars)
nt.stDevs = colMeans(stDevs)
nt.varMeans = colMeans(varMeans)
nt.stErrs = colMeans(stErrs)
nt.lowerCIs = colMeans(lowerCIs)
nt.upperCIs = colMeans(upperCIs)
nt.Stats = data.frame(rbind(nt.means, nt.vars, nt.stDevs, nt.varMeans, nt.stErrs, nt.lowerCIs, nt.upperCIs))
rownames(nt.Stats) = c('mean', 'var', 'stDev', 'VarMean', 'stErr', 'lowerCI', 'upperCI')
colnames(nt.Stats) = n.names
if(!runQuiet) cat('\n')
ms = new('monteNTSample',
mcSamples = mcSamples,
n = n,
fpc = fpc,
means = means,
vars = vars,
stDevs = stDevs,
varMeans = varMeans,
stErrs = stErrs,
lowerCIs = lowerCIs,
upperCIs = upperCIs,
caught = caught,
caughtPct = caughtPct,
stats = nt.Stats,
alpha = alpha*2,
t.values = t.values,
replace = replace,
ranSeed = ranSeed
)
return(ms)
}
)
setMethod('monteBSSample',
signature(population = 'montePop'),
function(population,
n = c(10),
mcSamples = 100,
R = 100,
alpha = 0.05,
replace = TRUE,
startSeed = NA,
runQuiet = TRUE,
...
)
{
nn = length(n)
if(nn < 1 || !is.numeric(n))
stop('Please specify \"n\" as an integer vector!')
n = sort(round(n))
n.names = paste('n',n,sep='.')
names(n) = n.names
mcSamples = round(mcSamples)
if(mcSamples < 2 || !is.numeric(mcSamples) || length(mcSamples)>1)
stop('Please specify \"mcSamples\" as an integer scalar >= 2!')
R = round(R)
if(R < 10 || !is.numeric(R) || length(R)>1)
stop('Please specify \"R\" as an integer scalar >= 10!')
if(alpha <=0 || alpha >=1)
stop('Please specify 0<alpha<1!')
popVals = population@popVals
popMean = population@mean
N = population@N
if(any(n >= N))
stop('Sample sizes \"n\" can not be larger than the population size \"N\"!')
fpc = (N - n)/N
names(fpc) = n.names
if(!runQuiet)
cat('\nCalculating bootstrap intervals...')
ranSeed = initRandomSeed(startSeed)
means = data.frame(matrix(NA, nrow=mcSamples, ncol=nn))
colnames(means) = n.names
vars = means
stDevs = means
varMeans = means
stErrs = means
lowerCIs = means
upperCIs = means
caught = means
caught[] = FALSE
meanboot = function(x,idx) mean(x[idx])
degenerate = rep(0, nn)
names(degenerate) = n.names
for(i in seq_len(mcSamples)) {
for(j in seq_len(nn)) {
n.j = n[j]
samp = sample(popVals, n.j, replace=replace)
boot.samp = boot(samp, meanboot, R, ...)
means[i, j] = mean(boot.samp$t0)
vars[i, j] = var(samp)
stDevs[i, j] = sqrt(vars[i, j])
varMeans[i, j] = vars[i, j]/n.j * fpc[j]
stErrs[i, j] = sqrt(varMeans[i, j])
if(length(unique(samp)) == 1) {
degenerate[j] = degenerate[j] + 1
next
}
suppressWarnings({
boot.cis = boot.ci(boot.samp, 1-alpha, type='bca')
})
lowerCIs[i, j] = boot.cis$bca[1, 4]
upperCIs[i, j] = boot.cis$bca[1, 5]
if(lowerCIs[i, j] <= popMean && popMean <= upperCIs[i, j])
caught[i, j] = TRUE
}
}
caughtPct = colMeans(caught) * 100
names(caughtPct) = n.names
m.means = colMeans(means, na.rm = TRUE)
m.vars = colMeans(vars, na.rm = TRUE)
m.stDevs = colMeans(stDevs, na.rm = TRUE)
m.varMeans = colMeans(varMeans, na.rm = TRUE)
m.stErrs = colMeans(stErrs, na.rm = TRUE)
m.lowerCIs = colMeans(lowerCIs, na.rm = TRUE)
m.upperCIs = colMeans(upperCIs, na.rm = TRUE)
bs.Stats = data.frame(rbind(m.means, m.vars, m.stDevs, m.varMeans, m.stErrs, m.lowerCIs, m.upperCIs))
rownames(bs.Stats) = c('mean', 'var', 'stDev', 'varMean', 'stErr', 'lowerCI', 'upperCI')
colnames(bs.Stats) = n.names
if(!runQuiet) cat('\n')
ms = new('monteBSSample',
mcSamples = mcSamples,
n = n,
fpc = fpc,
means = means,
vars = vars,
stDevs = stDevs,
varMeans = varMeans,
stErrs = stErrs,
lowerCIs = lowerCIs,
upperCIs = upperCIs,
caught = caught,
caughtPct = caughtPct,
stats = bs.Stats,
R = R,
alpha = alpha,
replace = replace,
degenerate = degenerate,
ranSeed = ranSeed
)
return(ms)
}
)
setMethod('monte',
signature(object = 'montePop'),
function(object,
zeroTruncate = TRUE,
n = object@n,
mcSamples = 100,
type = c('both', 'normalTheory', 'bootstrap'),
R = 100,
alpha = 0.05,
description = 'Monte Carlo Object',
replace = TRUE,
startSeed = NA,
runQuiet = TRUE,
...
)
{
nn = length(n)
if(nn < 1 || !is.numeric(n))
stop('Please specify \"n\" as an integer vector!')
n = sort(round(n))
if(mcSamples < 1 || !is.numeric(mcSamples) || length(mcSamples)>1)
stop('Please specify \"mcSamples\" as an integer scalar!')
if(alpha <=0 || alpha >=1)
stop('Please specify 0<alpha<1!')
type = match.arg(type)
pop = object
if(any(is.na(pop@n)))
stop('Sample sizes must be present (no NAs) in \"montePop\" object for monte')
if(length(intersect(pop@n, n)) != length(n))
stop('Sample sizes between \"montePop\" object and argument \"n\" must match exactly!')
if(type == 'both' || type == 'normalTheory')
mnts = monteNTSample(pop, n=n, mcSamples=mcSamples,
alpha=alpha, replace=replace, startSeed=startSeed,
runQuiet=runQuiet, ...
)
else
mnts = NULL
if(type == 'both' || type == 'bootstrap')
mbss = monteBSSample(pop, n=n, mcSamples=mcSamples, R=R,
alpha=alpha, replace=replace, startSeed=startSeed,
runQuiet=runQuiet, ...
)
else
mbss = NULL
mo = new('monte',
pop = pop,
estimate = NA_character_,
NTsamples = mnts,
BSsamples = mbss,
description = description
)
return(mo)
}
)
setMethod('monte',
signature(object = 'sampSurf'),
function(object,
zeroTruncate = TRUE,
n = c(10),
mcSamples = 100,
type = c('both', 'normalTheory', 'bootstrap'),
R = 100,
alpha = 0.05,
description = 'Monte Carlo Object',
replace = TRUE,
startSeed = NA,
runQuiet = TRUE,
...
)
{
pop = montePop(object, zeroTruncate = zeroTruncate, n = n, ...)
mo = monte(object = pop,
zeroTruncate = zeroTruncate,
n = n,
mcSamples = mcSamples,
type = type,
R = R,
alpha = alpha,
description = description,
replace = replace,
startSeed = startSeed,
runQuiet = runQuiet,
... )
mo@estimate = object@estimate
return(mo)
}
)
setMethod('monte',
signature(object = 'numeric'),
function(object,
zeroTruncate = TRUE,
n = c(10),
mcSamples = 100,
type = c('both', 'normalTheory', 'bootstrap'),
R = 100,
alpha = 0.05,
description = 'Monte Carlo Object',
replace = TRUE,
startSeed = NA,
runQuiet = TRUE,
...
)
{
pop = montePop(object, zeroTruncate = zeroTruncate, n = n, ...)
mo = monte(object = pop,
zeroTruncate = zeroTruncate,
n = n,
mcSamples = mcSamples,
type = type,
R = R,
alpha = alpha,
description = description,
replace = replace,
startSeed = startSeed,
runQuiet = runQuiet,
... )
return(mo)
}
)
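# End-to-end sketch of the Monte Carlo generics above (class definitions as
# in the sampSurf package are assumed to be loaded):
# set.seed(42)
# pop <- montePop(rgamma(5000, shape = 2), n = c(10, 20, 50))
# mc <- monte(pop, mcSamples = 200, type = "normalTheory", alpha = 0.05)
# mc@NTsamples@caughtPct   # percent of normal-theory CIs capturing the mean, per n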
|
plot_missing_dates <- function(data,
dates = Date,
values = Value,
groups = STATION_NUMBER,
station_number,
roll_days = 1,
roll_align = "right",
water_year_start = 1,
start_year,
end_year,
months = 1:12,
include_title = FALSE){
if (missing(data)) {
data <- NULL
}
if (missing(station_number)) {
station_number <- NULL
}
if (missing(start_year)) {
start_year <- 0
}
if (missing(end_year)) {
end_year <- 9999
}
include_title_checks(include_title)
flow_data <- flowdata_import(data = data, station_number = station_number)
flow_data <- format_all_cols(data = flow_data,
dates = as.character(substitute(dates)),
values = as.character(substitute(values)),
groups = as.character(substitute(groups)),
rm_other_cols = TRUE)
flow_summary <- screen_flow_data(data = flow_data,
roll_days = roll_days,
roll_align = roll_align,
water_year_start = water_year_start,
start_year = start_year,
end_year = end_year,
months = months)
missing_plotdata <- flow_summary[,c(1,2,11:ncol(flow_summary))]
missing_plotdata <- tidyr::gather(missing_plotdata, Month, Value, 3:ncol(missing_plotdata))
missing_plotdata <- dplyr::mutate(missing_plotdata, Month = substr(Month, 1, 3))
missing_plotdata$Month <- factor(missing_plotdata$Month, levels = month.abb[c(water_year_start:12, seq_len(water_year_start - 1))]) # seq_len() handles water_year_start = 1 cleanly; '1:water_year_start-1' relied on a 0 index being dropped
miss_plots <- dplyr::group_by(missing_plotdata, STATION_NUMBER)
miss_plots <- tidyr::nest(miss_plots)
miss_plots <- dplyr::mutate(miss_plots,
plot = purrr::map2(data, STATION_NUMBER,
~ggplot2::ggplot(data = ., ggplot2::aes(x = Year, y = Value)) +
ggplot2::geom_bar(colour = "cornflowerblue", fill = "cornflowerblue", na.rm = TRUE, stat = "identity") +
ggplot2::facet_wrap(~Month, ncol = 3, scales = "fixed", strip.position = "top") +
ggplot2::ylab("Missing Days") +
ggplot2::xlab("Year") +
ggplot2::theme_bw() +
ggplot2::scale_y_continuous(limits = c(0, 32)) +
{if (include_title & .y != "XXXXXXX") ggplot2::ggtitle(paste(.y)) } +
ggplot2::theme(panel.border = ggplot2::element_rect(colour = "black", fill = NA, size = 1),
panel.grid = ggplot2::element_line(size = .2),
axis.title = ggplot2::element_text(size = 12),
axis.text = ggplot2::element_text(size = 10),
plot.title = ggplot2::element_text(hjust = 1, size = 9, colour = "grey25"),
strip.background = ggplot2::element_blank(),
strip.text = ggplot2::element_text(hjust = 0, face = "bold", size = 10))
))
plots <- miss_plots$plot
if (nrow(miss_plots) == 1) {
names(plots) <- "Missing_Dates"
} else {
names(plots) <- paste0(miss_plots$STATION_NUMBER, "_Missing_Dates")
}
plots
} |
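# Example (fasstr-style usage; with a station number the daily flows are
# fetched via tidyhydat's HYDAT database):
# plot_missing_dates(station_number = "08NM116",
#                    start_year = 1980, end_year = 1990)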
translateIOV <- function(model, occ.name, nocc, output, iov0, cat0=NULL) {
if (length(cat0)==0)
cat0 <- NULL
sections <- sectionsModel(model)
sm <- splitModel(model, sections)
lines <- c()
i.iov <- iov0
o.iov <- addiov(i.iov, output)
cat0.name <- unlist(lapply(cat0, function(x) x$name))
rem.name <- c("occ","socc", "OCC","sOCC")
if (!is.null(cat0))
rem.name <- unique(c(rem.name,paste0("s",cat0.name)))
i.cov <- which(sapply(sm, function(ch) ch$name=="[COVARIATE]"))
d.iov <- c.iov <- v.iov <- NULL
add.iov <- FALSE
if (length(i.cov)>0) {
sec.cov <- splitSection(sm[[i.cov]])
r0.cov <- iovin(sec.cov$input, NULL, i.iov, nocc, sec.cov$name, cat0, rem.name=rem.name)
i.iov <- r0.cov$iov
lines.cov <- r0.cov$lines
if (length(sec.cov$blocks)) {
for (k in (1:length(sec.cov$blocks))) {
if (identical(sec.cov$blocks[k],'EQUATION:')) {
rk.cov <- ioveq(sec.cov$lines[[k]], i.iov, d.iov, nocc)
} else {
rk.cov <- iovdef(sec.cov$lines[[k]], i.iov, nocc)
d.iov <- rk.cov$d.iov
}
c.iov <- rk.cov$iov
lines.cov <- c(lines.cov, rk.cov$lines)
}
}
if (!is.null(d.iov) | !is.null(i.iov)) {
add.iov <- TRUE
lines <- c(lines,"",lines.cov)
} else {
lines <- c(lines,"",sm[[i.cov]]$lines)
}
o.iov <- unique(c(o.iov,addiov(c.iov, output)))
}
i.ind <- which(sapply(sm, function(ch) ch$name=="[INDIVIDUAL]"))
if (length(i.ind)>0) {
sec.ind <- splitSection(sm[[i.ind]])
u.iov <- c.iov
u.iov <- setdiff(c.iov, cat0.name)
r0.ind <- iovin(sec.ind$input, u.iov, i.iov, nocc, sec.ind$name, cat0, rem.name=rem.name)
v.iov <- unique(c(u.iov, i.iov))
o.iov <- unique(c(o.iov,addiov(c.iov, output)))
lines.ind <- r0.ind$lines
for (k in (1:length(sec.ind$blocks))) {
if (identical(sec.ind$blocks[k],'EQUATION:')) {
rk.ind <- ioveq(sec.ind$lines[[k]], v.iov, d.iov, nocc)
v.iov <- rk.ind$iov
} else {
rk.ind <- iovdef(sec.ind$lines[[k]], v.iov, nocc)
d.iov <- rk.ind$d.iov
v.iov <- rk.ind$iov
}
lines.ind <- c(lines.ind, rk.ind$lines)
}
if (!is.null(d.iov)) {
add.iov <- TRUE
lines <- c(lines,"",lines.ind)
} else {
lines <- c(lines,"",sm[[i.ind]]$lines)
}
}
i.long <- which(sapply(sm, function(ch) ch$name=="[LONGITUDINAL]"))
if (length(i.long)>0) {
if (!is.null(v.iov) & add.iov) {
sec.long <- splitSection(sm[[i.long]])
u.iov <- unique(c(i.iov,o.iov))
long.lines <- iovinlong(sec.long$input, v.iov, u.iov, nocc, sec.long$name, occ.name)
if (length(v.iov)==length(d.iov)) {
r1.long <- iovseclong(sec.long, v.iov, d.iov, u.iov, nocc, occ.name)
lines <- c(lines,"",long.lines,r1.long$lines)
}
} else {
lines <- c(lines,"",sm[[i.long]]$lines)
}
}
model <- "tempiov_model.txt"
write(lines,model)
return(list(model=model, iov=unique(c(o.iov,v.iov,i.iov)), occ.name=occ.name, cat=cat0))
}
regstr1 <-function(expr,str) {
strb <- paste0(" ",str," ")
sep <- "([^[:digit:]_])"
ig <- grep(paste0(sep,expr,sep),strb)
newv <- gsub("\\=.*","",str[ig])
return(newv)
}
repstr1 <-function(expr,x,str) {
str <- paste0(" ",str," ")
sep <- "([^[:digit:]_])"
str <- gsub(paste0(sep,expr,sep),paste0("\\1",expr,x,"\\2"),str)
str <- gsub("^\\s+|\\s+$", "", str)
return(str)
}
line2field <- function(str) {
if (length(str)>1)
str <- strmerge(str, 0)
str <- gsub(" ","",str)
rout <- list()
for (k in (1:length(str))) {
strk <- str[k]
strk <- gsub("no-variability","sd=0",strk)
r <- list(name=sub("\\=.*","",strk))
i1 <- regexpr("\\{",strk)
i2 <- tail(gregexpr("\\}",strk)[[1]],n=1)
strk <- substr(strk,i1+1,i2-1)
sp <- strsplit(strk,",")[[1]]
sp <- strmerge(sp, 1)
fields <- sub("\\=.*","",sp)
rv <- sub(".*\\=","",sp)
nv <- lapply(rv, function(x) sum(gregexpr("\\{",x)[[1]]>0))
lrv <- as.list(rv)
iv1 <- which(nv==1)
if (length(iv1)>0) {
riv1 <- gsub(".*?\\{(.*?)\\}.*", "\\1", rv[iv1])
lrv[iv1] <- strsplit(riv1,",")
}
iv2 <- which(nv>=2)
if (length(iv2)>0) {
for (ii in iv2) { # distinct loop variable: the original reused 'i2' and then wrote lrv[[iv2]], which breaks when length(iv2) > 1
rvi2 <- rv[ii]
i1 <- regexpr("\\{",rvi2)
i2 <- tail(gregexpr("\\}",rvi2)[[1]],n=1)
rvi2 <- substr(rvi2,i1+1,i2-1)
j1 <- -1
j2 <- -1
ni2 <- nchar(rvi2)
lrvi2 <- c()
while (j2>=j1 && j2<ni2) {
j1 <- j2+2
if (substr(rvi2,j1,j1)=="{")
j2 <- regexpr("\\}",substr(rvi2,j1+1,ni2))+j1
else
j2 <- regexpr("\\,",substr(rvi2,j1+1,ni2))+j1-1
if (j2>=j1)
lrvi2 <- c(lrvi2, substr(rvi2,j1,j2))
else
lrvi2 <- c(lrvi2, substr(rvi2,j1,ni2))
}
lrv[[ii]] <- lrvi2
}
}
r$fields <- lrv
names(r$fields) <- fields
rout[[k]] <- r
}
if (length(rout)==1) rout <- rout[[1]]
return(rout)
}
field2line <- function(r) {
nr <- length(r)
lines <- vector(length=nr)
for (k in (1:nr)) {
rk <- r[[k]]
nk <- names(rk$fields)
mk <- sapply(rk$fields,length)
ik <- which(mk>1)
rk$fields[ik] <- sapply(rk$fields[ik], function(x) paste0("{",paste(x,collapse=","),"}"))
uk <- paste(nk,unlist(rk$fields),sep="=")
lines[k] <- paste0(rk$name,"={",paste(uk, collapse=","),"}")
}
return(lines)
}
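# Round-trip sketch for the two Mlxtran field helpers above (the input line
# is a hypothetical DEFINITION: entry):
# r <- line2field("V={distribution=lognormal,typical=V_pop,sd=omega_V}")
# r$name              # "V"
# r$fields$sd         # "omega_V"
# field2line(list(r)) # back to the original one-line definition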
strmerge <- function(str1, op=0) {
n <- length(str1)
str2 <- c()
idx <- 0
while (idx<n) {
idx <- idx + 1
si <- str1[idx]
n1 <- sum(gregexpr("\\{",si)[[1]]>0)
n2 <- sum(gregexpr("\\}",si)[[1]]>0)
while(n1 > n2) {
idx <- idx+1
if (identical(substr(si,nchar(si),nchar(si)),",") | identical(substr(si,nchar(si),nchar(si)),"{"))
si <- paste0(si,str1[idx])
else
si <- paste(si,str1[idx],sep=",")
n1 <- sum(gregexpr("\\{",si)[[1]]>0)
n2 <- sum(gregexpr("\\}",si)[[1]]>0)
}
str2 <- c(str2, si)
}
list1 <- NULL
for (idx in (1:length(str2))) {
si <- str2[idx]
if (identical(substr(si,nchar(si),nchar(si)),"="))
list1 <- c(list1, idx)
}
if (length(list1)>0) {
for (idx in list1)
str2[idx] <- paste0(str2[idx],str2[idx+1])
str2 <- str2[-(list1+1)]
}
if (op==1) {
n <- length(str2)
str3 <- c()
idx <- 0
while (idx<n) {
idx <- idx + 1
si <- str2[idx]
if (!identical(si, "no-variability")) {
while (!grepl("=",si)) {
idx <- idx + 1
si <- paste(si, str2[idx], sep=",")
}
}
str3 <- c(str3, si)
}
str3 <- gsub(",}","}",str3)
return(str3)
} else {
str2 <- gsub(",}","}",str2)
return(str2)
}
}
splitSection <- function(section) {
lines <- section$lines
i.input = grep("input=",lines, fixed=TRUE)[1]
i.eq = grep("EQUATION:",lines, fixed=TRUE)
i.def = grep("DEFINITION:",lines, fixed=TRUE)
i.pk = grep("PK:",lines, fixed=TRUE)
is <- sort(c(i.pk,i.eq,i.def))
is <- c(is, length(lines)+1)
if (!is.na(i.input))
input <- lines[i.input:(is[1]-1)]
else
input <- NULL
bt <- c()
bc <- list()
if (length(is)>1) {
for (k in (1:(length(is)-1))) {
if (regexpr("EQUATION:",lines[is[k]], fixed=TRUE)==1)
bt <- c(bt, "EQUATION:")
else if (regexpr("DEFINITION:",lines[is[k]], fixed=TRUE)==1)
bt <- c(bt, "DEFINITION:")
else
bt <- c(bt, "PK:")
if (is[k+1]-is[k] >1)
bc[[k]] <- lines[(is[k]+1):(is[k+1]-1)]
else
bc[[k]] <- ""
}
}
return(list(name=section$name, input=input, blocks=bt, lines=bc))
}
iovin <- function(lines, c.iov=NULL, v.iov=NULL, nocc, name, cat=NULL, rem.name=NULL) {
if (!is.null(rem.name)) {
vc <- sub("\\=.*","",lines)
lines <- lines[!(vc %in% rem.name)]
foo <- "foo123456"
lines <- gsub(paste0(rem.name,collapse="|"),foo,lines)
lines <- gsub(paste0(",",foo),"",lines)
lines <- gsub(paste0(foo,","),",",lines)
lines <- gsub("\\{,","\\{",lines)
lines <- gsub(",\\}","\\}",lines)
lines <- gsub(paste(paste0("=",rem.name),collapse="|"),"=",lines)
lines <- lines[lines!="input="]
lines <- lines[lines!=paste0("input=",foo)]
gl <- grep("\\{\\}",lines)
if (length(gl)>0)
lines <- lines[-gl]
}
if (length(lines)==0)
return(list(iov=NULL, lines=name))
suffix <- "_iov"
sep <- "([\\,\\{\\}])"
vi <- c()
for (expr in v.iov)
if (length(grep(expr, lines))==0)
rem.name <- c(rem.name, expr)
v.iov <- setdiff(v.iov,rem.name)
l.input <- grep("input", lines)
if (length(v.iov)==1 && l.input %in% grep(v.iov,lines) && length(grep("\\{", lines[l.input]))==0)
lines[l.input] <- paste0("input={",v.iov,"}")
for (expr in c.iov) {
nexpr0 <- paste0(expr,"0")
nexpr1 <- paste0("eta_",expr,suffix,1:nocc,collapse=",")
nexpr <- paste(nexpr0, nexpr1, sep=",")
if (any(regexpr(paste0(sep,expr,sep),lines)>0)) {
vi <- c(vi, expr)
lines <- gsub(paste0(sep,expr,sep),paste0("\\1",nexpr,"\\2"),lines)
} else {
lines <- c(lines, paste0("input={",nexpr,"}"))
}
}
for (expr in v.iov) {
nexpr <- paste0(expr,suffix,1:nocc,collapse=",")
if (any(regexpr(paste0(sep,expr,sep),lines)>0))
vi <- c(vi, expr)
lines <- gsub(paste0(sep,expr,sep),paste0("\\1",nexpr,"\\2"),lines)
}
vc <- sub("\\=.*","",lines)
if (!is.null(c(c.iov,v.iov)))
lines <- lines[!(vc %in% c(c.iov,v.iov))]
if (!is.null(cat)) {
for (k in (1: length(cat))) {
if (!(cat[[k]]$name %in% rem.name)) {
if (!(cat[[k]]$name %in% vi))
lines <- c(lines, paste0("input = {", paste0(cat[[k]]$name,suffix,1:nocc,collapse=","),"}"))
for (ko in (1:nocc)) {
lo <- paste0(cat[[k]]$name,suffix,ko,"={type=categorical,categories={",paste(cat[[k]]$categories,collapse = ","),"}}")
lines <- c(lines, lo)
}
}
}
}
return(list(iov=vi, lines=c(name,lines)))
}
iovinlong <- function(lines, v.iov, o.iov, nocc, name, occ.name) {
suffix <- "_iov"
sep <- "([\\,\\{\\}])"
for (expr in v.iov) {
nexpr0 <- paste0(expr,"0")
nexpr1 <- paste0("eta_",expr,suffix,1:nocc,collapse=",")
nexpr <- paste(nexpr0, nexpr1, sep=",")
if (any(regexpr(paste0(sep,expr,sep),lines)>0))
lines <- gsub(paste0(sep,expr,sep),paste0("\\1",nexpr,"\\2"),lines)
else
lines <- c(lines, paste0("input={",nexpr,"}"))
}
for (expr in o.iov) {
nexpr <- paste0(expr,suffix,1:nocc,collapse=",")
if (any(regexpr(paste0(sep,expr,sep),lines)>0))
lines <- gsub(paste0(sep,expr,sep),paste0("\\1",nexpr,"\\2"),lines)
else
lines <- c(lines, paste0("input={",nexpr,"}"))
}
lines <- c(lines, paste0("input={",occ.name,"}"), paste0(occ.name,"={use=regressor}"))
return(lines=c(name,lines))
}
ioveq <- function(lines, v.iov=NULL, d.iov=NULL, nocc) {
new.lines <- c()
if (length(d.iov)>0) {
for (i in 1:length(v.iov)) {
vi <- v.iov[i]
di <- d.iov[i]
vi0 <- paste0(vi,"0")
vis <- paste0(vi,"_iov")
vie <- paste0("eta_",vi,"_iov")
for (k in (1:nocc)) {
if (tolower(di)=="normal") {
nl <- paste0(vis,k," = ",vi0," + ",vie,k)
} else if (tolower(di)=="lognormal") {
nl <- paste0(vis,k," = ",vi0," * exp(",vie,k,")")
} else if (tolower(di)=="logitnormal") {
nl <- paste0(vis,k," = 1/(1+exp(-logit(",vi0,") + ",vie,k,"))")
} else {
stop("IOV is only possible with distributions normal, lognormal, logitnormal", call.=FALSE)
}
new.lines <- c(new.lines, nl)
}
}
}
listc <- c()
for (kc in (1: length(v.iov))) {
listk <- testk <- v.iov[kc]
while (length(testk)>0) {
listk <- unique(c(listk,regstr1(testk[1],lines)))
testk <- unique(c(testk,regstr1(testk[1],lines)))
testk <- testk[-1]
}
listc <- unique(c(listc, listk))
}
new.eq <- c()
for (ko in (1:nocc)) {
smek <- lines
for (exprk in listc) {
smek <- repstr1(exprk,paste0("_iov",ko),smek)
}
new.eq <- c(new.eq, smek)
}
new.eq <- unique(new.eq)
return(list(iov=listc,lines=c("EQUATION:",new.lines,new.eq)))
}
iovseclong <- function(sec, v.iov=NULL, d.iov=NULL, o.iov=NULL, nocc, occ.name) {
suffix <- "_iov"
new.lines <- c()
if (!is.null(v.iov)) {
new.lines <- c("EQUATION:",paste0("if ",occ.name,"==1"))
for (ko in (1:nocc)) {
for (vi in v.iov) {
new.lines <- c(new.lines, paste0(" eta_",vi,"=",paste0("eta_",vi,suffix,ko)))
}
for (vi in o.iov) {
new.lines <- c(new.lines, paste0(" ",vi,"=",paste0(vi,suffix,ko)))
}
new.lines <- c(new.lines, paste0("elseif ",occ.name,"==",ko+1))
}
new.lines[length(new.lines)] <- "end"
}
for (i in 1:length(v.iov)) {
vi <- v.iov[i]
di <- d.iov[i]
if (tolower(di)=="normal") {
nl <- paste0(vi," = ",vi,"0 + eta_",vi)
} else if (tolower(di)=="lognormal") {
nl <- paste0(vi," = ",vi,"0 * exp(eta_",vi,")")
} else if (tolower(di)=="logitnormal") {
nl <- paste0(vi," = 1/(1+exp(-logit(",vi,"0) + eta_",vi,"))")
} else {
stop("IOV is only possible with distributions normal, lognormal, logitnormal", call.=FALSE)
}
new.lines <- c(new.lines, nl)
}
for (k in (1:length(sec$blocks))) {
new.lines <- c(new.lines,sec$blocks[k],sec$lines[[k]])
}
return(list(lines=new.lines))
}
iovdef <- function(lines, v.iov=NULL, nocc) {
suffix <- "_iov"
lines <- gsub(",mean=",",reference=",lines)
lines <- gsub(",prediction=",",reference=",lines)
lines <- gsub(",typical=",",reference=",lines)
iop.sd <- (length(grep("var=",lines))==0)
fields <- line2field(lines)
if (length(lines)==1) fields <- list(fields)
i.iov <- which(sapply(fields, function(x) !is.null(x$fields$varlevel)))
vcv <- list()
for (k in (1:length(fields))) {
icv <- match(fields[[k]]$fields$covariate,v.iov)
icv <- icv[!is.na(icv)]
vcv[[k]] <- v.iov[icv]
if (length(icv)>0)
i.iov <- unique(c(i.iov, k))
}
if (length(i.iov)>0) {
v.iov <- sapply(fields[i.iov], function(x) x$name)
d.iov <- sapply(fields[i.iov], function(x) x$fields$distribution)
for (iv in i.iov) {
if (identical(fields[[iv]]$fields$varlevel,"id*occ")) {
if (iop.sd)
fields[[iv]]$fields$sd <- c(0,fields[[iv]]$fields$sd)
else
fields[[iv]]$fields$var <- c(0,fields[[iv]]$fields$var)
}
}
f1 <- fields
for (iv in i.iov) {
f1[[iv]]$name <- paste0(f1[[iv]]$name,"0")
f1[[iv]]$fields$varlevel <- NULL
f1[[iv]]$fields$covariate <- NULL
f1[[iv]]$fields$coefficient <- NULL
if (iop.sd)
f1[[iv]]$fields$sd <- f1[[iv]]$fields$sd[1]
else
f1[[iv]]$fields$var <- f1[[iv]]$fields$var[1]
}
line1 <- field2line(f1)
f2 <- fields
for (iv in i.iov) {
f2[[iv]]$fields$varlevel <- NULL
f2[[iv]]$fields$reference <- 0
f2[[iv]]$fields$distribution <- "normal"
if (iop.sd)
f2[[iv]]$fields$sd <- ifelse(is.na(f2[[iv]]$fields$sd[2]),0,f2[[iv]]$fields$sd[2])
else
f2[[iv]]$fields$var <- ifelse(is.na(f2[[iv]]$fields$var[2]),0,f2[[iv]]$fields$var[2])
}
f2o <- f2[i.iov]
f2 <- list()
for (ko in (1:nocc)) {
for (k in 1:length(i.iov)) {
f2o[[k]]$name <- paste0("eta_",v.iov[k],suffix,ko)
if (length(vcv)>0) {
vcvk <- vcv[[i.iov[k]]]
if (length(vcvk)>0) {
fck <- fields[[i.iov[k]]]$fields$covariate
for (j in (1:length(vcvk)))
fck <- gsub(vcvk[j],paste0(vcvk[j],suffix,ko),fck)
f2o[[k]]$fields$covariate <- fck
}
}
}
f2 <- c(f2,f2o)
}
line2 <- field2line(f2)
new.lines <- c("DEFINITION:",line1,line2)
} else {
new.lines <- c("DEFINITION:", lines)
d.iov <- NULL
}
new.lines <- gsub("no-variability=no-variability","no-variability",new.lines)
l.cor <- grep("id\\*occ",new.lines)
if (length(l.cor)>0) {
lk.cor <- NULL
for (i in l.cor) {
li.cor0 <- new.lines[i]
li.cor0 <- gsub("id\\*occ", "id", li.cor0)
vi1 <- gregexpr("\\(", li.cor0)[[1]]
vi2 <- gregexpr("\\)", li.cor0)[[1]]
nv1 <- length(vi1)
li.cor <- NULL
for (ko in (1:nocc)) {
li.cork <- li.cor0
for (ji in seq_len(nv1)) {
i1 <- vi1[nv1-ji+1]
i2 <- vi2[nv1-ji+1]
str12 <- substr(li.cork, start=i1, stop=i2)
isep <- regexpr(",", str12)
viov <- c(substring(str12,first=2,last=isep-1),substring(str12,first=isep+1, last=nchar(str12)-1))
li.cork <- gsub(str12,paste0("(eta_",viov[1],suffix,ko,", ","eta_",viov[2],suffix,ko,")"),li.cork, fixed=TRUE)
}
lk.cor <- c(lk.cor, li.cork )
}
}
new.lines <- new.lines[-l.cor]
new.lines <- c(new.lines, lk.cor)
}
return(list(iov=v.iov,d.iov=d.iov,lines=new.lines))
}
addiov0 <- function(var.iov, v.iov, output) {
for (k in 1:length(output)) {
nk <- output[[k]]$name
ik <- match(nk, var.iov)
nk <- nk[!is.na(ik)]
ik <- ik[!is.na(ik)]
if (length(ik)>0) {
v.iov <- unique(c(v.iov, nk))
}
}
return(v.iov)
}
addiov <- function(v.iov, output) {
o.iov <- NULL
for (k in 1:length(output)) {
nk <- output[[k]]$name
ik <- match(nk, v.iov)
nk <- nk[!is.na(ik)]
ik <- ik[!is.na(ik)]
if (length(ik)>0) {
o.iov <- unique(c(o.iov, nk))
}
}
return(o.iov)
}
outiov <- function(output,v.iov,occ, v.iov0) {
new.output <- list()
j <- 0
for (k in 1:length(output)) {
nk <- output[[k]]$name
ik <- match(nk, c(v.iov,v.iov0))
nk <- nk[!is.na(ik)]
ik <- ik[!is.na(ik)]
if (length(ik)>0) {
output[[k]]$name <- setdiff(output[[k]]$name, nk)
if (is.data.frame(occ[[1]]))
outk <- list(name=nk, time=occ[[1]])
else
outk <- list(name=nk, time=occ[[1]]$time)
j <-j+1
new.output[[j]] <- outk
nk <- setdiff(nk, v.iov0)
if (length(nk)>0) {
j <-j+1
new.output[[j]] <- list(name=paste0(nk,"0"))
}
}
}
output <- c(output, new.output)
return(output)
}
param.iov <- function(p, occ) {
suffix <- "_iov"
occ <- occ[[1]]
p2 <- p
if (is.data.frame(occ)) {
no <- names(occ)
occ.name <- setdiff(no,c("id","time"))
nocc <- length(unique(occ[[occ.name]]))
} else {
occ.name <- occ$name
nocc <- length(occ$time)
no <- c("id", "time", occ.name)
}
v.iov <- c()
cat <- list()
indj <- 0
for (k in (1:length(p))) {
pk <- p[[k]]
if (is.data.frame(pk) && (!is.null(pk$time))) {
jfk <- which(sapply(pk,is.character)|sapply(pk,is.factor))
jfk <- jfk[names(jfk)!="id"]
if (length(jfk)>0) {
for (jf in (1:length(jfk))) {
indj <- indj+1
cat[[indj]] <- list(name=names(pk)[jfk[jf]], categories=levels(pk[,jfk[jf]]))
}
}
nk <- names(pk)
so <- setdiff(nk, no)
if (length(so)==0)
so <- occ.name
n.param <- length(so)
if (!("id" %in% nk))
pk$id <- 1
N <- length(unique(pk$id))
io <- intersect(nk, no)
if (is.data.frame(occ))
mo <- merge(occ, pk)
else
mo <- merge(data.frame(occ=1:nocc,time=occ$time),pk)
dk <- vector(length=n.param)
for (j in (1:n.param)) {
pkj <- mo[c("id",so[j])]
dk[j] <- (dim(unique(pkj))[1] == N)
}
for (j in (1:length(dk))){
if (!dk[j])
pk[so[j]] <- NULL
}
pk$time <- NULL
pk[occ.name] <- NULL
if (!("id" %in% nk))
pk$id <- NULL
if (dim(pk)[2]>1)
p2[[k]] <- unique(pk)
else
p2[[k]] <- NULL
pk.occ <- mo
pk.occ[so[dk]] <- NULL
pk.occ["time"] <- NULL
if (!identical(occ.name,"occ")) {
pk.occ$occ <- pk.occ[occ.name]
pk.occ[occ.name] <- NULL
}
pkn <- subset(pk.occ, occ==1)
io <- which(names(pkn) %in% so[!dk])
names(pkn)[io] <- paste0(so[!dk],suffix,1)
pkn$occ <- NULL
for (ko in (2:nocc)) {
pko <- subset(pk.occ, occ==ko)
names(pko)[io] <- paste0(so[!dk],suffix,ko)
pko$occ <- NULL
pkn <- merge(pkn,pko, by=c("id"), all=TRUE)
}
for (kf in (1:dim(pkn)[2])) {
if (is.factor(pkn[,kf]))
pkn[is.na(pkn[,kf]),kf] <- levels(pkn[,kf])[1]
else
pkn[is.na(pkn[,kf]),kf] <- NaN
}
if (!("id" %in% nk)) {
pkn$id <- NULL
pkn <- as.vector(pkn)
}
p2[[length(p2)+1]] <- pkn
v.iov <- c(v.iov, so[!dk])
}
}
p2 <- p2[sapply(p2,length)>0]
for (j in seq_len(length(cat))) {
cj <- grep("[\\+\\-\\*<>]",cat[[j]]$categories)
if (length(cj)>0)
cat[[j]]$categories[cj] <- paste0("'",cat[[j]]$categories[cj],"'")
}
return(list(param=p2, iov=v.iov, cat=cat))
} |
V_aD<-function(px,x,h,n,k=1,cantprem=1,premperyear=1,i=0.04,data,prop=1,assumption="none",variation="none",cap,t){
dig<-getOption("digits")
on.exit(options(digits = dig))
options(digits = 15)
reserve<-c()
res<-0
rown<-c()
if(px>0 && x>=0 && is_integer(x)==1 && h>=0 && is_integer(h)==1 && n>0 && is_integer(n)==1 && k>=1 && is_integer(k)==1 && cantprem>=1 && is_integer(cantprem)==1 && premperyear>=1 && premperyear<=12 && is_integer(premperyear)==1 && i>=0 && prop>0 && cap>0){
if(k==1 && premperyear==1){
if(t<=(h+n)){
for(j in 1:t){
risk<-0
prem<-px
if(j>h){
risk<-cap*(n-(j-h-1))
}
if(j>cantprem){
prem<-0
}
res<-(res+prem-risk)*(E(x+j-1,1,i,data,prop,"none",1))^(-1)
e<-(E(x+j-1,1,i,data,prop,"none",1))^(-1)
reserve<-rbind(reserve,c(prem,risk,e,round(res,3)))
rown<-c(rown,paste("Year",j))
}
} else{
stop("Check Year")
}
colnames(reserve)<-c("Premium","Risk","1/E","Reserve")
rownames(reserve)<-rown
}else if(k<=12){
if(t<=(h+n)*12){
if(variation=="inter"){
Premiums_Paid<-0
frac<-1
CumVariationInter<-n+1
for(s in 1:t){
risk<-0
prem<-0
age<-trunc((s-1)/12)
if(s>h*12 & contmeses(s,k)==1){
if((s-1)/12==round((s-1)/12)){
CumVariationInter<-CumVariationInter-1
}
risk<-(cap/k)*(CumVariationInter)
}
if(contmeses(s,premperyear)==1 & Premiums_Paid<cantprem){
prem<-px
Premiums_Paid<-Premiums_Paid+1
}
va<-(res+prem-risk)*E(x+age,(frac-1)/12,i,data,prop,assumption,1)
res<-va*(E(x+age,frac/12,i,data,prop,assumption,1))^(-1)
e<-E(x+age,(frac-1)/12,i,data,prop,assumption,1)*(E(x+age,frac/12,i,data,prop,assumption,1))^(-1)
reserve<-rbind(reserve,c(prem,risk,e,round(res,3)))
rown<-c(rown,paste("Month",s))
frac<-frac+1
if(round(s/12)==s/12){
frac<-1
}
}
colnames(reserve)<-c("Premium","Risk","1/E","Reserve")
rownames(reserve)<-rown
} else if(variation=="intra"){
Premiums_Paid<-0
frac<-1
CumVariationIntra<-n*k+1
for(s in 1:t){
risk<-0
prem<-0
age<-trunc((s-1)/12)
if(s>h*12 & contmeses(s,k)==1){
CumVariationIntra<-CumVariationIntra-1
risk<-(cap/k)*(CumVariationIntra)
}
if(contmeses(s,premperyear)==1 & Premiums_Paid<cantprem){
prem<-px
Premiums_Paid<-Premiums_Paid+1
}
va<-(res+prem-risk)*E(x+age,(frac-1)/12,i,data,prop,assumption,1)
res<-va*(E(x+age,frac/12,i,data,prop,assumption,1))^(-1)
e<-E(x+age,(frac-1)/12,i,data,prop,assumption,1)*(E(x+age,frac/12,i,data,prop,assumption,1))^(-1)
reserve<-rbind(reserve,c(prem,risk,e,round(res,3)))
rown<-c(rown,paste("Month",s))
frac<-frac+1
if(round(s/12)==s/12){
frac<-1
}
}
colnames(reserve)<-c("Premium","Risk","1/E","Reserve")
rownames(reserve)<-rown
}else{
stop("Check variation")
}
}else{
stop("Check Month")
}
}else{
stop("Check k")
}
} else{
stop("Check values")
}
return(as.data.frame(reserve))
} |
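# Hypothetical usage sketch: V_aD() depends on helpers E(), contmeses() and
# is_integer() and on a mortality table passed as `data`, all assumed to come
# from the surrounding actuarial package; the table name below is illustrative.
# V_aD(px = 50, x = 30, h = 0, n = 10, k = 1, cantprem = 5, premperyear = 1,
#      i = 0.04, data = mortality.table, cap = 1000, t = 3)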
context("stat_prop")
test_that("example", {
expect_print <- function(x) {
expect_silent(print(x))
}
d <- as.data.frame(Titanic)
p <- ggplot(d) +
aes(x = Class, fill = Survived, weight = Freq, by = Class) +
geom_bar(position = "fill") +
geom_text(stat = "prop", position = position_fill(.5))
expect_print(p)
expect_print(p + facet_grid(~Sex))
expect_print(ggplot(d) +
aes(x = Class, fill = Survived, weight = Freq) +
geom_bar(position = "dodge") +
geom_text(
aes(by = Survived),
stat = "prop",
position = position_dodge(0.9), vjust = "bottom"
))
expect_print(ggplot(d) +
aes(x = Class, fill = Survived, weight = Freq, by = 1) +
geom_bar() +
geom_text(
aes(label = scales::percent(after_stat(prop), accuracy = 1)),
stat = "prop",
position = position_stack(.5)
))
skip_if_not_installed("reshape")
data(tips, package = "reshape")
expect_print(ggally_colbar(tips, mapping = aes(x = smoker, y = sex)))
expect_print(ggally_rowbar(tips, mapping = aes(x = smoker, y = sex)))
expect_print(ggally_colbar(tips, mapping = aes(x = smoker, y = sex), size = 8))
expect_print(ggally_colbar(tips, mapping = aes(x = smoker, y = sex),
colour = "white", fontface = "bold"))
expect_print(ggally_colbar(tips, mapping = aes(x = smoker, y = sex, label = after_stat(count))))
expect_print(ggally_colbar(tips, mapping = aes(x = smoker, y = sex), geom_bar_args = list(width = .5)))
expect_print(ggally_colbar(tips, mapping = aes(x = smoker, y = sex),
label_format = scales::label_percent(accuracy = .01, decimal.mark = ",")))
expect_print(ggduo(
data = as.data.frame(Titanic),
mapping = aes(weight = Freq),
columnsX = "Survived",
columnsY = c("Sex", "Class", "Age"),
types = list(discrete = "rowbar"),
legend = 1
))
})
test_that("stat_prop() works with an y aesthetic", {
expect_print <- function(x) {
expect_silent(print(x))
}
d <- as.data.frame(Titanic)
p <- ggplot(d) +
aes(y = Class, fill = Survived, weight = Freq, by = Class) +
geom_bar(position = "fill") +
geom_text(stat = "prop", position = position_fill(.5))
expect_print(p)
}) |
dev2bitmap <- function(file, type = "png16m", height = 7, width = 7, res = 72,
units = "in", pointsize, ...,
method = c("postscript", "pdf"), taa = NA, gaa = NA)
{
if(missing(file)) stop("'file' is missing with no default")
if(!is.character(file) || length(file) != 1L || !nzchar(file))
stop("'file' must be a non-empty character string")
method <- match.arg(method)
units <- match.arg(units, c("in", "px", "cm", "mm"))
height <- switch(units, "in"=1, "cm"=1/2.54, "mm"=1/25.4, "px"=1/res) * height
width <- switch(units, "in"=1, "cm"=1/2.54, "mm"=1/25.4, "px"=1/res) * width
gsexe <- tools::find_gs_cmd()
if(!nzchar(gsexe)) stop("GhostScript was not found")
check_gs_type(gsexe, type)
if(missing(pointsize)) pointsize <- 1.5*min(width, height)
tmp <- tempfile("Rbit")
on.exit(unlink(tmp))
din <- graphics::par("din"); w <- din[1L]; h <- din[2L]
if(missing(width) && !missing(height)) width <- w/h * height
if(missing(height) && !missing(width)) height <- h/w * width
current.device <- dev.cur()
if(method == "pdf")
dev.off(dev.copy(device = pdf, file = tmp, width = width,
height = height,
pointsize = pointsize, paper = "special", ...))
else
dev.off(dev.copy(device = postscript, file = tmp, width = width,
height = height,
pointsize = pointsize, paper = "special",
horizontal = FALSE, ...))
dev.set(current.device)
extra <- ""
if (!is.na(taa)) extra <- paste0(" -dTextAlphaBits=", taa)
if (!is.na(gaa)) extra <- paste0(extra, " -dGraphicsAlphaBits=", gaa)
cmd <- paste0(gsexe, " -dNOPAUSE -dBATCH -q -sDEVICE=", type,
" -r", res,
" -dAutoRotatePages=/None",
" -g", ceiling(res*width), "x", ceiling(res*height),
extra,
" -sOutputFile=", shQuote(file), " ", tmp)
system(cmd, invisible = TRUE)
invisible()
}
bitmap <- function(file, type = "png16m", height = 7, width = 7, res = 72,
units = "in", pointsize, taa = NA, gaa = NA, ...)
{
if(missing(file)) stop("'file' is missing with no default")
if(!is.character(file) || length(file) != 1L || !nzchar(file))
stop("'file' must be a non-empty character string")
units <- match.arg(units, c("in", "px", "cm", "mm"))
height <- switch(units, "in"=1, "cm"=1/2.54, "mm"=1/25.4, "px"=1/res) * height
width <- switch(units, "in"=1, "cm"=1/2.54, "mm"=1/25.4, "px"=1/res) * width
gsexe <- tools::find_gs_cmd()
if(!nzchar(gsexe)) stop("GhostScript was not found")
check_gs_type(gsexe, type)
if(missing(pointsize)) pointsize <- 1.5*min(width, height)
extra <- ""
if (!is.na(taa)) extra <- paste0(" -dTextAlphaBits=", taa)
if (!is.na(gaa)) extra <- paste0(extra, " -dGraphicsAlphaBits=", gaa)
tmp <- tempfile("Rbit")
cmd <- paste0(gsexe, " -dNOPAUSE -dBATCH -q -sDEVICE=", type,
" -r", res,
" -dAutoRotatePages=/None",
" -g", ceiling(res*width), "x", ceiling(res*height),
extra,
" -sOutputFile=", shQuote(file))
postscript(file = tmp, width = width, height = height,
pointsize = pointsize, paper = "special", horizontal = FALSE,
print.it = TRUE, command = cmd, ...)
invisible()
}
check_gs_type <- function(gsexe, type)
{
gshelp <- system(paste(gsexe, "-help"), intern = TRUE, invisible = TRUE)
st <- grep("^Available", gshelp)
en <- grep("^Search", gshelp)
if(!length(st) || !length(en))
warning("unrecognized format of gs -help")
else {
gsdevs <- gshelp[(st+1L):(en-1L)]
devs <- c(strsplit(gsdevs, " "), recursive = TRUE)
if(match(type, devs, 0L) == 0L) {
op <- options(warning.length = 8000L)
on.exit(options(op))
stop(gettextf("device '%s' is not available\n", type),
gettextf("Available devices are:\n%s",
paste(gsdevs, collapse = "\n")),
domain = NA)
}
}
} |
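# Usage sketch: both devices shell out to Ghostscript, which must be on the
# search path for tools::find_gs_cmd() to locate it.
# plot(1:10)
# dev2bitmap("copy.png", type = "png16m", res = 150)  # copy the current device
# bitmap("direct.png", type = "png16m", res = 150)    # plot straight to file
# plot(rnorm(100))
# dev.off()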
close_bracket_style <- function(srcData)
{
parseData <- srcData$parseData
if (is.null(parseData))
stop("no parse data; ",
"use 'getSourceData' with 'keep.source = TRUE'")
w <- which(parseData$token == "']'")
if (!length(w))
return(TRUE)
col <- parseData$col1[w]
lines <- parseData$line1[w]
pcol <- parseData$col2[w - 1L]
pchar <- parseData$token[w - 1L]
valid <- ((col - pcol) == 1L & pchar != "','") |
((col - pcol) > 1L & pchar == "','")
res <- all(valid)
if (!res)
{
lines <- lines[!valid]
msg <- sapply(lines, function(l)
.makeMessage(gettext("Line"), " ", l, ": ",
gettext("do not use spaces before closing brackets (except after a comma)"),
appendLF = TRUE))
attributes(res) <- list(lines = lines, message = msg)
message(msg, appendLF = FALSE)
}
res
}
open_bracket_style <- function(srcData)
{
parseData <- srcData$parseData
if (is.null(parseData))
stop("no parse data; ",
"use 'getSourceData' with 'keep.source = TRUE'")
w <- which(parseData$token == "'['")
if (!length(w))
return(TRUE)
col <- parseData$col1[w]
lines <- parseData$line1[w]
valid <- 1L == (parseData$col1[w + 1L] - col)
res <- all(valid)
if (!res)
{
lines <- lines[!valid]
msg <- sapply(lines, function(l)
.makeMessage(gettext("Line"), " ", l, ": ",
gettext("do not use spaces after opening brackets"),
appendLF = TRUE))
attributes(res) <- list(lines = lines, message = msg)
message(msg, appendLF = FALSE)
}
res
} |
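# Usage sketch: both checkers expect a list whose `parseData` element has the
# shape returned by utils::getParseData(); `getSourceData` is the package's
# own loader, but a minimal stand-in could be built like this:
# exprs <- parse("script.R", keep.source = TRUE)
# srcData <- list(parseData = utils::getParseData(exprs))
# open_bracket_style(srcData)   # TRUE, or FALSE with offending line numbers
# close_bracket_style(srcData)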
SKAT.bootstrap <- function(x, NullObject, genomic.region = x@snps$genomic.region,
weights = (1-x@snps$maf)**24, maf.threshold = 0.5,
perm.target = 100, perm.max = 5e4, debug = FALSE,
estimation.pvalue = "kurtosis") {
if(nrow(x) != length(NullObject$group)) stop("Different number of individuals in 'x' and 'NullObject'")
if(!is.factor(genomic.region)) stop("'genomic.region' should be a factor")
genomic.region <- droplevels(genomic.region)
if(any(table(genomic.region)==1)) stop("All 'genomic.region' should contain at least 2 variants, please use 'filter.rare.variants()' to filter the bed matrix")
which.snps <- (x@snps$maf <= maf.threshold) & (x@snps$maf > 0)
group <- NullObject$group
Pi <- NullObject$Pi.data
X <- NullObject$X
W <- W.mat(Pi[,-1,drop=FALSE])
XX <- block.diag( rep(list(X), ncol(Pi) - 1) )
WX <- W %*% XX
XWX <- t(XX) %*% WX
U <- -(WX %*% solve(XWX, t(XX)))
diag(U) <- diag(U)+1
B <- .Call('skat_bootstrap', PACKAGE = "Ravages", x@bed, which.snps, genomic.region, group, x@p, Pi, weights, U, perm.target, perm.max);
names(B)[5] <- "p.perm"
M1 <- B$M1;
M2 <- B$M2;
M3 <- B$M3;
M4 <- B$M4;
S2 <- M2 - M1**2
m3 <- M3 - 3*S2*M1 - M1**3
m4 <- M4 - 4*m3*M1 - 6*S2*M1**2 - M1**4
B$sigma <- sqrt(S2)
B$skewness <- m3/S2**1.5
B$kurtosis <- m4/S2**2
B$p.chi2 <- as.vector(mapply(p.valeur.moments.liu, Q = B$stat, mu = B$M1, sigma = B$sigma, skewness = B$skewness, kurtosis = B$kurtosis - 3, estimation.pvalue = estimation.pvalue))
names(B)[6] <- "mean"
B$p.value <- ifelse(B$nb.perm < perm.max, B$p.perm, B$p.chi2)
B$p.value <- ifelse(is.na(B$p.chi2), NA, B$p.value)
B <- as.data.frame(B, row.names = levels(genomic.region))
if(debug)
B[,!(names(B) %in% c("M2", "M3", "M4"))]
else
B[ , c("stat", "p.perm", "p.chi2", "p.value") ]
} |
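# Usage sketch (Ravages package; `NullObject.parameters` and its arguments
# reflect the documented interface and should be treated as assumptions):
# H0 <- NullObject.parameters(pheno = x@ped$pheno, RVAT = "SKAT",
#                             pheno.type = "categorical", ref.level = 0)
# SKAT.bootstrap(x, H0, genomic.region = x@snps$genomic.region)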
item.alpha <- function(x, exclude = NULL, std = FALSE, ordered = FALSE, na.omit = FALSE,
print = c("all", "alpha", "item"), digits = 2, conf.level = 0.95,
as.na = NULL, check = TRUE, output = TRUE) {
if (isTRUE(missing(x))) {
stop("Please specify a matrix, data frame, variance-covariance or correlation matrix for the argument 'x'.",
call. = FALSE)
}
if (isTRUE(is.null(x))) {
stop("Input specified for the argument 'x' is NULL.", call. = FALSE)
}
if (isTRUE(!is.logical(check))) {
stop("Please specify TRUE or FALSE for the argument 'check'.", call. = FALSE)
}
if (isTRUE(check)) {
if (isTRUE(!is.matrix(x) && !is.data.frame(x))) {
stop("Please specify a matrix, a data frame, a variance-covariance or correlation matrix for the argument 'x'.",
call. = FALSE)
}
if (isTRUE(ncol(x) == 1L)) {
stop("Please specify at least two items to compute coefficient alpha.", call. = FALSE)
}
if (isTRUE(nrow(x) != ncol(x))) {
x.check <- vapply(as.data.frame(x, stringsAsFactors = FALSE), function(y) length(na.omit(unique(y))) == 1L, FUN.VALUE = logical(1))
if (isTRUE(any(x.check))) {
stop(paste0("Following variables in the matrix or data frame specified in 'x' have zero variance: ",
paste(names(which(x.check)), collapse = ", ")), call. = FALSE)
}
}
check.ex <- !exclude %in% colnames(x)
if (isTRUE(any(check.ex))) {
stop(paste0("Items to be excluded from the analysis were not found in 'x': ", paste(exclude[check.ex], collapse = ", ")),
call. = FALSE)
}
if (isTRUE(!is.logical(std))) {
stop("Please specify TRUE or FALSE for the argument 'std'.", call. = FALSE)
}
if (isTRUE(!is.logical(ordered))) {
stop("Please specify TRUE or FALSE for the argument 'ordered'.", call. = FALSE)
}
if (isTRUE(!is.logical(na.omit))) {
stop("Please specify TRUE or FALSE for the argument 'na.omit'.", call. = FALSE)
}
if (isTRUE(!all(print %in% c("all", "alpha", "item")))) {
stop("Character strings in the argument 'print' do not all match with \"all\", \"alpha\", or \"item\".",
call. = FALSE)
}
if (isTRUE(conf.level >= 1L || conf.level <= 0L)) {
stop("Please specifiy a numeric value between 0 and 1 for the argument 'conf.level'.",
call. = FALSE)
}
if (isTRUE(digits %% 1 != 0L || digits < 0L)) {
stop("Specify a positive integer number for the argument 'digits'.", call. = FALSE)
}
if (isTRUE(!is.logical(output))) {
stop("Please specify TRUE or FALSE for the argument 'output'.", call. = FALSE)
}
}
if (isTRUE(nrow(x) == ncol(x))) {
if (isTRUE(isSymmetric(x))) {
sym <- TRUE
x.raw <- FALSE
} else {
sym <- FALSE
x.raw <- TRUE
}
if (isTRUE(sym)) {
std <- ifelse(all(diag(x) == 1L), TRUE, FALSE)
}
} else {
x.raw <- TRUE
}
if (isTRUE(ordered)) {
if (!isTRUE(x.raw)) {
stop("Please submit raw data to the argument 'x' to compute ordinal coefficient alpha.",
call. = FALSE)
}
x <- misty::cor.poly(x, output = FALSE)$result$cor
x.raw <- FALSE
std <- TRUE
}
x <- as.data.frame(x, stringsAsFactors = FALSE)
if (isTRUE(x.raw)) {
if (isTRUE(!is.null(exclude))) {
x <- x[, which(!colnames(x) %in% exclude)]
if (isTRUE(is.null(dim(x)))) {
stop("At least two items after excluding items are needed to compute coefficient alpha.", call. = FALSE)
}
}
if (isTRUE(!is.null(as.na))) {
x <- misty::as.na(x, na = as.na, check = check)
x.miss <- vapply(x, function(y) all(is.na(y)), FUN.VALUE = logical(1L))
if (isTRUE(any(x.miss))) {
stop(paste0("After converting user-missing values into NA, following items are completely missing: ",
paste(names(which(x.miss)), collapse = ", ")), call. = FALSE)
}
x.zero.var <- vapply(x, function(y) length(na.omit(unique(y))) == 1L, FUN.VALUE = logical(1))
if (isTRUE(any(x.zero.var))) {
stop(paste0("After converting user-missing values into NA, following items have zero variance: ",
paste(names(which(x.zero.var)), collapse = ", ")), call. = FALSE)
}
}
} else {
if (isTRUE(!is.null(exclude))) {
x <- x[, which(!colnames(x) %in% exclude)]
x <- x[which(!rownames(x) %in% exclude), ]
if (isTRUE(is.null(dim(x)))) {
stop("At least two items after excluding items are needed to compute coefficient alpha.", call. = FALSE)
}
}
}
if (isTRUE(na.omit)) {
x <- na.omit(x)
}
if (isTRUE(all(c("all", "alpha", "item") %in% print))) { print <- c("alpha", "item") }
if (isTRUE(length(print) == 1L && "all" %in% print)) { print <- c("alpha", "item") }
if (isTRUE(x.raw)) {
if (isTRUE(std)) {
mat.sigma <- cor(x, use = "pairwise.complete.obs", method = "pearson")
} else {
mat.sigma <- cov(x, use = "pairwise.complete.obs", method = "pearson")
}
} else {
mat.sigma <- x
}
alpha.function <- function(mat.sigma, p) {
return((p / (p - 1)) * (1L - sum(diag(as.matrix(mat.sigma))) / sum(as.matrix(mat.sigma))))
}
p <- ncol(mat.sigma)
alpha.mat.sigma <- alpha.function(mat.sigma, p)
if (isTRUE(x.raw)) {
alpha.x <- data.frame(n = nrow(x), items = ncol(mat.sigma), alpha = alpha.mat.sigma,
stringsAsFactors = FALSE)
} else {
alpha.x <- data.frame(items = ncol(mat.sigma), alpha = alpha.mat.sigma,
stringsAsFactors = FALSE)
}
if (isTRUE(x.raw)) {
if (isTRUE(any(is.na(x)) && !isTRUE(na.omit))) {
df1 <- mean(apply(combn(ncol(x), 2L), 2, function(y) nrow(na.omit(cbind(x[, y[1L]], x[, y[2L]]))))) - 1L
} else {
df1 <- nrow(na.omit(x)) - 1L
}
df2 <- (ncol(x) - 1L) * df1
alpha.low <- 1L - (1L - alpha.mat.sigma) * qf(1L - (1L - conf.level) / 2L, df1, df2)
alpha.upp <- 1L - (1L - alpha.mat.sigma) * qf((1L - conf.level) / 2L, df1, df2)
alpha.x <- data.frame(alpha.x, low = alpha.low, upp = alpha.upp, stringsAsFactors = FALSE)
}
if (isTRUE(x.raw)) {
itemstat <- matrix(rep(NA, times = ncol(x)*2L), ncol = 2L,
dimnames = list(NULL, c("it.cor", "alpha")))
for (i in seq_len(ncol(x))) {
var <- colnames(x)[i]
itemstat[i, 1L] <- ifelse(ncol(x) > 2L, cor(x[, i], rowMeans(x[, -grep(var, colnames(x))], na.rm = TRUE),
use = "pairwise.complete.obs"), NA)
if (isTRUE(std)) {
itemstat[i, 2L] <- ifelse(ncol(x) > 2L, alpha.function(cor(x[, -grep(var, colnames(x))],
use = "pairwise.complete.obs", method = "pearson"), p = (ncol(x) - 1L)), NA)
} else {
itemstat[i, 2L] <- ifelse(ncol(x) > 2L, alpha.function(cov(x[, -grep(var, colnames(x))],
use = "pairwise.complete.obs", method = "pearson"), p = (ncol(x) - 1L)), NA)
}
}
itemstat <- data.frame(var = colnames(x),
misty::descript(x, output = FALSE)$result[, c("n", "nNA", "pNA", "m", "sd", "min", "max")],
itemstat,
stringsAsFactors = FALSE)
} else {
itemstat <- NULL
}
object <- list(call = match.call(),
type = "item.alpha",
data = x,
args = list(exclude = exclude, std = std, ordered = ordered, na.omit = na.omit,
print = print, digits = digits, conf.level = conf.level, as.na = as.na,
check = check, output = output),
result = list(alpha = alpha.x, itemstat = itemstat))
class(object) <- "misty.object"
if (isTRUE(output)) { print(object, check = FALSE) }
return(invisible(object))
} |
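# Usage sketch (item.alpha() calls other misty:: helpers internally, so the
# misty package is assumed to be installed):
# set.seed(123)
# f <- rnorm(200)
# dat <- data.frame(i1 = f + rnorm(200), i2 = f + rnorm(200),
#                   i3 = f + rnorm(200), i4 = f + rnorm(200))
# item.alpha(dat)              # raw-data alpha with CI and item statistics
# item.alpha(dat, std = TRUE)  # standardized alpha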
context("as_workbook")
test_that("as_workbook works as expected", {
source(file.path(test_path(), 'testdata', 'testdata.R'))
for(el in list(t_df1, t_mash_1, t_comp_1, t_stack_1))
{
tres <- as_workbook(el, blah = "blubb", keepNA = TRUE)
expect_is(tres, 'Workbook')
if(!inherits(el, 'Tatoo_report')){
expect_identical(length(names(tres)), 1L)
} else {
expect_identical(names(tres), names(el))
}
}
})
test_that("Tagged_table and default named regions are created correctly", {
source(file.path(test_path(), 'testdata', 'testdata.R'))
footer(t_tagged_1) <- c("blah", "blubb")
wb <- as_workbook(t_tagged_1)
res <- openxlsx::getNamedRegions(wb)
expect_setequal(
attr(res, "position"),
c("A1:A3", "A5:D10", "A5:D5", "A6:D10", "A12:A13")
)
footer(t_tagged_1) <- NULL
wb <- as_workbook(t_tagged_1)
res <- openxlsx::getNamedRegions(wb)
expect_setequal(
attr(res, "position"),
c("A1:A3", "A5:D10", "A5:D5", "A6:D10")
)
title(t_tagged_1) <- NULL
table_id(t_tagged_1) <- NULL
longtitle(t_tagged_1) <- NULL
subtitle(t_tagged_1) <- NULL
wb <- as_workbook(t_tagged_1)
res <- openxlsx::getNamedRegions(wb)
expect_setequal(
attr(res, "position"),
c("A2:D7", "A2:D2", "A3:D7")
)
})
test_that("Mashed_table named regions are created correctly", {
source(file.path(test_path(), 'testdata', 'testdata.R'))
wb <- as_workbook(t_mash_1)
res <- openxlsx::getNamedRegions(wb)
expect_identical(
attr(res, "position"),
c("A1:D21", "A1:D1", "A2:D21")
)
wb <- as_workbook(t_mash_4)
res <- openxlsx::getNamedRegions(wb)
expect_identical(
attr(res, "position"),
c("A1:A3", "A5:J5", "A6:J11", "A6:J6", "A7:J11", "A13:A13")
)
})
test_that("Composite_table named regions are created correctly", {
source(file.path(test_path(), 'testdata', 'testdata.R'))
wb <- as_workbook(t_comp_1)
res <- openxlsx::getNamedRegions(wb)
expect_setequal(
attr(res, "position"),
c("A1:I1", "A2:I8", "A2:I2", "A3:I8")
)
wb <- as_workbook(t_comp_3)
res <- openxlsx::getNamedRegions(wb)
expect_setequal(
attr(res, "position"),
c("A1:A3", "A5:G5", "A6:G12", "A6:G6", "A7:G12", "A14:A14")
)
}) |
chart.Events <-
function (R, dates, prior=12, post=12, main = NULL, xlab=NULL, ...)
{
R = checkData(R[,1,drop=FALSE])
if(is.null(main))
main = paste(colnames(R), "Event Study")
for(i in 1:length(dates)){
date = dates[i]
origin = grep(date,index(R))
if(length(origin)==0)
stop("Date not found or dates don't match the index of the data.")
x1 = matrix(R[(max(0,origin-prior)):origin, ],ncol=1)
if(origin-prior<0)
x1 = rbind(matrix(rep(NA,abs(origin-prior)+1),ncol=1),x1)
x2 = matrix(R[(origin+1):min(length(R),origin+post) ],ncol=1)
if(origin+post>length(R))
x2 = rbind(x2,matrix(rep(NA,((origin+post)-length(R))),ncol=1))
x = rbind(x1,x2)
if(date == dates[1])
y = x
else
y = cbind(y,x)
}
colnames(y) = as.character(dates)
x.xts=xts(y, order.by=time(R)[1:nrow(y)])
event.line = format(time(x.xts)[prior+1], "%m/%y")
if(is.null(xlab))
xlab = "Periods to Event"
chart.TimeSeries(x.xts, xlab = xlab, xaxis.labels = seq(-prior, post, by=1), event.lines = event.line, main = main, ...)
}
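# Usage sketch (PerformanceAnalytics-style xts input; `dates` must appear
# verbatim in the character form of index(R)):
# data(managers, package = "PerformanceAnalytics")
# chart.Events(managers[, "HAM1", drop = FALSE],
#              dates = "2001-08-31", prior = 12, post = 12)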
|
tabu.search = function(x, start, whitelist, blacklist, score, extra.args,
max.iter, maxp, optimized, tabu, debug = FALSE) {
nodes = names(x)
n.nodes = length(nodes)
iter = 1
score.equivalence = is.score.equivalent(score, nodes, extra.args)
score.decomposability = is.score.decomposable(score, extra.args)
cache = matrix(0, nrow = n.nodes, ncol = n.nodes)
updated = seq_len(n.nodes) - 1L
tabu.list = vector("list", tabu)
max.loss.iter = tabu
loss.iter = 0
best.score = -Inf
reference.score = per.node.score(network = start, score = score,
targets = nodes, extra.args = extra.args, data = x)
if (!is.null(blacklist))
blmat = arcs2amat(blacklist, nodes)
else
blmat = matrix(0L, nrow = n.nodes, ncol = n.nodes)
if (!is.null(whitelist))
wlmat = arcs2amat(whitelist, nodes)
else
wlmat = matrix(0L, nrow = n.nodes, ncol = n.nodes)
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* starting from the following network:\n")
print(start)
cat("* current score:", sum(reference.score), "\n")
cat("* whitelisted arcs are:\n")
if (!is.null(whitelist)) print(whitelist)
cat("* blacklisted arcs are:\n")
if (!is.null(blacklist)) print(blacklist)
start$learning$algo = "tabu"
start$learning$ntests = 0
start$learning$test = score
start$learning$args = extra.args
start$learning$optimized = optimized
}
repeat {
current = as.integer((iter - 1) %% tabu)
if ((robust.difference(sum(reference.score), best.score) > 0) || (iter == 1)) {
best.network = start
best.score = sum(reference.score)
}
if (debug)
cat("* iteration", iter, "using element", current, "of the tabu list.\n")
amat = arcs2amat(start$arcs, nodes)
nparents = colSums(amat)
.Call(call_tabu_hash,
amat = amat,
nodes = nodes,
tabu.list = tabu.list,
current = current)
.Call(call_score_cache_fill,
nodes = nodes,
data = x,
network = start,
score = score,
extra = extra.args,
reference = reference.score,
equivalence = score.equivalence && optimized,
decomposability = score.decomposability,
updated = (if (optimized) updated else seq(length(nodes)) - 1L),
amat = amat,
cache = cache,
blmat = blmat,
debug = debug)
to.be.added = arcs.to.be.added(amat = amat, nodes = nodes,
blacklist = blmat, whitelist = NULL, nparents = nparents,
maxp = maxp, arcs = FALSE)
bestop = .Call(call_tabu_step,
amat = amat,
nodes = nodes,
added = to.be.added,
cache = cache,
reference = reference.score,
wlmat = wlmat,
blmat = blmat,
tabu.list = tabu.list,
current = current,
baseline = 0,
nparents = nparents,
maxp = maxp,
debug = debug)
if (bestop$op == FALSE) {
if (loss.iter >= max.loss.iter) {
start = best.network
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* maximum number of iterations without improvements reached, stopping.\n")
cat("* best network ever seen is:\n")
print(best.network)
}
break
}
else {
loss.iter = loss.iter + 1
}
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* network score did not increase (for", loss.iter,
"times), looking for a minimal decrease :\n")
}
bestop = .Call(call_tabu_step,
amat = amat,
nodes = nodes,
added = to.be.added,
cache = cache,
reference = reference.score,
wlmat = wlmat,
blmat = blmat,
tabu.list = tabu.list,
current = current,
baseline = -Inf,
nparents = nparents,
maxp = maxp,
debug = debug)
if (bestop$op == FALSE) {
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* no more possible operations.\n")
cat("@ stopping at iteration", iter, ".\n")
}
if (loss.iter > 0)
start = best.network
break
}
}
else {
if (robust.difference(sum(reference.score), best.score) > 0)
loss.iter = 0
}
start = arc.operations(start, from = bestop$from, to = bestop$to,
op = bestop$op, check.cycles = FALSE, check.illegal = FALSE,
update = TRUE, debug = FALSE)
if (bestop$op == "reverse")
updated = which(nodes %in% c(bestop$from, bestop$to)) - 1L
else
updated = which(nodes %in% bestop$to) - 1L
if (debug) {
start$learning$ntests = test.counter()
cat("----------------------------------------------------------------\n")
cat("* best operation was: ")
if (bestop$op == "set")
cat("adding", bestop$from, "->", bestop$to, ".\n")
else if (bestop$op == "drop")
cat("removing", bestop$from, "->", bestop$to, ".\n")
else
cat("reversing", bestop$from, "->", bestop$to, ".\n")
cat("* current network is :\n")
print(start)
cat("* current score:", sum(reference.score), "\n")
cat(sprintf("* best score up to now: %s (delta: %s)\n",
format(best.score),
format(robust.difference(sum(reference.score), best.score))))
}
if (iter >= max.iter) {
if (debug)
cat("@ stopping at iteration", max.iter, ".\n")
if (loss.iter > 0)
start = best.network
break
}
else iter = iter + 1
}
return(start)
} |
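# Usage sketch: tabu.search() is bnlearn's internal engine; user code reaches
# it through the exported wrapper, e.g.:
# library(bnlearn)
# data(learning.test)
# dag <- tabu(learning.test, score = "bde", tabu = 10, max.iter = 100)
# modelstring(dag)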
fun.errorMessage <-
function(type)
{
if(type=='DataSet'){
txt <- "The data sets was not specificated."
}
if(type=='DataLength'){
txt <- "The length of data must be set."
}
if(type=='DefaultParameter'){
txt <- "Parameters are not specificated, using the default one."
}
return(txt)
} |
doigbeta <- function(x, shape0, shape1, shape2, p1, log=FALSE)
{
doifun(x=x, dfun=dgbeta, p1=p1, log=log, shape1=shape1, shape2=shape2, shape0=shape0)
}
poigbeta <- function(q, shape0, shape1, shape2, p1, lower.tail = TRUE, log.p = FALSE)
{
poifun(q=q, pfun=pgbeta, p1=p1, lower.tail=lower.tail, log.p=log.p, shape1=shape1, shape2=shape2, shape0=shape0)
}
qoigbeta <- function(p, shape0, shape1, shape2, p1, lower.tail = TRUE, log.p = FALSE)
{
qoifun(p=p, qfun=qgbeta, p1=p1, lower.tail=lower.tail, log.p=log.p, shape1=shape1, shape2=shape2, shape0=shape0)
}
roigbeta <- function(n, shape0, shape1, shape2, p1)
{
roifun(n=n, rfun=rgbeta, p1=p1, shape1=shape1, shape2=shape2, shape0=shape0)
}
ecoigbeta <- function(x, shape0, shape1, shape2, p1)
{
ecoifun(x=x, ecfun=ecgbeta, mfun=mgbeta, p1=p1, shape1=shape1, shape2=shape2, shape0=shape0)
}
moigbeta <- function(order, shape0, shape1, shape2, p1)
{
moifun(order=order, mfun=mgbeta, p1=p1, shape1=shape1, shape2=shape2, shape0=shape0)
}
tloigbeta <- function(shape0, shape1, shape2, p1)
{
tloifun(p1=p1, shape1=shape1, shape2=shape2, shape0=shape0)
} |
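# Usage sketch: these d/p/q/r/m wrappers delegate to generic one-inflated
# helpers (doifun, poifun, qoifun, roifun, ...) assumed to be defined
# elsewhere in the package; parameter values below are illustrative only.
# doigbeta(0.3, shape0 = 2, shape1 = 2, shape2 = 3, p1 = 0.1)   # density
# poigbeta(0.3, shape0 = 2, shape1 = 2, shape2 = 3, p1 = 0.1)   # distribution
# qoigbeta(0.95, shape0 = 2, shape1 = 2, shape2 = 3, p1 = 0.1)  # quantile
# roigbeta(5, shape0 = 2, shape1 = 2, shape2 = 3, p1 = 0.1)     # random draws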
maxlik.fd.scoring <- function(m, step = NULL,
information = c("expected", "observed", "mix"),
ls = list(type = "optimize", tol = .Machine$double.eps^0.25, cap = 1),
barrier = list(type = c("1", "2"), mu = 0),
control = list(maxit = 100, tol = 0.001, trace = FALSE, silent = FALSE),
debug = FALSE)
{
if (!is.null(step) && (step <= 0 || !is.numeric(step)))
stop("'step' must be a numeric positive value.")
mcall <- match.call()
mll.step <- function(x, m, pd, barrier)
{
mloglik.fd(x = m@pars + x * pd, model = m,
barrier = barrier)
}
dmll.step <- function(x, m, pd, xreg, barrier)
{
m@pars <- m@pars + x * pd
gr <- mloglik.fd.deriv(model = m, xreg = xreg, gradient = TRUE,
hessian = FALSE, infomat = FALSE, modcovgrad = FALSE,
barrier = barrier, version = "2")$gradient
drop(gr %*% pd)
}
cargs <- list(maxit = 100, tol = 0.001, trace = FALSE, silent = TRUE)
ncargs0 <- names(cargs)
cargs[ncargs <- names(control)] <- control
if (length(nocargs <- ncargs[!ncargs %in% ncargs0]))
warning("unknown names in 'control': ", paste(nocargs, collapse = ", "))
control <- cargs
info <- match.arg(information)[1]
use.gcov <- info == "expected"
use.hess <- info == "observed"
use.mix <- info == "mix"
lsres <- list(ls.iter = NULL, ls.counts = c("fnc" = 0, "grd" = 0))
lsintv <- c(0, ls$cap)
convergence <- FALSE
iter <- 0
if (is.null(m@ssd)) {
pg <- Mod(fft(m@diffy))^2 / (2 * pi * length(m@diffy))
} else
pg <- m@ssd
if (!is.null(m@xreg))
{
dxreg <- m@fdiff(m@xreg, frequency(m@y))
fitxreg <- lm(m@diffy ~ dxreg - 1, na.action = na.omit)
m@pars[m@ss$xreg] <- coef(fitxreg)
xreg <- list(dxreg = dxreg, fft.dxreg = fft.dxreg <- apply(dxreg, 2, fft))
} else
xreg <- NULL
pars0 <- m@pars
if (control$trace) {
Mpars <- rbind(pars0,
matrix(nrow = control$maxit + 1, ncol = length(pars0)))
} else Mpars <- NULL
steps <- if (control$trace) rep(NA, control$maxit + 2) else NULL
while (!(convergence || iter > control$maxit))
{
tmp <- mloglik.fd.deriv(m, xreg = xreg, gradient = TRUE,
hessian = use.hess, infomat = use.gcov, modcovgrad = use.mix,
barrier = barrier, version = "2")
G <- -tmp$gradient
M <- switch(info, "expected" = tmp$infomat,
"observed" = tmp$hessian, "mix" = tmp$modcovgrad)
if (info == "observed" || info == "mix")
{
M <- force.defpos(M, 0.001, FALSE)
}
pd <- drop(solve(M) %*% G)
if (is.null(step))
{
lsintv[2] <- step.maxsize(m@pars, m@lower, m@upper, pd, ls$cap)
lsout <- switch(ls$type,
"optimize" = optimize(f = mll.step, interval = lsintv,
maximum = FALSE, tol = ls$tol, m = m, pd = pd,
barrier = barrier),
"brent.fmin" = Brent.fmin(a = 0, b = lsintv[2],
fcn = mll.step, tol = ls$tol, m = m, pd = pd,
barrier = barrier),
"wolfe" = linesearch(b = lsintv[2],
fcn = mll.step, grd = dmll.step,
ftol = ls$ftol, gtol = ls$gtol, m = m, pd = pd, xreg = xreg,
barrier = barrier))
lambda <- lsout$minimum
lsres$ls.iter <- c(lsres$ls.iter, lsout$iter)
lsres$ls.counts <- lsres$ls.counts + lsout$counts
} else
if (is.numeric(step))
lambda <- step
pars.old <- m@pars
pars.new <- pars.old + lambda * pd
m <- set.pars(m, pars.new)
if (sqrt(sum((pars.old - pars.new)^2)) < control$tol)
{
convergence <- TRUE
}
iter <- iter + 1
if (control$trace)
{
Mpars[iter+1,] <- pars.new
steps[iter] <- lambda
}
if (debug)
{
val <- logLik(object = m, domain = "frequency", barrier = barrier)
cat(paste("\niter =", iter, "logLik =", round(val, 4), "\n"))
print(get.pars(m))
}
if (debug && !is.null(m@lower) && !is.null(m@upper))
{
check.bounds(m)
}
}
if (!control$silent && !convergence)
warning(paste("Possible convergence problem.",
"Maximum number of iterations reached."))
if (control$trace)
{
Mpars <- na.omit(Mpars)
attr(Mpars, "na.action") <- NULL
steps <- na.omit(steps)
attr(steps, "na.action") <- NULL
}
val <- -mloglik.fd(
model = m, barrier = barrier, inf = 99999)
if (convergence) {
convergence <- "yes"
} else
convergence <- "maximum number of iterations was reached"
vcov.type <- switch(info, "expected" = "information matrix",
"observed" = "Hessian", "mix" = "modified outer product of the gradient")
res <- c(list(call = mcall, model = m,
init = pars0, pars = m@pars, xreg = xreg, loglik = val,
convergence = convergence, iter = iter, message = "",
Mpars = Mpars, steps = steps), lsres,
list(Dmat = M, std.errors = sqrt(diag(solve(M))), vcov.type = vcov.type))
class(res) <- "stsmFit"
res
} |
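# Usage sketch (stsm package; the constructor call below reflects the
# package's documented interface and should be treated as an assumption):
# m <- stsm.model(model = "local-level", y = Nile)
# fit <- maxlik.fd.scoring(m, information = "expected")
# fit$pars
# fit$std.errors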
knit_print.huxtable <- function (x, options, ...) {
of <- getOption("huxtable.knitr_output_format", guess_knitr_output_format())
call_name <- switch(of,
latex = "to_latex",
html = "to_html",
pptx = ,
docx = "as_flextable",
md = "to_md",
screen = "to_screen",
rtf = "to_rtf",
{
warning(glue::glue(
'Unrecognized output format "{of}". Using `to_screen` to print huxtables.\n',
'Set options("huxtable.knitr_output_format") manually to ',
'"latex", "html", "rtf", "docx", "pptx", "md" or "screen".'))
"to_screen"
})
res <- do.call(call_name, list(x))
res <- switch(of,
latex = {
latex_deps <- report_latex_dependencies(quiet = TRUE)
tenv <- tabular_environment(x)
if (tenv %in% c("tabulary", "longtable")) {
latex_deps <- c(latex_deps, list(rmarkdown::latex_dependency(tenv)))
}
knitr::asis_output(res, meta = latex_deps)
},
html = knitr::asis_output(htmlPreserve(res)),
rtf = knitr::raw_output(res),
pptx = ,
docx = knitr::knit_print(res),
knitr::asis_output(res)
)
return(res)
}
knit_print.data.frame <- function(x, options, ...) {
if (! isTRUE(getOption("huxtable.knit_print_df", FALSE))) {
NextMethod()
} else {
ht <- as_huxtable(x)
df_theme <- getOption("huxtable.knit_print_df_theme", theme_plain)
assert_that(is.function(df_theme))
ht <- df_theme(ht)
knitr::knit_print(ht)
}
}
guess_knitr_output_format <- function() {
assert_package("guess_knitr_output_format", "knitr")
assert_package("guess_knitr_output_format", "rmarkdown")
of <- knitr::opts_knit$get("out.format")
if (is.null(of) || of == "markdown") {
of <- knitr::opts_knit$get("rmarkdown.pandoc.to")
if (is.null(of)) {
knit_in <- knitr::current_input()
if (is.null(knit_in)) return("")
of <- rmarkdown::default_output_format(knit_in)
of <- of$name
}
}
if (of == "tufte_handout") of <- "latex"
if (of == "tufte_html") of <- "html"
of <- sub("_.*", "", of)
if (of == "html4") of <- "html"
if (of %in% c("ioslides", "revealjs", "slidy")) of <- "html"
if (of %in% c("beamer", "pdf")) of <- "latex"
of
}
htmlPreserve <- function (x) {
x <- paste(x, collapse = "\r\n")
if (nzchar(x)) sprintf("<!--html_preserve-->%s<!--/html_preserve-->", x) else x
} |
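# Usage sketch: inside an R Markdown document, huxtables render through
# knit_print.huxtable(); plain data frames opt in via an option:
# options(huxtable.knit_print_df = TRUE)           # print data frames as huxtables
# options(huxtable.knitr_output_format = "html")   # bypass format guessing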
`km1Nugget.init` <-
function(model) {
n <- nrow(model@X)
parinit <- model@parinit
trend.estimate <- lm(model@y~model@F-1)
random.part.estimate <- trend.estimate$residuals
sigma.total.standard <- sd(random.part.estimate)
x.dist <- dist(model@X)
y.dist <- dist(random.part.estimate)
alpha <- max(min(0.5, 100/(n*(n-1))), 10/100)
I <- (x.dist >= quantile(x.dist, alpha))
sigma.total.vario <- sqrt(1/2*mean(y.dist[I]^2))
sigma.total <- max(sigma.total.standard, sigma.total.vario)
sq.nugget.init <- sqrt(model@covariance@nugget)
if (identical(sq.nugget.init, numeric(0))) sq.nugget.init <- -1
if ((sq.nugget.init>=0) & (sq.nugget.init<=sigma.total)) {
sigma.init <- sqrt(sigma.total^2 - sq.nugget.init^2)} else {
alpha <- max(min(0.5, 20/(n*(n-1))), 1/100)
I <- (x.dist <= quantile(x.dist, alpha))
mreg <- lm(y.dist[I]^2 ~ I(x.dist[I]^2))
nugget.init <- mreg$coef[1]
if (nugget.init<=0) {
sq.nugget.init <- sqrt(1/10)*sigma.total
sigma.init <- sqrt(9/10)*sigma.total}
else if (nugget.init>=sigma.total^2) {
sq.nugget.init <- sqrt(9/10)*sigma.total
sigma.init <- sqrt(1/10)*sigma.total}
else {
sq.nugget.init <- sqrt(nugget.init)
sigma.init <- sqrt(sigma.total^2 - sq.nugget.init^2)
}
}
lower <- model@lower
upper <- model@upper
ninit <- model@control$pop.size
param.n <- model@covariance@param.n
if (length(parinit)>0) {
matrixinit <- matrix(parinit, nrow = param.n, ncol = ninit)
} else {
if (existsMethod("paramSample", signature = class(model@covariance))) {
matrixinit <- paramSample(model@covariance, n=ninit, lower=lower, upper=upper, y=model@y)
} else {
matrixinit <- matrix(runif(ninit*param.n), nrow = param.n, ncol = ninit)
matrixinit <- lower + matrixinit*(upper - lower)
}
}
angle.init <- atan(sigma.init/sq.nugget.init)
radius.init <- sigma.total
radius.min <- 1/2*radius.init
radius.max <- 3/2*radius.init
angle.sim <- runif(n=ninit, min=1/2*angle.init, max=min(3/2*angle.init, pi/2))
radius.sim <- sqrt(2*runif(n=ninit, min=1/2*radius.min^2, max=1/2*radius.max^2))
sq.nugget.init.sim <- radius.sim*cos(angle.sim)
sigma.init.sim <- radius.sim*sin(angle.sim)
alphainit <- sigma.init.sim^2 / (sigma.init.sim^2 + sq.nugget.init.sim^2)
matrixinit <- rbind(matrixinit, alphainit)
fninit <- apply(matrixinit, 2, logLikFun, model)
selection <- sort(fninit, decreasing = TRUE, index.return = TRUE)$ix
selection <- selection[1:model@control$multistart]
parinit <- matrixinit[, selection, drop = FALSE]
lp <- nrow(parinit)
covinit <- list()
for (i in 1:model@control$multistart){
pari <- as.numeric(parinit[, i])
covinit[[i]] <- vect2covparam(model@covariance, pari[1:(lp-1)])
covinit[[i]]@nugget <- pari[lp]
covinit[[i]]@sd2 <- pari[lp-1]
}
return(list(par = parinit,
value = fninit[selection],
cov = covinit,
lower = c(lower, 0),
upper = c(upper, model@control$upper.alpha)))
} |
kbSkew <- function(x) {
x <- as.vector(x)
orderX <- x[order(x)] - mean(x)
revOrdrX <- x[order(x, decreasing = TRUE)] - mean(x)
even <- (orderX + revOrdrX)/2
odd <- (orderX - revOrdrX)/2
combine <- c(even, odd)
est_sigma <- sum(even^2)/sum(combine^2)
return(est_sigma)
}
kbMvtSkew <- function(x) {
x <- as.matrix(x)
p <- dim(x)[2]
perm <- function(v) {
n <- length(v)
if (n == 1) v
else {
X <- NULL
for (i in 1:n) X <- rbind(X, cbind(v[i], perm(v[-i])))
X
}
}
cardP <- factorial(p)
permX <- perm(seq(p))
Delta_Hat <- rep(0, cardP)
for (k in 1:cardP) {
delta_hat <- rep(0, p)
X = x[, permX[k, ]]
meanX = apply(X, 2, mean)
deMeanX = sweep(X, 2, meanX, FUN = "-")
delta_hat[1] = kbSkew(X[, 1])
for (j in 1:(p-1)) {
V = meanX[j+1] + cov(X[,(j+1)], X[,1:j]) %*% solve(cov(X[,1:j], X[,1:j])) %*% t(deMeanX[,1:j])
Z = X[,j+1] - as.vector(V)
delta_hat[j+1] = kbSkew(Z)
}
Delta_Hat[k] = sum(delta_hat)
}
return(mean(Delta_Hat))
}
pcKbSkew <- function(x, cor = FALSE) {
x_pca <- princomp(x, cor = cor)
pcSkew <- apply(x_pca$scores, 2, kbSkew)
return(sum(pcSkew))
}
MardiaMvtSkew <- function(x) {
x <- as.matrix(x)
n <- dim(x)[1]
sample.mean <- apply(x, 2, mean)
cx <- sweep(x, 2, sample.mean, FUN = "-")
S_Cov <- cov(x)*(n - 1)/n
inv_S <- solve(S_Cov)
quad_mat <- cx %*% inv_S %*% t(cx)
quad_cube <- quad_mat^3
beta_hat <- 1/(n^2) * sum(quad_cube)
return(beta_hat)
}
PearsonSkew <- function(x) {
if (!is.vector(x, mode = "numeric")) {
stop(sQuote("x"), " must be a vector of numeric values")
}
gamma <- mean((x - mean(x))^3)/(sd(x)^3)
return(gamma)
}
BowleySkew <- function(x) {
if (!is.vector(x, mode = "numeric")) {
stop(sQuote("x"), " must be a vector of numeric values")
}
Q <- as.vector(quantile(x, prob = c(0.25, 0.50, 0.75)))
gamma = (Q[3] + Q[1] - 2 * Q[2]) / (Q[3] - Q[1])
return(gamma)
} |
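# Usage sketch: comparing the skewness measures above on simulated data
# (all functions are self-contained apart from stats::princomp).
# set.seed(42)
# x <- rexp(500)                       # right-skewed univariate sample
# PearsonSkew(x); BowleySkew(x); kbSkew(x)
# X <- cbind(rexp(500), rnorm(500))    # bivariate sample
# kbMvtSkew(X); pcKbSkew(X); MardiaMvtSkew(X)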
write_log <- function(con, tablename, source){
log <- data.frame(table_name = tablename, timestamp = as.character(lubridate::now()), source = source, stringsAsFactors = F)
if (dbExistsTable(con,"log")) {
log <- dbReadTable(con,"log") %>%
dplyr::filter(table_name != tablename) %>%
dplyr::bind_rows(log)
}
dbWriteTable(con,"log",log, overwrite = TRUE)
}
update_table <- function(con, table, source, tablename) {
if (missing(tablename)) {
tablename <- deparse(substitute(table))
}
dbBegin(con)
rexpr <- try({
dbWriteTable(con, tablename, as.data.frame(table), overwrite = TRUE)
write_log(con, tablename, source)
})
if (inherits(rexpr, "try-error")) {
dbRollback(con)
stop(rexpr)
}
else{
dbCommit(con)
}
}
clean_database <- function(con) {
invisible(
lapply(dbListTables(con),function(x){
message("DROP TABLE ",x,"\n")
dbRemoveTable(con,x)})
)
} |
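# Usage sketch (the helpers above rely on DBI, dplyr and lubridate; RSQLite
# provides a convenient in-memory backend for trying them out):
# library(DBI); library(dplyr)
# con <- dbConnect(RSQLite::SQLite(), ":memory:")
# update_table(con, mtcars, source = "datasets")  # writes the table plus a log row
# dbReadTable(con, "log")
# clean_database(con)                             # drops every table
# dbDisconnect(con)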
.createModel_MC <- function(tree){
comment <- "Matching competition model\n Implemented as in Drury et al. Systematic Biology."
paramsNames <- c("m0","logsigma","S")
params0 <- c(0,log(1),0)
periodizing <- periodizeOneTree(tree)
eventEndOfPeriods <- endOfPeriods(periodizing, tree)
initialCondition <- function(params) return( list(mean=c(params[1]), var=matrix(c(0))) )
aAGamma <- function(i, params){
vectorU <- getLivingLineages(i, eventEndOfPeriods)
vectorA <- function(t) return(0*vectorU)
matrixGamma <- function(t) return(exp(params[2])*diag(vectorU))
matrixA <- params[3]*diag(vectorU) - (params[3]/sum(vectorU)) * outer(vectorU,vectorU)
return(list(a=vectorA, A=matrixA, Gamma=matrixGamma))
}
constraints <- function(params) return(params[3]<=0)
model <- new(Class="PhenotypicADiag", name="MC", period=periodizing$periods, aAGamma=aAGamma, numbersCopy=eventEndOfPeriods$copy, numbersPaste=eventEndOfPeriods$paste, initialCondition=initialCondition, paramsNames=paramsNames, constraints=constraints, params0=params0, tipLabels=eventEndOfPeriods$labeling, comment=comment)
return(model)
}
getMatrixCoalescenceJ <- function(tree, periods){
matrixCoalescenceTimes <- findMRCA(tree, type="height")
n <- length(matrixCoalescenceTimes[,1])
matrixCoalescenceJ <- diag(0, n)
for(k in 1:n){
for(l in 1:n){
matrixCoalescenceJ[k,l] <- which(periods == matrixCoalescenceTimes[k,l])
}
}
return(matrixCoalescenceJ)
}
isATip <- function(tree, branch_number){
return(!(tree$edge[branch_number,2] %in% tree$edge[,1]))
}
periodizeOneTree <- function(tree){
nodeheight <- nodeHeights(tree)
startingTimes <- nodeheight[,1]
endTimes <- nodeheight[,2]
all_time_events <- sort(c(startingTimes, endTimes))
periods <- unique(all_time_events)
return(list(periods=periods, startingTimes=startingTimes, endTimes=endTimes))
}
endOfPeriods <- function(periodizing, tree){
nBranch <- length(periodizing$startingTimes)
nPeriods <- length(periodizing$periods)
numbersCopy <- rep(0, times=nPeriods)
numbersPaste <- rep(0, times=nPeriods)
numbersLineages <- rep(0, times=nPeriods)
numbersLivingLineages <- rep(0, times=nPeriods)
labelingLineages <- rep(0, times=nBranch)
initialBranches <- periodizing$startingTimes[periodizing$startingTimes==0]
if(length(initialBranches) == 1){
labelingLineages[1] <- 1
n <- 1
}else{
labelingLineages[periodizing$startingTimes==0] <- c(1,2)
n <- 2
}
numbersLineages[1] <- n
numbersLivingLineages[1] <- n
numbersCopy[1] <- 1
numbersPaste[1] <- 2
for(i in 2:nPeriods){
tau_i <- periodizing$periods[i]
newBranches <- which(tau_i == periodizing$startingTimes)
if(length(newBranches) == 2){
n <- n+1
labelingLineages[newBranches[1]] <- labelingLineages[newBranches[1]-1]
labelingLineages[newBranches[2]] <- n
numbersCopy[i] <- labelingLineages[newBranches[1]-1]
numbersPaste[i] <- n
numbersLivingLineages[i] <- numbersLivingLineages[i-1]+1
}else{
deadBranches <- which(tau_i == periodizing$endTimes)
numbersCopy[i] <- labelingLineages[ deadBranches[1] ]
numbersPaste[i] <- 0
numbersLivingLineages[i] <- numbersLivingLineages[i-1]-1
}
numbersLineages[i] <- n
}
permutationLabels <- labelingLineages[!(periodizing$endTimes %in% periodizing$startingTimes)]
labeling <- tree$tip.label[order(permutationLabels)]
return(list(copy=numbersCopy, paste=numbersPaste, nLineages=numbersLineages, labeling=labeling, nLivingLineages=numbersLivingLineages))
}
getLivingLineages <- function(i, eventEndOfPeriods){
livingLineages <- rep(1, times=eventEndOfPeriods$nLineages[i])
deads <- eventEndOfPeriods$copy[1:i][eventEndOfPeriods$paste[1:i] == 0]
livingLineages[deads] <- 0
return(livingLineages)
} |
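# Usage sketch: decomposing a random tree into inter-event periods
# (nodeHeights() and findMRCA() come from phytools, rtree() from ape):
# library(ape); library(phytools)
# tree <- rtree(5)
# per <- periodizeOneTree(tree)
# ev <- endOfPeriods(per, tree)
# getLivingLineages(3, ev)   # 0/1 indicator of lineages alive in period 3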
isStrictlyPositiveIntegerOrNaOrNanScalar <- function(argument, default = NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL) {
checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = FALSE, n = 1, zeroAllowed = FALSE, negativeAllowed = FALSE, positiveAllowed = TRUE, nonIntegerAllowed = FALSE, naAllowed = TRUE, nanAllowed = TRUE, infAllowed = FALSE, message = message, argumentName = argumentName)
} |
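# Usage sketch (`checkarg` is the package's internal validation engine):
# isStrictlyPositiveIntegerOrNaOrNanScalar(5, stopIfNot = TRUE,
#                                          argumentName = "n")   # accepted
# isStrictlyPositiveIntegerOrNaOrNanScalar(-1, stopIfNot = TRUE,
#                                          argumentName = "n")   # error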
fbag <- function (data, factor, xlim = NULL, ylim = range(data$y, na.rm = TRUE),
xlab, ylab, plotlegend, legendpos, ncol, projmethod, ...)
{
y <- t(data$y)
x <- data$x
if (projmethod == "PCAproj") {
rob <- PCAproj(y, k = 2, center = median)$score
}
if (projmethod == "rapca") {
rob <- fdpca(x, data$y)$coeff[, 2:3]
rownames(rob) = 1:(dim(data$y)[2])
colnames(rob) = c("Comp.1", "Comp.2")
}
pcbag <- compute.bagplot(rob[, 1], rob[, 2], factor = factor)
if (pcbag$is.one.dim == TRUE) {
stop("Bivariate principal component scores lie in one direction.")
}
else {
outlier <- as.numeric(rownames(pcbag$pxy.outlier))
inside <- as.numeric(rownames(pcbag$pxy.bag))
insidecurve <- y[inside, ]
maximum1 <- apply(insidecurve, 2, max, na.rm = TRUE)
minimum1 <- apply(insidecurve, 2, min, na.rm = TRUE)
out <- as.numeric(rownames(pcbag$pxy.outer))
outcurve <- y[out, ]
maximum2 <- apply(outcurve, 2, max, na.rm = TRUE)
minimum2 <- apply(outcurve, 2, min, na.rm = TRUE)
p = dim(y)[2]
low = up = matrix(, p, 1)
for (i in 1:p) {
up[i, ] = quantile(outcurve[, i], probs = 0.75)
low[i, ] = quantile(outcurve[, i], probs = 0.25)
}
IQR = up - low
dist <- (rob[, 1] - pcbag$center[1])^2 + (rob[, 2] -
pcbag$center[2])^2
center <- order(dist)[1]
centercurve <- y[center, ]
notchlow <- centercurve - 1.57 * (IQR)/sqrt(nrow(y))
notchupper <- centercurve + 1.57 * (IQR)/sqrt(nrow(y))
n <- length(outlier)
plot(c(x, rev(x)), c(maximum2, rev(minimum2)), type = "n",
main = "", ylim = ylim, xlab = xlab, ylab = ylab)
polygon(c(x, rev(x)), c(maximum2, rev(minimum2)), border = FALSE,
col = "light gray", ylim = ylim, ...)
polygon(c(x, rev(x)), c(maximum1, rev(minimum1)), border = FALSE,
col = "dark gray", ...)
lines(fts(x, notchlow), col = "blue", lty = 2, ...)
lines(fts(x, notchupper), col = "blue", lty = 2, ...)
lines(fts(x, centercurve), col = "black", ...)
if (n > 0) {
if(n == 1)
{
outliercurve <- as.matrix(y[outlier, ])
lines(fts(x, outliercurve), col = rainbow(n),
...)
if (plotlegend == TRUE) {
legend(legendpos, c(colnames(data$y)[outlier]),
col = rainbow(n), lty = 1, ncol = ncol, ...)
}
}
if(n > 1)
{
outliercurve <- y[outlier, ]
lines(fts(x, t(outliercurve)), col = rainbow(n),
...)
if (plotlegend == TRUE) {
legend(legendpos, c(colnames(data$y)[outlier]),
col = rainbow(n), lty = 1, ncol = ncol, ...)
}
}
return(outlier)
}
}
} |
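# Usage sketch (rainbow package; fbag() is normally reached through
# fboxplot(..., type = "bag"), and fds objects supply the x/y slots used above):
# library(rainbow)
# dat <- fds(x = 1:12, y = matrix(rnorm(12 * 50), nrow = 12, ncol = 50))
# fbag(dat, factor = 2.58, xlab = "x", ylab = "y", plotlegend = FALSE,
#      legendpos = "topright", ncol = 1, projmethod = "PCAproj")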
wtd.stat.amb <- function(data, w){
y <- cos(data) + 1i * sin(data)
wtd_ext_mean <- colMeans( y * w )
ymean <- rbind( Re(wtd_ext_mean),
Im(wtd_ext_mean))
y1bar <- ymean[,1]
y2bar <- ymean[,2]
S12 <- colMeans(
cbind(Im(y[,1]) * Im(y[,2]) * w,
-Re(y[,1]) * Im(y[,2]) * w,
-Im(y[,1]) * Re(y[,2]) * w,
Re(y[,1]) * Re(y[,2]) * w) )
S12 <- matrix(S12, nrow = 2)
return( list(y1bar = y1bar, y2bar = y2bar, S12 = S12))
} |
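# Usage sketch: weighted circular means and cross-moment matrix for paired
# angles (columns of `data` are angles in radians, `w` are case weights):
# ang <- cbind(runif(100, 0, 2 * pi), runif(100, 0, 2 * pi))
# w <- rep(1, 100)
# wtd.stat.amb(ang, w)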