# Randomly initialize K cluster centers across MPI ranks (SPMD style, as in
# pmclust): ranks are chosen in proportion to their local data sizes.
initial.center.spmd <- function(PARAM, MU = NULL){
if(exists("X.spmd", envir = .pmclustEnv)){
X.spmd <- get("X.spmd", envir = .pmclustEnv)
}
if(is.null(MU)){
N.spmd <- nrow(X.spmd)
N.allspmds <- spmd.allgather.integer(as.integer(N.spmd),
integer(.pmclustEnv$COMM.SIZE))
center.spmd <- rep(0, PARAM$K)
if(.pmclustEnv$COMM.RANK == 0){
center.spmd <- sample(1:.pmclustEnv$COMM.SIZE, PARAM$K, replace = TRUE,
prob = N.allspmds / PARAM$N) - 1
}
center.spmd <- spmd.bcast.integer(as.integer(center.spmd))
tmp <- NULL
# number of centers this rank must supply
n.center.spmd <- sum(center.spmd == .pmclustEnv$COMM.RANK)
if(n.center.spmd > 0){
id.center.spmd <- sample(1:N.spmd, n.center.spmd)
# transpose so each center occupies a contiguous p-block when the gathered
# result is unlisted into the p x K matrix MU below
tmp <- t(X.spmd[id.center.spmd, , drop = FALSE])
}
PARAM$MU <- unlist(spmd.allgather.object(tmp))
dim(PARAM$MU) <- c(PARAM$p, PARAM$K)
} else{
PARAM$MU <- MU
}
for(i.k in 1:PARAM$K){
B <- W.plus.y(X.spmd, -PARAM$MU[, i.k], nrow(X.spmd), ncol(X.spmd))
.pmclustEnv$Z.spmd[, i.k] <- -rowSums(B * B)
}
.pmclustEnv$CLASS.spmd <- unlist(apply(.pmclustEnv$Z.spmd, 1, which.max))
PARAM
}
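# A serial (non-MPI) sketch of the same initialization, assuming a plain
# numeric matrix X: pick K random rows as centers, then score each row by
# negative squared distance and classify with which.max, as above.
init_centers_serial <- function(X, K) {
  MU <- t(X[sample(nrow(X), K), , drop = FALSE])  # p x K, one column per center
  Z <- sapply(seq_len(K), function(k) -rowSums(sweep(X, 2, MU[, k])^2))
  list(MU = MU, CLASS = max.col(Z))
}
# Example: init_centers_serial(as.matrix(iris[, 1:4]), K = 3)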
library(mosaic)     # do(), pval()
library(ggformula)  # gf_dhistogram(), gf_qq()
# simulate the null distribution of the one-sample t-test p-value
Pvals.null <- do(10000) * { t.test(rnorm(25, 0, 1)) %>% pval() }
gf_dhistogram(~ p.value, data = Pvals.null, binwidth = 0.02, center = 0.01)
gf_qq(~ p.value, data = Pvals.null, distribution = qunif, geom = "line")
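# A base-R analogue of the simulation above (no mosaic/ggformula needed):
# under the null, the t-test p-value should be Uniform(0, 1).
p_null <- replicate(10000, t.test(rnorm(25, 0, 1))$p.value)
hist(p_null, breaks = seq(0, 1, by = 0.02), freq = FALSE,
     main = "Null distribution of p-values", xlab = "p-value")
qqplot(qunif(ppoints(length(p_null))), p_null, type = "l",
       xlab = "Uniform quantiles", ylab = "Observed p-values")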
compute_scales <- function(data, obj, params, ...) {
# `obj` arrives as a character scalar ("density" or "histogram"); setting it
# as the object's class lets UseMethod() dispatch on its value
class(obj) <- obj
UseMethod("compute_scales", obj)
}
compute_scales.density <- function(data, obj, params) {
scale.y <- params$scale.y
prop <- params$prop
switch(scale.y,
"data" = {
scalingProp <- data %>%
dplyr::group_by(PANEL, y) %>%
dplyr::summarise(sum_n = sum(n, na.rm = TRUE),
max_density = max(density, na.rm = TRUE)) %>%
dplyr::mutate(prop_n = sum_n/max(sum_n, na.rm = TRUE),
prop_density = 1/max(max_density)) %>%
dplyr::transmute(PANEL = PANEL,
y = y,
scalingYprop = prop_n * prop_density)
data %>%
dplyr::left_join(scalingProp, by = c("PANEL", "y")) %>%
dplyr::mutate(
y = density * scalingYprop * prop
) %>%
dplyr::select(-scalingYprop)
},
"group" = {
scalingProp <- data %>%
dplyr::group_by(PANEL, y) %>%
dplyr::summarise(scalingYprop = 1/max(density, na.rm = TRUE))
data %>%
dplyr::left_join(scalingProp, by = c("PANEL", "y")) %>%
dplyr::mutate(
y = density * scalingYprop * prop
) %>%
dplyr::select(-scalingYprop)
},
"variable" = {
warning("`scale.y = variable` is deprecated now. Use `group` instead.",
call. = FALSE)
scalingProp <- data %>%
dplyr::group_by(PANEL, y) %>%
dplyr::summarise(scalingYprop = 1/max(density, na.rm = TRUE))
data %>%
dplyr::left_join(scalingProp, by = c("PANEL", "y")) %>%
dplyr::mutate(
y = density * scalingYprop * prop
) %>%
dplyr::select(-scalingYprop)
},
"none" = {
data %>%
dplyr::mutate(
y = density,
)
}
)
}
compute_scales.histogram <- function(data, obj, params) {
scale.y <- params$scale.y
prop <- params$prop
switch(scale.y,
"data" = {
scalingProp <- data %>%
dplyr::group_by(PANEL, location) %>%
dplyr::summarise(sum_n = sum(count, na.rm = TRUE),
max_y = max(y, na.rm = TRUE)) %>%
dplyr::mutate(prop_n = sum_n/max(sum_n, na.rm = TRUE),
prop_y = 1/max(max_y)) %>%
dplyr::transmute(PANEL = PANEL,
location = location,
scalingYprop = prop_n * prop_y)
data %>%
dplyr::left_join(scalingProp, by = c("PANEL", "location")) %>%
dplyr::mutate(
y = y * scalingYprop * prop
) %>%
dplyr::select(-scalingYprop)
},
"group" = {
maxHeights <- data %>%
dplyr::group_by(PANEL, location, x) %>%
dplyr::summarise(height = sum(y, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::group_by(location, PANEL) %>%
dplyr::summarise(max_height = max(height, na.rm = TRUE))
data %>%
dplyr::left_join(y = maxHeights, by = c("location", "PANEL")) %>%
dplyr::group_by(location) %>%
dplyr::mutate(
y = y/max_height * prop
) %>%
dplyr::select(-max_height)
},
"variable" = {
warning("`scale.y = variable` is deprecated now. Use `group` instead.",
call. = FALSE)
maxHeights <- data %>%
dplyr::group_by(PANEL, location, x) %>%
dplyr::summarise(height = sum(y, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::group_by(location, PANEL) %>%
dplyr::summarise(max_height = max(height, na.rm = TRUE))
data %>%
dplyr::left_join(y = maxHeights, by = c("location", "PANEL")) %>%
dplyr::group_by(location) %>%
dplyr::mutate(
y = y/max_height * prop
) %>%
dplyr::select(-max_height)
},
"none" = {
data
}
)
}
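# A minimal sketch exercising compute_scales() on a toy layer data frame.
# Columns (PANEL, y, density) mimic what a ggplot2 density stat computes;
# the params values are illustrative assumptions.
library(dplyr)
toy <- data.frame(
  PANEL   = factor(c(1, 1, 1, 1)),
  y       = c("a", "a", "b", "b"),   # level occupied on the y axis
  density = c(0.2, 0.4, 0.1, 0.5)
)
compute_scales(toy, "density", params = list(scale.y = "group", prop = 0.9))
# each group's densities are rescaled so its maximum equals prop = 0.9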
summary.ZIPBayes <- function (object, burnin = 1, thinperiod = 1, confidence.level = 0.95, ...)
{
confidence.margin <- (1 - confidence.level)/2
summarized.res <- lapply(object, FUN = function(resultdata){
results <- apply(resultdata, MARGIN = 2, FUN = function(x){
# drop burn-in and thin each chain before summarizing
xpure <- purifyseq(x, burnin, thinperiod)
mean <- mean(xpure, na.rm = TRUE)
sd <- sd(xpure, na.rm = TRUE)
median <- median(xpure, na.rm = TRUE)
CrI <- quantile(xpure, c(confidence.margin, 1 - confidence.margin), na.rm = TRUE)
HDR <- getHDP(xpure, confidence.margin*2)
return(c(mean, median, sd, CrI, HDR))
})
# labels follow the order returned above: mean, median, sd, then the
# credible-interval bounds (as percentages) and the HDR bounds
rownames(results) <- c("mean", "median", "sd",
paste0("CI:", confidence.margin*100, "%"), paste0("CI:", (1 - confidence.margin)*100, "%"),
"HDR_LB", "HDR_UB")
return(results)
})
class(summarized.res) <- "summary.ZIPBayes"
summarized.res
}
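# purifyseq() and getHDP() are helpers assumed by summary.ZIPBayes().
# A minimal sketch of purifyseq() consistent with its use here: drop the
# first `burnin` draws, then keep every `thinperiod`-th remaining draw.
purifyseq_sketch <- function(x, burnin = 1, thinperiod = 1) {
  x <- x[-seq_len(burnin)]
  x[seq(1, length(x), by = thinperiod)]
}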
capacity_logreg_main<-function(dataRaw, signal="input", response=NULL,output_path=NULL,
side_variables=NULL,formula_string=NULL,cc_maxit=100,lr_maxit=1000, MaxNWts = 5000,
testing=FALSE, model_out=TRUE,scale=TRUE,
TestingSeed=1234,testing_cores=1,
boot_num=10,boot_prob=0.8,
sidevar_num=10,
traintest_num=10,partition_trainfrac=0.6,
plot_width=6,plot_height=4,
data_out=FALSE){
if(!is.null(output_path)){
dir.create(output_path, recursive = TRUE, showWarnings = FALSE)
}
message(" Estimating channel capacity ...")
if (is.null(response)){
response=paste0("output_",1:(ncol(dataRaw)-1) )
}
time_start=proc.time()
dataRaw=as.data.frame(dataRaw)
if (is.null(output_path)) {
message('Output path is not defined. Graphs and RDS file will not be saved.')
}
if (!is.data.frame(dataRaw)) {
stop('data is not in data.frame format')
}
if ( sum(colnames(dataRaw)==signal)==0 ) {
stop('There is no column described as signal in data')
}
if (!sum(colnames(dataRaw) %in% response)==length(response) ) {
stop('There is no column described as response in data')
}
if (!is.null(side_variables)){
if (!sum(colnames(dataRaw) %in% side_variables)==length(side_variables) ) {
stop('There is no column described as side_variables in data')
}
}
data0=dataRaw[,c(signal,response,side_variables)]
idx_na <- apply(data0, 1, function(x) any(is.na(x)))
if (any(idx_na)) {
message("There are NAs in observations - removing...")
data0 <- data0[!idx_na, ]
}
data0=func_signal_transform(data0,signal)
tempcolnames=colnames(data0)
tempsignal=data.frame(data0[,(tempcolnames%in%c(signal,paste(signal,"_RAW",sep="") ) )])
colnames(tempsignal)<-tempcolnames[(tempcolnames%in%c(signal,paste(signal,"_RAW",sep="") ) )]
data0=data.frame(data0[,!(tempcolnames%in%c(signal,paste(signal,"_RAW",sep="") ) )])
colnames(data0)<-tempcolnames[!(tempcolnames%in%c(signal,paste(signal,"_RAW",sep="") ) )]
temp_idnumeric=sapply(data0,is.numeric)
if (scale&sum(temp_idnumeric)==1) {
data0[,temp_idnumeric]<-(data0[,temp_idnumeric]-mean(data0[,temp_idnumeric]))/stats::sd(data0[,temp_idnumeric])
data <- cbind(data0,tempsignal)
} else if (scale) {
preProcValues <- caret::preProcess(data0, method = c("center", "scale"))
data <- cbind(stats::predict(preProcValues, data0),tempsignal)
} else {
data <- cbind(data0,tempsignal)
}
rm(temp_idnumeric)
output<-capacity_logreg_algorithm(data=data,signal=signal,response=response,side_variables=side_variables,
formula_string=formula_string, model_out = model_out,
cc_maxit=cc_maxit,lr_maxit=lr_maxit,MaxNWts =MaxNWts)
if (testing){
output$testing<-capacity_logreg_testing(data,signal=signal,response=response,side_variables=side_variables,
cc_maxit=cc_maxit,lr_maxit=lr_maxit,MaxNWts =MaxNWts,
formula_string=formula_string,
TestingSeed=TestingSeed,testing_cores=testing_cores,
boot_num=boot_num,boot_prob=boot_prob,
sidevar_num=sidevar_num,
traintest_num=traintest_num,partition_trainfrac=partition_trainfrac)
output$testing_pv<-lapply(output$testing,function(x){
tmp_boot_cc=sapply(x,function(xx) xx$cc)
c(mean(tmp_boot_cc<output$cc),mean(tmp_boot_cc>output$cc))
})
}
output$time <- proc.time() - time_start
output$params <- c(cc_maxit=cc_maxit,lr_maxit=lr_maxit,MaxNWts =MaxNWts)
if (data_out){
output$data <- dataRaw
}
if(!is.null(output_path)){
# output_path was already created at the top of the function
message(" Drawing graphs and saving objects ...")
temp_logGraphs=try(output_graphs_main(data=dataRaw,signal=signal,response=response,side_variables=side_variables,cc_output=output,
output_path=output_path,height=plot_height,width=plot_width),
silent=FALSE)
rm(temp_logGraphs)
saveRDS(output,file=paste(output_path,'output.rds',sep=""))
message(paste0(" Estimation finished. Results saved in ",output_path,""))
} else {
message(paste0(" Estimation finished."))
}
output
}
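# Hypothetical usage sketch (the interface matches the SLEMI-style function
# above; capacity_logreg_algorithm() etc. must be available to run it):
# df <- data.frame(input = rep(c("0", "0.1", "1"), each = 100),
#                  output_1 = rnorm(300, mean = rep(c(0, 1, 3), each = 100)))
# res <- capacity_logreg_main(dataRaw = df, signal = "input",
#                             response = "output_1")
# res$cc   # estimated channel capacity in bits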
probAnc <- function(p,q,R,mode = "budding",analysis = "directDesc",Mmax = 85,nrep = 10000){
if(!any(mode == c("budding","bifurcating","anagenesis"))){
stop("Mode not designated, must be 'budding', 'bifurcating' or 'anagenesis'")}
if(mode == "anagenesis"){message("p will be treated as the rate of anagenesis/pseudospeciation")}
if(!any(analysis == c("directDesc","indirectDesc"))){
stop("Analysis type not designated, must be 'directDesc' or 'indirectDesc'")}
if(nrep < 0){stop("nrep must be non-negative")}
if(analysis == "directDesc"){
Pp <- qsProb2Comp(R = R,p = p,q = q,mode = mode)
if(mode == "budding"){
Pd <- function(p,q,Ti){
exp(-q*(Ti-1))-exp(-q*Ti)
}
PN <- function(p,q,Ti,Ni){
(exp(-p*Ti)*((p*Ti)^Ni))/factorial(Ni)
}
maxN <- 100
}
if(mode == "bifurcating"){
Pd <- function(p,q,Ti){exp(-(p+q)*(Ti-1))-exp(-(p+q)*Ti)}
PN <- function(p,q,Ti,Ni){
if(Ni == 0){res <- q/(p+q)}
if(Ni == 2){res <- p/(p+q)}
if(Ni != 2 & Ni != 0){res <- 0}
return(res)
}
maxN <- 2
}
if(mode == "anagenesis"){
Pd <- function(p,q,Ti){exp(-(p+q)*(Ti-1))-exp(-(p+q)*Ti)}
PN <- function(p,q,Ti,Ni){
if(Ni == 0){res <- q/(p+q)}
if(Ni == 1){res <- p/(p+q)}
if(Ni != 1 & Ni != 0){res <- 0}
return(res)
}
maxN <- 1
}
res <- numeric()
for(t in 1:nrep){
Nres <- numeric()
for(N in 0:maxN){
# store at N + 1 (R vectors are 1-based); the N = 0 term vanishes anyway
# since (1 - (1 - Pp)^0) = 0
Nres[N + 1] <- PN(p = p,q = q,Ti = t,Ni = N)*(1-((1-Pp)^N))
}
res[t] <- (1-((1-R)^t))*Pd(p = p,q = q,Ti = t)/Pp*sum(Nres)
}
}
if(analysis == "indirectDesc"){
if(mode == "budding" | mode == "bifurcating"){
if(p>q){stop(
"Indirect Descendant formulae are unsolved if p>q, see Foote 1996")}
Qm <- function(p,q,M){
x <- (4*p*q)/((p+q)^2)
res <- ((p+q)/(2*p))*(factorial(2*M)/((2^(2*M))*factorial(M)^2))*((x^M)/((2*M)-1))
return(res)
}
Pp <- qsProb2Comp(R = R,q = q,mode = "budding")
res <- numeric()
for(M in 1:Mmax){
res[M] <- Qm(p = p,q = q,M = M)*(1-(1-Pp)^M)
}
}
if(mode == "anagenesis"){
Pp <- qsProb2Comp(R = R,q = q,mode = "anagenesis")
QmStar <- function(p,q,M,T){
firstTerm <- numeric()
if(T > 1){
# accumulate one term per t (the original overwrote firstTerm each pass);
# the guard avoids 1:(T-1) running backwards through t = 1, 0 when T = 1
for(t in 1:(T-1)){
firstTerm[t] <- (exp(-q*(t-1))-exp(-q*t))*(exp(-p*t)*((p*t)^(M-1)))/factorial(M-1)
}
}
secondTerm <- exp(-q*(T-1))*(exp(-p*T)*((p*T)^(M-1)))/factorial(M-1)
res <- sum(firstTerm)+secondTerm
return(res)
}
res <- numeric()
for(T in 1:nrep){
Tres <- numeric()
for(M in 1:Mmax){
Tres[M] <- QmStar(p = p,q = q,M = M,T = T)*(1-(1-Pp)^M)
}
res[T] <- sum(Tres)
}
}
}
if(any(is.nan(res))){
message("Input parameters and nrep produce NaN values, which are replaced with zeroes.")
message("May want to decrease nrep to see if returned estimate holds.")
res[is.nan(res)] <- 0
}
res <- sum(res)
names(res) <- NULL
if(res>0.5 & p == q){
message("Treat result with caution: if p = q, then prob of a taxon being an ancestor should be no greater than 0.5.")
message("Values higher than 0.5 result from limits of finite calculates, particularly with high sampling probabilities.")
message("See documentation.")
}
return(res)
}
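# Usage sketch, assuming qsProb2Comp() is available (it ships with paleotree,
# the apparent source of probAnc); parameter values are illustrative:
# probAnc(p = 0.1, q = 0.1, R = 0.5, mode = "budding",
#         analysis = "directDesc")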
|
NHKaux <-
function(lambdaC, lambdaD, T, posC, typeC, posD, typeD, r = NULL, typeEst)
{
# evaluate NHKaux2 at each distance in r, passing all parameters by name
NHKr <- sapply(r, FUN = NHKaux2, lambdaC = lambdaC, lambdaD = lambdaD,
posC = posC, typeC = typeC, posD = posD, typeD = typeD,
T = T, typeEst = typeEst)
return(NHKr)
}
drake_meta_ <- function(target, config) {
if (exists(target, envir = config$meta, inherits = FALSE)) {
return(config$meta[[target]])
}
set_drake_meta(target, config)
config$meta[[target]]
}
drake_meta_old <- function(target, config) {
if (exists(target, envir = config$meta_old, inherits = FALSE)) {
return(config$meta_old[[target]])
}
set_drake_meta_old(target, config)
config$meta_old[[target]]
}
set_drake_meta <- function(target, config) {
class(target) <- drake_meta_class(target, config)
meta <- drake_meta_impl(target, config)
set_drake_meta_old(target, config)
meta <- subsume_old_meta(target, meta, config)
class(meta) <- c("drake_meta", "drake")
config$meta[[target]] <- meta
NULL
}
set_drake_meta_old <- function(target, config) {
if (target_exists(target, config)) {
meta_old <- config$cache$get(
key = target,
namespace = "meta",
use_cache = FALSE
)
config$meta_old[[target]] <- meta_old
}
}
print.drake_meta <- function(x, ...) {
cat("drake metadata for ", display_key(x$name), ":\n", sep = "")
elts <- names(x)
long <- c("command", "date")
lsts <- c("trigger", "time_start", "time_build", "time_command")
list1 <- x[setdiff(elts, c(long, lsts))]
list2 <- x[intersect(elts, long)]
list2 <- lapply(list2, crop_text, width = getOption("width") - 18L)
list3 <- x[intersect(elts, lsts)]
str(list1, no.list = TRUE)
str(list2, no.list = TRUE)
min_str(list3)
}
drake_meta_class <- function(target, config) {
spec <- config$spec[[target]]
if (is_subtarget(target, config)) {
return("subtarget")
}
if (is_dynamic(target, config)) {
return("dynamic")
}
if (is_encoded_path(target)) {
return("imported_file")
}
is_imported <- is_encoded_namespaced(target) || (spec$imported %|||% TRUE)
if (is_imported) {
return("imported_object")
}
"static"
}
drake_meta_impl <- function(target, config) {
UseMethod("drake_meta_impl")
}
drake_meta_impl.imported_file <- function(target, config) {
spec <- config$spec[[target]]
meta <- list(
name = target,
target = target,
imported = TRUE,
isfile = TRUE,
format = "none",
dynamic = FALSE,
missing = target_missing(target, config)
)
path <- config$cache$decode_path(target)
meta$mtime <- storage_mtime(path)
meta$size_storage <- storage_size(path)
spec$trigger <- trigger(condition = TRUE)
meta <- decorate_trigger_meta(target, meta, spec, config)
meta
}
drake_meta_impl.imported_object <- function(target, config) {
spec <- config$spec[[target]]
meta <- list(
name = target,
target = target,
imported = TRUE,
isfile = FALSE,
dynamic = FALSE,
format = "none",
missing = target_missing(target, config),
file_out = spec$deps_build$file_out
)
spec$trigger <- trigger(condition = TRUE)
meta <- decorate_trigger_meta(target, meta, spec, config)
meta
}
drake_meta_impl.subtarget <- function(target, config) {
parent_spec <- config$spec[[subtarget_parent(target, config)]]
list(
name = target,
target = target,
imported = FALSE,
isfile = FALSE,
dynamic = FALSE,
format = parent_spec$format %||NA% "none",
seed = resolve_target_seed(target, config),
time_start = drake_meta_start(config),
trigger = as.list(parent_spec$trigger)
)
}
drake_meta_impl.dynamic <- function(target, config) {
spec <- config$spec[[target]]
meta <- list(
name = target,
target = target,
imported = FALSE,
isfile = FALSE,
dynamic = TRUE,
format = spec$format %||NA% "none",
missing = target_missing(target, config),
seed = resolve_target_seed(target, config),
time_start = drake_meta_start(config),
dynamic_dependency_hash = dynamic_dependency_hash(target, config),
max_expand = spec$max_expand %||NA% config$max_expand
)
meta <- decorate_trigger_meta(target, meta, spec, config)
meta$dynamic_progress_namespace <- dynamic_progress_namespace(
target,
meta,
config
)
meta
}
dynamic_progress_namespace <- function(target, meta, config) {
prefix <- dynamic_progress_ns_pfx(target)
key <- dynamic_progress_key(target, meta, config)
paste0(prefix, key)
}
dynamic_progress_key <- function(target, meta, config) {
x <- dynamic_progress_prekey(target, meta, config)
x <- paste(as.character(x), collapse = "|")
digest_murmur32(x, serialize = FALSE)
}
dynamic_progress_prekey <- function(target, meta, config) {
command <- ifelse(
meta$trigger$command,
meta$command,
NA_character_
)
depend <- ifelse(
meta$trigger$depend,
meta$dependency_hash,
NA_character_
)
input_file_hash <- ifelse(
meta$trigger$file,
meta$input_file_hash,
NA_character_
)
output_file_hash <- ifelse(
meta$trigger$file,
meta$output_file_hash,
NA_character_
)
seed <- ifelse(
meta$trigger$seed,
as.character(meta$seed),
NA_character_
)
format <- ifelse(
meta$trigger$format,
meta$format,
NA_character_
)
condition <- safe_deparse(meta$trigger$condition, backtick = TRUE)
mode <- meta$trigger$mode
change_hash <- ifelse(
is.null(meta$trigger$value),
NA_character_,
config$cache$digest(meta$trigger$value)
)
list(
command = command,
depend = depend,
input_file_hash = input_file_hash,
output_file_hash = output_file_hash,
seed = seed,
format = format,
condition = condition,
mode = mode,
change_hash = change_hash
)
}
dynamic_progress_ns_pfx <- function(target) {
paste0("dyn-", target, "-")
}
drake_meta_impl.static <- function(target, config) {
spec <- config$spec[[target]]
meta <- list(
name = target,
target = target,
imported = FALSE,
isfile = FALSE,
dynamic = FALSE,
format = spec$format %||NA% "none",
missing = target_missing(target, config),
file_out = spec$deps_build$file_out,
seed = resolve_target_seed(target, config),
time_start = drake_meta_start(config)
)
meta <- decorate_trigger_meta(target, meta, spec, config)
meta
}
decorate_trigger_meta <- function(target, meta, spec, config) {
meta$trigger <- as.list(spec$trigger)
meta$command <- spec$command_standardized
meta$dependency_hash <- static_dependency_hash(target, config)
meta$input_file_hash <- input_file_hash(target = target, config = config)
meta$output_file_hash <- output_file_hash(target = target, config = config)
if (!is.null(meta$trigger$change)) {
try_load_deps(spec$deps_change$memory, config = config)
meta$trigger$value <- eval(meta$trigger$change, config$envir_targets)
}
meta
}
subsume_old_meta <- function(target, meta, config) {
if (!is_dynamic(target, config)) {
class(target) <- meta$format
meta <- decorate_trigger_format_meta(target, meta, config)
}
meta
}
decorate_trigger_format_meta <- function(target, meta, config) {
UseMethod("decorate_trigger_format_meta")
}
decorate_trigger_format_meta.default <- function(target, meta, config) {
meta
}
decorate_trigger_format_meta.file <- function(target, meta, config) {
meta_old <- config$meta_old[[target]]
if (is.null(meta_old) || !meta$trigger$file) {
return(meta)
}
path <- as.character(meta_old$format_file_path)
new_mtime <- storage_mtime(path)
new_size <- storage_size(path)
hash <- as.character(meta_old$format_file_hash)
exists <- file.exists(path)
hash[!exists] <- ""
should_rehash <- exists & should_rehash_local(
size_threshold = rehash_storage_size_threshold,
new_mtime = new_mtime,
old_mtime = as.numeric(meta_old$format_file_time),
new_size = new_size,
old_size = as.numeric(meta_old$format_file_size)
)
hash[should_rehash] <- rehash_local(path[should_rehash], config)
meta$format_file_path <- path
meta$format_file_hash <- hash
meta$format_file_time <- new_mtime
meta$format_file_size <- new_size
meta
}
drake_meta_start <- function(config) {
if (config$settings$log_build_times) {
proc_time()
}
}
target_missing <- function(target, config) {
!target_exists(target, config)
}
target_exists <- function(target, config) {
if (is.null(config$ht_target_exists)) {
target_exists_slow(target, config)
} else {
target_exists_fast(target, config)
}
}
target_exists_slow <- function(target, config) {
config$cache$exists(key = target) &
config$cache$exists(key = target, namespace = "meta")
}
target_exists_single <- function(target, config) {
ht_exists(ht = config$ht_target_exists, x = target)
}
target_exists_fast_list <- Vectorize(
target_exists_single,
vectorize.args = "target",
USE.NAMES = FALSE
)
target_exists_fast <- function(target, config) {
out <- target_exists_fast_list(target, config)
as.logical(out)
}
resolve_target_seed <- function(target, config) {
seed <- config$spec[[target]]$seed
if (is.null(seed) || is.na(seed)) {
seed <- seed_from_basic_types(config$settings$seed, target)
}
as.integer(seed)
}
seed_from_basic_types <- function(...) {
x <- paste0(..., collapse = "")
integer_hash(x = x, mod = .Machine$integer.max)
}
integer_hash <- function(x, mod = .Machine$integer.max) {
hash <- digest_murmur32(x, serialize = FALSE)
hexval <- paste0("0x", hash)
as.integer(type.convert(hexval, as.is = TRUE) %% mod)
}
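# digest_murmur32() is assumed by the hashing helpers above; a minimal sketch
# consistent with its use here, wrapping the digest package:
digest_murmur32 <- function(x, serialize = FALSE) {
  digest::digest(x, algo = "murmur32", serialize = serialize)
}
# e.g. seed_from_basic_types(1234L, "my_target") then yields a stable
# per-target integer seed.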
static_dependency_hash <- function(target, config) {
spec <- config$spec[[target]]
x <- spec$deps_build
deps <- c(x$globals, x$namespaced, x$loadd, x$readd)
if (is_imported(target, config)) {
deps <- c(deps, x$file_in, x$knitr_in)
}
deps <- setdiff(deps, spec$deps_dynamic)
if (!length(deps)) {
return("")
}
deps <- unlist(deps)
deps <- as.character(deps)
deps <- unique(deps)
deps <- sort(deps)
dependency_hash_impl(deps, config)
}
dynamic_dependency_hash <- function(target, config) {
spec <- config$spec[[target]]
deps_dynamic <- spec$deps_dynamic
deps_trace <- sort(unique(spec$deps_dynamic_trace))
deps <- c(deps_dynamic, deps_trace)
dependency_hash_impl(deps, config)
}
dependency_hash_impl <- function(deps, config) {
out <- config$cache$memo_hash(
x = deps,
fun = self_hash,
config = config
)
out <- paste(out, collapse = "")
config$cache$digest(out, serialize = FALSE)
}
self_hash <- function(target, config) {
tryCatch(
config$cache$get_hash(target),
error = error_na
)
}
is_imported <- function(target, config) {
config$spec[[target]]$imported %|||% TRUE
}
input_file_hash <- function(
target,
config,
size_threshold = rehash_storage_size_threshold
) {
deps <- config$spec[[target]]$deps_build
files <- sort(unique(as.character(c(deps$file_in, deps$knitr_in))))
if (!length(files)) {
return("")
}
out <- config$cache$memo_hash(
x = files,
fun = static_storage_hash,
config = config,
size_threshold = size_threshold
)
out <- paste(out, collapse = "")
config$cache$digest(out, serialize = FALSE)
}
output_file_hash <- function(
target,
config,
size_threshold = rehash_storage_size_threshold
) {
deps <- config$spec[[target]]$deps_build
files <- sort(unique(as.character(deps$file_out)))
if (!length(files)) {
return("")
}
out <- vapply(
X = files,
FUN = static_storage_hash,
FUN.VALUE = character(1),
config = config,
size_threshold = size_threshold
)
out <- paste(out, collapse = "")
config$cache$digest(out, serialize = FALSE)
}
static_storage_hash <- function(
target,
config,
size_threshold = rehash_storage_size_threshold
) {
if (!is_encoded_path(target)) {
return(NA_character_)
}
file <- config$cache$decode_path(target)
if (is_url(file)) {
return(rehash_static_storage(target, file, config))
}
if (!file.exists(file)) {
return(NA_character_)
}
if (target_missing(target, config)) {
return(rehash_static_storage(target, file, config))
}
meta <- config$cache$get(key = target, namespace = "meta")
should_rehash <- should_rehash_local(
size_threshold = size_threshold,
new_mtime = storage_mtime(file),
old_mtime = as.numeric(meta$mtime %|||% -Inf),
new_size = storage_size(file),
old_size = meta$size_storage %|||% -1L
)
ifelse(
should_rehash,
rehash_static_storage(target = target, config = config),
config$cache$get(key = target)
)
}
should_rehash_local_impl <- function(
size_threshold,
new_mtime,
old_mtime,
new_size,
old_size
) {
small <- (new_size < size_threshold) %|||NA% TRUE
touched <- (new_mtime > old_mtime) %|||NA% TRUE
resized <- (abs(new_size - old_size) > rehash_storage_size_tol) %|||NA% TRUE
small || touched || resized
}
should_rehash_local_list <- Vectorize(
should_rehash_local_impl,
vectorize.args = c("new_mtime", "old_mtime", "new_size", "old_size"),
USE.NAMES = FALSE
)
should_rehash_local <- function(
size_threshold,
new_mtime,
old_mtime,
new_size,
old_size
) {
out <- should_rehash_local_list(
size_threshold = size_threshold,
new_mtime = new_mtime,
old_mtime = old_mtime,
new_size = new_size,
old_size = old_size
)
as.logical(out)
}
rehash_storage_size_threshold <- 1e5
rehash_storage_size_tol <- .Machine$double.eps ^ 0.5
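# `%|||NA%` (used by should_rehash_local_impl() above) is an internal
# NULL-or-NA coalescing operator; a sketch consistent with its use here:
`%|||NA%` <- function(x, y) {
  if (is.null(x) || anyNA(x)) y else x
}
# So (NA > 1) %|||NA% TRUE is TRUE: missing metadata conservatively forces
# a rehash.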
storage_mtime_impl <- function(x) {
ifelse(dir.exists(x), dir_mtime(x), file_mtime(x))
}
storage_mtime_list <- Vectorize(
storage_mtime_impl,
vectorize.args = "x",
USE.NAMES = FALSE
)
storage_mtime <- function(x) {
as.numeric(storage_mtime_list(x))
}
dir_mtime <- function(x) {
files <- list.files(
path = x,
all.files = TRUE,
full.names = TRUE,
recursive = TRUE,
include.dirs = FALSE
)
times <- vapply(files, file_mtime, FUN.VALUE = numeric(1))
max(times %||% Inf)
}
file_mtime <- function(x) {
as.numeric(file.mtime(x))
}
storage_size_impl <- function(x) {
ifelse(dir.exists(x), dir_size(x), file_size(x))
}
storage_size_list <- Vectorize(
storage_size_impl,
vectorize.args = "x",
USE.NAMES = FALSE
)
storage_size <- function(x) {
as.numeric(storage_size_list(x))
}
dir_size <- function(x) {
files <- list.files(
path = x,
all.files = TRUE,
full.names = TRUE,
recursive = TRUE,
include.dirs = FALSE
)
sizes <- vapply(files, file_size, FUN.VALUE = numeric(1))
sum(sizes %||% 0)
}
file_size <- function(x) {
if (file.exists(x)) {
file.size(x)
} else {
NA_real_
}
}
rehash_static_storage <- function(target, file = NULL, config) {
if (!is_encoded_path(target)) {
return(NA_character_)
}
if (is.null(file)) {
file <- config$cache$decode_path(target)
}
if (is_url(file)) {
return(rehash_url(url = file, config = config))
}
if (!file.exists(file)) {
return(NA_character_)
}
rehash_local(file, config)
}
rehash_local_impl <- function(file, config) {
ifelse(dir.exists(file), rehash_dir(file, config), rehash_file(file, config))
}
rehash_local_list <- Vectorize(
rehash_local_impl,
vectorize.args = "file",
USE.NAMES = FALSE
)
rehash_local <- function(file, config) {
as.character(rehash_local_list(file, config))
}
rehash_dir <- function(dir, config) {
files <- list.files(
path = dir,
all.files = TRUE,
full.names = TRUE,
recursive = TRUE,
include.dirs = FALSE
)
out <- vapply(
files,
rehash_file,
FUN.VALUE = character(1),
config = config
)
out <- paste(out, collapse = "")
config$cache$digest(out, serialize = FALSE)
}
rehash_file <- function(file, config) {
config$cache$digest(object = file, file = TRUE, serialize = FALSE)
}
rehash_url <- function(url, config) {
assert_pkg("curl")
headers <- NULL
if (!curl::has_internet()) {
stop0("no internet. Cannot check url: ", url)
}
choices <- names(config$settings$curl_handles)
name <- longest_match(choices = choices, against = url) %||% NA_character_
handle <- config$settings$curl_handles[[name]] %|||% curl::new_handle()
handle <- curl::handle_setopt(handle, nobody = TRUE)
req <- curl::curl_fetch_memory(url, handle = handle)
stopifnot(length(req$content) < 1L)
headers <- curl::parse_headers_list(req$headers)
assert_status_code(req, url)
assert_useful_headers(headers, url)
etag <- paste(headers[["etag"]], collapse = "")
mtime <- paste(headers[["last-modified"]], collapse = "")
return(paste(etag, mtime))
}
longest_match <- function(choices, against) {
index <- vapply(
choices,
pmatch,
table = against,
FUN.VALUE = integer(1)
)
matches <- names(index[!is.na(index)])
matches[which.max(nchar(matches))]
}
is_url <- function(x) {
grepl("^http://|^https://|^ftp://", x)
}
assert_status_code <- function(req, url) {
if (req$status_code != 200L) {
stop0("could not access url: ", url)
}
}
assert_useful_headers <- function(headers, url) {
if (!any(c("etag", "last-modified") %in% names(headers))) {
stop0("no ETag or Last-Modified for url: ", url)
}
}
odl = list(
latex=paste(
'Data by \\href{http://openstreetmap.org}{OpenStreetMap}',
' available under the',
'\\href{http://opendatacommons.org/licenses/odbl}{Open Database License}'
),
markdown=paste(
'Data by [OpenStreetMap](http://openstreetmap.org) available under the',
'[Open Database License](http://opendatacommons.org/licenses/odbl)'
),
html=paste(
'Data by <a href="http://openstreetmap.org">OpenStreetMap</a>,',
' available under the',
'<a href="http://opendatacommons.org/licenses/odbl">Open Database License</a>'
),
text='Data by OpenStreetMap.org available under the Open Database License (opendatacommons.org/licenses/odbl)'
)
osm = list(long=list(
latex=paste(
', cartography is licensed as ',
'\\href{http://creativecommons.org/licenses/by-sa/2.0}{CC BY-SA}.',
sep=''
),
markdown=paste(
', cartography is licensed as [CC BY-SA](http://creativecommons.org/licenses/by-sa/2.0).',
sep=''
),
html=paste(
', cartography is licensed as',
' <a href="http://creativecommons.org/licenses/by-sa/2.0">CC BY-SA</a>.',
sep=''
),
text =paste(
', cartography is licensed as CC BY-SA (see www.openstreetmap.org/copyright).',
sep=''
)
),
short=list(
latex='\\copyright \\href{http://openstreetmap.org/copyright}{OpenStreetMap}',
markdown='© [OpenStreetMap](http://openstreetmap.org/copyright)',
html= '© <a href="http://openstreetmap.org/copyright">OpenStreetMap</a>',
text='copyright OpenStreetMap.org'
)
)
sputnik = list(
long=list(
latex='\\href{http://corp.sputnik.ru/maps}{corp.sputnik.ru/maps}',
markdown='[corp.sputnik.ru/maps](http://corp.sputnik.ru/maps)',
html=' <a href="http://corp.sputnik.ru/maps">corp.sputnik.ru/maps</a>.',
text ='http://corp.sputnik.ru/maps'
)
)
sputnik$short = sputnik$long
nrcan = list(long=list(
latex=paste(
'Cartography by \\href{http://www.nrcan.gc.ca/earth-sciences/geography/topographic-information/free-data-geogratis/geogratis-web-services/17216}',
'{The Canada Base Map --- Transportation (CBMT) web mapping services',
' of the Earth Sciences Sector (ESS) at Natural Resources Canada (NRCan)}',
' licensed as the ',
'\\href{http://open.canada.ca/en/open-government-licence-canada}',
'{Open Government Licence --- Canada}.',
sep=''
),
markdown=
paste(
'Cartography by ',
'[The Canada Base Map - Transportation (CBMT) web mapping services',
' of the Earth Sciences Sector (ESS) at Natural Resources Canada (NRCan)]',
'(http://www.nrcan.gc.ca/earth-sciences/geography/topographic-information/free-data-geogratis/geogratis-web-services/17216)',
' licensed as the ',
'[Open Government Licence - Canada]',
'(http://open.canada.ca/en/open-government-licence-canada).',
sep=''
),
html=paste(
'Cartography by <a href="http://www.nrcan.gc.ca/earth-sciences/geography/topographic-information/free-data-geogratis/geogratis-web-services/17216">',
'The Canada Base Map - Transportation (CBMT) web mapping services',
' of the Earth Sciences Sector (ESS) at Natural Resources Canada (NRCan)</a>',
' licensed as the ',
'<a href="http://open.canada.ca/en/open-government-licence-canada">',
'Open Government Licence - Canada</a>.',
sep=''
),
text =paste(
'Cartography by ',
'The Canada Base Map - Transportation (CBMT) web mapping services',
' of the Earth Sciences Sector (ESS) at Natural Resources Canada (NRCan) ',
'(www.nrcan.gc.ca/earth-sciences/geography/topographic-information)',
' licensed as the ',
'Open Government Licence - Canada',
' (open.canada.ca/en/open-government-licence-canada).',
sep=''
)
),
short=list(
latex='\\href{http://www.nrcan.gc.ca}{Natural Resources Canada}',
markdown='[Natural Resources Canada](http://www.nrcan.gc.ca)',
html= '© <a href="http://www.nrcan.gc.ca">Natural Resources Canada</a>',
text='Natural Resources Canada'
)
)
for(D in names(osm$long)){
osm$long[[D]] = paste(
osm$short[[D]],
' contributors. ',
odl[[D]],
osm$long[[D]], sep=''
)
}
osmHumanitarian = osm
osmHumanitarian$long$latex = gsub("cartography",
"cartography by \\\\href{http://hot.openstreetmap.org/about}{Humanitarian OSM team}",
osmHumanitarian$long$latex)
osmHumanitarian$long$markdown = gsub("cartography",
"cartography by [Humanitarian OSM team](http://hot.openstreetmap.org/about)",
osmHumanitarian$long$markdown)
osmHumanitarian$long$text = gsub("cartography",
"cartography by Humanitarian OSM team (hot.openstreetmap.org)",
osmHumanitarian$long$text)
osmHumanitarian$long$html = gsub("cartography",
"cartography by <a href=\"http://hot.openstreetmap.org/about\">Humanitarian OSM team</a>",
osmHumanitarian$long$html)
osmLandscape = osm
osmLandscape$long$latex = gsub("cartography[[:print:]]+$",
"cartography by \\\\href{http://www.thunderforest.com/}{Thunderforest}",
osmLandscape$long$latex)
osmLandscape$long$markdown = gsub("cartography[[:print:]]+$",
"cartography by [Thunderforest](http://www.thunderforest.com/)",
osmLandscape$long$markdown)
osmLandscape$long$text = gsub("cartography[[:print:]]+$",
"cartography by Thunderforest.com",
osmLandscape$long$text)
osmLandscape$long$html = gsub("cartography[[:print:]]+$",
"cartography by <a href=\"http://www.thunderforest.com/\">Thunderforest</a>",
osmLandscape$long$html)
mapquest = mapquestSat = list(
short=list(
latex='Tiles courtesy of \\href{http://www.mapquest.com}{MapQuest}',
text='Tiles courtesy of MapQuest (www.mapquest.com)',
markdown='Tiles courtesy of [MapQuest](http://www.mapquest.com)',
html='Tiles courtesy of <a href="http://www.mapquest.com">MapQuest</a>'
),
long=list()
)
for(D in names(mapquest$short)){
mapquest$long[[D]] = paste(
mapquest$short[[D]],
odl[[D]],
sep='. ')
mapquestSat$long[[D]] = paste(
mapquest$short[[D]],
", portions courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency.",
sep='')
}
stamen = stamenToner = list(
short=list(
latex='\\copyright \\href{http://stamen.com}{Stamen Design}',
markdown='© [Stamen Design](http://stamen.com)',
html= '© <a href="http://stamen.com">Stamen Design</a>',
text='copyright Stamen Design'
),
long=list(
html=paste(
'Map tiles by <a href="http://stamen.com">Stamen Design</a>',
'under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>.'
),
latex=paste(
'Map tiles by \\href{http://stamen.com}{Stamen Design}',
'under \\href{http://creativecommons.org/licenses/by/3.0}{CC BY 3.0}.'
),
markdown=paste(
'Map tiles by [Stamen Design](http://stamen.com)',
'under [CC BY 3.0](http://creativecommons.org/licenses/by/3.0).'
),
text=paste(
'Map tiles by Stamen Design',
'under CC BY 3.0 (creativecommons.org/licenses/by/3.0).'
)
)
)
for(D in names(stamenToner$long)){
stamenToner$long[[D]] = paste(
stamenToner$long[[D]],
odl[[D]]
)
}
for(D in names(stamen$long)){
stamen$long[[D]] =
gsub("http://opendatacommons.org/licenses/odbl",
"http://creativecommons.org/licenses/by/3.0",
stamenToner$long[[D]])
stamen$long[[D]] =
gsub("Open Database License",
"CC BY-SA",
stamen$long[[D]])
}
thunderforest = maptoolkit = waze=cartodb = stamenToner
for(D in names(cartodb$long)){
for(D2 in c('long','short')){
cartodb[[D2]][[D]] = gsub(
"Stamen Design", "CartoDB",
cartodb[[D2]][[D]]
)
cartodb[[D2]][[D]] = gsub(
"stamen.com", "carto.com",
cartodb[[D2]][[D]]
)
waze[[D2]][[D]] = gsub(
"Stamen Design", "Waze mobile",
waze[[D2]][[D]]
)
waze[[D2]][[D]] = gsub(
"stamen.com", "www.waze.com/legal/notices",
waze[[D2]][[D]]
)
maptoolkit[[D2]][[D]] = gsub(
"stamen.com", "www.toursprung.com",
maptoolkit[[D2]][[D]]
)
maptoolkit[[D2]][[D]] = gsub(
"Stamen Design", "Toursprung GmbH",
maptoolkit[[D2]][[D]]
)
thunderforest[[D2]][[D]] = gsub(
"stamen.com", "thunderforest.com",
thunderforest[[D2]][[D]]
)
thunderforest[[D2]][[D]] = gsub(
"Stamen Design", "Thunderforest",
thunderforest[[D2]][[D]]
)
}
maptoolkit$long[[D]] = paste(
maptoolkit$short[[D]],
odl[[D]]
)
waze$long[[D]] = waze$short[[D]]
}
esriAttribution = function(name) {
long=list(
'esri'='Esri, HERE, DeLorme, USGS, Intermap, increment P Corp., NRCAN, Esri Japan, METI, Esri China (Hong Kong), Esri (Thailand), TomTom, MapmyIndia, copyright OpenStreetMap contributors, and the GIS User Community',
'esri-grey' = 'Esri, HERE, DeLorme, MapmyIndia, copyright OpenStreetMap contributors, and the GIS user community',
'esri-topo' = 'Esri, HERE, DeLorme, TomTom, Intermap, increment P Corp., GEBCO, USGS, FAO, NPS, NRCAN, GeoBase, IGN, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), swisstopo, MapmyIndia, copyright OpenStreetMap contributors, and the GIS User Community',
'esri-transport' = 'Esri, HERE, DeLorme, MapmyIndia, copyright OpenStreetMap contributors'
)
short = 'Esri, OpenStreetMap, and others'
if(!length(grep("^http", name))){
name = gsub("esri\\.", "esri-", name)
if(! all(name %in% names(long))){
warning('name not an esri tile: ', name)
return(list())
}
name = osmTiles(name)
}
if(! all(names(name) %in% names(long))){
warning('name not an esri tile: ', names(name))
return(list())
}
if(!length(grep("esri", names(name)))){
return(list())
}
weburl = gsub("/tile/?$", '', name)
long = long[names(name)]
list(long=list(
latex=paste(
gsub("copyright", "\\\\copyright", long),
' see \\href{',weburl,
'}{arcgisonline.com}.',
sep=''
),
markdown=paste(
gsub("copyright", '©', long),
' see [arcgisonline.com](',
weburl, ').',
sep=''
),
html=paste(
gsub("copyright", '©', long),
' see <a href=\"',
weburl,
'\">arcgisonline.com</a>',
sep=''
),
text =paste(
long,
' see arcgisonline.com',
sep=''
)
),
short=list(
latex = paste('\\copyright \\href{', weburl,
'}{', short,'}',sep=''),
markdown = paste('©','[', short,'](',
weburl, ')',sep=''),
html = paste('© <a href=\"', weburl,
'\">', short , '</a>',sep=''),
text=paste('copyright', short)
)
)
}
openmapAttribution = function(name, type=c('text','latex','markdown','html','auto'), short=FALSE) {
type = type[1]
if(type == 'auto') {
if(all(unlist(mget("mdToTex", envir=.GlobalEnv, ifnotfound=FALSE)) == TRUE ) ) {
type = 'latex'
} else {
type ='markdown'
}
}
if(!is.null(names(name))){
name = names(name)
}
shortlong = c('long','short')[short+1]
name = unique(gsub("Red$|Green$|Blue$|Trans$", "", name))
result = c()
for(D in name){
if(length(grep('^nrcan', D))) {
result[D] = nrcan[[shortlong]][[type]]
} else if(length(grep(
"^osm|wikimedia|hyda|opentopomap|openstreetmap|historical|bw.mapnik",
D))){
result[D] = osm[[shortlong]][[type]]
} else if(length(grep("humanitarian",D))){
result[D] = osmHumanitarian[[shortlong]][[type]]
} else if(length(grep("sputnik",D))){
result[D] = sputnik[[shortlong]][[type]]
} else if(length(grep("landscape",D))){
result[D] = osmLandscape[[shortlong]][[type]]
} else if(length(grep("mapquest|mqcdn",D))){
if(length(grep("sat/?$",D))){
result[D] = mapquestSat[[shortlong]][[type]]
} else {
result[D] = mapquest[[shortlong]][[type]]
}
} else if(length(grep("^waze",D))){
result[D] = waze[[shortlong]][[type]]
} else if(length(grep("maptoolkit",D))){
result[D] = maptoolkit[[shortlong]][[type]]
} else if(length(grep("thunderforest|^(spinal|neighbourhood|mobile.atlas|pioneer)$",D))){
result[D] = thunderforest[[shortlong]][[type]]
} else if(length(grep("stamen",D))){
if(length(grep("stamen-toner",D))){
result[D] = stamenToner[[shortlong]][[type]]
} else {
result[D] = stamen[[shortlong]][[type]]
}
} else if(length(grep("cartodb",D))){
result[D] = cartodb[[shortlong]][[type]]
} else if(length(grep("^esri",D))){
result[[D]] = esriAttribution(D)[[shortlong]][[type]]
} else {
result[D] = NA
}
}
result
}
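# Usage sketch for the attribution tables above (tile names follow the
# mapmisc-style conventions; both calls return character vectors):
# openmapAttribution("stamen-toner", type = "markdown", short = TRUE)
# openmapAttribution("osm", type = "text")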
params <-
list(EVAL = TRUE)
stopifnot(require("knitr"))
library("bayesplot")
knitr::opts_chunk$set(
dev = "png",
dpi = 150,
fig.asp = 0.618,
fig.width = 5,
out.width = "60%",
fig.align = "center",
comment = NA,
eval = if (isTRUE(exists("params"))) params$EVAL else FALSE
)
library("ggplot2")
library("rstanarm")
head(mtcars)
fit <- stan_glm(mpg ~ ., data = mtcars, QR = TRUE, seed = 1111)
print(fit)
posterior <- as.array(fit)
dim(posterior)
dimnames(posterior)
color_scheme_set("red")
mcmc_intervals(posterior, pars = c("cyl", "drat", "am", "sigma"))
mcmc_areas(
posterior,
pars = c("cyl", "drat", "am", "sigma"),
prob = 0.8,
prob_outer = 0.99,
point_est = "mean"
)
color_scheme_set("green")
mcmc_hist(posterior, pars = c("wt", "sigma"))
color_scheme_set("blue")
mcmc_hist(posterior, pars = c("wt", "sigma"),
transformations = list("sigma" = "log"))
color_scheme_set("brightblue")
mcmc_hist_by_chain(posterior, pars = c("wt", "sigma"))
color_scheme_set("purple")
mcmc_dens(posterior, pars = c("wt", "sigma"))
mcmc_dens_overlay(posterior, pars = c("wt", "sigma"))
color_scheme_set("teal")
mcmc_violin(posterior, pars = c("wt", "sigma"), probs = c(0.1, 0.5, 0.9))
color_scheme_set("gray")
mcmc_scatter(posterior, pars = c("(Intercept)", "wt"),
size = 1.5, alpha = 0.5)
if (requireNamespace("hexbin", quietly = TRUE)) {
mcmc_hex(posterior, pars = c("(Intercept)", "wt"))
}
color_scheme_set("pink")
mcmc_pairs(posterior, pars = c("(Intercept)", "wt", "sigma"),
off_diag_args = list(size = 1.5))
color_scheme_set("blue")
mcmc_trace(posterior, pars = c("wt", "sigma"))
color_scheme_set("mix-blue-red")
mcmc_trace(posterior, pars = c("wt", "sigma"),
facet_args = list(ncol = 1, strip.position = "left"))
mcmc_trace_highlight(posterior, pars = "sigma", highlight = 3)
Hfuns.plot <- function(theta, WfdList, U, plotindex=1) {
# plot the objective H(theta) and its second derivative for each examinee
# in plotindex, marking the current theta estimate with a dashed line
evalarg <- seq(0,100,len=51)
Hval <- Hfun(theta, WfdList, U)
Result <- DHfun(theta, WfdList, U)
DHval <- Result$DH
D2Hval <- Result$D2H
linesize <- 1
nindex <- length(plotindex)
plot_list <- list()
for (j in 1:nindex) {
indexj <- plotindex[j]
Umatj <- matrix(1,51,1) %*% U[indexj,]
thetaj <- theta[indexj]
Hj <- Hfun(evalarg, WfdList, Umatj)
DHResult <- DHfun(evalarg, WfdList, Umatj)
D2Hj <- DHResult$D2H
df <- data.frame(x=evalarg, y=Hj)
p1 <- ggplot2::ggplot(df, ggplot2::aes(x, y)) +
ggplot2::geom_line(size = linesize, color='blue') +
ggplot2::geom_vline(xintercept = thetaj, size = linesize, color='blue', linetype = 2) +
ggplot2::xlab("") +
ggplot2::ylab(expression(H(theta))) +
ggplot2::labs(title=paste("Examinee",indexj,", theta =",round(thetaj, 2)))
df <- data.frame(x=evalarg, y=D2Hj)
p2 <- ggplot2::ggplot(df, ggplot2::aes(x, y)) +
ggplot2::geom_line(size=linesize, color='blue') +
ggplot2::geom_vline(xintercept = thetaj, color='blue', size=linesize, linetype = 2) +
ggplot2::geom_hline(yintercept = 0, color='blue', size=linesize, linetype = 2) +
ggplot2::xlab(expression(paste("Score index ", theta))) +
ggplot2::ylab(expression(D2H(theta))) +
ggplot2::labs(title=paste("Second derivative at theta =",round(D2Hval[indexj],4)))
p <- ggpubr::ggarrange(p1, p2, ncol = 1, nrow = 2)
print(p)
plot_list[[j]] <- p
if (nindex > 1)
readline(prompt = paste("theta", indexj, ". Press [enter] to continue"))
}
return(plot_list)
}
copyDataToTemp <- function(f0="M35NT2PM") {
# read the NAEP primer file, then write a bootstrap copy of it (plus the
# matching school file and .fr2 layout files) under a temp directory
sdf <- readNAEP(system.file("extdata/data", "M36NT2PM.dat", package = "NAEPprimer"))
sf <- system.file("extdata/data", "M36NT2PM.dat", package = "NAEPprimer")
sfs <- system.file("extdata/data", "M36NC2PM.dat", package = "NAEPprimer")
d0 <- tempdir()
# rebuild the path so separators are consistent across platforms
d0 <- paste0(dirname(d0),"/", basename(d0))
d1 <- file.path(d0,"data")
dir.create(d1, showWarnings=FALSE)
f1 <- file.path(d1, paste0(f0, ".dat"))
file.create(f1)
schFilename <- f0
substr(schFilename, nchar(schFilename) - 3, nchar(schFilename) - 3) <- "C"
f1s <- file.path(d1, paste0(schFilename, ".dat"))
n <- nrow(sdf)
# bootstrap resample of student rows, kept sorted to preserve file order
smp <- sort(sample(1:n,n, replace=TRUE))
line <- readLines(sf)
line <- line[smp]
writeChar(paste(line,collapse="\n"), f1)
d2 <- file.path(d0,"select")
dir.create(d2, showWarnings=FALSE)
d3 <- file.path(d2,"parms")
dir.create(d3, showWarnings=FALSE)
od0 <- system.file("extdata/select/parms", "M36NT2PM.fr2", package = "NAEPprimer")
file.copy(from=od0, to=file.path(d3, paste0(f0, ".fr2")))
od0s <- system.file("extdata/select/parms", "M36NC2PM.fr2", package = "NAEPprimer")
file.copy(from=od0s, to=file.path(d3, paste0(schFilename, ".fr2")))
file.copy(from=sfs, to=f1s)
readNAEP(f1)
}
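# Usage sketch: copyDataToTemp() assumes the NAEPprimer data package and a
# readNAEP() reader (e.g. from EdSurvey) are installed:
# sdf <- copyDataToTemp("M35NT2PM")  # bootstrap copy of the primer data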
context("visual acuity")
test_that("VAConverter works", {
sampdat <- c("HM 12", "20/20 + 3", "20/50", "CF", "HM",
"20/70 - 2", "LP", NA, "Prosthetic")
tmp <- VAConverter(OS = sampdat, OD = rev(sampdat), datatype = "snellen")
expect_is(tmp, "VAObject")
})
test_that("Testing get_sims function", {
skip_on_cran()
x = c(rpldis(100, 1, alpha = 2), 1:5)
m = displ$new(x)
bs = bootstrap(m, no_of_sims = 4, threads = 2, seed = 1)
sims = get_bootstrap_sims(m, 4, threads = 2, seed = 1)
m1 = displ$new(sims[, 1])
expect_equal(estimate_xmin(m1)$gof, bs$bootstraps$gof[1], tolerance = 1e-3)
m1 = displ$new(sims[, 4])
expect_equal(estimate_xmin(m1)$gof, bs$bootstraps$gof[4], tolerance = 1e-3)
sims = get_bootstrap_sims(m, 4, threads = 2, seed = 2)
m1 = displ$new(sims[, 1])
expect_gt(abs(estimate_xmin(m1)$gof - bs$bootstraps$gof[1]), 0)
m$xmin = 2
m$pars = 2
bs = bootstrap_p(m, no_of_sims = 4, threads = 2, seed = 1)
sims = get_bootstrap_p_sims(m, no_of_sims = 4, threads = 2, seed = 1)
m1 = displ$new(sims[, 1])
expect_equal(estimate_xmin(m1)$gof, bs$bootstraps$gof[1], tolerance = 1e-3)
m1 = displ$new(sims[, 4])
expect_equal(estimate_xmin(m1)$gof, bs$bootstraps$gof[4], tolerance = 1e-3)
sims = get_bootstrap_p_sims(m, 4, threads = 2, seed = 2)
m1 = displ$new(sims[, 1])
expect_gt(abs(estimate_xmin(m1)$gof - bs$bootstraps$gof[1]), 0)
})
chunk.special.output = function(txt, chunk.ind, output=ps$cdt$chunk.opt[[chunk.ind]]$output, ps = get.ps(), nali=NULL,...) {
restore.point("chunk.special.output")
opts = ps$cdt$chunk.opt[[chunk.ind]]
if (output=="htmlwidget") {
res = chunk.output.htmlwidget(txt=txt, widget.name=opts$widget, chunk.ind=chunk.ind, nali=nali,...)
return(res)
} else {
stop(paste0("Unknown chunk output ", output, "."))
}
}
chunk.output.htmlwidget = function(txt, widget.name,chunk.ind=ps$chunk.ind, widget.id=paste0("chunkHtmlWidget_",ps$cdt$nali[[chunk.ind]]$name), outputFun = NULL, ps = get.ps(), nali=NULL, app=getApp(), width="100%", height="400px",...) {
restore.point("chunk.output.htmlwidget")
txt = paste0("{\n", paste0(txt, collapse="\n"),"\n}")
expr = parse(text=txt)[[1]]
if (is.null(outputFun)) {
outputFun <- function(outputId, width = "100%", height = "400px",...) {
htmlwidgets::shinyWidgetOutput(outputId, widget.name, width, height)
}
}
ui = outputFun(widget.id,width=width,height=height)
stud.env = ps$cdt[["stud.env"]][[chunk.ind]]
app$output[[widget.id]] = app$session$output[[widget.id]] = htmlwidgets::shinyRenderWidget(expr=expr, outputFunction=outputFun, env=stud.env, quoted=TRUE)
ui
}
"print.bic.glm" <-
function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
cat("\n Posterior probabilities(%): \n")
out <- x$probne0
names(out) <- x$namesx
print(out, ...)
cat("\n Coefficient posterior expected values: \n")
out <- x$postmean
outnames<- c(NA, x$output.names)
names(outnames)[1]<- "Intercept"
nms <- NULL
for (i in 1:length(outnames)) {
if (is.na(outnames[i][1]))
nms <- c(nms, names(outnames[i]))
else nms <- c(nms, paste(names(outnames[i]), unlist(outnames[i])[-1],
sep = "."))
}
names(out) <- nms
fout<- format(out, digits=digits)
fout[is.na(out)]<- ""
print.default(fout, print.gap = 2,
quote = FALSE, ...)
invisible(x)
}
print.bic.glm <-
function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
cat("\n Posterior probabilities(%): \n")
out <- x$probne0
print(out, ...)
cat("\n Coefficient posterior expected values: \n")
out <- x$postmean
fout <- format(out, digits = digits)
fout[is.na(out)] <- ""
print.default(fout, print.gap = 2, quote = FALSE, ...)
invisible(x)
}
"print.bicreg" <-
function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
cat("\n Posterior probabilities(%): \n")
out <- x$probne0
names(out) <- x$namesx
print(out, ...)
cat("\n Coefficient posterior expected values: \n")
out <- x$postmean
names(out) <- c("(Intercept)", x$namesx)
print.default(format(out, digits = digits), print.gap = 2,
quote = FALSE, ...)
invisible(x)
}
"print.bic.surv" <-
function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
cat("\n Posterior probabilities(%): \n")
out <- x$probne0
names(out) <- x$namesx
print(out, ...)
cat("\n Coefficient posterior expected values: \n")
out <- x$postmean
nms <- NULL
for (i in 1:length(x$output.names)) {
if (is.na(x$output.names[i][1]))
nms <- c(nms, names(x$output.names[i]))
else nms <- c(nms, paste(names(x$output.names[i]), unlist(x$output.names[i])[-1],
sep = "."))
}
names(out) <- nms
print.default(format(out, digits = digits), print.gap = 2,
quote = FALSE, ...)
invisible(x)
}
"summary.bic.glm" <-
function (object, n.models = 5, digits = max(3, getOption("digits") -
3), conditional = FALSE, display.dropped = FALSE, ...)
{
x<- object
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
if (display.dropped & x$reduced) {
cat("\nThe following variables were dropped prior to averaging:\n")
cat(x$dropped)
cat("\n")
}
n.models <- min(n.models, x$n.models)
sel <- 1:n.models
cat("\n ", length(x$postprob), " models were selected")
cat("\n Best ", n.models, " models (cumulative posterior probability = ",
round(sum(x$postprob[sel]), digits), "): \n\n")
x$namesx<- c("Intercept", x$namesx)
nms <- length(x$namesx)
ncx <- length(unlist(x$assign))
nvar <- rep(0, times = n.models)
for (i in 1:(nms-1)) nvar <- nvar + as.numeric(as.vector(rbind(rep(1,
length(x$assign[[i+1]]))) %*% (t(x$mle[sel, x$assign[[i+1]], drop = FALSE] != 0)) > 0))
modelposts <- format(round(x$postprob[sel], 3), digits = 3)
coeffs <- t(x$mle[sel, , drop = FALSE])
cfbic <- rbind(x$bic[sel], coeffs)
cfbicf <- format(cfbic, digits = digits)
coeffsf <- cfbicf[-1, , drop = FALSE]
bic <- cfbicf[1, , drop = FALSE]
postmeans <- format(x$postmean, digits = digits)
postsds <- format(x$postsd, digits = digits)
postmeans[is.na(x$postmean)]<- ""
postsds[is.na(x$postsd)]<- ""
if (conditional) {
cpostmeans <- format(x$condpostmean, digits = digits)
cpostsds <- format(x$condpostsd, digits = digits)
cpostmeans[is.na(x$condpostmean)]<- ""
cpostsds[is.na(x$condpostsd)]<- ""
}
varposts <- format(round(x$probne0, 1), digits = 3)
strlength <- nchar(coeffsf[1, 1])
decpos <- nchar(unlist(strsplit(coeffsf[2, 1], "\\."))[1])
offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "")
offset2 <- paste(rep(" ", times = decpos + 1), sep = "",
collapse = "")
modelposts <- paste(offset, modelposts, sep = "")
nvar <- paste(offset2, nvar, sep = "")
dotoffset <- round(max(nchar(coeffsf))/2)
zerocoefstring <- paste(paste(rep(" ", times = dotoffset),
collapse = "", sep = ""), ".", sep = "")
coeffsf[coeffs == 0] <- zerocoefstring
coeffsf[is.na(coeffs)]<- ""
avp <- NULL
outnames<- c(NA, x$output.names)
names(outnames)[1]<- "Intercept"
varposts<- c("100",varposts)
for (i in 1:nms) {
avp <- rbind(avp, varposts[i])
if (!is.na(outnames[[i]][1]))
avp <- rbind(avp, cbind(rep("", times = length(x$assign[[i]]))))
}
top <- cbind(postmeans, postsds)
if (conditional)
top <- cbind(top, cpostmeans, cpostsds)
top <- cbind(top, coeffsf)
atop <- NULL
for (i in 1:nms) {
if (!is.na(outnames[[i]][1]))
atop <- rbind(atop, rbind(rep("", times = ncol(top))))
atop <- rbind(atop, top[x$assign[[i ]], ])
}
top <- cbind(avp, atop)
linesep <- rep("", times = ncol(top))
offset <- c("", "", "")
if (conditional)
offset <- c(offset, c("", ""))
bottom <- rbind(c(offset, nvar), c(offset, bic), c(offset,
modelposts))
out <- rbind(top, linesep, bottom)
vnames <- NULL
for (i in 1:nms) {
vnames <- c(vnames, names(outnames[i]))
blnk <- paste(rep(" ", times = nchar(names(outnames[i]))),
collapse = "")
if (!is.na(outnames[i][1]))
vnames <- c(vnames, paste(blnk, unlist(outnames[i])[-1],
sep = "."))
}
row.names(out) <- c(vnames, "", "nVar", "BIC", "post prob")
colnms <- c("p!=0", " EV", "SD")
if (conditional)
colnms <- c(colnms, "cond EV", "cond SD")
colnms <- c(colnms, paste("model ", 1:n.models, sep = ""))
dimnames(out)[[2]] <- colnms
print.default(out, print.gap = 2, quote = FALSE, ...)
}
"summary.bicreg" <-
function (object, n.models = 5, digits = max(3, getOption("digits") -
3), conditional = FALSE, display.dropped = FALSE, ...)
{
x<- object
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
if (display.dropped & x$reduced) {
cat("\nThe following variables were dropped prior to averaging:\n")
cat(x$dropped)
cat("\n")
}
n.models <- min(n.models, x$n.models)
sel <- 1:n.models
cat("\n ", length(x$postprob), " models were selected")
cat("\n Best ", n.models, " models (cumulative posterior probability = ",
round(sum(x$postprob[sel]), digits), "): \n\n")
nms <- length(x$namesx) + 1
r2 <- format(round(x$r2[sel]/100, 3), digits = 3)
nvar <- rbind(rep(1, length(x$namesx) + 1)) %*% t(x$ols[sel,
] != 0) - 1
modelposts <- format(round(x$postprob[sel], 3), digits = 3)
coeffs <- t(x$ols[sel, ])
cfbic <- rbind(x$bic[sel], coeffs)
cfbicf <- format(cfbic, digits = digits)
coeffsf <- cfbicf[-1, ]
bic <- cfbicf[1, ]
dotoffset <- round(max(nchar(coeffsf))/2)
zerocoefstring <- paste(paste(rep(" ", times = dotoffset),
collapse = "", sep = ""), ".", sep = "")
coeffsf[coeffs == 0] <- zerocoefstring
postmeans <- format(x$postmean, digits = digits)
postsds <- format(x$postsd, digits = digits)
if (conditional) {
cpostmeans <- format(x$condpostmean, digits = digits)
cpostsds <- format(x$condpostsd, digits = digits)
}
varposts <- format(round(c(100, x$probne0), 1), digits = 3)
strlength <- nchar(coeffsf[1, 1])
decpos <- nchar(unlist(strsplit(coeffsf[1, 1], "\\."))[1])
offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "")
offset2 <- paste(rep(" ", times = decpos + 1), sep = "",
collapse = "")
r2 <- paste(offset, r2, sep = "")
modelposts <- paste(offset, modelposts, sep = "")
nvar <- paste(offset2, nvar, sep = "")
top <- cbind(varposts, postmeans, postsds)
if (conditional)
top <- cbind(top, cpostmeans, cpostsds)
top <- cbind(top, coeffsf)
linesep <- rep("", times = ncol(top))
offset <- c("", "", "")
if (conditional)
offset <- c(offset, c("", ""))
bottom <- rbind(c(offset, nvar), c(offset, r2), c(offset,
bic), c(offset, modelposts))
out <- rbind(top, linesep, bottom)
row.names(out) <- c("Intercept", x$namesx, "", "nVar",
"r2", "BIC", "post prob")
colnms <- c("p!=0", " EV", "SD")
if (conditional)
colnms <- c(colnms, "cond EV", "cond SD")
colnms <- c(colnms, paste("model ", 1:n.models, sep = ""))
dimnames(out)[[2]] <- colnms
print.default(out, print.gap = 2, quote = FALSE, ...)
}
"summary.bic.surv" <-
function (object, n.models = 5, digits = max(3, getOption("digits") -
3), conditional = FALSE, display.dropped = FALSE, ...)
{
x<- object
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
if (display.dropped & x$reduced) {
cat("\nThe following variables were dropped prior to averaging:\n")
cat(x$dropped)
cat("\n")
}
n.models <- min(n.models, x$n.models)
sel <- 1:n.models
cat("\n ", length(x$postprob), " models were selected")
cat("\n Best ", n.models, " models (cumulative posterior probability = ",
round(sum(x$postprob[sel]), digits), "): \n\n")
nms <- length(x$namesx)
ncx <- length(unlist(x$assign)[-1])
nvar <- rep(0, times = n.models)
for (i in 1:nms) nvar <- nvar + as.numeric(as.vector(rbind(rep(1,
length(x$assign[[i + 1]]))) %*% (t(x$mle[sel, x$assign[[i +
1]], drop = FALSE] != 0)) > 0))
modelposts <- format(round(x$postprob[sel], 3), digits = 3)
coeffs <- t(x$mle[sel, , drop = FALSE])
cfbic <- rbind(x$bic[sel], coeffs)
cfbicf <- format(cfbic, digits = digits)
coeffsf <- cfbicf[-1, , drop = FALSE]
bic <- cfbicf[1, , drop = FALSE]
postmeans <- format(x$postmean, digits = digits)
postsds <- format(x$postsd, digits = digits)
if (conditional) {
cpostmeans <- format(x$condpostmean, digits = digits)
cpostsds <- format(x$condpostsd, digits = digits)
}
varposts <- format(round(x$probne0, 1), digits = 3)
strlength <- nchar(coeffsf[1, 1])
decpos <- nchar(unlist(strsplit(coeffsf[1, 1], "\\."))[1])
offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "")
offset2 <- paste(rep(" ", times = decpos + 1), sep = "",
collapse = "")
modelposts <- paste(offset, modelposts, sep = "")
nvar <- paste(offset2, nvar, sep = "")
dotoffset <- round(max(nchar(coeffsf))/2)
zerocoefstring <- paste(paste(rep(" ", times = dotoffset),
collapse = "", sep = ""), ".", sep = "")
coeffsf[coeffs == 0] <- zerocoefstring
avp <- NULL
for (i in 1:nms) {
avp <- rbind(avp, varposts[i])
if (!is.na(x$output.names[[i]][1]))
avp <- rbind(avp, cbind(rep("", times = length(x$assign[[i +
1]]))))
}
top <- cbind(postmeans, postsds)
if (conditional)
top <- cbind(top, cpostmeans, cpostsds)
top <- cbind(top, coeffsf)
atop <- NULL
for (i in 1:nms) {
if (!is.na(x$output.names[[i]][1]))
atop <- rbind(atop, rbind(rep("", times = ncol(top))))
atop <- rbind(atop, top[x$assign[[i + 1]], ])
}
top <- cbind(avp, atop)
linesep <- rep("", times = ncol(top))
offset <- c("", "", "")
if (conditional)
offset <- c(offset, c("", ""))
bottom <- rbind(c(offset, nvar), c(offset, bic), c(offset,
modelposts))
out <- rbind(top, linesep, bottom)
vnames <- NULL
for (i in 1:nms) {
vnames <- c(vnames, names(x$output.names[i]))
blnk <- paste(rep(" ", times = nchar(names(x$output.names[i]))),
collapse = "")
if (!is.na(x$output.names[i][1]))
vnames <- c(vnames, paste(blnk, unlist(x$output.names[i])[-1],
sep = "."))
}
row.names(out) <- c(vnames, "", "nVar", "BIC", "post prob")
colnms <- c("p!=0", " EV", "SD")
if (conditional)
colnms <- c(colnms, "cond EV", "cond SD")
colnms <- c(colnms, paste("model ", 1:n.models, sep = ""))
dimnames(out)[[2]] <- colnms
print.default(out, print.gap = 2, quote = FALSE, ...)
}
`summary.bicreg` <-
function (object, n.models = 5, digits = max(3, getOption("digits") -
3), conditional = FALSE, display.dropped = FALSE, ...)
{
x <- object
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
if (display.dropped & x$reduced) {
cat("\nThe following variables were dropped prior to averaging:\n")
cat(x$dropped)
cat("\n")
}
n.models <- min(n.models, x$n.models)
sel <- 1:n.models
cat("\n ", length(x$postprob), " models were selected")
cat("\n Best ", n.models, " models (cumulative posterior probability = ",
round(sum(x$postprob[sel]), digits), "): \n\n")
nms <- length(x$namesx) + 1
r2 <- format(round(x$r2[sel]/100, 3), digits = 3)
    nvar <- rbind(rep(1, length(x$namesx) + 1)) %*% t(x$ols[sel, , drop = FALSE] != 0) - 1
modelposts <- format(round(x$postprob[sel], 3), digits = 3)
coeffs <- t(x$ols[sel, ,drop=FALSE])
cfbic <- rbind(x$bic[sel], coeffs)
cfbicf <- format(cfbic, digits = digits)
coeffsf <- cfbicf[-1, ,drop=FALSE]
bic <- cfbicf[1, ]
dotoffset <- round(max(nchar(coeffsf))/2)
zerocoefstring <- paste(paste(rep(" ", times = dotoffset),
collapse = "", sep = ""), ".", sep = "")
coeffsf[coeffs == 0] <- zerocoefstring
postmeans <- format(x$postmean, digits = digits)
postsds <- format(x$postsd, digits = digits)
if (conditional) {
cpostmeans <- format(x$condpostmean, digits = digits)
cpostsds <- format(x$condpostsd, digits = digits)
}
varposts <- format(round(c(100, x$probne0), 1), digits = 3)
strlength <- nchar(coeffsf[1, 1])
decpos <- nchar(unlist(strsplit(coeffsf[1, 1], "\\."))[1])
offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "")
offset2 <- paste(rep(" ", times = decpos + 1), sep = "",
collapse = "")
r2 <- paste(offset, r2, sep = "")
modelposts <- paste(offset, modelposts, sep = "")
nvar <- paste(offset2, nvar, sep = "")
top <- cbind(varposts, postmeans, postsds)
if (conditional)
top <- cbind(top, cpostmeans, cpostsds)
top <- cbind(top, coeffsf)
linesep <- rep("", times = ncol(top))
offset <- c("", "", "")
if (conditional)
offset <- c(offset, c("", ""))
bottom <- rbind(c(offset, nvar), c(offset, r2), c(offset,
bic), c(offset, modelposts))
out <- rbind(top, linesep, bottom)
row.names(out) <- c("Intercept", x$namesx, "", "nVar", "r2",
"BIC", "post prob")
colnms <- c("p!=0", " EV", "SD")
if (conditional)
colnms <- c(colnms, "cond EV", "cond SD")
colnms <- c(colnms, paste("model ", 1:n.models, sep = ""))
dimnames(out)[[2]] <- colnms
print.default(out, print.gap = 2, quote = FALSE, ...)
} |
Candidates.getByLastname <-
function (lastName, electionYear=NULL) {
if (length(electionYear)==0) {
Candidates.getByLastname.basic1 <- function (.lastName) {
request <- "Candidates.getByLastname?"
inputs <- paste("&lastName=",.lastName,sep="")
output <- pvsRequest4(request,inputs)
output$lastName <- .lastName
output
}
output.list <- lapply(lastName, FUN= function (s) {
Candidates.getByLastname.basic1(.lastName=s)
}
)
output.list <- redlist(output.list)
output <- dfList(output.list)
} else {
Candidates.getByLastname.basic2 <- function (.lastName, .electionYear) {
request <- "Candidates.getByLastname?"
inputs <- paste("&lastName=",.lastName, "&electionYear=", .electionYear, sep="")
output <- pvsRequest4(request,inputs)
output$lastName <- .lastName
output$electionYear.input <- .electionYear
output
}
output.list <- lapply(lastName, FUN= function (s) {
lapply(electionYear, FUN= function (c) {
Candidates.getByLastname.basic2( .lastName=s, .electionYear=c)
}
)
}
)
output.list <- redlist(output.list)
output <- dfList(output.list)
output$electionYear[c(as.vector(is.na(output$electionYear)))] <- output$electionYear.input[as.vector(is.na(output$electionYear))]
output$electionYear.input <- NULL
}
output
} |
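## Usage sketch (an assumption, not part of the package file: pvsRequest4()
## requires a Project Vote Smart API key to have been registered beforehand):
if (FALSE) {
  # all candidates named "Miller", then restricted to two election years
  Candidates.getByLastname("Miller")
  Candidates.getByLastname(lastName = c("Miller", "Fine"),
                           electionYear = c(2010, 2012))
}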
bartlett_test <- function(x, g, ...) UseMethod("bartlett_test", x)
bartlett_test.default <- function(x, g, ...){
cl <- match.call()
cl[[1]] <- as.name("bartlett.test")
bart <- eval.parent(cl)
vars <- tapply(x, g, var, na.rm = TRUE)
n <- table(g)
names(vars) <- names(n)
class(bart) <- c("bartlett_htest", class(bart))
bart$vars <- vars
bart$n <- n
bart
}
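## Minimal sketch using a base R dataset; bartlett_test() dispatches to
## stats::bartlett.test() and then attaches per-group variances and sizes:
if (FALSE) {
  bt <- bartlett_test(x = InsectSprays$count, g = InsectSprays$spray)
  bt$vars  # sample variance per spray group
  bt$n     # number of observations per spray group
}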
BF.bartlett_htest <- function(x,
hypothesis = NULL,
prior.hyp = NULL,
complement = TRUE,
...) {
get_est <- get_estimates(x)
nsim <- 1e5
s2 <- get_est$estimate
n <- c(x$n)
b <- 2/n
J <- length(n)
names_coef <- names(get_est$estimate)
logmx0 <- - 1 / 2 * sum((1 - b) * n) * log(pi) + 1 / 2 * log(prod(b)) +
lgamma((sum(n) - J) / 2) - lgamma((sum(b * n) - J) / 2) -
1 / 2 * (sum(n) - J) * log(sum((n - 1) * s2)) +
1 / 2 * (sum(b * n) - J) * log(sum(b * (n - 1) * s2))
logmxu <- - 1 / 2 * sum((1 - b) * n) * log(pi) + 1 / 2 * log(prod(b)) +
sum(lgamma((n - 1) / 2) - lgamma((b * n - 1) / 2) -
1 / 2 * (n - 1) * log((n - 1) * s2) +
1 / 2 * (b * n - 1) * log(b * (n - 1) * s2))
BF0u <- exp(logmx0 - logmxu)
BFtu_exploratory <- c(BF0u,1)
names(BFtu_exploratory) <- c("homogeneity of variances","no homogeneity of variances")
PHP_exploratory <- BFtu_exploratory / sum(BFtu_exploratory)
if (!is.null(hypothesis)){
parse_hyp <- parse_hypothesis(names_coef, hypothesis)
parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
RrList <- make_RrList2(parse_hyp)
RrE <- RrList[[1]]
RrO <- RrList[[2]]
}
if (is.null(hypothesis)) {
BFmatrix_confirmatory <- PHP_confirmatory <- BFtu_confirmatory <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
} else if (all(unlist(lapply(append(RrE, RrO), is.null)))) {
BFmatrix_confirmatory <- PHP_confirmatory <- BFtu_confirmatory <- relfit <-
relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
} else {
RrCheck <- do.call(rbind, append(RrE, RrO))
RrCheck_count <- t(apply(RrCheck[, -ncol(RrCheck), drop = FALSE], 1,
function(x) {sapply(list(-1, 1), function(y) {sum(y == x)})}))
if (any(RrCheck_count != 1) || any(RrCheck[, ncol(RrCheck)] != 0)) {
stop(paste0("The hypotheses contain inadmissible constraints."))
}
Th <- length(RrE)
logmx <- relfit <- relcomp <- logmxE <- rep(NA, times = Th)
names(logmx) <- names(relfit) <- names(relcomp) <- names(logmxE) <-
parse_hyp$original_hypothesis
for (h in 1:Th) {
if (is.null(RrE[[h]])) {
unique_vars <- as.list(1:J)
} else {
RrEh <- RrE[[h]][, -ncol(RrE[[h]])]
if (!is.matrix(RrEh)) {
RrEh <- t(as.matrix(RrEh))
}
RrEh_pos <- t(apply(RrEh, 1, function(x) which(!(x == 0))))
unique_vars <- list()
rows <- 1:nrow(RrEh_pos)
while (length(rows) > 0) {
equal_vars <- RrEh_pos[min(rows), ]
row_check <- min(rows)
for (i in setdiff(rows, row_check)) {
if (any(equal_vars %in% RrEh_pos[i, ])) {
equal_vars <- unique(c(equal_vars, RrEh_pos[i, ]))
row_check <- c(row_check, i)
}
}
unique_vars <- c(unique_vars, list(equal_vars))
rows <- setdiff(rows, row_check)
}
unique_vars <- c(unique_vars, setdiff(1:J, unlist(unique_vars)))
}
K <- length(unique_vars)
Jk <- sapply(unique_vars, length)
s2list <- lapply(unique_vars, function(x) s2[x])
nlist <- lapply(unique_vars, function(x) n[x])
blist <- lapply(unique_vars, function(x) b[x])
df <- dfb <- SS <- SSb <- rep(NA, times = K)
for (i in 1:K) {
df[i] <- sum(nlist[[i]]) - Jk[i]
dfb[i] <- sum(blist[[i]] * nlist[[i]]) - Jk[i]
SS[i] <- sum((nlist[[i]] - 1) * s2list[[i]])
SSb[i] <- sum(blist[[i]] * (nlist[[i]] - 1) * s2list[[i]])
}
logmxE[h] <- - 1 / 2 * sum((1 - unlist(blist)) * unlist(nlist)) * log(pi) +
1 / 2 * log(prod(unlist(blist))) + sum(lgamma(df / 2) - lgamma(dfb / 2) -
1 / 2 * df * log(SS) + 1 / 2 * dfb * log(SSb))
if (is.null(RrO[[h]])) {
logmx[h] <- logmxE[h]
} else {
RrOh <- RrO[[h]][, -ncol(RrO[[h]])]
if (!is.matrix(RrOh)) {
RrOh <- t(as.matrix(RrOh))
}
RrOh_pos <- t(apply(RrOh, 1, function(x) c(which(x == -1), which(x == 1))))
unique_vars_order <- cbind(
apply(as.matrix(RrOh_pos[, 1]), 1, function(x) {
which(unlist(lapply(unique_vars, function(y) {x %in% y})))}),
apply(as.matrix(RrOh_pos[, 2]), 1, function(x) {
which(unlist(lapply(unique_vars, function(y) {x %in% y})))})
)
post_samp <- prior_samp <- matrix(NA, nrow = nsim, ncol = K)
indi_post <- indi_prior <- rep(1, times = nsim)
for (i in unique(c(unique_vars_order))) {
post_samp[, i] <- SS[i] / rchisq(nsim, df = df[i])
prior_samp[, i] <- dfb[i] / rchisq(nsim, df = dfb[i])
}
for (i in 1:nrow(unique_vars_order)) {
indi_post <- indi_post * (post_samp[, unique_vars_order[i, 1]] <
post_samp[, unique_vars_order[i, 2]])
indi_prior <- indi_prior * (prior_samp[, unique_vars_order[i, 1]] <
prior_samp[, unique_vars_order[i, 2]])
}
relfit[h] <- sum(indi_post) / nsim
relcomp[h] <- sum(indi_prior) / nsim
logmx[h] <- log(relfit[h] / relcomp[h]) + logmxE[h]
}
}
if(complement==TRUE){
relfit <- inversegamma_prob_Hc(shape1=(n-1)/2,scale1=s2*(n-1)/(2*n),relmeas=relfit,RrE1=RrE,RrO1=RrO)
relcomp <- inversegamma_prob_Hc(shape1=rep(.5,length(n)),scale1=rep(.5,length(n)),relmeas=relcomp,RrE1=RrE,RrO1=RrO)
if(length(relfit)>Th){
logmxE <- c(logmxE,logmxu)
logmx <- c(logmx,logmxu + log(relfit[Th+1]/relcomp[Th+1]))
names(logmx)[Th+1] <- "complement"
}
}
hypotheses <- names(logmx)
BFtu_confirmatory <- exp(logmx - logmxu)
BFmatrix_confirmatory <- BFtu_confirmatory %*% t(1 / BFtu_confirmatory)
diag(BFmatrix_confirmatory) <- 1
names(BFtu_confirmatory) <- row.names(BFmatrix_confirmatory) <-
colnames(BFmatrix_confirmatory) <- hypotheses
if(is.null(prior.hyp)){
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
if(!is.numeric(prior.hyp) || length(prior.hyp)!=length(BFtu_confirmatory)){
warning(paste0("Argument 'prior.hyp' should be numeric and of length ",as.character(length(BFtu_confirmatory)),". Equal prior probabilities are used."))
priorprobs <- rep(1/length(BFtu_confirmatory),length(BFtu_confirmatory))
}else{
priorprobs <- prior.hyp
}
}
PHP_confirmatory <- BFtu_confirmatory * priorprobs / sum(BFtu_confirmatory * priorprobs)
relcomp[which(is.na(relcomp))] <- 1
relfit[which(is.na(relfit))] <- 1
BF_E <- exp(logmxE - logmxu)
BFtable <- cbind(rep(NA,length(relfit)),relcomp,rep(NA,length(relfit)),relfit,BF_E,
relfit/relcomp,BF_E*relfit/relcomp,PHP_confirmatory)
row.names(BFtable) <- names(PHP_confirmatory)
colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
}
BFlm_out <- list(
BFtu_exploratory=BFtu_exploratory,
PHP_exploratory=PHP_exploratory,
BFtu_confirmatory=BFtu_confirmatory,
PHP_confirmatory=PHP_confirmatory,
BFmatrix_confirmatory=BFmatrix_confirmatory,
BFtable_confirmatory=BFtable,
prior.hyp=priorprobs,
hypotheses=hypotheses,
estimates=s2,
model=x,
bayesfactor="generalized adjusted fractional Bayes factors",
parameter="group variances",
call=match.call())
class(BFlm_out) <- "BF"
return(BFlm_out)
}
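## Sketch of a confirmatory test on group variances (an assumption: the group
## labels used in 'hypothesis' must match the names of the estimated
## variances, as "A", "B", "C" do for InsectSprays$spray):
if (FALSE) {
  bt <- bartlett_test(x = InsectSprays$count, g = InsectSprays$spray)
  BF.bartlett_htest(bt, hypothesis = "A = B < C")
}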
inversegamma_prob_Hc <- function(shape1,scale1,relmeas,RrE1,RrO1,samsize1=1e5){
numhyp <- length(RrE1)
whichO <- unlist(lapply(1:numhyp,function(h){is.null(RrE1[[h]])}))
numO <- sum(whichO)
numpara <- length(shape1)
if(numO==length(RrE1)){
relmeas <- c(relmeas,1)
names(relmeas)[numhyp+1] <- "complement"
}else{
if(numO==1){
relmeas <- c(relmeas,1-relmeas[whichO])
names(relmeas)[numhyp+1] <- "complement"
}else{
randomDraws <- rmvnorm(samsize1,mean=rep(0,numpara),sigma=diag(numpara))
checksOC <- lapply(which(whichO),function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,samsize1)%*%t(rorder),1,prod)
})
checkOCplus <- Reduce("+",checksOC)
if(sum(checkOCplus > 0) < samsize1){
if(sum(checkOCplus>1)==0){
relmeas <- c(relmeas,1-sum(relmeas[whichO]))
names(relmeas)[numhyp+1] <- "complement"
}else{
        randomDraws <- matrix(unlist(lapply(1:numpara,function(par){
          # draw samsize1 values (was hard-coded 1e5, ignoring the argument)
          1/rgamma(samsize1,shape=shape1[par]/2,rate=scale1[par])
        })),ncol=numpara)
checksOCpost <- lapply(which(whichO),function(h){
Rorder <- as.matrix(RrO1[[h]][,-(1+numpara)])
if(ncol(Rorder)==1){
Rorder <- t(Rorder)
}
rorder <- as.matrix(RrO1[[h]][,1+numpara])
apply(randomDraws%*%t(Rorder) > rep(1,samsize1)%*%t(rorder),1,prod)
})
relmeas <- c(relmeas,sum(Reduce("+",checksOCpost) == 0) / samsize1)
rownames(relmeas)[numhyp+1] <- "complement"
}
}
}
}
return(relmeas)
} |
Sys.time <- function() .POSIXct(.Internal(Sys.time()))
Sys.timezone <- function(location = TRUE)
{
if(!location)
.Deprecated(msg = "Sys.timezone(location = FALSE) is defunct and ignored")
if(!is.na(tz <- get0(".sys.timezone", baseenv(), mode = "character",
inherits = FALSE, ifnotfound = NA_character_)))
return(tz)
cacheIt <- function(tz) {
unlockBinding(".sys.timezone", baseenv())
assign(".sys.timezone", tz, baseenv())
lockBinding(".sys.timezone", baseenv())
}
tz <- Sys.getenv("TZ")
if(nzchar(tz)) return(tz)
if(.Platform$OS.type == "windows") return(.Internal(tzone_name()))
if(!nzchar(Sys.getenv("TZDIR")) && grepl("darwin", R.Version()$os) &&
dir.exists(zp <-file.path(R.home("share"), "zoneinfo"))) {
veri <- try(readLines(file.path(zp, "VERSION")), silent = TRUE)
vers <- try(readLines("/var/db/timezone/zoneinfo/+VERSION"),
silent = TRUE)
if(!inherits(veri, "try-error") && !inherits(vers, "try-error") &&
vers != veri) {
yri <- substr(veri, 1L, 4L); sufi <- substr(veri, 5, 5)
yrs <- substr(vers, 1L, 4L); sufs <- substr(vers, 5, 5)
if (yrs > yri || (yrs == yri && sufs > sufi))
Sys.setenv(TZDIR = "macOS")
}
}
if(Sys.getenv("TZDIR") == "macOS" && grepl("darwin", R.Version()$os))
Sys.setenv(TZDIR = "/var/db/timezone/zoneinfo")
tzdir <- Sys.getenv("TZDIR")
if(nzchar(tzdir) && !dir.exists(tzdir)) tzdir <- ""
if(!nzchar(tzdir)) {
if(dir.exists(tzdir <- "/usr/share/zoneinfo") ||
dir.exists(tzdir <- "/share/zoneinfo") ||
dir.exists(tzdir <- "/usr/share/lib/zoneinfo") ||
dir.exists(tzdir <- "/usrlib/zoneinfo") ||
dir.exists(tzdir <- "/usr/local/etc/zoneinfo") ||
dir.exists(tzdir <- "/etc/zoneinfo") ||
dir.exists(tzdir <- "/usr/etc/zoneinfo")) {
} else tzdir <- ""
}
if (nzchar(Sys.which("timedatectl"))) {
inf <- system("timedatectl", intern = TRUE)
lines <- grep("Time zone: ", inf)
if (length(lines)) {
tz <- sub(" .*", "", sub(" *Time zone: ", "", inf[lines[1L]]))
if(nzchar(tzdir)) {
if(file.exists(file.path(tzdir, tz))) {
cacheIt(tz)
return(tz)
} else
warning(sprintf("%s indicates the non-existent timezone name %s",
sQuote("timedatectl"), sQuote(tz)),
call. = FALSE, immediate. = TRUE, domain = NA)
} else {
cacheIt(tz)
return(tz)
}
}
}
if (grepl("linux", R.Version()$platform, ignore.case = TRUE) &&
file.exists("/etc/timezone")) {
tz0 <- try(readLines("/etc/timezone"))
if(!inherits(tz0, "try-error") && length(tz0) == 1L) {
tz <- trimws(tz0)
if(nzchar(tzdir)) {
if(file.exists(file.path(tzdir, tz))) {
cacheIt(tz)
return(tz)
} else
warning(sprintf("%s indicates the non-existent timezone name %s",
sQuote("/etc/timezone"), sQuote(tz)),
call. = FALSE, immediate. = TRUE, domain = NA)
} else {
cacheIt(tz)
return(tz)
}
}
}
if ((file.exists(lt0 <- "/etc/localtime") ||
file.exists(lt0 <- "/usr/local/etc/localtime") ||
file.exists(lt0 <- "/usr/local/etc/zoneinfo/localtime") ||
file.exists(lt0 <- "/var/db/timezone/localtime")) &&
!is.na(lt <- Sys.readlink(lt0)) && nzchar(lt)) {
tz <- NA_character_
if ((nzchar(tzdir) && grepl(pat <- paste0("^", tzdir, "/"), lt)) ||
grepl(pat <- "^/usr/share/zoneinfo.default/", lt))
tz <- sub(pat, "", lt)
else if(grepl(pat <- ".*/zoneinfo/(.*)", lt))
tz <- sub(pat, "\\1", lt)
if(!is.na(tz)) {
cacheIt(tz)
return(tz)
} else
message("unable to deduce timezone name from ", sQuote(lt))
}
if (nzchar(tzdir) &&
(is.na(lt <- Sys.readlink(lt0)) || !nzchar(lt))) {
warning(sprintf("Your system is mis-configured: %s is not a symlink",
sQuote(lt0)),
call. = FALSE, immediate. = TRUE, domain = NA)
if(nzchar(Sys.which("cmp"))) {
known <- dir(tzdir, recursive = TRUE)
for(tz in known) {
status <- system2("cmp", c("-s", shQuote(lt0),
shQuote(file.path(tzdir, tz))))
if (status == 0L) {
cacheIt(tz)
warning(sprintf("It is strongly recommended to set envionment variable TZ to %s (or equivalent)",
sQuote(tz)),
call. = FALSE, immediate. = TRUE, domain = NA)
return(tz)
}
}
warning(sprintf("%s is not identical to any known timezone file",
sQuote(lt0)),
call. = FALSE, immediate. = TRUE, domain = NA)
}
}
NA_character_
}
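## Sketch: the first call runs the lookup heuristics above (TZ, timedatectl,
## /etc/timezone, the /etc/localtime symlink); later calls return the cached
## .sys.timezone value.
if (FALSE) {
  Sys.timezone()  # e.g. "Europe/London" -- system-dependent
}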
as.POSIXlt <- function(x, tz = "", ...) UseMethod("as.POSIXlt")
as.POSIXlt.Date <- function(x, ...) .Internal(Date2POSIXlt(x))
as.POSIXlt.POSIXct <- function(x, tz = "", ...)
{
if((missing(tz) || is.null(tz)) &&
!is.null(tzone <- attr(x, "tzone"))) tz <- tzone[1L]
.Internal(as.POSIXlt(x, tz))
}
as.POSIXlt.factor <- function(x, ...)
{
y <- as.POSIXlt(as.character(x), ...)
names(y$year) <- names(x)
y
}
as.POSIXlt.character <-
function(x, tz = "", format,
tryFormats = c("%Y-%m-%d %H:%M:%OS",
"%Y/%m/%d %H:%M:%OS",
"%Y-%m-%d %H:%M",
"%Y/%m/%d %H:%M",
"%Y-%m-%d",
"%Y/%m/%d"), optional = FALSE, ...)
{
x <- unclass(x)
if(!missing(format)) {
res <- strptime(x, format, tz = tz)
if(nzchar(tz)) attr(res, "tzone") <- tz
return(res)
}
xx <- x[!is.na(x)]
if (!length(xx)) {
res <- strptime(x, "%Y/%m/%d")
if(nzchar(tz)) attr(res, "tzone") <- tz
return(res)
} else
for(f in tryFormats)
if(all(!is.na(strptime(xx, f, tz = tz)))) {
res <- strptime(x, f, tz = tz)
if(nzchar(tz)) attr(res, "tzone") <- tz
return(res)
}
if(optional)
as.POSIXlt.character(rep.int(NA_character_, length(x)), tz=tz)
else stop("character string is not in a standard unambiguous format")
}
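## Sketch: with no explicit format, the tryFormats above are attempted in
## order and the first one that parses every non-NA element wins.
if (FALSE) {
  as.POSIXlt("2020-01-15 12:30", tz = "UTC")  # matches "%Y-%m-%d %H:%M"
  as.POSIXlt("2020/01/15", tz = "UTC")        # matches "%Y/%m/%d"
}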
as.POSIXlt.numeric <- function(x, tz = "", origin, ...)
{
if(missing(origin)) {
if(!length(x))
return(as.POSIXlt.character(character(), tz))
if(!any(is.finite(x)))
return(as.POSIXlt.character(rep_len(NA_character_,
length(x)),
tz))
stop("'origin' must be supplied")
}
as.POSIXlt(as.POSIXct(origin, tz = "UTC", ...) + x, tz = tz)
}
as.POSIXlt.default <- function(x, tz = "", optional = FALSE, ...)
{
if(inherits(x, "POSIXlt")) return(x)
if(is.null(x)) return(as.POSIXlt.character(character(), tz))
if(is.logical(x) && all(is.na(x)))
return(as.POSIXlt(as.POSIXct.default(x), tz = tz))
if(optional)
as.POSIXlt.character(rep.int(NA_character_, length(x)), tz=tz)
else stop(gettextf("do not know how to convert '%s' to class %s",
deparse1(substitute(x)),
dQuote("POSIXlt")),
domain = NA)
}
as.POSIXct <- function(x, tz = "", ...) UseMethod("as.POSIXct")
as.POSIXct.Date <- function(x, ...) .POSIXct(unclass(x)*86400)
as.POSIXct.POSIXlt <- function(x, tz = "", ...)
{
tzone <- attr(x, "tzone")
if(missing(tz) && !is.null(tzone)) tz <- tzone[1L]
y <- .Internal(as.POSIXct(x, tz))
names(y) <- names(x$year)
.POSIXct(y, tz)
}
as.POSIXct.numeric <- function(x, tz = "", origin, ...)
{
if(missing(origin)) {
if(!length(x))
return(.POSIXct(numeric(), tz))
if(!any(is.finite(x)))
return(.POSIXct(x, tz))
stop("'origin' must be supplied")
}
.POSIXct(as.POSIXct(origin, tz = "GMT", ...) + x, tz)
}
as.POSIXct.default <- function(x, tz = "", ...)
{
if(inherits(x, "POSIXct")) return(x)
if(is.null(x)) return(.POSIXct(numeric(), tz))
if(is.character(x) || is.factor(x))
return(as.POSIXct(as.POSIXlt(x, tz, ...), tz, ...))
if(is.logical(x) && all(is.na(x)))
return(.POSIXct(as.numeric(x), tz))
stop(gettextf("do not know how to convert '%s' to class %s",
deparse1(substitute(x)),
dQuote("POSIXct")),
domain = NA)
}
`length<-.POSIXct` <- function(x, value)
.POSIXct(NextMethod(), attr(x, "tzone"), oldClass(x))
as.double.POSIXlt <- function(x, ...) as.double(as.POSIXct(x))
length.POSIXlt <- function(x) length(unclass(x)[[1L]])
`length<-.POSIXlt` <- function(x, value)
.POSIXlt(lapply(unclass(x), `length<-`, value),
attr(x, "tzone"), oldClass(x))
format.POSIXlt <- function(x, format = "", usetz = FALSE, ...)
{
if(!inherits(x, "POSIXlt")) stop("wrong class")
if(any(f0 <- format == "")) {
times <- unlist(unclass(x)[1L:3L])[f0]
secs <- x$sec[f0]; secs <- secs[!is.na(secs)]
np <- getOption("digits.secs")
np <- if(is.null(np)) 0L else min(6L, np)
if(np >= 1L)
for (i in seq_len(np)- 1L)
if(all( abs(secs - round(secs, i)) < 1e-6 )) {
np <- i
break
}
format[f0] <-
if(all(times[!is.na(times)] == 0)) "%Y-%m-%d"
else if(np == 0L) "%Y-%m-%d %H:%M:%S"
else paste0("%Y-%m-%d %H:%M:%OS", np)
}
y <- .Internal(format.POSIXlt(x, format, usetz))
names(y) <- names(x$year)
y
}
strftime <- function(x, format = "", tz = "", usetz = FALSE, ...)
format(as.POSIXlt(x, tz = tz), format = format, usetz = usetz, ...)
strptime <- function(x, format, tz = "")
{
y <- .Internal(strptime(as.character(x), format, tz))
names(y$year) <- names(x)
y
}
format.POSIXct <- function(x, format = "", tz = "", usetz = FALSE, ...)
{
if(!inherits(x, "POSIXct")) stop("wrong class")
if(missing(tz) && !is.null(tzone <- attr(x, "tzone"))) tz <- tzone
structure(format.POSIXlt(as.POSIXlt(x, tz), format, usetz, ...),
names = names(x))
}
print.POSIXct <-
print.POSIXlt <- function(x, tz = "", usetz = TRUE, max = NULL, ...)
{
if(is.null(max)) max <- getOption("max.print", 9999L)
FORM <- if(missing(tz))
function(z) format(z, usetz = usetz)
else function(z) format(z, tz = tz, usetz = usetz)
if(max < length(x)) {
print(FORM(x[seq_len(max)]), max=max+1, ...)
cat(" [ reached 'max' / getOption(\"max.print\") -- omitted",
length(x) - max, 'entries ]\n')
} else if(length(x))
print(FORM(x), max = max, ...)
else
cat(class(x)[1L], "of length 0\n")
invisible(x)
}
summary.POSIXct <- function(object, digits = 15L, ...)
{
x <- summary.default(unclass(object), digits = digits, ...)
if(m <- match("NA's", names(x), 0L)) {
NAs <- as.integer(x[m])
x <- x[-m]
attr(x, "NAs") <- NAs
}
.POSIXct(x,
tz = attr(object, "tzone"),
cl = c("summaryDefault", "table", oldClass(object)))
}
summary.POSIXlt <- function(object, digits = 15, ...)
summary(as.POSIXct(object), digits = digits, ...)
`+.POSIXt` <- function(e1, e2)
{
coerceTimeUnit <- function(x)
as.vector(switch(attr(x,"units"),
secs = x, mins = 60*x, hours = 60*60*x,
days = 60*60*24*x, weeks = 60*60*24*7*x))
if (nargs() == 1L) return(e1)
if(inherits(e1, "POSIXt") && inherits(e2, "POSIXt"))
stop("binary '+' is not defined for \"POSIXt\" objects")
if(inherits(e1, "POSIXlt")) e1 <- as.POSIXct(e1)
if(inherits(e2, "POSIXlt")) e2 <- as.POSIXct(e2)
if (inherits(e1, "difftime")) e1 <- coerceTimeUnit(e1)
if (inherits(e2, "difftime")) e2 <- coerceTimeUnit(e2)
.POSIXct(unclass(e1) + unclass(e2), check_tzones(e1, e2))
}
`-.POSIXt` <- function(e1, e2)
{
coerceTimeUnit <- function(x)
as.vector(switch(attr(x,"units"),
secs = x, mins = 60*x, hours = 60*60*x,
days = 60*60*24*x, weeks = 60*60*24*7*x))
if(!inherits(e1, "POSIXt"))
stop("can only subtract from \"POSIXt\" objects")
if (nargs() == 1L) stop("unary '-' is not defined for \"POSIXt\" objects")
if(inherits(e2, "POSIXt")) return(difftime(e1, e2))
if (inherits(e2, "difftime")) e2 <- coerceTimeUnit(e2)
if(!is.null(attr(e2, "class")))
stop("can only subtract numbers from \"POSIXt\" objects")
e1 <- as.POSIXct(e1)
.POSIXct(unclass(e1) - e2, attr(e1, "tzone"))
}
Ops.POSIXt <- function(e1, e2)
{
if (nargs() == 1L)
stop(gettextf("unary '%s' not defined for \"POSIXt\" objects",
.Generic), domain = NA)
boolean <- switch(.Generic, "<" = , ">" = , "==" = ,
"!=" = , "<=" = , ">=" = TRUE, FALSE)
if (!boolean)
stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
domain = NA)
if(inherits(e1, "POSIXlt") || is.character(e1)) e1 <- as.POSIXct(e1)
if(inherits(e2, "POSIXlt") || is.character(e2)) e2 <- as.POSIXct(e2)
check_tzones(e1, e2)
NextMethod(.Generic)
}
Math.POSIXt <- function (x, ...)
{
stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
domain = NA)
}
check_tzones <- function(...)
{
tzs <- unique(sapply(list(...), function(x) {
y <- attr(x, "tzone")
if(is.null(y)) "" else y[1L]
}))
tzs <- tzs[nzchar(tzs)]
if(length(tzs) > 1L)
warning("'tzone' attributes are inconsistent")
if(length(tzs)) tzs[1L] else NULL
}
Summary.POSIXct <- function (..., na.rm)
{
ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
if (!ok)
stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
domain = NA)
args <- list(...)
tz <- do.call(check_tzones, args)
.POSIXct(NextMethod(.Generic), tz = tz, cl = oldClass(args[[1L]]))
}
Summary.POSIXlt <- function (..., na.rm)
{
ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
if (!ok)
stop(gettextf("'%s' not defined for \"POSIXt\" objects", .Generic),
domain = NA)
args <- list(...)
tz <- do.call(check_tzones, args)
args <- lapply(args, as.POSIXct)
val <- do.call(.Generic, c(args, na.rm = na.rm))
as.POSIXlt(.POSIXct(val, tz))
}
`[.POSIXct` <-
function(x, ..., drop = TRUE)
.POSIXct(NextMethod("["), attr(x, "tzone"), oldClass(x))
`[[.POSIXct` <-
function(x, ..., drop = TRUE)
.POSIXct(NextMethod("[["), attr(x, "tzone"), oldClass(x))
`[<-.POSIXct` <-
function(x, ..., value) {
if(!length(value)) return(x)
value <- unclass(as.POSIXct(value))
.POSIXct(NextMethod(.Generic), attr(x, "tzone"), oldClass(x))
}
as.character.POSIXt <- function(x, ...) format(x, ...)
as.data.frame.POSIXct <- as.data.frame.vector
as.list.POSIXct <- function(x, ...)
{
nms <- names(x)
names(x) <- NULL
y <- lapply(unclass(x), .POSIXct, attr(x, "tzone"), oldClass(x))
names(y) <- nms
y
}
is.na.POSIXlt <- function(x)
is.na(as.POSIXct(x))
anyNA.POSIXlt <- function(x, recursive = FALSE)
anyNA(as.POSIXct(x))
c.POSIXct <- function(..., recursive = FALSE) {
x <- lapply(list(...), function(e) unclass(as.POSIXct(e)))
tzones <- lapply(x, attr, "tzone")
tz <- if(length(unique(tzones)) == 1L) tzones[[1L]] else NULL
.POSIXct(c(unlist(x)), tz)
}
c.POSIXlt <- function(..., recursive = FALSE) {
as.POSIXlt(do.call(c, lapply(list(...), as.POSIXct)))
}
ISOdatetime <- function(year, month, day, hour, min, sec, tz = "")
{
if(min(vapply(list(year, month, day, hour, min, sec), length, 1, USE.NAMES=FALSE)) == 0L)
.POSIXct(numeric(), tz = tz)
else {
x <- paste(year, month, day, hour, min, sec, sep = "-")
as.POSIXct(strptime(x, "%Y-%m-%d-%H-%M-%OS", tz = tz), tz = tz)
}
}
ISOdate <- function(year, month, day, hour = 12, min = 0, sec = 0, tz = "GMT")
ISOdatetime(year, month, day, hour, min, sec, tz)
as.matrix.POSIXlt <- function(x, ...)
{
as.matrix(as.data.frame(unclass(x)), ...)
}
mean.POSIXct <- function (x, ...)
.POSIXct(mean(unclass(x), ...), attr(x, "tzone"))
mean.POSIXlt <- function (x, ...)
as.POSIXlt(mean(as.POSIXct(x), ...))
difftime <-
function(time1, time2, tz,
units = c("auto", "secs", "mins", "hours", "days", "weeks"))
{
if (missing(tz)) {
time1 <- as.POSIXct(time1)
time2 <- as.POSIXct(time2)
} else {
time1 <- as.POSIXct(time1, tz = tz)
time2 <- as.POSIXct(time2, tz = tz)
}
z <- unclass(time1) - unclass(time2)
attr(z, "tzone") <- NULL
units <- match.arg(units)
if(units == "auto")
units <-
if(all(is.na(z))) "secs"
else {
zz <- min(abs(z), na.rm = TRUE)
if(!is.finite(zz) || zz < 60) "secs"
else if(zz < 3600) "mins"
else if(zz < 86400) "hours"
else "days"
}
switch(units,
"secs" = .difftime(z, units = "secs"),
"mins" = .difftime(z/60, units = "mins"),
"hours" = .difftime(z/3600, units = "hours"),
"days" = .difftime(z/86400, units = "days"),
"weeks" = .difftime(z/(7*86400), units = "weeks")
)
}
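## Sketch: units = "auto" picks the unit from the smallest absolute
## difference (secs below 60, mins below 3600, hours below 86400, else days).
if (FALSE) {
  difftime(as.POSIXct("2020-01-02"), as.POSIXct("2020-01-01"))   # 1 day
  difftime(as.POSIXct("2020-01-01 00:30"), as.POSIXct("2020-01-01"),
           units = "secs")                                       # 1800 secs
}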
as.difftime <- function(tim, format = "%X", units = "auto", tz = "UTC")
{
if (inherits(tim, "difftime")) return(tim)
if (is.character(tim)) {
difftime(strptime(tim, format = format),
strptime("0:0:0", format = "%X"), units = units, tz = tz)
} else {
if (!is.numeric(tim)) stop("'tim' is not character or numeric")
if (units == "auto") stop("need explicit units for numeric conversion")
if (!(units %in% c("secs", "mins", "hours", "days", "weeks")))
stop("invalid units specified")
.difftime(tim, units = units)
}
}
units <- function(x) UseMethod("units")
`units<-` <- function(x, value) UseMethod("units<-")
units.difftime <- function(x) attr(x, "units")
`units<-.difftime` <- function(x, value)
{
from <- units(x)
if (from == value) return(x)
if (!(value %in% c("secs", "mins", "hours", "days", "weeks")))
stop("invalid units specified")
sc <- cumprod(c(secs = 1, mins = 60, hours = 60, days = 24, weeks = 7))
newx <- unclass(x) * as.vector(sc[from]/sc[value])
.difftime(newx, value)
}
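## Sketch: assigning new units rescales the stored value via the cumulative
## scale table above, so the represented duration is unchanged.
if (FALSE) {
  d <- as.difftime(120, units = "mins")
  units(d) <- "hours"
  d  # Time difference of 2 hours
}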
as.double.difftime <- function(x, units = "auto", ...)
{
if (units != "auto") units(x) <- units
as.vector(x, "double")
}
as.data.frame.difftime <- as.data.frame.vector
format.difftime <- function(x,...)
paste(format(unclass(x),...), units(x))
print.difftime <- function(x, digits = getOption("digits"), ...)
{
if(is.array(x) || length(x) > 1L) {
cat("Time differences in ", attr(x, "units"), "\n", sep = "")
y <- unclass(x); attr(y, "units") <- NULL
print(y, digits=digits, ...)
}
else
cat("Time difference of ", format(unclass(x), digits = digits), " ",
attr(x, "units"), "\n", sep = "")
invisible(x)
}
`[.difftime` <- function(x, ..., drop = TRUE)
.difftime(NextMethod("["), attr(x, "units"), oldClass(x))
diff.difftime <- function(x, ...)
.difftime(NextMethod("diff"), attr(x, "units"), oldClass(x))
Ops.difftime <- function(e1, e2)
{
coerceTimeUnit <- function(x)
{
switch(attr(x, "units"),
secs = x, mins = 60*x, hours = 60*60*x,
days = 60*60*24*x, weeks = 60*60*24*7*x)
}
if (nargs() == 1L) {
switch(.Generic, "+" = {}, "-" = {e1[] <- -unclass(e1)},
stop(gettextf("unary '%s' not defined for \"difftime\" objects",
.Generic), domain = NA, call. = FALSE)
)
return(e1)
}
boolean <- switch(.Generic, "<" = , ">" = , "==" = ,
"!=" = , "<=" = , ">=" = TRUE, FALSE)
if (boolean) {
if(inherits(e1, "difftime") && inherits(e2, "difftime")) {
e1 <- coerceTimeUnit(e1)
e2 <- coerceTimeUnit(e2)
}
NextMethod(.Generic)
} else if(.Generic == "+" || .Generic == "-") {
if(inherits(e1, "difftime") && !inherits(e2, "difftime"))
return(.difftime(NextMethod(.Generic),
units = attr(e1, "units")))
if(!inherits(e1, "difftime") && inherits(e2, "difftime"))
return(.difftime(NextMethod(.Generic),
units = attr(e2, "units")))
u1 <- attr(e1, "units")
if(attr(e2, "units") == u1) {
.difftime(NextMethod(.Generic), units = u1)
} else {
e1 <- coerceTimeUnit(e1)
e2 <- coerceTimeUnit(e2)
.difftime(NextMethod(.Generic), units = "secs")
}
} else {
stop(gettextf("'%s' not defined for \"difftime\" objects", .Generic),
domain = NA)
}
}
`*.difftime` <- function (e1, e2)
{
if(inherits(e1, "difftime") && inherits(e2, "difftime"))
stop("both arguments of * cannot be \"difftime\" objects")
if(inherits(e2, "difftime")) {tmp <- e1; e1 <- e2; e2 <- tmp}
.difftime(e2 * unclass(e1), attr(e1, "units"))
}
`/.difftime` <- function (e1, e2)
{
if(inherits(e2, "difftime"))
stop("second argument of / cannot be a \"difftime\" object")
.difftime(unclass(e1) / e2, attr(e1, "units"))
}
Math.difftime <- function (x, ...)
{
switch(.Generic,
"abs" =, "sign" =, "floor" =, "ceiling" =, "trunc" =,
"round" =, "signif" = {
units <- attr(x, "units")
.difftime(NextMethod(), units)
},
stop(gettextf("'%s' not defined for \"difftime\" objects", .Generic),
domain = NA))
}
mean.difftime <- function (x, ...)
.difftime(mean(unclass(x), ...), attr(x, "units"))
Summary.difftime <- function (..., na.rm)
{
coerceTimeUnit <- function(x)
{
as.vector(switch(attr(x,"units"),
secs = x, mins = 60*x, hours = 60*60*x,
days = 60*60*24*x, weeks = 60*60*24*7*x))
}
ok <- switch(.Generic, max = , min = , sum=, range = TRUE, FALSE)
if (!ok)
stop(gettextf("'%s' not defined for \"difftime\" objects", .Generic),
domain = NA)
x <- list(...)
Nargs <- length(x)
if(Nargs == 0) {
.difftime(do.call(.Generic), "secs")
} else {
units <- sapply(x, attr, "units")
if(all(units == units[1L])) {
args <- c(lapply(x, as.vector), na.rm = na.rm)
} else {
args <- c(lapply(x, coerceTimeUnit), na.rm = na.rm)
units <- "secs"
}
.difftime(do.call(.Generic, args), units[[1L]])
}
}
c.difftime <-
function(..., recursive = FALSE)
{
coerceTimeUnit <- function(x) {
switch(attr(x, "units"),
secs = x, mins = 60*x, hours = 60*60*x,
days = 60*60*24*x, weeks = 60*60*24*7*x)
}
args <- list(...)
if(!length(args)) return(.difftime(double(), "secs"))
ind <- sapply(args, inherits, "difftime")
pos <- which(!ind)
units <- sapply(args[ind], attr, "units")
if(all(units == (un1 <- units[1L]))) {
if(length(pos))
args[pos] <-
lapply(args[pos], as.difftime, units = un1)
.difftime(unlist(args), un1)
} else {
if(length(pos))
args[pos] <-
lapply(args[pos], as.difftime, units = "secs")
args[ind] <- lapply(args[ind], coerceTimeUnit)
.difftime(unlist(args), "secs")
}
}
`length<-.difftime` <-
function(x, value)
.difftime(NextMethod(), attr(x, "units"), oldClass(x))
seq.POSIXt <-
function(from, to, by, length.out = NULL, along.with = NULL, ...)
{
if (missing(from)) stop("'from' must be specified")
if (!inherits(from, "POSIXt")) stop("'from' must be a \"POSIXt\" object")
cfrom <- as.POSIXct(from)
if(length(cfrom) != 1L) stop("'from' must be of length 1")
tz <- attr(cfrom , "tzone")
if (!missing(to)) {
if (!inherits(to, "POSIXt")) stop("'to' must be a \"POSIXt\" object")
if (length(as.POSIXct(to)) != 1) stop("'to' must be of length 1")
}
if (!missing(along.with)) {
length.out <- length(along.with)
} else if (!is.null(length.out)) {
if (length(length.out) != 1L) stop("'length.out' must be of length 1")
length.out <- ceiling(length.out)
}
status <- c(!missing(to), !missing(by), !is.null(length.out))
if(sum(status) != 2L)
stop("exactly two of 'to', 'by' and 'length.out' / 'along.with' must be specified")
if (missing(by)) {
from <- unclass(cfrom)
to <- unclass(as.POSIXct(to))
res <- seq.int(from, to, length.out = length.out)
return(.POSIXct(res, tz))
}
if (length(by) != 1L) stop("'by' must be of length 1")
valid <- 0L
if (inherits(by, "difftime")) {
by <- switch(attr(by,"units"), secs = 1, mins = 60, hours = 3600,
days = 86400, weeks = 7*86400) * unclass(by)
} else if(is.character(by)) {
by2 <- strsplit(by, " ", fixed = TRUE)[[1L]]
if(length(by2) > 2L || length(by2) < 1L)
stop("invalid 'by' string")
valid <- pmatch(by2[length(by2)],
c("secs", "mins", "hours", "days", "weeks",
"months", "years", "DSTdays", "quarters"))
if(is.na(valid)) stop("invalid string for 'by'")
if(valid <= 5L) {
by <- c(1, 60, 3600, 86400, 7*86400)[valid]
if (length(by2) == 2L) by <- by * as.integer(by2[1L])
} else
by <- if(length(by2) == 2L) as.integer(by2[1L]) else 1
} else if(!is.numeric(by)) stop("invalid mode for 'by'")
if(is.na(by)) stop("'by' is NA")
if(valid <= 5L) {
from <- unclass(as.POSIXct(from))
if(!is.null(length.out))
res <- seq.int(from, by = by, length.out = length.out)
else {
to0 <- unclass(as.POSIXct(to))
res <- seq.int(0, to0 - from, by) + from
}
return(.POSIXct(res, tz))
} else {
r1 <- as.POSIXlt(from)
if(valid == 7L) {
if(missing(to)) {
yr <- seq.int(r1$year, by = by, length.out = length.out)
} else {
to <- as.POSIXlt(to)
yr <- seq.int(r1$year, to$year, by)
}
r1$year <- yr
} else if(valid %in% c(6L, 9L)) {
if (valid == 9L) by <- by * 3
if(missing(to)) {
mon <- seq.int(r1$mon, by = by, length.out = length.out)
} else {
to0 <- as.POSIXlt(to)
mon <- seq.int(r1$mon, 12*(to0$year - r1$year) + to0$mon, by)
}
r1$mon <- mon
} else if(valid == 8L) {
if(!missing(to)) {
length.out <- 2L + floor((unclass(as.POSIXct(to)) -
unclass(as.POSIXct(from)))/(by * 86400))
}
r1$mday <- seq.int(r1$mday, by = by, length.out = length.out)
}
r1$isdst <- -1L
res <- as.POSIXct(r1)
if(!missing(to)) {
to <- as.POSIXct(to)
res <- if(by > 0) res[res <= to] else res[res >= to]
}
res
}
}
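## Sketch: numeric or difftime 'by' steps in seconds; a character 'by' also
## allows calendar units ("months", "years", "DSTdays", "quarters"), which are
## handled through POSIXlt arithmetic above.
if (FALSE) {
  seq(as.POSIXct("2020-01-01", tz = "UTC"), by = "2 hours", length.out = 3)
  seq(as.POSIXct("2020-01-01", tz = "UTC"),
      as.POSIXct("2020-06-01", tz = "UTC"), by = "month")
}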
cut.POSIXt <-
function (x, breaks, labels = NULL, start.on.monday = TRUE,
right = FALSE, ...)
{
if(!inherits(x, "POSIXt")) stop("'x' must be a date-time object")
x <- as.POSIXct(x)
if (inherits(breaks, "POSIXt")) {
breaks <- sort(as.POSIXct(breaks))
} else if(is.numeric(breaks) && length(breaks) == 1L) {
} else if(is.character(breaks) && length(breaks) == 1L) {
by2 <- strsplit(breaks, " ", fixed = TRUE)[[1L]]
if(length(by2) > 2L || length(by2) < 1L)
stop("invalid specification of 'breaks'")
valid <-
pmatch(by2[length(by2)],
c("secs", "mins", "hours", "days", "weeks",
"months", "years", "DSTdays", "quarters"))
if(is.na(valid)) stop("invalid specification of 'breaks'")
start <- as.POSIXlt(min(x, na.rm = TRUE))
incr <- 1
if(valid > 1L) { start$sec <- 0L; incr <- 60 }
if(valid > 2L) { start$min <- 0L; incr <- 3600 }
if(valid > 3L) { start$hour <- 0L; start$isdst <- -1L; incr <- 86400 }
if(valid == 5L) {
start$mday <- start$mday - start$wday
if(start.on.monday)
start$mday <- start$mday + ifelse(start$wday > 0L, 1L, -6L)
incr <- 7*86400
}
if(valid == 8L) incr <- 25*3600
if(valid == 6L) {
start$mday <- 1L
maxx <- max(x, na.rm = TRUE)
end <- as.POSIXlt(maxx)
step <- if(length(by2) == 2L) as.integer(by2[1L]) else 1L
end <- as.POSIXlt(end + (31 * step * 86400))
end$mday <- 1L
end$isdst <- -1L
breaks <- seq(start, end, breaks)
lb <- length(breaks)
if(maxx < breaks[lb-1]) breaks <- breaks[-lb]
} else if(valid == 7L) {
start$mon <- 0L
start$mday <- 1L
maxx <- max(x, na.rm = TRUE)
end <- as.POSIXlt(maxx)
step <- if(length(by2) == 2L) as.integer(by2[1L]) else 1L
            end <- as.POSIXlt(end + (366 * step * 86400))
end$mon <- 0L
end$mday <- 1L
end$isdst <- -1L
breaks <- seq(start, end, breaks)
lb <- length(breaks)
if(maxx < breaks[lb-1]) breaks <- breaks[-lb]
} else if(valid == 9L) {
qtr <- rep(c(0L, 3L, 6L, 9L), each = 3L)
start$mon <- qtr[start$mon + 1L]
start$mday <- 1L
maxx <- max(x, na.rm = TRUE)
end <- as.POSIXlt(maxx)
step <- if(length(by2) == 2L) as.integer(by2[1L]) else 1L
end <- as.POSIXlt(end + (93 * step * 86400))
end$mon <- qtr[end$mon + 1L]
end$mday <- 1L
end$isdst <- -1L
breaks <- seq(start, end, paste(step * 3, "months"))
lb <- length(breaks)
if(maxx < breaks[lb-1]) breaks <- breaks[-lb]
} else {
if (length(by2) == 2L) incr <- incr * as.integer(by2[1L])
maxx <- max(x, na.rm = TRUE)
breaks <- seq(start, maxx + incr, breaks)
breaks <- breaks[seq_len(1+max(which(breaks <= maxx)))]
}
} else stop("invalid specification of 'breaks'")
res <- cut(unclass(x), unclass(breaks), labels = labels,
right = right, ...)
if(is.null(labels)) {
levels(res) <-
as.character(if (is.numeric(breaks)) x[!duplicated(res)]
else breaks[-length(breaks)])
}
res
}
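## Sketch: character 'breaks' are expanded to a break sequence covering the
## data, then cut() on the underlying seconds produces the factor.
if (FALSE) {
  x <- as.POSIXct("2020-01-01", tz = "UTC") + (0:10) * 86400
  table(cut(x, breaks = "weeks"))
}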
julian <- function(x, ...) UseMethod("julian")
julian.POSIXt <- function(x, origin = as.POSIXct("1970-01-01", tz = "GMT"), ...)
{
origin <- as.POSIXct(origin)
if(length(origin) != 1L) stop("'origin' must be of length one")
res <- difftime(as.POSIXct(x), origin, units = "days")
structure(res, "origin" = origin)
}
weekdays <- function(x, abbreviate) UseMethod("weekdays")
weekdays.POSIXt <- function(x, abbreviate = FALSE)
{
format(x, ifelse(abbreviate, "%a", "%A"))
}
months <- function(x, abbreviate) UseMethod("months")
months.POSIXt <- function(x, abbreviate = FALSE)
{
format(x, ifelse(abbreviate, "%b", "%B"))
}
quarters <- function(x, abbreviate) UseMethod("quarters")
quarters.POSIXt <- function(x, ...)
{
x <- (as.POSIXlt(x)$mon)%/%3
paste0("Q", x+1)
}
trunc.POSIXt <-
function(x, units = c("secs", "mins", "hours", "days", "months", "years"), ...)
{
units <- match.arg(units)
x <- as.POSIXlt(x)
if(length(x$sec))
switch(units,
"secs" = {x$sec <- trunc(x$sec)},
"mins" = {x$sec[] <- 0},
"hours" = {x$sec[] <- 0; x$min[] <- 0L},
"days" = {
x$sec[] <- 0; x$min[] <- 0L; x$hour[] <- 0L;
x$isdst[] <- -1L
},
"months" = {
x$sec[] <- 0; x$min[] <- 0L; x$hour[] <- 0L;
x$mday[] <- 1L
x$isdst[] <- -1L
x <- as.POSIXlt(as.POSIXct(x))
},
"years" = {
x$sec[] <- 0; x$min[] <- 0L; x$hour[] <- 0L;
x$mday[] <- 1L; x$mon[] <- 0L
x$isdst[] <- -1L
x <- as.POSIXlt(as.POSIXct(x))
}
)
x
}
round.POSIXt <-
function(x, units = c("secs", "mins", "hours", "days", "months", "years"))
{
.round_x_to_l_or_u <- function(lx, ll, lu) {
cu <- as.POSIXct(lu)
lu <- as.POSIXlt(cu)
tu <- unclass(cu)
tx <- unclass(as.POSIXct(lx))
tl <- unclass(as.POSIXct(ll))
up <- ((tu - tx) <= (tx - tl))
up <- !is.na(up) & up
y <- ll
y[up] <- lu[up]
y
}
units <- if(is.numeric(units) && units == 0.) "secs" else match.arg(units)
if(units == "months") {
x <- as.POSIXlt(x)
ll <- trunc.POSIXt(x, "months")
lu <- ll
lu$mon <- lu$mon + 1L
.round_x_to_l_or_u(x, ll, lu)
}
else if(units == "years") {
x <- as.POSIXlt(x)
ll <- trunc.POSIXt(x, "years")
lu <- ll
lu$year <- lu$year + 1L
.round_x_to_l_or_u(x, ll, lu)
}
else
trunc.POSIXt(as.POSIXct(x) +
switch(units,
"secs" = 0.5,
"mins" = 30,
"hours" = 1800,
"days" = 43200),
units = units)
}
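## Sketch: sub-day units round by adding half a unit and truncating; "months"
## and "years" pick whichever of the two surrounding boundaries is closer.
if (FALSE) {
  round(as.POSIXct("2020-01-20 13:40", tz = "UTC"), "hours")  # ... 14:00:00
  round(as.POSIXct("2020-01-20", tz = "UTC"), "months")       # 2020-02-01
}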
`[.POSIXlt` <- function(x, i, j, drop = TRUE)
{
if(!(mj <- missing(j)))
if(!is.character(j) || (length(j) != 1L))
stop("component subscript must be a character string")
if(missing(i)) {
if(mj)
x
else
unclass(x)[[j]]
} else {
if(is.character(i))
i <- match(i, names(x),
incomparables = c("", NA_character_))
if(mj)
.POSIXlt(lapply(X = unclass(x), FUN = `[`, i, drop = drop),
attr(x, "tzone"), oldClass(x))
else
unclass(x)[[j]][i]
}
}
`[<-.POSIXlt` <- function(x, i, j, value)
{
if(!(mj <- missing(j)))
if(!is.character(j) || (length(j) != 1L))
stop("component subscript must be a character string")
if(!length(value))
return(x)
cl <- oldClass(x)
class(x) <- NULL
if(missing(i)) {
if(mj)
x <- as.POSIXlt(value)
else
x[[j]] <- value
} else {
ici <- is.character(i)
nms <- names(x$year)
if(mj) {
value <- unclass(as.POSIXlt(value))
if(ici) {
for(n in names(x))
names(x[[n]]) <- nms
}
for(n in names(x))
x[[n]][i] <- value[[n]]
} else {
if(ici) {
names(x[[j]]) <- nms
}
x[[j]][i] <- value
}
}
class(x) <- cl
x
}
as.data.frame.POSIXlt <- function(x, row.names = NULL, optional = FALSE, ...)
{
value <- as.data.frame.POSIXct(as.POSIXct(x), row.names, optional, ...)
if (!optional)
names(value) <- deparse1(substitute(x))
value
}
rep.POSIXct <- function(x, ...)
.POSIXct(NextMethod(), attr(x, "tzone"), oldClass(x))
rep.POSIXlt <- function(x, ...)
.POSIXlt(lapply(X = unclass(x), FUN = rep, ...),
attr(x, "tzone"), oldClass(x))
diff.POSIXt <- function (x, lag = 1L, differences = 1L, ...)
{
ismat <- is.matrix(x)
r <- if(inherits(x, "POSIXlt")) as.POSIXct(x) else x
xlen <- if (ismat) dim(x)[1L] else length(r)
if (length(lag) != 1L || length(differences) > 1L || lag < 1L || differences < 1L)
stop("'lag' and 'differences' must be integers >= 1")
if (lag * differences >= xlen) return(.difftime(numeric(), "secs"))
i1 <- -seq_len(lag)
if (ismat) for (i in seq_len(differences)) r <- r[i1, , drop = FALSE] -
r[-nrow(r):-(nrow(r) - lag + 1), , drop = FALSE]
else for (i in seq_len(differences))
r <- r[i1] - r[-length(r):-(length(r) - lag + 1L)]
r
}
duplicated.POSIXlt <- function(x, incomparables = FALSE, ...)
{
x <- as.POSIXct(x)
NextMethod("duplicated", x)
}
unique.POSIXlt <- function(x, incomparables = FALSE, ...)
x[!duplicated(x, incomparables, ...)]
sort.POSIXlt <- function(x, decreasing = FALSE, na.last = NA, ...)
x[order(as.POSIXct(x), na.last = na.last, decreasing = decreasing)]
is.numeric.POSIXt <- function(x) FALSE
split.POSIXct <-
function(x, f, drop = FALSE, ...)
lapply(split.default(as.double(x), f, drop = drop, ...),
.POSIXct, attr(x, "tzone"), oldClass(x))
xtfrm.POSIXct <- function(x) as.numeric(x)
xtfrm.POSIXlt <- function(x) as.double(x)
xtfrm.difftime <- function(x) as.numeric(x)
is.numeric.difftime <- function(x) FALSE
.POSIXct <- function(xx, tz = NULL, cl = c("POSIXct", "POSIXt")) {
class(xx) <- cl
attr(xx, "tzone") <- tz
xx
}
.POSIXlt <- function(xx, tz = NULL, cl = c("POSIXlt", "POSIXt")) {
class(xx) <- cl
attr(xx, "tzone") <- tz
xx
}
.difftime <- function(xx, units, cl = "difftime") {
class(xx) <- cl
attr(xx, "units") <- units
xx
}
names.POSIXlt <-
function(x)
names(x$year)
`names<-.POSIXlt` <-
function(x, value)
{
names(x$year) <- value
x
}
OlsonNames <- function(tzdir = NULL)
{
if (is.null(tzdir)) {
if(.Platform$OS.type == "windows")
tzdir <- Sys.getenv("TZDIR", file.path(R.home("share"), "zoneinfo"))
else {
if(Sys.getenv("TZDIR") == "internal")
tzdir <- file.path(R.home("share"), "zoneinfo")
else if (grepl("darwin", R.Version()$os) &&
Sys.getenv("TZDIR") == "macOS") {
tzdir <- "/var/db/timezone/zoneinfo"
} else {
tzdirs <- c(Sys.getenv("TZDIR"),
file.path(R.home("share"), "zoneinfo"),
"/usr/share/zoneinfo",
"/share/zoneinfo",
"/usr/share/lib/zoneinfo",
"/usr/lib/zoneinfo",
"/usr/local/etc/zoneinfo",
"/etc/zoneinfo", "/usr/etc/zoneinfo")
tzdirs <- tzdirs[file.exists(tzdirs)]
if (!length(tzdirs)) {
warning("no Olson database found")
return(character())
} else tzdir <- tzdirs[1L]
}
}
} else if(!dir.exists(tzdir))
stop(sprintf("%s is not a directory", sQuote(tzdir)), domain = NA)
x <- list.files(tzdir, recursive = TRUE)
ver <- if(file.exists(vf <- file.path(tzdir, "VERSION")))
readLines(vf, warn = FALSE)
else if(file.exists(vf <- file.path(tzdir, "+VERSION")))
readLines(vf, warn = FALSE)
x <- setdiff(x, "VERSION")
ans <- grep("^[ABCDEFGHIJKLMNOPQRSTUVWXYZ]", x, value = TRUE)
if(!is.null(ver)) attr(ans, "Version") <- ver
ans
}
`[[.POSIXlt` <- function(x, i, drop = TRUE)
{
if(!missing(i) && is.character(i)) {
i <- match(i, names(x), incomparables = c("", NA_character_))
}
.POSIXlt(lapply(X = unclass(x), FUN = `[[`, i, drop = drop),
attr(x, "tzone"), oldClass(x))
}
as.list.POSIXlt <- function(x, ...)
{
nms <- names(x)
names(x) <- NULL
y <- lapply(X = do.call(Map, c(list, unclass(x))),
FUN = .POSIXlt, attr(x, "tzone"), oldClass(x))
names(y) <- nms
y
}
`[[<-.POSIXlt` <- function(x, i, value)
{
cl <- oldClass(x)
class(x) <- NULL
if(!missing(i) && is.character(i)) {
nms <- names(x$year)
for(n in names(x))
names(x[[n]]) <- nms
}
value <- unclass(as.POSIXlt(value))
for(n in names(x))
x[[n]][[i]] <- value[[n]]
class(x) <- cl
x
}
as.list.difftime <- function(x, ...)
lapply(unclass(x), .difftime, attr(x, "units"), oldClass(x))
rep.difftime <- function(x, ...)
.difftime(NextMethod("rep"), attr(x, "units"), oldClass(x))
`[<-.difftime` <- function(x, i, value) {
if(inherits(value, "difftime") && !identical(units(x), units(value)))
units(value) <- units(x)
NextMethod("[<-")
}
as.vector.POSIXlt <- function(x, mode = "any")
as.vector(as.list(x), mode)
|
test_that("locale is set to English", {
skip_on_cran()
set_time_locale("eng")
expect_match(Sys.getlocale("LC_TIME"), "(English|en)")
})
test_that("locale is set to German", {
skip_on_cran()
set_time_locale("deu")
expect_match(Sys.getlocale("LC_TIME"), "(German|de)")
}) |
library(liger)
set.seed(0)
X <- abs(rnorm(1000, 1))
Y <- abs(rnorm(1000, 1))
fc <- log2(X/Y)
fc <- sort(fc, decreasing=TRUE)
names(fc) <- paste0('gene', 1:length(fc))
barplot(fc, xaxt='n')
genesets <- list(
A = paste0('gene', seq(from=1, to=100, by=10)),
B = paste0('gene', seq(from=1, to=750, by=10)),
C = paste0('gene', seq(from=250, to=1000, by=10)),
D = paste0('gene', seq(from=900, to=1000, by=10))
)
iterative.bulk.gsea(values = fc, set.list = genesets)
gsea(values = fc, geneset = genesets$A)
gsea(values = fc, geneset = genesets$D)
gsea(values = fc, geneset = genesets$B)
gsea(values = fc, geneset = genesets$C) |
extractNetCDF<-function(ncdf_files, bbox = NULL, offset = 0, cells = NULL, export = TRUE,
exportDir = getwd(), exportFormat = "meteoland/txt", mpfilename = "MP.txt") {
nfiles = length(ncdf_files)
cat(paste("Number of NetCDFs: ", nfiles,"\n", sep=""))
ncname<-ncdf_files[1]
ncin <- nc_open(ncname)
lat <- ncvar_get(ncin, "lat")
lon <- ncvar_get(ncin, "lon")
varlist <- .nc_get_varlist(ncin)
nx = nrow(lat)
ny = ncol(lat)
cat(paste("NetCDF grid: nx",nx, "ny",ny,"ncells", nx*ny,"\n"))
sel = matrix(FALSE, nrow=nx, ncol=ny)
vertices = FALSE
if(!is.null(bbox)) {
if((ncol(bbox)!= 2)||(nrow(bbox)!= 2)) stop("Wrong dimensions of bbox")
if(is.null(dimnames(bbox))){
colnames(bbox)<-c("min","max")
rownames(bbox)<-c("lon","lat")
}
vertices = ("lat_vertices" %in% varlist) & ("lon_vertices" %in% varlist)
if(vertices) {
lat_ver <- ncvar_get(ncin, "lat_vertices")
lon_ver <- ncvar_get(ncin, "lon_vertices")
for(v in 1:4) {
sel1 = (lon_ver[v,,] +offset >= bbox[1,1]) &
(lon_ver[v,,] - offset <= bbox[1,2]) &
(lat_ver[v,,] +offset >= bbox[2,1]) &
(lat_ver[v,,] -offset <= bbox[2,2])
sel = sel | sel1
}
minlon = pmin(lon_ver[1,,], lon_ver[2,,], lon_ver[3,,], lon_ver[4,,])
maxlon = pmax(lon_ver[1,,], lon_ver[2,,], lon_ver[3,,], lon_ver[4,,])
minlat = pmin(lat_ver[1,,], lat_ver[2,,], lat_ver[3,,], lat_ver[4,,])
maxlat = pmax(lat_ver[1,,], lat_ver[2,,], lat_ver[3,,], lat_ver[4,,])
selbox = (bbox[1,1]>=minlon) &
(bbox[2,1]>=minlat) &
(bbox[1,2]<=maxlon) &
(bbox[2,2]<=maxlat)
sel = sel | selbox
} else {
veclat<-(lat+offset >=bbox[2,1]) & (lat -offset <=bbox[2,2])
veclon<-(lon+offset >=bbox[1,1]) & (lon - offset <=bbox[1,2])
sel=veclat & veclon
}
} else if(!is.null(cells)) {
if(!is.matrix(cells)) stop("'cells' has to be a matrix")
if(ncol(cells)!=2) stop("'cells' has to be a matrix of two columns")
for(i in 1:nrow(cells)) sel[cells[i,1],cells[i,2]] = TRUE
  } else {
    cat("No user cell selection. All cells will be extracted.\n")
    sel[] = TRUE # otherwise no cell is selected and the function stops below
  }
nc_close(ncin)
ncells = sum(sel)
cat(paste("Cells to extract: ", ncells,"\n", sep=""))
if(ncells==0) stop("No cells to extract. Stopping.")
dates = NULL
for(filei in 1:nfiles) {
ncin <- nc_open(ncdf_files[filei])
t <- ncvar_get(ncin, "time")
nt = length(t)
tunits <- ncatt_get(ncin, "time", "units")
nc_close(ncin)
s = strsplit(tunits$value, " ")[[1]]
s = s[3]
t <- floor(t)
if(length(unique(t))!=length(t)) stop("Duplicated days!")
maxday <-max(t)
minday <-min(t)
refDate = as.Date(s)
datesfile <- as.character(seq.Date(refDate, length.out=maxday, by="day")[t])
dates = sort(unique(c(dates, datesfile)))
}
ndates <-length(dates)
cat(paste("Period to extract: ", dates[1]," to ", dates[length(dates)]," (", ndates," days)\n", sep=""))
cat("\n\n")
dfvec = vector("list",ncells)
dfout = data.frame(xi = rep(NA,ncells), yi = rep(NA,ncells), dir = rep("", ncells),
filename=rep("", ncells), format = rep(exportFormat, ncells),
v1_lon = rep(NA,ncells), v1_lat = rep(NA,ncells),
v2_lon = rep(NA,ncells), v2_lat = rep(NA,ncells),
v3_lon = rep(NA,ncells), v3_lat = rep(NA,ncells),
v4_lon = rep(NA,ncells), v4_lat = rep(NA,ncells))
dfout$dir = as.character(dfout$dir)
dfout$filename = as.character(dfout$filename)
rownames(dfout) = 1:ncells
cc = cbind(rep(NA, ncells), rep(NA, ncells))
rownames(cc)<-1:ncells
colnames(cc)<-c("lon","lat")
cnt = 1
for(xi in 1:nrow(sel)) {
for(yi in 1:ncol(sel)) {
if(sel[xi,yi]) {
cc[cnt,] = c(lon[xi,yi],lat[xi,yi])
cnt = cnt+1
}
}
}
points = SpatialPoints(cc, proj4string = CRS(SRS_string = "EPSG:4326"))
spdf = SpatialPointsDataFrame(points, dfout)
cnt = 1
for(xi in 1:nrow(sel)) {
for(yi in 1:ncol(sel)) {
if(sel[xi,yi]) {
spdf@data$xi[cnt] = xi
spdf@data$yi[cnt] = yi
cat(paste("Extracting data for cell (",cnt," of ",ncells,"): [",xi,", ",yi,"]\n",sep=""))
df = data.frame(matrix(NA, nrow = ndates, ncol = 9), row.names = as.character(dates))
names(df) = c("DOY","MeanTemperature","MinTemperature",
"MaxTemperature","Precipitation","SpecificHumidity", "MeanRelativeHumidity",
"Radiation","WindSpeed")
df[,"DOY"] = as.POSIXlt(as.Date(dates))$yday+1
pb = txtProgressBar(0, nfiles, 0, style = 3)
for(filei in 1:nfiles) {
setTxtProgressBar(pb, filei-1)
ncin <- nc_open(ncdf_files[filei])
t <- ncvar_get(ncin, "time")
nt = length(t)
tunits <- ncatt_get(ncin, "time", "units")
s = strsplit(tunits$value, " ")[[1]]
s = s[3]
t <- floor(t)
if(length(unique(t))!=length(t)) stop("Duplicated days!")
maxday <-max(t)
minday <-min(t)
refDate = as.Date(s)
datesfile <- as.character(seq.Date(refDate, length.out=maxday, by="day")[t])
varlist = .nc_get_varlist(ncin)
for(var in varlist) {
if(var=="huss") {
vec = ncvar_get(ncin,varid = var, start = c(xi, yi, 1), count=c(1,1,length(datesfile)))
df[datesfile,"SpecificHumidity"] = vec
} else if(var=="tas") {
vec = ncvar_get(ncin,varid = var, start = c(xi, yi, 1), count=c(1,1,length(datesfile)))
df[datesfile,"MeanTemperature"] = vec - 273.15
} else if(var=="tasmin") {
vec = ncvar_get(ncin,varid = var, start = c(xi, yi, 1), count=c(1,1,length(datesfile)))
df[datesfile,"MinTemperature"] = vec - 273.15
} else if(var=="tasmax") {
vec = ncvar_get(ncin,varid = var, start = c(xi, yi, 1), count=c(1,1,length(datesfile)))
df[datesfile,"MaxTemperature"] = vec - 273.15
} else if(var=="pr") {
vec = ncvar_get(ncin,varid = var, start = c(xi, yi, 1), count=c(1,1,length(datesfile)))
df[datesfile,"Precipitation"] = vec*3600*24
} else if(var=="rsds") {
vec = ncvar_get(ncin,varid = var, start = c(xi, yi, 1), count=c(1,1,length(datesfile)))
df[datesfile,"Radiation"] = vec*3600*24/1000000
} else if(var=="sfcWind") {
vec = ncvar_get(ncin,varid = var, start = c(xi, yi, 1), count=c(1,1,length(datesfile)))
df[datesfile,"WindSpeed"] = vec
}
}
nc_close(ncin)
cat("\n")
}
close(pb)
if(sum((!is.na(df$MeanTemperature)) & (!is.na(df$SpecificHumidity)))>0) {
df$MeanRelativeHumidity = humidity_specific2relative(Tc=df$MeanTemperature ,HS=df$SpecificHumidity, allowSaturated = TRUE)
}
if(!export) {
dfvec[[cnt]] = df
} else {
if(exportFormat %in% c("meteoland/txt","castanea/txt")) formatType = "txt"
else if (exportFormat %in% c("meteoland/rds","castanea/rds")) formatType = "rds"
filename = paste0("P_",xi,"_",yi,".",formatType)
if(exportDir!="") dir = paste(getwd(),exportDir,sep="/")
else dir = getwd()
spdf@data$dir[cnt] = dir
spdf@data$filename[cnt] = filename
if(vertices) {
spdf@data$v1_lat[cnt] = lat_ver[1,xi,yi]
spdf@data$v2_lat[cnt] = lat_ver[2,xi,yi]
spdf@data$v3_lat[cnt] = lat_ver[3,xi,yi]
spdf@data$v4_lat[cnt] = lat_ver[4,xi,yi]
spdf@data$v1_lon[cnt] = lon_ver[1,xi,yi]
spdf@data$v2_lon[cnt] = lon_ver[2,xi,yi]
spdf@data$v3_lon[cnt] = lon_ver[3,xi,yi]
spdf@data$v4_lon[cnt] = lon_ver[4,xi,yi]
}
if(exportDir!="") f = paste(exportDir,filename, sep="/")
else f = filename
writemeteorologypoint(df,f, exportFormat)
cat(paste(" File output: ",f, "\n", sep=""))
if(exportDir!="") f = paste(exportDir,mpfilename, sep="/")
else f = mpfilename
write.table(as.data.frame(spdf),
file= f ,sep="\t", quote=FALSE)
}
cnt = cnt+1
cat(paste("\n"))
}
}
}
cat(paste("Done.","\n"))
  # dfvec is only populated when export = FALSE, so that is the condition to test
  if(!export) return(SpatialPointsMeteorology(points = points, data = dfvec, dates = dates))
return(spdf)
} |
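## Usage sketch (assumptions: the ncdf4 package supplies nc_open()/ncvar_get(),
## and the files use the CORDEX-style variable names matched above; the
## directory and bounding box are illustrative):
if (FALSE) {
  files <- list.files("climate", pattern = "\\.nc$", full.names = TRUE)
  bb <- matrix(c(0, 40, 3, 43), nrow = 2, ncol = 2,
               dimnames = list(c("lon", "lat"), c("min", "max")))
  mp <- extractNetCDF(files, bbox = bb, exportDir = "extracted")
}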
differentially_uprate_wage <- function(wage = 1, from_fy, to_fy, ...){
stopifnot(all(from_fy %in% c("2003-04", "2004-05", "2005-06",
"2006-07", "2007-08", "2008-09",
"2009-10", "2010-11", "2011-12",
"2012-13", "2013-14", "2014-15",
"2015-16", "2016-17")))
input <-
data.table(fy.year = from_fy,
wage = wage) %>%
.[, `_order` := 1:.N] %>%
setkeyv(cols = c("fy.year", "wage"))
`_order` <- NULL
differential_sw_uprates %>%
assertthat::has_name(., "uprate_factor") %>%
assertthat::validate_that(.)
uprate_factor <- NULL
`_out` <- NULL
differential_sw_uprates[salary_by_fy_swtile, on = "Sw_amt_percentile"] %>%
setnames(old = "min_salary", new = "wage") %>%
setkeyv(cols = c("fy.year", "wage")) %>%
.[input, roll = "nearest"] %>%
.[, `_out` := wage * (uprate_factor * (wage_inflator(from_fy = from_fy, to_fy = to_fy, ...) - 1) + 1)] %>%
setkeyv("_order") %>%
.[["_out"]]
} |
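## Usage sketch (assumes the package-internal lookup tables
## `differential_sw_uprates` and `salary_by_fy_swtile` referenced above are
## available, as they are inside the package namespace):
if (FALSE) {
  differentially_uprate_wage(wage = 50e3, from_fy = "2013-14", to_fy = "2016-17")
}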
get_positions_options <- function(RH, trim_pending = TRUE) {
RobinHood::check_rh(RH)
options <- RobinHood::api_positions_options(RH)
x <- unique(options$option)
options_instruments <- data.frame()
for (i in x) {
y <- RobinHood::api_instruments_options(RH, method = "url", option_instrument_url = i)
y <- y %>%
dplyr::select(c("url", "type", "state", "strike_price", "rhs_tradability", "tradability")) %>%
dplyr::rename(c("option_type" = "type", "option" = "url"))
options_instruments <- rbind(options_instruments, y)
}
options$option <- as.character(options$option)
options_instruments$option <- as.character(options_instruments$option)
options <- dplyr::inner_join(options, options_instruments, by = "option") %>%
dplyr::select(-c("updated_at"))
if (nrow(options) == 0) stop("You dont have any open positions")
x <- gsub("https://api.robinhood.com/options/instruments/", "", options$option)
x <- gsub("/", "", x)
option_market_data <- data.frame()
for (i in x) {
y <- RobinHood::api_marketdata(RH, i)
option_market_data <- rbind(option_market_data, y)
}
option_market_data <- option_market_data %>%
dplyr::rename("option" = "instrument") %>%
dplyr::mutate_at("option", as.character)
options <- dplyr::inner_join(options, option_market_data, by = "option")
options$current_value <- options$trade_value_multiplier * options$last_trade_price
if (trim_pending == TRUE) {
options <- options %>%
dplyr::select(c("chain_symbol", "option_type", "state", "strike_price", "average_price", "quantity",
"trade_value_multiplier", "last_trade_price", "current_value", "rhs_tradability", "tradability",
"type", "created_at", "updated_at"))
} else {
options <- options %>%
dplyr::select(c("chain_symbol", "option_type", "state", "strike_price", "average_price", "quantity",
"trade_value_multiplier", "last_trade_price", "current_value", "pending_buy_quantity",
"pending_expired_quantity", "pending_expiration_quantity", "pending_exercise_quantity",
"pending_assignment_quantity", "pending_sell_quantity", "intraday_quantity",
"intraday_average_open_price", "rhs_tradability", "tradability", "type", "created_at", "updated_at"))
}
return(options)
} |
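## Usage sketch (requires live credentials; RobinHood::RobinHood() is assumed
## to return the authenticated connection object that check_rh() validates):
if (FALSE) {
  RH <- RobinHood::RobinHood(username = "user", password = "pass")
  get_positions_options(RH, trim_pending = TRUE)
}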
identifyReplacementVariables <- function(filename_s) {
l <- lapply(filename_s, function(f) {
if (!file.exists(f)) abort(paste(f, 'is not an existing file'))
r <- readLines(f, warn = FALSE)
unlist(
Filter(function(e) length(e) > 0,
regmatches(r, gregexpr('XXX_[\\d]{3}', r, perl = TRUE)))
)
})
names(l) <- filename_s
l
} |
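## Sketch: the regex above matches placeholders of the form XXX_ followed by
## exactly three digits, returned per file.
if (FALSE) {
  f <- tempfile(fileext = ".txt")
  writeLines(c("title: XXX_001", "body: XXX_042 and XXX_7"), f)
  identifyReplacementVariables(f)  # "XXX_001", "XXX_042"; XXX_7 is not matched
}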
suppressMessages({
library(tiledb)
library(rbenchmark)
})
setwd("/tmp")
url <- "https://raw.githubusercontent.com/eddelbuettel/data-examples/master/flights/flights14.csv"
flights <- data.table::fread(url, data.table=FALSE)
createArrays <- function() {
fromDataFrame(flights, "flightsNONE", sparse = TRUE, allows_dups = TRUE, filter = "NONE")
fromDataFrame(flights, "flightsGZIP", sparse = TRUE, allows_dups = TRUE, filter = "GZIP")
fromDataFrame(flights, "flightsZSTD", sparse = TRUE, allows_dups = TRUE, filter = "ZSTD")
fromDataFrame(flights, "flightsLZ4", sparse = TRUE, allows_dups = TRUE, filter = "LZ4")
fromDataFrame(flights, "flightsBZIP2", sparse = TRUE, allows_dups = TRUE, filter = "BZIP2")
fromDataFrame(flights, "flightsRLE", sparse = TRUE, allows_dups = TRUE, filter = "RLE")
fromDataFrame(flights, "flightsDD", sparse = TRUE, allows_dups = TRUE, filter = "DOUBLE_DELTA")
}
time1 <- function() {
get1 <- function(uri) {
arr <- tiledb_array(uri)
arr[]
}
cat("\nTiming reading all data\n")
res <- benchmark(none=get1("flightsNONE"),
gzip=get1("flightsGZIP"),
zstd=get1("flightsZSTD"),
lz4=get1("flightsLZ4"),
bz2=get1("flightsBZIP2"),
rle=get1("flightsRLE"),
dd=get1("flightsDD"),
order="relative")
print(res[,1:4])
invisible(NULL)
}
time2 <- function() {
get <- function(uri) {
arr <- tiledb_array(uri)
selected_ranges(arr) <- list(matrix(c(100,200,1300,1400),2,2,byrow=TRUE))
arr[]
}
cat("\nTiming reading slice of data\n")
res <- benchmark(none=get("flightsNONE"),
gzip=get("flightsGZIP"),
zstd=get("flightsZSTD"),
lz4=get("flightsLZ4"),
bz2=get("flightsBZIP2"),
rle=get("flightsRLE"),
dd=get("flightsDD"),
order="relative", replications=1000)
print(res[,1:4])
invisible(NULL)
}
createArrays()
time1()
time2() |
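## Optional cleanup sketch: remove the seven arrays written under /tmp by
## createArrays() above (directory names assumed from the calls in this script).
if (FALSE) {
  unlink(paste0("flights", c("NONE", "GZIP", "ZSTD", "LZ4", "BZIP2", "RLE", "DD")),
         recursive = TRUE)
}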
formula.epiformula <- function(x, ...) {
return(parse_formula(x, ...))
}
parse_formula <- function(x, fixed.only = FALSE, random.only = FALSE, ...) {
  if (missing(fixed.only) && random.only)
    fixed.only <- FALSE
  if (fixed.only && random.only)
    stop("'fixed.only' and 'random.only' can't both be TRUE.", call. = FALSE)
  if (fixed.only) {
    # drop the random-effect bars from the RHS, then strip rw() terms
    x[[length(x)]] <- lme4::nobars(x[[length(x)]])
    x <- norws(x)
  }
  if (random.only) {
    x <- justRE(x, response = TRUE)
    x <- norws(x)
  }
  return(x)
}
norws <- function(x) {
form <- as.string.formula(x)
form <- gsub("rw\\(.*?\\) \\+ ", "", form)
form <- gsub("\\+ rw\\(.*?\\)", "", form)
form <- gsub("rw\\(.*?\\)", "", form)
form <- tryCatch({
as.formula(form)
},
error = function(cond) {
as.formula(paste(form, 1))
}
)
return(form)
}
as.string.formula <- function(x) {
form <- paste(deparse(x), collapse = " ")
form <- gsub("\\s+", " ", form, perl = FALSE)
return(form)
} |
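## Minimal sketch of what norws() does: strip rw() random-walk terms from a
## formula, falling back to an intercept-only RHS when nothing is left.
if (FALSE) {
  norws(y ~ x + rw(time))  # y ~ x
  norws(y ~ rw(time))      # y ~ 1 (via the error fallback)
}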
library(checkargs)
context("isNegativeIntegerOrNaOrInfVectorOrNull")
test_that("isNegativeIntegerOrNaOrInfVectorOrNull works for all arguments", {
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(TRUE, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(FALSE, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(NA, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(0, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(-1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(-0.1, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(0.1, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(1, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(NaN, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(-Inf, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(Inf, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull("", stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull("X", stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(TRUE, FALSE), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(FALSE, TRUE), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(NA, NA), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(0, 0), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(-1, -2), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(-0.1, -0.2), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(0.1, 0.2), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(1, 2), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(NaN, NaN), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(-Inf, -Inf), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(Inf, Inf), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c("", "X"), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c("X", "Y"), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(NULL, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(TRUE, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(FALSE, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(NA, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(0, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(-1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(-0.1, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(0.1, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(1, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(NaN, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(-Inf, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(Inf, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull("", stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull("X", stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c(TRUE, FALSE), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c(FALSE, TRUE), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(NA, NA), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(0, 0), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(-1, -2), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c(-0.1, -0.2), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c(0.1, 0.2), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c(1, 2), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c(NaN, NaN), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNegativeIntegerOrNaOrInfVectorOrNull(c(-Inf, -Inf), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c(Inf, Inf), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c("", "X"), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNegativeIntegerOrNaOrInfVectorOrNull(c("X", "Y"), stopIfNot = TRUE, message = NULL, argumentName = NULL))
}) |
library(DisImpact)
library(dplyr)
data(student_equity)
dim(student_equity)
library(knitr)
kable(student_equity[1:6, ], caption='A few rows from the `student_equity` data set.')
df_di_summary <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
)
dim(df_di_summary)
df_di_summary %>% head %>% as.data.frame
table(df_di_summary$Ed_Goal)
table(df_di_summary$College_Status)
table(df_di_summary$disaggregation)
df_di_summary %>%
filter(Ed_Goal=='- All', College_Status=='- All', disaggregation=='- None') %>%
as.data.frame
df_di_summary %>%
filter(Ed_Goal=='- All', College_Status=='- All', disaggregation=='- None') %>%
as.data.frame %>%
kable
df_di_summary %>%
filter(Ed_Goal=='- All', College_Status=='- All', success_variable=='Math', disaggregation=='- None') %>%
as.data.frame
library(ggplot2)
library(forcats)
library(scales)
df_di_summary %>%
filter(Ed_Goal=='- All', College_Status=='- All', success_variable=='Math', disaggregation=='- None') %>%
select(cohort, group, n, pct, di_indicator_ppg, di_indicator_prop_index, di_indicator_80_index) %>%
mutate(group=factor(group) %>% fct_reorder(desc(pct))) %>%
ggplot(data=., mapping=aes(x=factor(cohort), y=pct, group=group, color=group)) +
geom_point() +
geom_line() +
xlab('Cohort') +
ylab('Rate') +
theme_bw() +
  scale_color_manual(values = scales::hue_pal()(9)) + # placeholder palette; the original hex colors were truncated in the source
scale_y_continuous(labels = percent, limits=c(0, 1)) +
ggtitle('Dashboard drop-down selections:', subtitle=paste0("Ed Goal = '- All' | College Status = '- All' | Outcome = 'Math' | Disaggregation = '- None'"))
df_di_summary %>%
filter(Ed_Goal=='- All', College_Status=='- All', success_variable=='Math', disaggregation=='Ethnicity') %>%
select(cohort, group, n, pct, di_indicator_ppg, di_indicator_prop_index, di_indicator_80_index) %>%
as.data.frame
df_di_summary %>%
filter(Ed_Goal=='- All', College_Status=='- All', success_variable=='Math', disaggregation=='Ethnicity') %>%
select(cohort, group, n, pct, di_indicator_ppg, di_indicator_prop_index, di_indicator_80_index) %>%
mutate(group=factor(group) %>% fct_reorder(desc(pct))) %>%
ggplot(data=., mapping=aes(x=factor(cohort), y=pct, group=group, color=group)) +
geom_point(aes(size=factor(di_indicator_ppg, levels=c(0, 1), labels=c('Not DI', 'DI')))) +
geom_line() +
xlab('Cohort') +
ylab('Rate') +
theme_bw() +
  scale_color_manual(values = scales::hue_pal()(9)) + # placeholder palette; the original hex colors were truncated in the source
labs(size='Disproportionate Impact') +
scale_y_continuous(labels = percent, limits=c(0, 1)) +
ggtitle('Dashboard drop-down selections:', subtitle=paste0("Ed Goal = '- All' | College Status = '- All' | Outcome = 'Math' | Disaggregation = 'Ethnicity'"))
df_di_summary %>%
filter(Ed_Goal=='Deg/Transfer', College_Status=='- All', success_variable=='Math', disaggregation=='Ethnicity') %>%
select(cohort, group, n, pct, di_indicator_ppg, di_indicator_prop_index, di_indicator_80_index) %>%
as.data.frame
df_di_summary %>%
filter(Ed_Goal=='Deg/Transfer', College_Status=='- All', success_variable=='Math', disaggregation=='Ethnicity') %>%
select(cohort, group, n, pct, di_indicator_ppg, di_indicator_prop_index, di_indicator_80_index) %>%
mutate(group=factor(group) %>% fct_reorder(desc(pct))) %>%
ggplot(data=., mapping=aes(x=factor(cohort), y=pct, group=group, color=group)) +
geom_point(aes(size=factor(di_indicator_ppg, levels=c(0, 1), labels=c('Not DI', 'DI')))) +
geom_line() +
xlab('Cohort') +
ylab('Rate') +
theme_bw() +
  scale_color_manual(values = scales::hue_pal()(9)) + # placeholder palette; the original hex colors were truncated in the source
labs(size='Disproportionate Impact') +
scale_y_continuous(labels = percent, limits=c(0, 1)) +
ggtitle('Dashboard drop-down selections:', subtitle=paste0("Ed Goal = 'Deg/Transfer' | College Status = '- All' | Outcome = 'Math' | Disaggregation = 'Ethnicity'"))
df_di_summary %>%
filter(Ed_Goal=='Deg/Transfer', College_Status=='- All', success_variable=='English', disaggregation=='Gender') %>%
as.data.frame
df_di_summary %>%
filter(Ed_Goal=='Deg/Transfer', College_Status=='- All', success_variable=='English', disaggregation=='Gender') %>%
select(cohort, group, n, pct, di_indicator_ppg, di_indicator_prop_index, di_indicator_80_index) %>%
mutate(group=factor(group) %>% fct_reorder(desc(pct))) %>%
ggplot(data=., mapping=aes(x=factor(cohort), y=pct, group=group, color=group)) +
geom_point(aes(size=factor(di_indicator_ppg, levels=c(0, 1), labels=c('Not DI', 'DI')))) +
geom_line() +
xlab('Cohort') +
ylab('Rate') +
theme_bw() +
  scale_color_manual(values = scales::hue_pal()(9)) + # placeholder palette; the original hex colors were truncated in the source
labs(size='Disproportionate Impact') +
scale_y_continuous(labels = percent, limits=c(0, 1)) +
ggtitle('Dashboard drop-down selections:', subtitle=paste0("Ed Goal = 'Deg/Transfer' | College Status = '- All' | Outcome = 'English' | Disaggregation = 'Gender'"))
args(di_iterate)
dim(student_equity)
student_equity_summ <- student_equity %>%
group_by(Ethnicity, Gender, Cohort, Cohort_Math, Cohort_English, Ed_Goal, College_Status) %>%
summarize(N=n() %>% as.numeric
, Math=sum(Math, na.rm=TRUE)
, English=sum(English, na.rm=TRUE)
, Transfer=sum(Transfer, na.rm=TRUE)
) %>%
ungroup
dim(student_equity_summ)
student_equity_summ %>% head %>% as.data.frame
df_di_summary_2 <- di_iterate(data=student_equity_summ
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, weight_var='N'
)
dim(df_di_summary)
dim(df_di_summary_2)
dim(df_di_summary_2 %>% filter(!is.na(cohort)))
all.equal(df_di_summary
, df_di_summary_2 %>% filter(!is.na(cohort))
)
df_di_summary_2 <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, include_non_disagg_results=FALSE
)
dim(df_di_summary)
dim(df_di_summary_2)
table(df_di_summary$disaggregation)
table(df_di_summary_2$disaggregation)
df_di_summary_2 <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, ppg_reference_groups='hpg'
)
df_di_summary_2 <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, ppg_reference_groups='all but current'
)
df_di_summary_2 <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, ppg_reference_groups=c('White', 'Male')
)
df_di_summary_2 <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, di_prop_index_cutoff=0.9
)
df_di_summary_2 <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, di_80_index_reference_groups=c('White', 'Male')
)
df_di_summary_2 <- di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, di_80_index_cutoff=0.5
)
df_di_summary_long <- bind_rows(
di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
)
, di_iterate(data=student_equity
, success_vars=c('Math', 'English', 'Transfer')
, group_vars=c('Ethnicity', 'Gender')
, cohort_vars=c('Cohort_Math', 'Cohort_English', 'Cohort')
, scenario_repeat_by_vars=c('Ed_Goal', 'College_Status')
, ppg_reference_groups=c('White', 'Male')
, include_non_disagg_results = FALSE
)
)
dim(df_di_summary_long)
sessionInfo() |
d <- tempfile()
with_mock_api({
capture_while_mocking(simplify = FALSE, path = d, {
a <- request("api/") %>%
req_headers(`Authorization` = "Bearer token") %>%
req_perform()
})
test_that("The mock file does not have the request headers", {
expect_false(any(grepl("Bearer token", readLines(file.path(d, "api.R")))))
})
test_that("And the redacted .R mock can be loaded", {
with_mock_path(d, {
b <- request("api/") %>%
req_headers(`Authorization` = "Bearer token") %>%
req_perform()
})
expect_equal(resp_body_json(b), resp_body_json(a))
})
c2_req <- request("http://httpbin.org/cookies/set") %>%
req_url_query(token = 12345)
test_that("redact_cookies: the response has the set-cookie in the response", {
capture_while_mocking(simplify = FALSE, path = d, {
c2 <- req_perform(c2_req)
expect_equal(
resp_header(c2, "set-cookie"),
"token=12345; Domain=example.com; Max-Age=31536000; Path=/"
)
})
})
test_that("redact_cookies removes set-cookies from response in the mock file", {
expect_length(
grep(
"REDACTED",
readLines(file.path(d, "httpbin.org", "cookies", "set-5b2631.R"))
),
1
)
})
test_that("And when loading that .R mock, the redacted value doesn't appear", {
with_mock_path(d, {
expect_identical(
req_perform(c2_req) %>% resp_header("set-cookie"),
"REDACTED"
)
})
})
auth_req <- request("http://httpbin.org/basic-auth/user/passwd") %>%
req_auth_basic("user", "SeCrEtPaSsWoRd!") %>%
req_error(is_error = ~FALSE)
capture_while_mocking(simplify = FALSE, path = d, {
pwauth <- req_perform(auth_req)
})
test_that("there is no password in the mock", {
expect_false(any(grepl(
"SeCrEtPaSsWoRd!",
readLines(file.path(d, "httpbin.org", "basic-auth", "user", "passwd.R"))
)))
})
test_that("And the redacted .R mock can be loaded", {
with_mock_path(d, {
pwauthb <- req_perform(auth_req)
})
expect_equal(resp_body_json(pwauthb), resp_body_json(pwauth))
})
my_redactor <- function(response) {
if (!grepl("get_current_redactor()(req)", unlist(tail(sys.calls(), 1)), fixed = TRUE)) {
response$url <- "http://example.com/fakeurl"
}
cleaner <- function(x) gsub("loaded", "changed", x)
response <- within_body_text(response, cleaner)
return(response)
}
with_redactor(
my_redactor,
capture_while_mocking(simplify = FALSE, path = d, {
r <- request("http://example.com/get") %>% req_perform()
})
)
test_that("The real request is not affected by the redactor", {
expect_identical(r$url, "http://example.com/get")
expect_identical(resp_body_json(r), list(loaded = TRUE))
})
test_that("But the mock file gets written to the modified path with altered content", {
with_mock_path(d, replace = TRUE, {
expect_GET(
request("http://example.com/get") %>% req_perform(),
"http://example.com/get"
)
expect_error(alt <- request("http://example.com/fakeurl") %>% req_perform(), NA)
expect_identical(resp_body_json(alt), list(changed = TRUE))
})
})
test_that("Redactors are applied when making requests to alter the mock file path we're reading", {
with_redactor(
function(resp) gsub_response(resp, "long/url.*$", "get"),
r <- request("http://example.com/long/url/with/lots/of/segments") %>% req_perform()
)
expect_identical(r$url, "http://example.com/long/url/with/lots/of/segments")
expect_identical(resp_body_json(r), list(loaded = TRUE))
})
a <- request("api/") %>%
req_headers(`Authorization` = "Bearer token") %>%
req_perform()
test_that("gsub_response", {
asub <- gsub_response(a, "api", "OTHER")
expect_identical(asub$url, "OTHER/")
expect_identical(resp_body_json(asub), list(value = "OTHER/object1/"))
})
test_that("as.redactor", {
a2 <- prepare_redactor(~ gsub_response(., "api", "OTHER"))(a)
expect_identical(resp_body_json(a2), list(value = "OTHER/object1/"))
})
loc <- request("http://httpbin.org/response-headers") %>%
req_url_query(Location = "http://httpbin.org/status/201") %>%
req_perform()
loc_sub <- gsub_response(
loc, "http://httpbin.org/status/201",
"http://httpbin.org/status/404"
)
test_that("gsub_response touches Location header", {
expect_identical(
resp_header(loc_sub, "location"),
"http://httpbin.org/status/404"
)
expect_identical(
resp_body_json(loc_sub)$Location,
"http://httpbin.org/status/404"
)
})
test_that("gsub_response handles URL encoding", {
skip("TODO: handle URL escaping")
expect_identical(
loc_sub$url,
"http://httpbin.org/response-headers?Location=http%3A%2F%2Fhttpbin.org%2Fstatus%2F404"
)
})
})
test_that("chain_redactors", {
f1 <- function(x) x * 4
f2 <- ~ sum(c(., 3))
f12 <- chain_redactors(list(f1, f2))
f21 <- chain_redactors(list(f2, f1))
expect_equal(f12(5), 23)
expect_equal(f21(5), 32)
})
reset_redactors() |
dangl2014 <- function(setnr = NULL,
seedinfo = list(100,
paste(R.version$major, R.version$minor, sep = "."),
RNGkind()),
info = FALSE,
metaseedinfo = list(100,
paste(R.version$major, R.version$minor, sep = "."),
RNGkind())){
inf <- data.frame(n = c(50, 40), k = c(2,2), shape = c("spherical", "spherical"))
ref <- "Dangl R. (2014) A small simulation study. Journal of Simple Datasets 10(2), 1-10"
  if(isTRUE(info)) return(list(summary = inf, reference = ref))
if(is.null(metaseedinfo)) metaseedinfo <- seedinfo
set.seed(metaseedinfo[[1]])
RNGversion(metaseedinfo[[2]])
RNGkind(metaseedinfo[[3]][1], metaseedinfo[[3]][2])
if(setnr == 1) {
return(new("metadata.metric",
clusters = list(c1 = list(n = 25, mu = c(4,5), Sigma=diag(1,2)),
c2 = list(n = 25, mu = c(-1,-2), Sigma=diag(1,2))),
genfunc = MASS::mvrnorm, seedinfo = seedinfo))
}
if(setnr == 2){
return(new("metadata.metric",
clusters = list(c1 = list(n = 20, mu = c(0,2), Sigma=diag(1,2)),
c2 = list(n = 20, mu = c(-1,-2), Sigma=diag(1,2))),
genfunc = MASS::mvrnorm, seedinfo = seedinfo))
}
} |
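## Usage sketch (assumes the package defining the "metadata.metric" class is
## loaded): info = TRUE returns only the summary and reference, while a set
## number returns the metadata object from which the data can be generated.
if (FALSE) {
  dangl2014(info = TRUE)$summary
  md <- dangl2014(setnr = 1)  # "metadata.metric" object for set 1
}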
tar_test("value$object", {
x <- value_init(object = "abc", iteration = "list")
expect_equal(x$object, "abc")
})
tar_test("misspell list", {
expect_error(
value_init(object = "abc", iteration = "lst_dlkfjks"),
class = "tar_condition_validate"
)
})
tar_test("value_count_slices(list)", {
x <- value_init(object = "abc", iteration = "list")
x$object <- data_frame(x = seq_len(26), y = letters)
expect_equal(value_count_slices(x), 2L)
})
tar_test("value_produce_slice(list)", {
x <- value_init(object = "abc", iteration = "list")
x$object <- data_frame(x = seq_len(26), y = letters)
expect_equal(value_produce_slice(x, 1L), seq_len(26))
expect_equal(value_produce_slice(x, 2L), letters)
})
tar_test("value_hash_slice(list)", {
x <- value_init(object = "abc", iteration = "list")
x$object <- data_frame(x = seq_len(26), y = letters)
expect_equal(value_hash_slice(x, 1L), digest_obj32(seq_len(26)))
expect_equal(value_hash_slice(x, 2L), digest_obj32(letters))
})
tar_test("value_hash_slices(list)", {
x <- value_init(object = "abc", iteration = "list")
x$object <- data_frame(x = seq_len(26), y = letters)
exp <- c(digest_obj32(seq_len(26)), digest_obj32(letters))
expect_equal(value_hash_slices(x), exp)
})
tar_test("list$validate()", {
x <- value_init(object = "abc", iteration = "list")
expect_silent(value_validate(x))
}) |
twoSamples <- function(X, B = 1000,
seed = 1234, permReturn = TRUE,
label = NULL){
set.seed(seed)
if(is.null(dim(X))){X <- matrix(X, ncol = 1)}
if(is.null(label)){
if(is.null(rownames(X))){
stop("Please insert the labels of the observations which describe the two groups to perform the two-samples t-test.")
}
label <- rownames(X)}
label <- factor(label)
levels(label) <- c(0,1)
n <- nrow(X)
m <- ncol(X)
id <- levels(label)
n1 <- sum(label==id[1])
n2 <- sum(label==id[2])
  colV1 <- colVariances(X[,label == id[1]])
  colV2 <- colVariances(X[,label == id[2]])
  colM1 <- colMeans(X[,label == id[1]])
  colM2 <- colMeans(X[,label == id[2]])
pooled.var <- (colV1/n1 + colV2/n2)
Test <- (colM1 - colM2)/sqrt(pooled.var)
Test <- ifelse(is.na(Test), 0 , Test)
if(permReturn){
Test_H0 <- permGroup(as.matrix(t(X)),B-1,label)
Test_H0 <- ifelse(is.na(Test_H0), 0 , Test_H0)
Test <- matrix(cbind(Test, Test_H0), ncol = B)
}
return(Test)
} |
dbBegin_SQLiteConnection <- function(conn, .name = NULL, ..., name = NULL) {
name <- compat_name(name, .name)
if (is.null(name)) {
dbExecute(conn, "BEGIN")
} else {
dbExecute(conn, paste0("SAVEPOINT ", dbQuoteIdentifier(conn, name)))
}
invisible(TRUE)
}
setMethod("dbBegin", "SQLiteConnection", dbBegin_SQLiteConnection) |
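## Usage sketch: an unnamed begin issues BEGIN, a named one a SAVEPOINT
## (assumes the surrounding RSQLite package context, where compat_name()
## is defined).
if (FALSE) {
  con <- DBI::dbConnect(RSQLite::SQLite())
  DBI::dbBegin(con)               # BEGIN
  DBI::dbBegin(con, name = "sp1") # SAVEPOINT `sp1`
  DBI::dbDisconnect(con)
}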
impute.QRILC = function(dataSet.mvs,tune.sigma = 1){
  # note: relies on tmvtnorm::rtmvnorm for the truncated-normal draws below
nFeatures = dim(dataSet.mvs)[1]
nSamples = dim(dataSet.mvs)[2]
dataSet.imputed = dataSet.mvs
QR.obj = list()
for (i in 1:nSamples){
curr.sample = dataSet.mvs[,i]
pNAs = length(which(is.na(curr.sample)))/length(curr.sample)
upper.q = 0.99
q.normal = qnorm(seq((pNAs+0.001),(upper.q+0.001),(upper.q-pNAs)/(upper.q*100)),
mean = 0, sd = 1)
q.curr.sample = quantile(curr.sample,
probs = seq(0.001,(upper.q+0.001),0.01),
na.rm = T)
temp.QR = lm(q.curr.sample ~ q.normal)
QR.obj[[i]] = temp.QR
mean.CDD = temp.QR$coefficients[1]
sd.CDD = as.numeric(temp.QR$coefficients[2])
data.to.imp = rtmvnorm(n=nFeatures,
mean = mean.CDD,
sigma = sd.CDD*tune.sigma,
upper = qnorm((pNAs+0.001),
mean = mean.CDD,
sd = sd.CDD),
algorithm=c("gibbs"))
curr.sample.imputed = curr.sample
curr.sample.imputed[which(is.na(curr.sample))] = data.to.imp[which(is.na(curr.sample))]
dataSet.imputed[,i] = curr.sample.imputed
}
results = list(dataSet.imputed,QR.obj)
return(results)
} |
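## Usage sketch on a small log-intensity matrix with left-censored NAs
## (assumes tmvtnorm is attached so rtmvnorm() is visible).
if (FALSE) {
  library(tmvtnorm)
  set.seed(1)
  m <- matrix(rnorm(500, mean = 20, sd = 2), nrow = 100)
  m[m < 18] <- NA  # censor the low tail
  imp <- impute.QRILC(m)
  str(imp[[1]])    # imputed matrix; imp[[2]] holds the per-sample QR fits
}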
makeKeaneFunction = function() {
makeSingleObjectiveFunction(
name = "Keane Function",
id = "keane_2d",
fn = function(x) {
assertNumeric(x, len = 2L, any.missing = FALSE, all.missing = FALSE)
a = sin(x[1] - x[2])^2 * sin(x[1] + x[2])^2
b = sqrt(x[1]^2 + x[2]^2)
return (a / b)
},
par.set = makeNumericParamSet(
len = 2L,
id = "x",
lower = c(0, 0),
upper = c(10, 10),
vector = TRUE
),
minimize = FALSE,
tags = attr(makeKeaneFunction, "tags"),
global.opt.params = matrix(
c(0, 1.39325,
1.39325, 0),
ncol = 2L, byrow = TRUE),
global.opt.value = 0.673668
)
}
class(makeKeaneFunction) = c("function", "smoof_generator")
attr(makeKeaneFunction, "name") = c("Keane")
attr(makeKeaneFunction, "type") = c("single-objective")
attr(makeKeaneFunction, "tags") = c("single-objective", "continuous", "differentiable", "non-separable", "non-scalable", "multimodal") |
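## Sanity-check sketch (with smoof loaded): evaluating the objective at one
## of the listed optima should be close to the documented maximum ~0.673668.
if (FALSE) {
  f <- makeKeaneFunction()
  f(c(0, 1.39325))
}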
raplot.paired <- function(x,y,xlab="Species rank",ylab="log (Relative abundance)",
main="Rank-abundance",
sym=c(1,2),alpha=0.05) {
nz <- max(sum(y!=0),sum(x!=0))
Sx <- length(x)
Nx <- sum(x)
indexx <- order(x, decreasing=TRUE)
rel.abundance.sorted.x <- x[indexx]/Nx
log.rel.abu.x <- log(rel.abundance.sorted.x)
speciesrank.x <- 1:Sx
ppar <- par(pty="s")
on.exit(par(ppar))
plot(speciesrank.x[1:nz],log.rel.abu.x[1:nz],pch=sym[1],main=main,
xlab=xlab,ylab=ylab,col="red")
xs <- sort(x,decreasing = TRUE)
k.ml.x <- k_ml(xs)
lpi.ml.x <- estim(Sx,k.ml.x)
Sy <- length(y)
Ny <- sum(y)
indexy <- order(y, decreasing=TRUE)
rel.abundance.sorted.y <- y[indexy]/Ny
log.rel.abu.y <- log(rel.abundance.sorted.y)
speciesrank.y <- 1:Sy
points(speciesrank.y[1:nz],log.rel.abu.y[1:nz],pch=sym[2],col="blue")
ys <- sort(y,decreasing = TRUE)
k.ml.y <- k_ml(ys)
lpi.ml.y <- estim(Sy,k.ml.y)
vml <- var.ml(Nx,Sx,k.ml.x)
ml.ll.x <- k.ml.x - qnorm(1-alpha/2)*sqrt(vml)
ml.ul.x <- k.ml.x + qnorm(1-alpha/2)*sqrt(vml)
  slope.seq.x <- seq(ml.ll.x,ml.ul.x,by=0.001)
  ix <- 1:Sx
  vml.y <- var.ml(Ny,Sy,k.ml.y)
  ml.ll.y <- k.ml.y - qnorm(1-alpha/2)*sqrt(vml.y)
  ml.ul.y <- k.ml.y + qnorm(1-alpha/2)*sqrt(vml.y)
  slope.seq.y <- seq(ml.ll.y,ml.ul.y,by=0.001)
  iy <- 1:Sy
  # draw the confidence bands for each sample separately: the two slope
  # sequences need not have the same length, and each band must use its
  # own species count (Sx vs Sy)
  for(j in seq_along(slope.seq.x)) {
    lpi.ml.lims.x <- estim(Sx,slope.seq.x[j])
    segments(ix[1],lpi.ml.lims.x[1],ix[Sx],lpi.ml.lims.x[Sx],col="grey",lwd=2)
  }
  for(j in seq_along(slope.seq.y)) {
    lpi.ml.lims.y <- estim(Sy,slope.seq.y[j])
    segments(iy[1],lpi.ml.lims.y[1],iy[Sy],lpi.ml.lims.y[Sy],col="grey",lwd=2)
  }
  segments(ix[1],lpi.ml.x[1],ix[Sx],lpi.ml.x[Sx],col="red")
  segments(iy[1],lpi.ml.y[1],iy[Sy],lpi.ml.y[Sy],col="blue")
points(speciesrank.x[1:nz],log.rel.abu.x[1:nz],pch=sym[1],col="red")
points(speciesrank.y[1:nz],log.rel.abu.y[1:nz],pch=sym[2],col="blue")
par(ppar)
} |
d13C.to.iWUE <- function(d13C, year, elevation, temp, frac = 0) {
d13C.plant <- d13C
d13C.atm <- CO2data[which(CO2data$yr == year),3]
Ca <- CO2data[which(CO2data$yr == year),2]
a <- 4.4
b <- 28
d <- frac
D13C <- ((d13C.atm - (d13C.plant - d))/(1 + ((d13C.plant - d)/1000)))
f <- 12
P0 <- 101325
Base.temp <- 298.15
ALR <- 0.0065
Grav <- 9.80665
R <- 8.3145
MWair <- 0.028963
Patm <- P0*(1.0 - ALR*elevation/Base.temp)^(Grav*MWair/(R*ALR))
deltaHa <- 37830
Temp.C <- temp
Gammastar25 <- 4.332
  Gammastar <- Gammastar25*Patm/P0*exp((deltaHa*((Temp.C+273.15)-298.15))/(R*(Temp.C+273.15)*298.15))
pCa <- (1.0e-6)*Ca*Patm
Ci <- ((D13C-a+f*(Gammastar/pCa))/(b-a))*Ca
iWUE <- (Ca - Ci)*0.625
return(iWUE)
} |
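## Usage sketch (assumes the accompanying CO2data table has a row for the
## requested year; the inputs below are illustrative values only).
if (FALSE) {
  d13C.to.iWUE(d13C = -27, year = 2000, elevation = 300, temp = 15)
}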
SUE.lm <-
function(formula, data=list(),k, ns, r, constant = 0.25, consistency.check= TRUE){
call <- match.call()
if (missing(data))
data <- environment(formula)
mf=model.frame(formula=formula, data=data)
if (missing(k)){
para=parameters(nrow(mf), method="appro.k")
k=para$k
r=para$r
ns=para$ns
}
subsample=mf[1:ns,]
for (i in 1:(k-1)){
subsample=rbind(subsample,mf[1:ns,])
}
mse=seq(k)
for (i in 1:k){
choice=sample(1:nrow(mf),ns)
subsample[((i-1)*ns+1):(i*ns),]=mf[choice,]
fit=lm(formula=formula, data=subsample[((i-1)*ns+1):(i*ns),])
mse[i]=anova(fit)$Mean[length(anova(fit)$Mean)]
}
index=order(mse)[1:r]
aa=rbind(subsample[((index[1]-1)*ns+1):(index[1]*ns),],subsample[((index[2]-1)*ns+1):(index[2]*ns),])
Sg=unique(aa)
for (i in 1:(r-2)){
bb=rbind(Sg,subsample[((index[i+2]-1)*ns+1):(index[i+2]*ns),])
Sg=unique(bb)
}
B=seq(k*length(fit$coeff))
dim(B)=c(k,length(fit$coeff))
for (i in 1:k){
fit=lm(formula=formula, data=subsample[((i-1)*ns+1):(i*ns),])
B[i,]=fit$coefficients
}
distance=function(a,b){
t1=abs(a)+abs(b)
t2=1+abs(a)
t1[which(t2>t1)]=t2[which(t2>t1)]
dis=max(abs(a-b)/t1)
dis
}
n=0
d=seq(length(index)-1)
for (i in 1:(length(index)-1)){
d[i]=distance(B[index[1],],B[index[i+1],])
}
output=lm(formula=formula, data=Sg)
output$call=call
  output$p=list(formula=formula, data=data, k=k, ns=ns, r=r, constant=constant)
output$combined.sample=Sg
output$sample.size=nrow(Sg)
output$mse=sort(mse)[1:length(index)]
output$beta=B[index,]
output$distance=d
output$subsample=subsample
output$MSE=mse
  if (consistency.check) {
    output$check <- if (any(d > constant)) "NO" else "YES"
  }
output
} |
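## Usage sketch on simulated data (assumes the package's parameters() helper
## is available); k, ns and r are derived automatically when k is missing.
if (FALSE) {
  set.seed(1)
  d <- data.frame(x = rnorm(200))
  d$y <- 1 + 2 * d$x + rnorm(200)
  fit <- SUE.lm(y ~ x, data = d)
  coef(fit)
  fit$check  # "YES" when the retained subsample estimates agree
}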
source("ESEUR_config.r")
library("reshape2")
pal_col=rainbow(9)
adams=read.csv(paste0(ESEUR_dir, "reliability/adams84.csv.xz"), as.is=TRUE)
adams_l=melt(adams, "P1", variable.name="product", value.name="defect_p")
adams_l=subset(adams_l, defect_p != 0)
adams_l$l_P1=log(adams_l$P1)
plot(adams_l$P1, adams_l$defect_p, log="xy", col=pal_col[adams_l$product],
xlab="Product usage time (months)", ylab="Reported faults (percentage)\n") |
nearZeroVar <- function (x, freqCut = 95/5, uniqueCut = 10, saveMetrics = FALSE, names = FALSE, foreach = FALSE, allowParallel = TRUE) {
if(!foreach) return(nzv(x, freqCut = freqCut, uniqueCut = uniqueCut, saveMetrics = saveMetrics, names = names))
`%op%` <- getOper(foreach && allowParallel && getDoParWorkers() > 1)
if(saveMetrics) {
res <- foreach(name = colnames(x), .combine=rbind) %op% {
r <- nzv(x[[name]], freqCut = freqCut, uniqueCut = uniqueCut, saveMetrics = TRUE)
r[,"column" ] <- name
r
}
res <- res[, c(5, 1, 2, 3, 4)]
rownames(res) <- as.character(res$column)
res$column <- NULL
} else {
res <- foreach(name = colnames(x), .combine=c) %op% {
r <- nzv(x[[name]], freqCut = freqCut, uniqueCut = uniqueCut, saveMetrics = FALSE)
if (length(r) > 0 && r == 1) TRUE else FALSE
}
res <- which(res)
if(names){
res <- colnames(x)[res]
}
}
res
}
nzv <- function (x, freqCut = 95/5, uniqueCut = 10, saveMetrics = FALSE, names = FALSE)
{
if (is.null(dim(x))) x <- matrix(x, ncol = 1)
freqRatio <- apply(x, 2, function(data)
{
t <- table(data[!is.na(data)])
if (length(t) <= 1) {
return(0);
}
w <- which.max(t);
return(max(t, na.rm=TRUE)/max(t[-w], na.rm=TRUE))
})
lunique <- apply(x, 2, function(data) length(unique(data[!is.na(data)])))
percentUnique <- 100 * lunique / apply(x, 2, length)
zeroVar <- (lunique == 1) | apply(x, 2, function(data) all(is.na(data)))
if (saveMetrics)
{
out <- data.frame(freqRatio = freqRatio,
percentUnique = percentUnique,
zeroVar = zeroVar,
nzv = (freqRatio > freqCut & percentUnique <= uniqueCut) | zeroVar)
}
else {
out <- which((freqRatio > freqCut & percentUnique <= uniqueCut) | zeroVar)
names(out) <- NULL
if(names){
out <- colnames(x)[out]
}
}
out
}
zeroVar <- function(x)
{
x <- x[,colnames(x) != ".outcome", drop = FALSE]
which(apply(x, 2, function(x) length(unique(x)) < 2))
}
checkConditionalX <- function(x, y)
{
x$.outcome <- y
unique(unlist(dlply(x, .(.outcome), zeroVar)))
}
checkResamples <- function(index, x, y)
{
if(!is.factor(y)) stop("y must be a factor")
if(length(levels(y)) < 2) stop("y must have at least 2 levels")
wrap <- function(index, x, y) checkConditionalX(x[index,,drop=FALSE], y[index])
unique(unlist(lapply(index, wrap, x = x, y = y)))
} |
library(glmm)
data(BoothHobert)
clust <- makeCluster(2)
set.seed(1234)
mod.mcml1<-glmm(y~0+x1,list(y~0+z1),varcomps.names=c("z1"), data=BoothHobert, family.glmm=bernoulli.glmm, m=21, doPQL=TRUE, debug=TRUE, cluster=clust)
mod.mcml<-mod.mcml1$mod.mcml
z<-mod.mcml$z[[1]]
x<-mod.mcml$x
y<-mod.mcml$y
stuff<-mod.mcml1$debug
beta.pql<-stuff$beta.pql
nu.pql<-stuff$nu.pql
u.pql<-u.star<-stuff$u.star
umat<-stuff$umat
m1<-stuff$m1
family.glmm<-bernoulli.glmm
objfun<-glmm:::objfun
getEk<-glmm:::getEk
addVecs<-glmm:::addVecs
if(is.null(mod.mcml$weights)){
wts <- rep(1, length(y))
} else{
wts <- mod.mcml$weights
}
logfyuk<-function(eta,x,y){
value<-sum(y*eta)-sum(log(1+exp(eta)))
Pi<-exp(eta)/(1+exp(eta))
gradient<-sum(y*x)-sum(x*Pi)
hessian<-sum(x^2*(-Pi+Pi^2) )
list(value=value,gradient=gradient,hessian=hessian)
}
eta<-rep(2,150)
ntrials <- rep(1, 150)
this<-.C(glmm:::C_elc,as.double(mod.mcml$y), as.double(mod.mcml$x), as.integer(nrow(mod.mcml$x)), as.integer(ncol(mod.mcml$x)), as.double(eta), as.integer(1), ntrials=as.integer(ntrials), wts=as.double(wts),value=double(1), gradient=double(ncol(mod.mcml$x)), hessian=double((ncol(mod.mcml$x)^2)))
that<-logfyuk(eta,mod.mcml$x,mod.mcml$y)
all.equal(as.numeric(this$value),as.numeric(that$value))
all.equal(as.numeric(this$gradient),as.numeric(that$gradient))
all.equal(as.numeric(this$hessian),as.numeric(that$hessian))
this<-.C(glmm:::C_elval, as.double(mod.mcml$y), as.integer(nrow(mod.mcml$x)), as.integer(ncol(mod.mcml$x)), as.double(eta), as.integer(1), ntrials=as.integer(ntrials), wts=as.double(wts), value=double(1))
all.equal(as.numeric(this$value),as.numeric(that$value))
this<-.C(glmm:::C_elGH, as.double(mod.mcml$y), as.double(mod.mcml$x), as.integer(nrow(mod.mcml$x)), as.integer(ncol(mod.mcml$x)), as.double(eta), as.integer(1), ntrials=as.integer(ntrials), wts=as.double(wts), gradient=double(ncol(mod.mcml$x)), hessian=double((ncol(mod.mcml$x)^2)))
all.equal(as.numeric(this$gradient),as.numeric(that$gradient))
all.equal(as.numeric(this$hessian),as.numeric(that$hessian))
distRandCheck<-function(nu,uvec,muvec){
ukmuk<-sum((uvec-muvec)^2)
value<- -length(uvec)*.5*log(2*pi)-5*log(nu)-ukmuk/(2*nu)
gradient<- -5/nu +ukmuk/(2*nu^2)
hessian<- 5/(nu^2)-ukmuk/(nu^3)
hessian<-as.matrix(hessian)
list(value=value,gradient=gradient,hessian=hessian)
}
distRand <-
function(nu,U,z.list,mu){
T<-length(z.list)
nrand<-lapply(z.list,ncol)
nrandom<-unlist(nrand)
totnrandom<-sum(nrandom)
mu.list<-U.list<-NULL
if(T==1) {
U.list[[1]]<-U
mu.list[[1]]<-mu
}
if(T>1){
U.list[[1]]<-U[1:nrandom[1]]
mu.list[[1]]<-mu[1:nrandom[1]]
for(t in 2:T){
thing1<-sum(nrandom[1:t-1])+1
thing2<-sum(nrandom[1:t])
U.list[[t]]<-U[thing1:thing2]
mu.list[[t]]<-mu[thing1:thing2]
}
}
val<-gradient<-Hessian<-rep(0,T)
for(t in 1:T){
you<-as.vector(U.list[[t]])
mew<-as.vector(mu.list[[t]])
Umu<-(you-mew)%*%(you-mew)
val[t]<--length(you)*.5*log(2*pi)+ as.numeric(-.5*nrandom[t]*log(nu[t])-Umu/(2*nu[t]))
gradient[t]<- -nrandom[t]/(2*nu[t])+Umu/(2*(nu[t])^2)
Hessian[t]<- nrandom[t]/(2*(nu[t])^2)- Umu/((nu[t])^3)
}
value<-sum(val)
if(T>1) hessian<-diag(Hessian)
if(T==1) hessian<-matrix(Hessian,nrow=1,ncol=1)
list(value=value,gradient=gradient,hessian=hessian)
}
you<-umat[1,]
this<-distRandCheck(2,you,u.pql)
that<-distRand(2,you,mod.mcml$z,u.pql)
all.equal(this,that)
del<-10^(-9)
thisdel<-distRandCheck(2+del,you,u.pql)
firstthing<-thisdel$value-this$value
secondthing<-as.vector(this$gradient%*%del)
all.equal(firstthing,secondthing)
mynu<-2
mymu<-rep(0,10)
T<-1
nrandom<-10
meow<-c(0,10)
set.seed(1234)
myyou<-rnorm(10)
hohum<-.C(glmm:::C_distRand3C,as.double(mynu), as.double(mymu), as.integer(T), as.integer(nrandom), as.integer(meow), as.double(myyou), double(T), double(T^2))
drcheck<-distRandCheck(mynu,myyou,mymu)
all.equal(drcheck$gradient,hohum[[7]])
all.equal(drcheck$hessian,matrix(hohum[[8]],nrow=T,byrow=F))
distRandGeneral<-function(uvec,mu,Sigma.inv){
logDetSigmaInv<-sum(log(eigen(Sigma.inv,symmetric=TRUE)$values))
umu<-uvec-mu
piece2<-t(umu)%*%Sigma.inv%*%umu
out<-as.vector(.5*(logDetSigmaInv-piece2))
const<-length(uvec)*.5*log(2*pi)
out<-out-const
out
}
D.star<-2*diag(10)
D.star.inv<-.5*diag(10)
A.star<-sqrt(2)*diag(10)
this<-distRandGeneral(you,u.pql,D.star.inv)
all.equal(this,that$value)
logdet<-sum(log(eigen(D.star.inv)$values))
stuff<-.C(glmm:::C_distRandGenC,as.double(D.star.inv),as.double(logdet), as.integer(length(you)), as.double(you), as.double(u.pql), double(1))[[6]]
all.equal(that$value,stuff)
vars <- new.env(parent = emptyenv())
debug<-mod.mcml1$debug
vars$m1 <- debug$m1
m2 <- debug$m2
m3 <- debug$m3
vars$zeta <- 5
vars$cl <- mod.mcml1$cluster
registerDoParallel(vars$cl)
vars$no_cores <- length(vars$cl)
vars$mod.mcml<-mod.mcml1$mod.mcml
vars$nu.pql <- debug$nu.pql
vars$umat<-debug$umat
vars$newm <- nrow(vars$umat)
vars$u.star<-debug$u.star
D <- vars$D.star <- Dstarnotsparse<-2*diag(10)
D.inv <- D.star.inv <-.5*diag(10)
getEk<-glmm:::getEk
addVecs<-glmm:::addVecs
genRand<-glmm:::genRand
vars$family.glmm<-mod.mcml1$family.glmm
vars$ntrials<- rep(1, length(mod.mcml1$y) )
beta.pql <- debug$beta.pql
vars$wts<-wts
length(wts) == length(vars$ntrials)
simulate <- function(vars, Dstarnotsparse, m2, m3, beta.pql, D.star.inv){
if(vars$m1>0) genData<-rmvt(ceiling(vars$m1/vars$no_cores),sigma=Dstarnotsparse,df=vars$zeta,type=c("shifted"))
if(vars$m1==0) genData<-NULL
if(m2>0) genData2<-genRand(vars$u.star,vars$D.star,ceiling(m2/vars$no_cores))
if(m2==0) genData2<-NULL
if(m3>0){
Z=do.call(cbind,vars$mod.mcml$z)
eta.star<-as.vector(vars$mod.mcml$x%*%beta.pql+Z%*%vars$u.star)
if(vars$family.glmm$family.glmm=="bernoulli.glmm") {cdouble<-vars$family.glmm$cpp(eta.star)}
if(vars$family.glmm$family.glmm=="poisson.glmm"){cdouble<-vars$family.glmm$cpp(eta.star)}
if(vars$family.glmm$family.glmm=="binomial.glmm"){cdouble<-vars$family.glmm$cpp(eta.star, vars$ntrials)}
cdouble<-Diagonal(length(cdouble),cdouble)
Sigmuh.inv<- t(Z)%*%cdouble%*%Z+D.star.inv
Sigmuh<-solve(Sigmuh.inv)
genData3<-genRand(vars$u.star,Sigmuh,ceiling(m3/vars$no_cores))
}
if(m3==0) genData3<-NULL
umat<-rbind(genData,genData2,genData3)
m <- nrow(umat)
list(umat=umat, m=m, Sigmuh.inv=Sigmuh.inv)
}
clusterSetRNGStream(vars$cl, 1234)
clusterExport(vars$cl, c("vars", "Dstarnotsparse", "m2", "m3", "beta.pql", "D.star.inv", "simulate", "genRand"), envir = environment())
noprint <- clusterEvalQ(vars$cl, umatparams <- simulate(vars=vars, Dstarnotsparse=Dstarnotsparse, m2=m2, m3=m3, beta.pql=beta.pql, D.star.inv=D.star.inv))
vars$nbeta <- 1
vars$p1=vars$p2=vars$p3=1/3
objfun<-glmm:::objfun
umats <- clusterEvalQ(vars$cl, umatparams$umat)
umat <- Reduce(rbind, umats)
Sigmuh.invs <- clusterEvalQ(vars$cl, umatparams$Sigmuh.inv)
Sigmuh.inv <- Sigmuh.invs[[1]]
Sigmuh <- solve(Sigmuh.inv)
dbb<-db<-b<-rep(0,vars$newm)
sigsq<-nu<-2
beta<-6
Z<-vars$mod.mcml$z[[1]]
A<-sqrt(2)*diag(10)
eta.star<-x*beta.pql+as.vector(Z%*%u.star)
cdouble<-as.vector(bernoulli.glmm()$cpp(eta.star))
cdouble<-diag(cdouble)
piece3<-rep(0,3)
cache<-new.env(parent = emptyenv())
that<-objfun(c(beta,nu), cache=cache,vars=vars)
tconstant<-glmm:::tconstant
zeta<-5
tconst<-tconstant(zeta,10,diag(D.star.inv))
tdist2<-function(tconst,u, Dstarinv,zeta,myq){
inside<-1+t(u)%*%Dstarinv%*%u/zeta
logft<-tconst - ((zeta+myq)/2)*log(inside)
as.vector(logft)
}
for(k in 1:vars$newm){
uvec<-umat[k,]
eta<-x*beta+as.vector(Z%*%uvec)
piece1<- logfyuk(eta,x,y)$value
piece2<- distRandCheck(nu,uvec,rep(0,10))$value
piece3[1]<-tdist2(tconst,uvec,D.star.inv,zeta,10)
piece3[2]<- distRandGeneral(uvec, vars$u.star, D.star.inv)
piece3[3]<-distRandGeneral(uvec,vars$u.star,Sigmuh.inv)
damax<-max(piece3)
blah<-sum(exp(piece3-damax)/3)
lefoo<-damax+log(blah)
b[k]<-piece1+piece2-lefoo
}
a<-max(b)
top<-exp(b-a)
value<-a+log(mean(top))
all.equal(value,that$value)
stopCluster(clust) |
SDGM1 <- function(N=200, p = 15, c_mean = 0.4){
  # note: relies on MASS::mvrnorm for the correlated covariate draws
finalcenrate = rep(0,20)
for (repnum in 1:20) {
mu = rep(0,p)
Si = matrix(0,p,p)
for (i in 1:p) {
for (j in 1:p) {
Si[i,j] = 0.9^abs(i-j)
}
}
W = mvrnorm(N,mu,Si)
Ti = rep(0,N)
for (i in 1:N) {
t.mu = exp(0.1*sum(W[i,(floor(p/2)+1):p]))
Ti[i] = rexp(1,1/t.mu)
}
c.time = rexp(N,c_mean)
mydata.x = as.data.frame(W)
mydata.time = data.frame('time'=Ti,'c.time' = c.time,'status' = 0)
mydata.time$status = ifelse(mydata.time$time < mydata.time$c.time,1,0)
mydata.time$time = apply(mydata.time[,1:2],1,min)
mydata0 = data.frame('time' = mydata.time$time,'status' = mydata.time$status,W)
cen.rate = 1 - sum(mydata0$status)/N
finalcenrate[repnum] = cen.rate
}
print(paste("censoring rate is:",round(mean(finalcenrate),3)))
return(mydata0)
} |
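## Usage sketch: generate one survival data set and inspect the censoring
## indicator (MASS must be attached for mvrnorm).
if (FALSE) {
  library(MASS)
  set.seed(1)
  dat <- SDGM1(N = 200, p = 15, c_mean = 0.4)
  table(dat$status)  # 1 = event observed, 0 = censored
}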
collapse_sections = function(rmd_tbl, drop_na = TRUE) {
  checkmate::assert_class(rmd_tbl, "rmd_tibble")
rmd_tbl$secs = purrr::pmap(
dplyr::select(rmd_tbl, dplyr::starts_with("sec_h")),
function(...) as.character(list(...))
)
if (drop_na)
rmd_tbl$secs = purrr::map(rmd_tbl$secs, ~ .x[!is.na(.x)])
rmd_tbl = dplyr::select(rmd_tbl, -dplyr::starts_with("sec_h"))
dplyr::relocate(rmd_tbl, .data[["secs"]])
} |
esp_get_rivers <- function(epsg = "4258",
cache = TRUE,
update_cache = FALSE,
cache_dir = NULL,
verbose = FALSE,
resolution = "3",
spatialtype = "line",
name = NULL) {
init_epsg <- as.character(epsg)
if (!init_epsg %in% c("4326", "4258", "3035", "3857")) {
stop("epsg value not valid. It should be one of 4326, 4258, 3035 or 3857")
}
validspatialtype <- c("area", "line")
if (!spatialtype %in% validspatialtype) {
stop(
"spatialtype should be one of '",
      paste0(validspatialtype, collapse = "', '"),
"'"
)
}
type <- paste0("river", spatialtype)
rivers_sf <-
esp_hlp_get_siane(
type,
resolution,
cache,
cache_dir,
update_cache,
verbose,
Sys.Date()
)
rivernames <-
esp_hlp_get_siane(
"rivernames",
resolution,
cache,
cache_dir,
update_cache,
verbose,
Sys.Date()
)
rivernames$id_rio <- rivernames$PFAFRIO
rivernames <- rivernames[, c("id_rio", "NOM_RIO")]
rivers_sf_merge <- merge(rivers_sf,
rivernames,
all.x = TRUE
)
if (!is.null(name)) {
getrows1 <- grep(name, rivers_sf_merge$rotulo)
getrows2 <- grep(name, rivers_sf_merge$NOM_RIO)
getrows <- unique(c(getrows1, getrows2))
rivers_sf_merge <- rivers_sf_merge[getrows, ]
if (nrow(rivers_sf_merge) == 0) {
stop(
"Your value '",
name,
"' for name does not produce any result ",
"for spatialtype = '",
spatialtype,
"'"
)
}
}
if (spatialtype == "area") {
rivers_sf_merge <-
rivers_sf_merge[, -match("NOM_RIO", colnames(rivers_sf_merge))]
}
rivers_sf_merge <-
sf::st_transform(rivers_sf_merge, as.double(init_epsg))
return(rivers_sf_merge)
} |
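## Usage sketch (the first call downloads and caches the SIANE layers, so it
## needs network access):
if (FALSE) {
  tajo <- esp_get_rivers(name = "Tajo")
  plot(sf::st_geometry(tajo))
}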
set.seed(20150618L) |
vector_transpose <- function( data_table,
names_to = "nace_r2",
values_to = "value",
.keep = FALSE ) {
is_key_column_present(data_table)
key_column <- names(data_table)[1]
return_df <- data_table %>% tidyr::pivot_longer(
-any_of(key_column),
names_to = names_to,
values_to = values_to
)
if (.keep) return_df else return_df[,-1]
} |
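## Minimal sketch (assumes the package-internal is_key_column_present() helper
## is available): pivot a one-row key/value table to long form; with the
## default .keep = FALSE the key column is dropped after pivoting.
if (FALSE) {
  wide <- data.frame(indicator = "output_multiplier",
                     agriculture = 0.0123, industry = 0.1456)
  vector_transpose(wide)  # two rows of nace_r2 + value
}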
PRS_PGx_Lasso <- function(Y, Tr, G, intercept = TRUE, lambda, method, alpha=0.5){
G <- as.matrix(G)
X <- cbind(G, Tr*G)
group <- c(c(1:(ncol(G))), c(1:(ncol(G))))
if(method == 1){
fit <- glmnet(x=X, y=Y, family="gaussian", intercept = intercept, lambda = lambda, maxit = 100000, thresh = 0.00001)
b.hat <- as.vector(coef(fit)); b.hat <- b.hat[-1]
}
if(method == 2){
fit <- gglasso(x=X, y=Y, group=group, loss="ls", intercept = intercept, lambda = lambda, nlambda = 1, eps = 0.0001, maxit = 100000)
b.hat <- as.vector(coef(fit)); b.hat <- b.hat[-1]
}
if(method == 3){
data <- list(x=X, y=Y)
fit <- SGL(data, index = group, type = "linear", alpha = alpha, nlam = 1, lambdas = lambda, maxit = 100000, thresh = 0.01, standardize = TRUE)
b.hat <- fit$beta
}
coef.G <- b.hat[1:(length(b.hat)/2)]
coef.TG <- b.hat[(length(b.hat)/2+1):length(b.hat)]
names(coef.G) <- names(coef.TG) <- colnames(G)
re <- list(coef.G = coef.G, coef.TG = coef.TG)
return(re)
} |
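## Usage sketch: method 1/2/3 selects glmnet, gglasso or SGL respectively,
## always fitting the design [G, Tr*G] with genotype/interaction pairs grouped.
## The simulated inputs below are illustrative only.
if (FALSE) {
  library(glmnet)
  set.seed(1)
  G <- matrix(rbinom(200 * 10, 2, 0.3), nrow = 200)
  colnames(G) <- paste0("snp", 1:10)
  Tr <- rbinom(200, 1, 0.5)
  Y <- rnorm(200)
  PRS_PGx_Lasso(Y, Tr, G, lambda = 0.05, method = 1)
}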
pseudo.spectrum <- function(mod, ar)
{
ma.total <- c(1, mod$model$theta[seq_len(mod$arma[2]+mod$arma[4]*mod$arma[5])])
ma <- coef(mod)[seq.int(mod$arma[1]+1, length.out=mod$arma[2])]
sma <- coef(mod)[seq.int(mod$arma[1]+mod$arma[2]+mod$arma[3]+1, length.out=mod$arma[4])]
  tmp <- polyprod(c(1, ma), c(1, rbind(array(0, dim=c(mod$arma[5]-1, length(sma))), sma)))
  # the product of the regular and seasonal MA polynomials must reproduce
  # the full theta vector stored in the fitted model
  stopifnot(all.equal(as.vector(tmp), ma.total))
if (length(ma.total) > 1) {
ma.total.bf <- stats::convolve(ma.total, ma.total, type="open")[-seq_along(ma.total[-1])]
} else
ma.total <- ma.total.bf <- 1
den.trend.bf <- stats::convolve(ar$trend, ar$trend, type="open")[-seq_along(ar$trend[-1])]
den.trans.bf <- stats::convolve(ar$transitory, ar$transitory, type="open")[-seq_along(ar$transitory[-1])]
den.seas.bf <- stats::convolve(ar$seasonal, ar$seasonal, type="open")[-seq_along(ar$seasonal[-1])]
num.psp.total <- acgf2poly(ma.total.bf)
if ((ntrend <- max(0, length(den.trend.bf)-1)) > 0) {
den.psp.trend <- acgf2poly(den.trend.bf)
} else den.psp.trend <- 1
if ((ntrans <- max(0, length(den.trans.bf)-1)) > 0 ) {
den.psp.trans <- acgf2poly(den.trans.bf)
} else den.psp.trans <- 1
if ((nseas <- max(0, length(den.seas.bf)-1)) > 0 ) {
den.psp.seas <- acgf2poly(den.seas.bf)
} else den.psp.seas <- 1
den.psp.total <- polyprod(polyprod(den.psp.trend, den.psp.trans), den.psp.seas)
if (length(num.psp.total)-1 >= ntrend + ntrans + nseas)
{
tmp <- polydiv(num.psp.total, den.psp.total)
quotient <- unname(tmp$quotient)
num.psp.total <- tmp$remainder
} else
quotient <- 0
pfd <- partial.fraction(num.psp.total,
den.psp.trend, den.psp.trans, den.psp.seas)
structure(list(quotient = quotient,
total.numerator = num.psp.total, total.denominator = den.psp.total,
numerators = list(trend = pfd$num.trend, transitory = pfd$num.transitory,
seasonal = pfd$num.seasonal),
denominators = list(trend = den.psp.trend, transitory = den.psp.trans,
seasonal = den.psp.seas)), class="tsdecPSP")
}
print.tsdecPSP <- function(x, ...)
{
nums <- lapply(x$numerators, polystring, ...)
dens <- lapply(x$denominators, polystring, ...)
cat("Num/Den = Anum/Aden + Bnum/Bden + Cnum/Cden\n----\n")
cat("Total numerator (Num):\n")
cat(polystring(x$total.numerator, ...), "\n")
cat("Total denominator (Den):\n")
cat(polystring(x$total.denominator, ...), "\n")
cat("Trend numerator (Anum):\n")
cat(nums$trend, "\n")
cat("Trend denominator (Aden):\n")
cat(dens$trend, "\n")
cat("Transitory numerator (Bnum):\n")
cat(nums$transitory, "\n")
cat("Transitory denominator (Bden):\n")
cat(dens$transitory, "\n")
cat("Seasonal numerator (Snum):\n")
cat(nums$seasonal, "\n")
cat("Seasonal denominator (Sden):\n")
cat(dens$seasonal, "\n")
} |
relativeBatsmanSR <- function(frames, names) {
col1 <- rainbow(length(frames))
for(i in 1:length(frames))
{
batsman <- clean(frames[[i]])
maxi <- (max(batsman$Runs/15) + 1) *15
v <- seq(0,maxi,by=15)
a <- hist(batsman$Runs,breaks=v,plot=FALSE)
SR <- NULL
for(j in 2:length(a$breaks)) {
b <- batsman$Runs > a$breaks[j-1] & batsman$Runs <= a$breaks[j]
c <- batsman[b,]
SR[j-1] <- mean(as.numeric(as.character(c$SR)))
}
b <- !is.na(SR)
c <- a$mid[b]
SR <- SR[b]
par(mar=c(4,4,1,1))
if(i==1) {
plot(c,predict(loess(SR~c)),xlab="Runs",ylab="Mean Strike Rate",
xlim=c(0,400), ylim=c(0,90), type="l",lty=1,lwd=3, col=col1[i],
main="Relative Mean Strike Rate")
} else {
lines(c,predict(loess(SR~c)),col=col1[i],lwd=3)
}
}
type = rep(1,length(frames))
width = rep(2.5,length(frames))
legend(x="topright",legend=names, lty=type,
lwd=width,col=col1,bty="n",cex=0.8)
mtext("Data source-Courtesy:ESPN Cricinfo", side=1, line=4, adj=1.0, cex=0.8, col="blue")
} |
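## Usage sketch (assumption: data frames in cricketr format with Runs and SR
## columns; the CSV file names below are placeholders):
if (FALSE) {
  frames <- list(read.csv("tendulkar.csv"), read.csv("lara.csv"))
  relativeBatsmanSR(frames, names = c("Tendulkar", "Lara"))
}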
test_that("score_type1",
{
correct_s1_r <- c(255, 387, 439, 625)
correct_s1_p <- c(255, 387, 439, 625)
correct_s1_d <- c(255, 387, 439, 625)
expect_equal(which(score_type1(a, w2) > thr & lp2), correct_s1_r)
expect_equal(which(score_type1(a, w2, "periodic") > thr & lp2), correct_s1_p)
expect_equal_na_allowed(which(score_type1(a, w2, "discard") > thr & lp2), correct_s1_d)
}) |
skip_on_cran()
oldtz <- Sys.getenv('TZ', unset = NA)
Sys.setenv(TZ = 'UTC')
tests.home <- getwd()
setwd(tempdir())
exampleWorkspace("exampleWorkspace")
setwd("exampleWorkspace")
write.csv(example.distances, "distances.csv")
study.data <- suppressWarnings(loadStudyData(tz = "Europe/Copenhagen", start.time = NULL,
stop.time = NULL, section.order = c("River", "Fjord", "Sea"), exclude.tags = NULL))
detections.list <- study.data$detections.list
bio <- study.data$bio
spatial <- study.data$spatial
dist.mat <- study.data$dist.mat
arrays <- study.data$arrays
dotmat <- study.data$dotmat
paths <- study.data$paths
moves <- groupMovements(detections.list = detections.list, bio = bio, spatial = spatial,
speed.method = "last to first", max.interval = 60, tz = "Europe/Copenhagen",
dist.mat = dist.mat)
aux <- names(moves)
moves <- lapply(names(moves), function(tag) {
speedReleaseToFirst(tag = tag, bio = bio, movements = moves[[tag]],
dist.mat = dist.mat, speed.method = "last to first")
})
names(moves) <- aux
rm(aux)
xmoves <- moves
attributes(xmoves[[1]])$p.type <- "Manual"
xmoves[[1]]$Valid[18] <- FALSE
vm <- xmoves
vm[[1]] <- vm[[1]][-18, ]
secmoves <- lapply(seq_along(vm), function(i) {
tag <- names(vm)[i]
appendTo("debug", paste0("debug: Compiling valid section movements for tag ", tag,"."))
output <- sectionMovements(movements = vm[[i]], spatial = spatial, valid.dist = attributes(dist.mat)$valid)
return(output)
})
names(secmoves) <- names(vm)
timetable <- assembleTimetable(secmoves = secmoves, valid.moves = vm, all.moves = xmoves, spatial = spatial,
arrays = arrays, dist.mat = dist.mat, speed.method = "last to first",
if.last.skip.section = TRUE, success.arrays = "A9", bio = bio, tz = "Europe/Copenhagen")
status.df <- assembleOutput(timetable = timetable, bio = bio, spatial = spatial,
dist.mat = dist.mat, tz = "Europe/Copenhagen")
test_that("assembleMatrices works as expected", {
output <- assembleMatrices(spatial = spatial, movements = vm, status.df = status.df,
arrays = arrays, paths = paths, dotmat = dotmat)
expect_equal(names(output), c("maxmat", "minmat"))
expect_equal(names(output[[1]]), c("A.RS1", "B.RS1"))
source(paste0(tests.home, "/aux_assembleMatrices.R"))
capture <- lapply(1:2, function(i) {
lapply(1:2, function(j) {
rownames(aux_assembleMatrixes[[i]][[j]]) <- as.character(rownames(aux_assembleMatrixes[[i]][[j]]))
expect_equal(output[[i]][[j]], aux_assembleMatrixes[[i]][[j]])
})
})
the.matrices <<- output[[2]]
})
test_that("breakMatricesByArray works as expected.", {
expect_warning(output <- breakMatricesByArray(m = the.matrices, arrays = arrays, type = "peers"),
"No tags passed through array A0. Skipping efficiency estimations for this array.", fixed = TRUE)
m.by.array <<- output
source(paste0(tests.home, "/aux_breakMatricesByArray.R"))
capture <- lapply(1:2, function(i) {
lapply(1:2, function(j) {
rownames(aux_breakMatricesByArray[[i]][[j]]) <- as.character(rownames(aux_breakMatricesByArray[[i]][[j]]))
expect_equal(output[[i]][[j]], aux_breakMatricesByArray[[i]][[j]])
})
})
expect_warning(output <- breakMatricesByArray(m = the.matrices, arrays = arrays, type = "all"),
"No tags passed through array A0. Skipping efficiency estimations for this array.", fixed = TRUE)
xmatrices <- the.matrices
xmatrices[[1]][, "A9"] <- 0
xmatrices[[2]][, "A9"] <- 0
expect_warning(output <- breakMatricesByArray(m = xmatrices, arrays = arrays, type = "all"),
"No tags passed through any of the efficiency peers of array A8. Skipping efficiency estimations for this array.", fixed = TRUE)
xarrays <- lapply(arrays, function(x) {
x$after.peers <- NULL
return(x)
})
expect_warning(output <- breakMatricesByArray(m = the.matrices, arrays = xarrays, type = "peers"),
"None of the arrays has valid efficiency peers.", fixed = TRUE)
})
test_that("simpleCJS works as expected.", {
expect_error(simpleCJS("test"), "input must be a matrix or data frame containing only 0's and 1's.", fixed = TRUE)
xm <- m.by.array[[1]][[1]]
xm[1, 1] <- 2
expect_error(simpleCJS(xm), "input must be a matrix or data frame containing only 0's and 1's.", fixed = TRUE)
xm <- m.by.array[[1]][[1]]
xm[1:5, 1] <- 0
expect_error(simpleCJS(xm),
"The first column of the input should only contain 1's (i.e. release point).", fixed = TRUE)
expect_error(simpleCJS(m.by.array[[1]][[1]], estimate = 1, fixed.efficiency = 1),
"Please choose only one of 'estimate' or 'fixed.efficiency'.", fixed = TRUE)
expect_error(simpleCJS(m.by.array[[1]][[1]], estimate = 1:5),
"Please use only one value for estimate.", fixed = TRUE)
expect_error(simpleCJS(m.by.array[[1]][[1]], estimate = 2),
"'estimate' must be between 0 and 1.", fixed = TRUE)
expect_error(simpleCJS(m.by.array[[1]][[1]], fixed.efficiency = 1),
"Fixed efficiency was set but its length is not the same as the number of columns in the input.", fixed = TRUE)
expect_error(simpleCJS(m.by.array[[1]][[1]], fixed.efficiency = 1:3),
"Fixed efficiency estimates must be between 0 and 1.", fixed = TRUE)
expect_message(simpleCJS(m.by.array[[1]][[1]], fixed.efficiency = c(1,1,1), silent = FALSE),
"M: Running CJS with fixed efficiency estimates.", fixed = TRUE)
xm <- m.by.array[[1]][[1]]
xm[, 3] <- 0
expect_warning(simpleCJS(xm, silent = FALSE),
"Array 'A1' had 0% efficiency. Skipping survival estimation.", fixed = TRUE)
xm <- m.by.array[[1]][[1]]
xm[, 2] <- 0
expect_warning(simpleCJS(xm, silent = FALSE),
"No tags were detected at array 'A1'. Skipping survival estimation.", fixed = TRUE)
xm <- m.by.array[[1]][[1]]
xm[1:20, 2] <- 0
expect_warning(simpleCJS(xm, fixed.efficiency = c(1, 1, 1), silent = FALSE),
"The fixed efficiency caused a too low estimate at iteration 2. Forcing higher estimate.", fixed = TRUE)
xm <- m.by.array[[1]][[1]]
output <- simpleCJS(xm, fixed.efficiency = c(1, 0.2, 1), silent = FALSE)
expect_equal(output$absolutes["estimated", "A1"], 30)
expect_equal(output$efficiency, c(FakeStart = 1.0, A1 = 0.2, AnyPeer = 1.0))
output <- simpleCJS(xm, estimate = 0, silent = FALSE)
expect_equal(output$absolutes["estimated", "AnyPeer"], 26)
expect_equal(output$efficiency, c(FakeStart = 1, A1 = 1, AnyPeer = 0))
output <- simpleCJS(xm, estimate = 0.2, silent = FALSE)
expect_equal(output$absolutes["estimated", "AnyPeer"], 26)
expect_equal(output$efficiency, c(FakeStart = 1.0, A1 = 1.0, AnyPeer = 0.2))
expect_equal(output$survival[2], 1)
output <- simpleCJS(m.by.array[[1]][[1]])
expect_equal(names(output), c("absolutes", "efficiency", "survival", "lambda"))
check <- read.csv(text = ',FakeStart,A1,AnyPeer
"detected",30,26,26
"here plus on peers",26,26,NA
"not here but on peers",0,0,NA
"known",30,26,26
"estimated",30,26,NA', row.names = 1)
expect_equal(output$absolutes, as.matrix(check))
expect_equal(output$efficiency, c(FakeStart = 1, A1 = 1, AnyPeer = NA))
check <- as.matrix(read.csv(text = '"FakeStart -> A1 =",0.8666667
" A1 -> AnyPeer =",NA', header = FALSE, row.names = 1))
expect_equal(rownames(check), rownames(output$survival))
expect_true(check[1] - output$survival[1] < 0.000000034)
expect_true(is.na(output$survival[2]))
expect_equal(output$lambda, 1)
})
test_that("combineCJS works as expected.", {
expect_error(combineCJS(estimate = 1, fixed.efficiency = 1),
"Please choose only one of 'estimate' or 'fixed.efficiency'.", fixed = TRUE)
expect_error(combineCJS(estimate = 1:2),
"Please use only one value for estimate.", fixed = TRUE)
expect_error(combineCJS(estimate = 2),
"'estimate' must be between 0 and 1.", fixed = TRUE)
expect_error(combineCJS(fixed.efficiency = 2),
"Fixed efficiency estimates must be between 0 and 1.", fixed = TRUE)
expect_error(combineCJS(list(A = 1)),
"Input appears to contain a list with only one element.", fixed = TRUE)
expect_error(combineCJS("test"),
"Only one object provided but it is not a list.", fixed = TRUE)
expect_error(combineCJS(list(A = "a", B = "b")),
"Not all objects provided are matrices or data frames. Please use either one list of matrices/data frames or multiple matrices/data frames.", fixed = TRUE)
xm <- m.by.array[[1]]
colnames(xm[[1]])[3] <- "test"
expect_error(combineCJS(xm),
"The last array is not the same in all input matrices.", fixed = TRUE)
expect_error(combineCJS(m.by.array[[1]], fixed.efficiency = c(1, 1)),
"Fixed efficiency was set but its length is not the same as the maximum number of columns in the input.", fixed = TRUE)
expect_message(combineCJS(m.by.array[[1]], fixed.efficiency = c(1, 1, 1), silent = FALSE),
"M: Running CJS with fixed efficiency values.", fixed = TRUE)
output <- combineCJS(m.by.array[[1]])
expect_equal(names(output), c("absolutes", "efficiency", "survival", "lambda"))
check <- read.csv(text = ',FakeStart,A1,AnyPeer
"detected",60,54,54
"here plus on peers",54,54,NA
"not here but on peers",0,0,NA
"known",60,54,54
"estimated",60,54,NA', row.names = 1)
expect_equal(output$absolutes, as.matrix(check))
expect_equal(output$efficiency, c(FakeStart = 1, A1 = 1, AnyPeer = NA))
check <- as.matrix(read.csv(text = '"FakeStart -> A1 =",0.9
" A1 -> AnyPeer =",NA', header = FALSE, row.names = 1))
colnames(check) <- ""
expect_equal(check, output$survival)
})
test_that("assembleArrayCJS works as expected.",{
CJS.list <- lapply(m.by.array, function(m) {
if (length(m) == 1)
simpleCJS(m[[1]])
else
combineCJS(m)
})
release_nodes <- as.data.frame(table(bio$Group, bio$Release.site))
colnames(release_nodes) <- c("Group", "Release.site", "n")
release_nodes$Array <- spatial$release.sites$Array[match(release_nodes$Release.site, spatial$release.sites$Standard.name)]
release_nodes$Combined <- paste(release_nodes[, 1], release_nodes[, 2], sep = ".")
release_nodes <<- release_nodes
output <- assembleArrayCJS(mat = the.matrices, CJS = CJS.list, arrays = arrays, releases = release_nodes)
check <- read.csv(text = ',A0,A1,A2,A3,A4,A5,A6,A7,A8,A9
"detected",0,54,54,52,52,52,52,49,44,34
"here plus on peers",NA,54,54,50,52,52,50,43,34,NA
"not here but on peers",NA,0,0,2,0,0,0,1,0,NA
"known",0,54,54,54,52,52,52,50,44,34
"estimated",NA,54,54,54,52,52,52,50,44,NA', row.names = 1)
expect_equal(output$absolutes, check)
check <- c(A0 = NA, A1 = 1, A2 = 1, A3 = 0.96154, A4 = 1, A5 = 1, A6 = 1, A7 = 0.97727, A8 = 1, A9 = NA)
expect_equal(round(output$efficiency, 5), check)
overall.CJS <<- output
})
test_that("advEfficiency can plot overall.CJS results", {
expect_message(output <- round(advEfficiency(x = overall.CJS), 7),
"M: Some arrays were estimated to have either 0% or 100% efficiency, skipping plotting for those arrays.", fixed = TRUE)
check <- read.csv(text = '"","2.5%","50%","97.5%"
"A1", 1.0000000, 1.0000000, 1.0000000
"A2", 1.0000000, 1.0000000, 1.0000000
"A3", 0.8955251, 0.9673092, 0.9952150
"A4", 1.0000000, 1.0000000, 1.0000000
"A5", 1.0000000, 1.0000000, 1.0000000
"A6", 1.0000000, 1.0000000, 1.0000000
"A7", 0.9177889, 0.9840095, 0.9994114
"A8", 1.0000000, 1.0000000, 1.0000000
', row.names = 1)
colnames(check) <- c("2.5%","50%","97.5%")
expect_equal(output, check)
})
test_that("getDualMatrices throws a warning if efficiency has already been calculated", {
expect_warning(getDualMatrices(replicates = list(A7 = c("St.10", "St.11")), CJS = overall.CJS, spatial = spatial, detections.list = detections.list),
"An inter-array efficiency has already been calculated for array A7", fixed = TRUE)
})
test_that("includeIntraArrayEstimates throws errors if expected conditions are not met", {
expect_error(includeIntraArrayEstimates(m = NULL, CJS = overall.CJS, efficiency = "test"),
"Use only one of 'efficiency' or 'CJS' at a time.", fixed = TRUE)
})
test_that("replicate functions work as expected.", {
intra.array.matrices <<- getDualMatrices(replicates = list(A9 = c("St.16", "St.17")), CJS = overall.CJS, spatial = spatial, detections.list = detections.list)
check <- read.csv(text = '"","R1","R2"
"R64K-4451",TRUE,TRUE
"R64K-4453",FALSE,TRUE
"R64K-4454",FALSE,TRUE
"R64K-4456",TRUE,TRUE
"R64K-4457",FALSE,FALSE
"R64K-4459",FALSE,TRUE
"R64K-4462",TRUE,TRUE
"R64K-4465",TRUE,TRUE
"R64K-4466",FALSE,FALSE
"R64K-4469",TRUE,FALSE
"R64K-4470",FALSE,FALSE
"R64K-4472",FALSE,FALSE
"R64K-4473",TRUE,FALSE
"R64K-4474",TRUE,TRUE
"R64K-4477",TRUE,TRUE
"R64K-4480",TRUE,TRUE
"R64K-4481",TRUE,TRUE
"R64K-4484",TRUE,FALSE
"R64K-4486",FALSE,FALSE
"R64K-4488",FALSE,TRUE
"R64K-4490",TRUE,TRUE
"R64K-4492",FALSE,FALSE
"R64K-4494",TRUE,TRUE
"R64K-4496",TRUE,TRUE
"R64K-4498",TRUE,TRUE
"R64K-4499",TRUE,TRUE
"R64K-4502",TRUE,TRUE
"R64K-4503",FALSE,TRUE
"R64K-4505",FALSE,FALSE
"R64K-4508",TRUE,TRUE
"R64K-4509",FALSE,FALSE
"R64K-4510",TRUE,TRUE
"R64K-4511",TRUE,TRUE
"R64K-4512",FALSE,FALSE
"R64K-4513",FALSE,TRUE
"R64K-4514",TRUE,FALSE
"R64K-4515",FALSE,FALSE
"R64K-4516",TRUE,TRUE
"R64K-4517",TRUE,TRUE
"R64K-4518",FALSE,FALSE
"R64K-4519",TRUE,TRUE
"R64K-4521",FALSE,FALSE
"R64K-4522",FALSE,FALSE
"R64K-4524",FALSE,FALSE
"R64K-4526",TRUE,TRUE
"R64K-4529",FALSE,FALSE
"R64K-4532",TRUE,TRUE
"R64K-4534",FALSE,FALSE
"R64K-4536",FALSE,FALSE
"R64K-4541",FALSE,TRUE
"R64K-4543",FALSE,FALSE
"R64K-4545",TRUE,TRUE
"R64K-4547",TRUE,TRUE
"R64K-4549",FALSE,FALSE
', row.names = 1)
expect_equal(intra.array.matrices[[1]], check)
expect_equal(names(intra.array.matrices), "A9")
recipient <- includeIntraArrayEstimates(m = intra.array.matrices, CJS = overall.CJS)
expect_equal(names(recipient), c("CJS", "intra.CJS"))
check <- read.csv(text = ',A0,A1,A2,A3,A4,A5,A6,A7,A8,A9
"detected",0,54,54,52,52,52,52,49,44,34
"here plus on peers",NA,54,54,50,52,52,50,43,34,NA
"not here but on peers",NA,0,0,2,0,0,0,1,0,NA
"known",0,54,54,54,52,52,52,50,44,34
"estimated",NA,54,54,54,52,52,52,50,44,35', row.names = 1)
expect_equal(recipient$CJS$absolutes, check)
check <- c(A0 = NA, A1 = 1, A2 = 1, A3 = 0.96154, A4 = 1, A5 = 1, A6 = 1, A7 = 0.97727, A8 = 1, A9 = 0.96774)
expect_equal(round(recipient$CJS$efficiency, 5), check)
expect_equal(names(recipient$intra.CJS), "A9")
expect_equal(names(recipient$intra.CJS$A9), c("absolutes", "single.efficiency", "combined.efficiency"))
check <- as.matrix(read.csv(text = '"detected at R1: ",28
"detected at R2: ",31
"detected at both: ",24', header = FALSE, row.names = 1))
colnames(check) <- ""
expect_equal(recipient$intra.CJS$A9$absolutes, check)
expect_equal(round(recipient$intra.CJS$A9$single.efficiency, 5), c(R1 = 0.77419, R2 = 0.85714))
expect_equal(round(recipient$intra.CJS$A9$combined.efficiency, 5), 0.96774)
overall.CJS <<- recipient[[1]]
intra.array.CJS <<- recipient[[2]]
})
test_that("advEfficiency can plot intra.array.CJS results", {
expect_message(output <- round(advEfficiency(intra.array.CJS[[1]]), 7),
"M: For each quantile, 'Combined' estimates are calculated as 1-((1-R1)*(1-R2)).", fixed = TRUE)
check <- read.csv(text = '"","2.5%","50%","97.5%"
"R1", 0.6143335, 0.7801434, 0.9006621
"R2", 0.7084131, 0.8656773, 0.9581126
"Combined", 0.8875447, 0.9704683, 0.9958390
', row.names = 1)
colnames(check) <- c("2.5%","50%","97.5%")
expect_equal(output, check)
output <- advEfficiency(intra.array.CJS[[1]], labels = c(1, 2))
expect_equal(row.names(output), c("1", "2", "Combined"))
expect_error(advEfficiency(intra.array.CJS[[1]], labels = 1:3),
"Wrong number of panel names", fixed = TRUE)
output <- advEfficiency(intra.array.CJS[[1]], force.grid = c(2, 1), title = "Top/Bottom")
expect_error(advEfficiency(x = 1),
"Could not recognise the input as an efficiency object from actel", fixed = TRUE)
expect_error(advEfficiency(x = list(a = 1)),
"Could not recognise the input as an efficiency object from actel", fixed = TRUE)
})
test_that("split CJS functions work as expected.", {
aux <- mbSplitCJS(mat = m.by.array, fixed.efficiency = overall.CJS$efficiency)
load(paste0(tests.home, "/aux_mbSplitCJS.RData"))
expect_equal(aux, aux_mbSplitCJS)
xefficiency <- overall.CJS$efficiency
xefficiency[4] <- NA
output <- mbSplitCJS(mat = m.by.array, fixed.efficiency = xefficiency)
expect_equal(round(output$A.RS1$A3$efficiency, 7), c(FakeStart = 1, A3 = 0.9615385, AnyPeer = NA))
aux <- aux[names(the.matrices)]
split.CJS <- assembleSplitCJS(mat = the.matrices, CJS = aux, arrays = arrays, releases = release_nodes, intra.CJS = intra.array.CJS)
expect_equal(names(split.CJS), c("A.RS1", "B.RS1"))
check <- read.csv(text = '"","A0","A1","A2","A3","A4","A5","A6","A7","A8","A9"
"detected",0,26,26,25,26,26,26,26,25,19
"here plus on peers",NA,26,26,25,26,26,26,25,19,NA
"not here but on peers",NA,0,0,1,0,0,0,0,0,NA
"known",0,26,26,26,26,26,26,26,25,19
"estimated",NA,26,26,26,26,26,26,26,25,20
"difference",NA,0,0,0,0,0,0,0,0,1
', row.names = 1)
expect_equal(split.CJS[[1]], check)
check <- read.csv(text = '"","A0","A1","A2","A3","A4","A5","A6","A7","A8","A9"
"detected",0,28,28,27,26,26,26,23,19,15
"here plus on peers",NA,28,28,25,26,26,24,18,15,NA
"not here but on peers",NA,0,0,1,0,0,0,1,0,NA
"known",0,28,28,28,26,26,26,24,19,15
"estimated",NA,28,28,28,26,26,26,24,19,16
"difference",NA,0,0,0,0,0,0,0,0,1
', row.names = 1)
expect_equal(split.CJS[[2]], check)
})
test_that("group CJS functions work as expected.", {
aux <- mbGroupCJS(mat = m.by.array, status.df = status.df, fixed.efficiency = overall.CJS$efficiency)
load(paste0(tests.home, "/aux_mbGroupCJS.RData"))
expect_equal(aux, aux_mbGroupCJS)
xefficiency <- overall.CJS$efficiency
xefficiency[4] <- NA
output <- mbGroupCJS(mat = m.by.array, status.df = status.df, fixed.efficiency = xefficiency)
expect_equal(round(output$A$A3$efficiency, 7), c(FakeStart = 1, A3 = 0.9615385, AnyPeer = NA))
group.CJS <- assembleGroupCJS(mat = the.matrices, CJS = aux, arrays = arrays, releases = release_nodes, intra.CJS = intra.array.CJS)
expect_equal(names(group.CJS), c("A", "B"))
check <- read.csv(text = '"","A0","A1","A2","A3","A4","A5","A6","A7","A8","A9"
"detected",0,26,26,25,26,26,26,26,25,19
"here plus on peers",NA,26,26,25,26,26,26,25,19,NA
"not here but on peers",NA,0,0,1,0,0,0,0,0,NA
"known",0,26,26,26,26,26,26,26,25,19
"estimated",NA,26,26,26,26,26,26,26,25,20
"difference",NA,0,0,0,0,0,0,0,0,1
', row.names = 1)
expect_equal(group.CJS[[1]], check)
check <- read.csv(text = '"","A0","A1","A2","A3","A4","A5","A6","A7","A8","A9"
"detected",0,28,28,27,26,26,26,23,19,15
"here plus on peers",NA,28,28,25,26,26,24,18,15,NA
"not here but on peers",NA,0,0,1,0,0,0,1,0,NA
"known",0,28,28,28,26,26,26,24,19,15
"estimated",NA,28,28,28,26,26,26,24,19,16
"difference",NA,0,0,0,0,0,0,0,0,1
', row.names = 1)
expect_equal(group.CJS[[2]], check)
})
test_that("special cases in oneWayMoves are working as expected", {
expect_equal(oneWayMoves(moves[[1]][1, ], arrays), moves[[1]][1, ])
})
setwd("..")
unlink("exampleWorkspace", recursive = TRUE)
setwd(tests.home)
if (is.na(oldtz)) Sys.unsetenv("TZ") else Sys.setenv(TZ = oldtz)
rm(list = ls()) |
pgfInegativebinomial <-
function(s,params) {
    s_out<-s[abs(s)>1]
    if (length(s_out)>0)
        warning("At least one element of the vector s is outside the interval [-1,1]")
    if (length(params)<2) stop("At least one value in params is missing")
    if (length(params)>2) stop("The length of params must be 2")
    theta<-params[1]
    k<-params[2]
    if ((theta>=1)|(theta<=0))
        stop("Parameter theta must belong to the interval (0,1)")
    if (k<=0)
        stop("Parameter k must be positive")
    (1-theta*s^(-1/k))/(1-theta)
}
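## Illustrative usage sketch (not from the original source): evaluate the
## inverse pgf at two points with theta = 0.3 and k = 2.
# pgfInegativebinomial(c(0.5, 0.9), params = c(0.3, 2))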
glmkrigecv <- function (formula.glm = NULL, longlat, trainxy, y,
                        family = "gaussian", transformation = "none", delta = 1,
                        formula.krige = res1 ~ 1, vgm.args = c("Sph"),
                        anis = c(0, 1), alpha = 0, block = 0, beta,
                        nmaxkrige = 12, validation = "CV", cv.fold = 10,
                        predacc = "VEcv", ...) {
if (validation == "LOO") {idx <- 1:length(y)}
if (validation == "CV") {idx <- datasplit(y, k.fold = cv.fold)}
names(longlat) <- c("long", "lat")
n <- nrow(trainxy)
p <- ncol(trainxy) - 1
cv.pred <- NULL
if (validation == "LOO") {
for (i in 1 : length(y)) {
data.dev <- trainxy[idx != i, , drop = FALSE]
data.pred <- trainxy[idx == i, , drop = FALSE]
glm1 <- stats::glm(formula.glm, data.dev, family = family)
pred.glm1 <- stats::predict(glm1, data.pred, type = "response")
data.dev1 <- longlat[idx != i, , drop = FALSE]
data.pred1 <- longlat[idx == i, , drop = FALSE]
dev.glm1 <- stats::predict(glm1, data.dev, type="response")
res1 <- y[idx != i] - dev.glm1
if (transformation == "none") {data.dev1$res1 = res1} else (
if (transformation == "sqrt") {data.dev1$res1 = sqrt(res1 + abs(min(res1)))} else (
if (transformation == "arcsine") {data.dev1$res1 = asin(sqrt((res1 + abs(min(res1))) / 100))} else (
if (transformation == "log") {data.dev1$res1 = log(res1 + abs(min(res1)) + delta)} else (
stop ("This transfromation is not supported in this version!")))))
sp::coordinates(data.dev1) = ~ long + lat
vgm1 <- gstat::variogram(object = formula.krige, data.dev1, alpha = alpha)
model.1 <- gstat::fit.variogram(vgm1, gstat::vgm(mean(vgm1$gamma), vgm.args, mean(vgm1$dist), min(vgm1$gamma)/10, anis = anis))
      if (model.1$range[2] <= 0) {
        cat("A zero or negative range was fitted to the variogram", "\n")
        model.1$range[2] <- min(vgm1$dist)
      }
sp::coordinates(data.pred1) = ~long + lat
pred.krige1 <- gstat::krige(formula = formula.krige, data.dev1, data.pred1, model = model.1, nmax=nmaxkrige, block = block, beta = beta)$var1.pred
if (transformation == "none") {pred.krige = pred.krige1}
if (transformation == "sqrt") {pred.krige = pred.krige1 ^ 2 - abs(min(res1))}
if (transformation == "arcsine") {pred.krige = (sin(pred.krige1)) ^ 2 * 100 - abs(min(res1))}
if (transformation == "log") {pred.krige = exp(pred.krige1) - abs(min(res1)) - delta}
cv.pred[idx == i] <- pred.krige + pred.glm1
}
}
if (validation == "CV") {
for (i in 1 : cv.fold) {
data.dev <- trainxy[idx != i, , drop = FALSE]
data.pred <- trainxy[idx == i, , drop = FALSE]
glm1 <- stats::glm(formula.glm, data.dev, family = family)
pred.glm1 <- stats::predict(glm1, data.pred, type = "response")
data.dev1 <- longlat[idx != i, , drop = FALSE]
data.pred1 <- longlat[idx == i, , drop = FALSE]
dev.glm1 <- stats::predict(glm1, data.dev, type="response")
res1 <- y[idx != i] - dev.glm1
if (transformation == "none") {data.dev1$res1 = res1} else (
if (transformation == "sqrt") {data.dev1$res1 = sqrt(res1 + abs(min(res1)))} else (
if (transformation == "arcsine") {data.dev1$res1 = asin(sqrt((res1 + abs(min(res1))) / 100))} else (
if (transformation == "log") {data.dev1$res1 = log(res1 + abs(min(res1)) + delta)} else (
stop ("This transfromation is not supported in this version!")))))
sp::coordinates(data.dev1) = ~ long + lat
vgm1 <- gstat::variogram(object = formula.krige, data.dev1, alpha = alpha)
model.1 <- gstat::fit.variogram(vgm1, gstat::vgm(mean(vgm1$gamma), vgm.args, mean(vgm1$dist), min(vgm1$gamma)/10, anis = anis))
      if (model.1$range[2] <= 0) {
        cat("A zero or negative range was fitted to the variogram", "\n")
        model.1$range[2] <- min(vgm1$dist)
      }
sp::coordinates(data.pred1) = ~long + lat
pred.krige1 <- gstat::krige(formula = formula.krige, data.dev1, data.pred1, model = model.1, nmax=nmaxkrige, block = block, beta = beta)$var1.pred
if (transformation == "none") {pred.krige = pred.krige1}
if (transformation == "sqrt") {pred.krige = pred.krige1 ^ 2 - abs(min(res1))}
if (transformation == "arcsine") {pred.krige = (sin(pred.krige1)) ^ 2 * 100 - abs(min(res1))}
if (transformation == "log") {pred.krige = exp(pred.krige1) - abs(min(res1)) - delta}
cv.pred[idx == i] <- pred.krige + pred.glm1
}
}
if (predacc == "VEcv") {predictive.accuracy = spm::vecv(y, cv.pred)} else (
if (predacc == "ALL") {predictive.accuracy = spm::pred.acc(y, cv.pred)} else (
stop ("This measure is not supported in this version!")))
predictive.accuracy
} |
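## Illustrative usage sketch with synthetic data (assumes the 'sp', 'gstat'
## and 'spm' packages called above are installed, and that datasplit() is
## available from the surrounding package):
# set.seed(1)
# longlat <- data.frame(long = runif(100), lat = runif(100))
# x1 <- rnorm(100); y <- 1 + 2 * x1 + rnorm(100)
# trainxy <- data.frame(x1 = x1, y = y)
# glmkrigecv(formula.glm = y ~ x1, longlat = longlat, trainxy = trainxy,
#            y = y, beta = 0, validation = "CV", cv.fold = 5, predacc = "VEcv")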
efit2file <-
function (filename, skip = 2, numcol, nrows = vector())
{
xx <- as.matrix(scan(filename, skip = skip, na.strings=c("NA","NAN",
"NaNQ")))
if (length(nrows) == 0)
nrows <- length(xx)/numcol
dim(xx) <- c(numcol, sum(nrows))
xx <- t(xx)
cnt <- 1
for (i in 1:length(nrows)) {
write.table(xx[cnt:(cnt + nrows[i] - 1), ], row.names = FALSE,
col.names = FALSE, file = paste("plainmat", i, filename,
sep = "_"))
cnt <- cnt + nrows[i]
cat("Wrote the file", paste("plainmat", i, filename,
sep = "_"), "\n")
}
} |
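## Illustrative usage sketch (file name hypothetical): split a stacked
## 5-column matrix file into two plain files of 10 rows each.
# efit2file("efit_output.txt", skip = 2, numcol = 5, nrows = c(10, 10))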
.get.exepath <- function(prg) {
paths <- list(
pymol = list(
Linux = c("/usr/bin/pymol",
"/usr/local/bin/pymol"),
Darwin = c("/Applications/MacPyMOL.app/Contents/MacOS/MacPyMOL",
"/Applications/MacPyMOLX11Hybrid.app/Contents/MacOS/MacPyMOL",
"/usr/bin/pymol",
"/usr/local/bin/pymol"),
Windows = c("C:/python27/PyMOL/pymol.exe",
"C:/Program Files/PyMOL/PyMOL/PymolWin.exe",
"C:/Program Files/PyMOL/PymolWin.exe"),
ver = "-cq"
),
muscle = list(
Linux = c("/usr/bin/muscle",
"/usr/local/bin/muscle"),
Darwin = c("/usr/bin/muscle",
"/usr/local/bin/muscle"),
Windows = c("C:/Program Files/muscle.exe",
"C:/Program Files/muscle3.8.31_i86win32.exe",
"C:/Program Files/muscle/muscle.exe",
"C:/Program Files/Muscle/muscle.exe",
"C:/Program Files/seaview/muscle.exe",
"C:/Program Files/seaview4/muscle.exe"),
ver = "-version"
),
foldx = list(
Linux = c("/usr/bin/foldx",
"/usr/local/bin/foldx"),
Darwin = c("/usr/bin/foldx",
"/usr/local/bin/foldx"),
Windows = c("C:/Program Files/foldx.exe",
"C:/Program Files/foldx/foldx.exe",
"C:/Program Files/FoldX/foldx.exe"),
ver = "--version"
),
dssp = list(
Linux = c("/usr/bin/dssp",
"/usr/local/bin/dssp"),
Darwin = c("/usr/bin/dssp",
"/usr/local/bin/dssp",
"/usr/bin/mkdssp",
"/usr/local/bin/mkdssp",
"/anaconda3/bin/dssp",
"/anaconda3/bin/mkdssp"),
Windows = c("C:/Program Files/dssp.exe",
"C:/Program Files/dssp-2.0.4-win32.exe",
"C:/Program Files/dssp/dssp.exe",
"C:/Program Files/Dssp/dssp.exe"),
ver = "--version"
)
)
if(file.exists(prg) & !dir.exists(prg)) {
return(prg)
}
exefile <- Sys.which(prg)
if(nchar(exefile) == 0) {
if(prg %in% c("pymol", "muscle", "clustalo", "dssp")) {
os1 <- Sys.info()["sysname"]
exefiles <- paths[[prg]][[os1]]
fe <- file.exists(exefiles)
if(any(fe)) {
exefile <- exefiles[which(fe)[1]]
}
else {
exefile <- NULL
}
}
else {
exefile <- NULL
}
}
if(is.null(exefile)) {
stop(paste0("could not determine path to '", prg, "'"))
}
return(exefile)
}
.test.exefile <- function(exefile) {
prg <- tolower(basename(exefile))
if(grepl("muscle", prg)) {
ver <- "-version"
}
if(grepl("pymol", prg)) {
ver <- "-cq"
}
if(grepl("foldx", prg)) {
ver <- "--version"
}
if(grepl("dssp", prg)) {
ver <- "--version"
}
os1 <- Sys.info()["sysname"]
if (os1 == "Windows") {
success <- shell(paste(shQuote(exefile), ver))
}
else {
success <- system(paste(exefile, ver),
ignore.stderr = TRUE, ignore.stdout = TRUE)
}
if(!(success %in% c(0,1))) {
return(FALSE)
}
else {
return(TRUE)
}
}
.get.url <- function(url, n_tries = 3){
while (n_tries > 0){
resp <- tryCatch(httr::GET(url), error = identity)
if (!inherits(resp, "error")){
break
}
n_tries <- n_tries - 1
}
if (n_tries == 0){
stop(" Sorry, web resource couldn't be reached:",
"\n URL: ", url,
"\n error: ", conditionMessage(resp))
}
return(resp)
} |
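## Illustrative usage sketch: fetch a page with up to 3 tries and inspect
## the HTTP status via the 'httr' package used above.
# resp <- .get.url("https://www.r-project.org", n_tries = 3)
# httr::status_code(resp)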
RiverPoint <- function(site, river, distance, value, riverlayout,
range = NA,
type = "l",
pt.col = "grey40",
pt.bg = "black",
pt.pch = 20,
pt.cex = 1,
lbl.cex = 0.7,
lbl.adj = c(0.5,2),
lbl.ofs = 0.5,
lbl.col = "black",
lbl.srt = 0,
lbl.pos = NULL,
lbl.shw = FALSE,
ln.lwd = 1){
RIVER.DATA <- riverlayout[[1]]
H.MAX <- riverlayout[[2]]
H.SIZE <- riverlayout[[3]]
W.MAX <- riverlayout[[4]]
W.SIZE <- riverlayout[[5]]
X1 <- riverlayout[[6]]
X2 <- riverlayout[[7]]
Y <- riverlayout[[8]]
DIRECTION <- riverlayout[[9]]
if (all(is.na(range))){
VALUE.MAX <- max(value)
VALUE.MIN <- 0
} else{
VALUE.MAX <- max(range)
VALUE.MIN <- min(range)
}
VALUE.SIZE <- H.SIZE * 0.9/(VALUE.MAX - VALUE.MIN)
if (DIRECTION == -1){
length <- RIVER.DATA$length[match(river, RIVER.DATA$river)]
distance <- length - distance
X.VALUE <- X2[match(river, RIVER.DATA$river)] + distance * W.SIZE
}else{
X.VALUE <- X1[match(river, RIVER.DATA$river)] + distance * W.SIZE
}
Y.VALUE <- Y[match(river, RIVER.DATA$river)] + (value - VALUE.MIN) * VALUE.SIZE + H.SIZE * 0.1
V <- data.frame(river=factor(river), X.VALUE, Y.VALUE)
for (i in RIVER.DATA$river){
points(V[which(river==i),]$X.VALUE,
V[which(river==i),]$Y.VALUE, type=type, col = pt.col, bg = pt.bg, pch = pt.pch, lwd = ln.lwd, cex = pt.cex)
}
if (lbl.shw){
X.SITE <- X.VALUE
Y.SITE <- Y[match(river, RIVER.DATA$river)]
text(X.SITE, Y.SITE, labels = site, cex = lbl.cex, adj = lbl.adj, srt = lbl.srt, offset = lbl.ofs, col = lbl.col, pos = lbl.pos)
}
} |
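## Illustrative usage sketch (all data objects hypothetical): RiverPoint()
## draws a series on top of an existing riverlayout list like the one
## unpacked above (river table, sizes, X1/X2/Y origins, direction).
# RiverPoint(site = obs$Site, river = obs$River, distance = obs$Distance,
#            value = obs$Flow, riverlayout = layout, lbl.shw = TRUE)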
library("testthat")
library("gratia")
library("mgcv")
context("Testing fderiv()")
test_that("fderiv() can create newdata with factors in model", {
dat <- gamSim(4, n = 401, dist = "normal", scale = 2, verbose = FALSE)
mod <- gam(y ~ s(x0) + s(x1) + fac, data = dat, method = "REML")
fd <- fderiv(mod)
expect_s3_class(fd, "fderiv")
})
test_that("fderiv() can handle factors in user-supplied newdata", {
dat <- gamSim(4, n = 400, dist = "normal", scale = 2, verbose = FALSE)
mod <- gam(y ~ s(x0) + s(x1) + fac, data = dat, method = "REML")
newd <- dat[1,]
fd <- fderiv(mod, newdata = newd)
expect_s3_class(fd, "fderiv")
})
test_that("fderiv() can handle offsets", {
dat <- gamSim(4, n = 400, dist = "normal", scale = 2, verbose = FALSE)
mod <- gam(y ~ fac + s(x1) + offset(x0), data = dat,
method = "REML")
fd <- fderiv(mod)
expect_s3_class(fd, "fderiv")
newd <- dat[1,]
fd <- fderiv(mod, newdata = newd)
expect_s3_class(fd, "fderiv")
}) |
google_map <- function(data = NULL,
key = get_api_key("map"),
location = NULL,
zoom = NULL,
width = NULL,
height = NULL,
padding = 0,
styles = NULL,
search_box = FALSE,
update_map_view = TRUE,
zoom_control = TRUE,
map_type = c("roadmap","satellite","hybrid","terrain"),
map_type_control = TRUE,
scale_control = FALSE,
street_view_control = TRUE,
rotate_control = TRUE,
fullscreen_control = TRUE,
libraries = NULL,
split_view = NULL,
split_view_options = NULL,
geolocation = FALSE,
event_return_type = c("list", "json")) {
logicalCheck(zoom_control)
logicalCheck(map_type_control)
logicalCheck(scale_control)
logicalCheck(street_view_control)
logicalCheck(rotate_control)
logicalCheck(fullscreen_control)
logicalCheck(update_map_view)
logicalCheck(geolocation)
map_type <- match.arg(map_type)
event_return_type <- match.arg(event_return_type)
split_view_options <- splitViewOptions(split_view_options)
if(is.null(libraries))
libraries <- c("visualization", "geometry", "places", "drawing")
if(is.null(location))
location <- c(-37.9, 144.5)
if(is.null(zoom))
zoom <- 8
x = list(
lat = location[1],
lng = location[2],
zoom = zoom,
styles = styles,
search_box = search_box,
update_map_view = update_map_view,
zoomControl = zoom_control,
mapType = map_type,
mapTypeControl = map_type_control,
scaleControl = scale_control,
streetViewControl = street_view_control,
rotateControl = rotate_control,
fullscreenControl = fullscreen_control,
event_return_type = event_return_type,
split_view = split_view,
split_view_options = split_view_options,
geolocation = geolocation
)
data <- normaliseData(data)
googlemap <- htmlwidgets::createWidget(
name = 'google_map',
x = structure(
x,
google_map_data = data
),
package = 'googleway',
width = width,
height = height,
sizingPolicy = htmlwidgets::sizingPolicy(
defaultWidth = '100%',
defaultHeight = 800,
padding = padding,
browser.fill = FALSE
)
)
header <- paste0('<script src="https://maps.googleapis.com/maps/api/js?key=',
key, '&libraries=', paste0(libraries, collapse = ","), '"></script>',
'<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>')
googlemap$dependencies <- c(
googlemap$dependencies,
list(
htmltools::htmlDependency(
name = "googleway",
version = "9999",
src= ".",
head = header,
all_files = FALSE
)
)
)
return(googlemap)
}
splitViewOptions <- function(split_view_options) {
if(is.null(split_view_options)) split_view_options <- list()
split_view_options <- splitViewDefault(split_view_options, 'heading', 34)
split_view_options <- splitViewDefault(split_view_options, 'pitch', 10)
return(split_view_options)
}
splitViewDefault <- function(lst, key, default) {
  v <- lst[[key]]
  ## `if` rather than the vectorised ifelse(): v may be NULL or length > 1
  lst[[key]] <- if (is.null(v)) default else v
  return(lst)
}
normaliseData <- function(data) UseMethod("normaliseData")
normaliseData.sf <- function(data) googlePolylines::encode(data)
normaliseData.default <- function(data) data
clear_search <- function(map){
invoke_method(map, 'clear_search')
}
update_style <- function(map, styles = NULL){
if(!is.null(styles))
jsonlite::validate(styles)
invoke_method(map, 'update_style', styles)
}
update_pano <- function(map, pano, lat, lon) {
invoke_method(map, "update_pano", pano, lat, lon)
}
google_mapOutput <- function(outputId, width = '100%', height = '400px'){
htmlwidgets::shinyWidgetOutput(outputId,
'google_map',
width,
height,
package = 'googleway')
}
renderGoogle_map <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) }
htmlwidgets::shinyRenderWidget(expr, google_mapOutput, env, quoted = TRUE)
}
get_map_data = function(map){
attr(map$x, "google_map_data", exact = TRUE)
}
map_styles <- function(){
standard <- '[]'
silver <- '[{"elementType": "geometry","stylers": [{"color": "
retro <- '[{"elementType": "geometry","stylers": [{"color": "
dark <- '[{"elementType": "geometry","stylers": [{"color": "
night <- '[{"elementType": "geometry","stylers": [{"color": "
aubergine <- '[{"elementType": "geometry","stylers": [{"color": "
return(list(standard = standard,
silver = silver,
retro = retro,
dark = dark,
night = night,
aubergine = aubergine))
} |
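## Illustrative usage sketch (requires a valid Google Maps API key):
# library(googleway)
# google_map(key = "your_api_key", location = c(-37.9, 144.5), zoom = 8,
#            styles = map_styles()$standard)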
source("ESEUR_config.r")
library("plyr")
plot_pts=function(df)
{
points(df$MIPS, df$Price, col=df$col_str)
}
ein=read.csv(paste0(ESEUR_dir, "ecosystems/eindor1985.csv.xz"), as.is=TRUE)
cat_str=unique(ein$Category)
pal_col=rainbow(length(cat_str))
ein$col_str=mapvalues(ein$Category, cat_str, pal_col)
plot(0.1, type="n", log="xy",
xlim=range(ein$MIPS), ylim=range(ein$Price),
xlab="Performance (MIPS)", ylab="Price ($thousand)\n")
d_ply(ein, .(Category), plot_pts)
legend(x="bottomright", legend=cat_str, bty="n", fill=pal_col, cex=1.2) |
`flomax` <- function(x,
info.observed = TRUE,
plot = FALSE,
scaleData = TRUE,
cov = TRUE) {
Cvg <- TRUE
if (any(x <= 0.0)) stop("all elements in 'x' must be > 0")
parnames <- c("shape", "scale")
n <- length(x)
M1 <- mean(x)
if (scaleData) {
x <- x / M1
CV <- sqrt(1.0 - 1.0 / n) * sd(x)
cLogLik <- - n * log(M1)
trans <- diag(c(1.0, 1.0 / M1))
} else {
CV <- sqrt(1.0 - 1.0 / n) * sd(x) / M1
}
if (CV < 1.00) stop("CV < 1. Estimation impossible for \"lomax\"")
M2 <- mean(x^2)
M3 <- mean(x^3)
if (scaleData) {
betaRoots <- polyroot(c(M3, M3 - M2, 1.0 - M2 / 2))
} else {
betaRoots <- polyroot(c(M1 * M3, M3 - M1 * M2, M1^2 - M2 / 2))
}
betaLower <- 0.15 * min(x)
betaUpper <- max(Re(betaRoots))
mind <- max(x)
if (betaUpper < mind) betaUpper <- mind
interv <- c(betaLower, betaUpper)
logLc <- function (beta) {
xmod <- x / beta
R <- mean(log(1.0 + xmod))
-n * (log(R) + log(beta) + R + 1.0)
}
if (plot) cov <- TRUE
if (cov) {
log2L <- function (alpha, beta) {
xmod <- x / beta
xmod1 <- 1.0 + xmod
s1 <- sum(log(xmod1))
s2 <- sum(xmod / xmod1)
alpha1 <- 1.0 + alpha
logL <- n * log(alpha / beta) - alpha1 * s1
dlogL <- c(shape = n / alpha - s1,
scale = (-n + alpha1 * s2) / beta)
d2logL <- array(0, dim = c(2L, 2L), dimnames = list(parnames, parnames))
d2logL["shape", "shape"] <- -n / alpha / alpha
d2logL["shape", "scale"] <- s2 / beta
d2logL["scale", "shape"] <- d2logL["shape", "scale"]
d2logL["scale", "scale"] <-
(n - alpha1 * s2 -alpha1 * sum(xmod / xmod1 / xmod1)) / beta / beta
if (scaleData) {
logL <- logL + cLogLik
dlogL <- trans %*% dlogL
d2logL <- trans %*% d2logL %*% trans
rownames(dlogL) <- parnames
dimnames(d2logL) <- list(parnames, parnames)
}
list(logL = logL,
dlogL = dlogL,
d2logL = d2logL)
}
}
res <- optimize(f = logLc, interval = interv, maximum = TRUE)
beta.hatS <- res$maximum
alpha.hat <- 1.0 / mean(log(1.0 + x / beta.hatS))
if (scaleData) beta.hat <- M1 * beta.hatS
else beta.hat <- beta.hatS
if (!cov) {
loglik <- res$objective
if (scaleData) loglik <- loglik + cLogLik
return(list(estimate = c(shape = alpha.hat, scale = beta.hat),
CV = CV,
loglik = loglik,
cvg = Cvg))
}
res2 <- log2L(alpha = alpha.hat, beta = beta.hatS)
if (info.observed) {
info <- -res2$d2logL
vcov <- try(solve(info))
if (inherits(vcov, "try-error")) {
vcov <- NULL
sds <- NULL
warning("hessian could not be inverted")
} else {
sds <- sqrt(diag(vcov))
}
} else {
a1 <- alpha.hat + 1.0
a2 <- alpha.hat + 2.0
i11 <- 1.0 / alpha.hat / alpha.hat
i12 <- -1.0 / beta.hat / a1
i22 <- alpha.hat / a2 / beta.hat / beta.hat
info <- matrix(n * c(i11, i12, i12, i22), nrow = 2L, ncol = 2L)
colnames(info) <- rownames(info) <- parnames
c11 <- alpha.hat^2 * a1^2
c12 <- alpha.hat * a1 * a2 * beta.hat
c22 <- a1^2 * a2 * beta.hat^2 / alpha.hat
vcov <- matrix(c(c11, c12, c12, c22) / n, nrow = 2L, ncol = 2L)
colnames(vcov) <- rownames(vcov) <- parnames
sds <- sqrt(diag(vcov))
}
if (plot) {
dlogLc <- function (beta) {
xmod <- x / beta
R <- mean(log(1.0 + xmod))
dR <- -mean(xmod / (1.0 + xmod)) / beta
-n * ((R + 1.0) * dR / R + 1.0 / beta)
}
if (scaleData) beta.sol <- beta.hat / M1
else beta.sol <- beta.hat
lcInf <- -n * (1 + log(mean(x)))
betas <- seq(from = interv[1], to = interv[2], length = 200)
fs <- sapply(betas, logLc)
dfs <- sapply(betas, dlogLc)
        ind <- (dfs < 20)
Stext <- ifelse(scaleData, "(scaled data)", "")
opar <- par(mfrow = c(2L, 1L))
par(mar = c(0, 5, 5, 5))
plot(betas[ind], dfs[ind],
type = "n",
main = sprintf("'Lomax' concentrated log-lik CV = %4.2f %s", CV, Stext),
xlab = " ", ylab = "dlogL",
xaxt = "n", yaxt = "n",
xlim = interv)
axis(side = 4)
abline(h = 0)
abline(v = beta.sol, col = "orangered")
abline(v = interv, col = "darkcyan", lwd = 2)
abline(v = M1, col = "Chartreuse4", lty = "dotted")
abline(h = 0, col = "gray")
lines(betas[ind], dfs[ind],
type = "l", lty = "solid", col = "red3")
par(mar = c(5, 5, 0, 5))
plot(betas[ind], fs[ind], type = "l",
lty = "solid", col = "red3",
xlab = "beta (scale param.)", ylab = "logL",
xlim = interv, ylim = range(fs[ind], lcInf))
abline(h = lcInf, col = "orchid")
mtext(text = "lim.", side = 4, at = lcInf,
col = "orchid")
abline(v = interv, col = "darkcyan", lwd = 2)
abline(v = beta.sol, col = "orangered")
mtext(text = "betaHat", col = "orangered",
side = 1, at = beta.sol, line = 0.5)
abline(v = M1, col = "Chartreuse4", lty = "dotted")
par(opar)
}
list(estimate = c(shape = alpha.hat, scale = beta.hat),
sd = sds,
loglik = res2$logL,
dloglik = res2$dlogL,
cov = vcov,
info = info,
cvg = Cvg)
}
`flomax1` <- function(x,
shape = 4.0,
info.observed = TRUE,
scaleData = TRUE,
cov = TRUE,
plot = FALSE) {
Cvg <- TRUE
if (any(x <= 0)) stop("all elements in 'x' must be >0")
n <- length(x)
M1 <- mean(x)
if (scaleData) {
x <- x / M1
CV <- sqrt(1.0 - 1.0 / n) * sd(x)
cLogLik <- - n * log(M1)
trans <- 1.0 / M1
} else {
CV <- sqrt(1.0 - 1.0 / n) * sd(x) / M1
}
N1 <- mean(1.0 / x)
alpha <- unname(shape)
alpha1 <- shape + 1.0
dlogL1 <- function (beta) {
xmod <- x / beta
R <- mean(log(1.0 + xmod))
S1 <- mean(xmod / (1.0 + xmod))
n * (-1.0 + alpha1 * S1) / beta
}
logL1 <- function (beta) {
xmod <- x / beta
R <- mean(log(1.0 + xmod))
n * (log(alpha) - log(beta) - alpha1 * R)
}
if (plot) cov <- TRUE
if (cov) {
log2L1 <- function (beta) {
xmod <- x / beta
xmod1 <- 1.0 + xmod
R <- mean(log(xmod1))
S1 <- mean(xmod / xmod1)
alpha1 <- 1.0 + alpha
logL <- n * (log(alpha) - log(beta) - alpha1 * R)
dlogL <- n * (-1.0 + alpha1 * S1) / beta
d2logL <- n * (1.0 - alpha1 * S1 - alpha1 * mean(xmod / xmod1 / xmod1)) /
beta / beta
if (scaleData) {
logL <- logL + cLogLik
dlogL <- trans * dlogL
d2logL <- trans * d2logL * trans
}
list(logL = logL,
dlogL = dlogL,
d2logL = d2logL)
}
}
if (scaleData) interv <- c((1.0 - 1.0 / alpha1) / N1, alpha1)
else interv <- c((1.0 - 1.0 / alpha1) / N1, M1 * alpha1)
checks <- unlist(sapply(interv, dlogL1))
if ((checks[1] < 0) || (checks[2] > 0)) {
warning("no interval found to maximise loglik")
Cvg <- FALSE
}
res <- optimize(f = logL1, interval = interv, maximum = TRUE,
tol = .Machine$double.eps^0.3)
beta.hatS <- res$maximum
alpha.hat <- alpha
if (scaleData) beta.hat <- M1 * beta.hatS
else beta.hat <- beta.hatS
if (!cov) {
loglik <- res$objective
if (scaleData) loglik <- loglik + cLogLik
return(list(estimate = c(scale = beta.hat),
CV = CV,
loglik = loglik,
cvg = Cvg))
}
res2 <- log2L1(beta = beta.hatS)
loglik <- res2$logL
if (info.observed) {
info <- -res2$d2logL
} else {
info <- n * alpha.hat / (alpha.hat + 2.0) / beta.hat / beta.hat
}
cov <- 1.0 / info
sds <- sqrt(cov)
if (plot) {
Stext <- ifelse(scaleData, "(scaled data)", "")
betas <- seq(from = interv[1], to = interv[2], length = 200)
fs <- sapply(betas, logL1)
dfs <- c(NA, diff(fs) / diff(betas))
ind <- (dfs < 20)
plot(betas[ind], dfs[ind],
type = "l", lty = "dotted",
main = sprintf("'Lomax' log-lik derivative %s", Stext),
xlab = "beta (scale)", ylab = "dlogL")
lines(betas[ind], sapply(betas[ind], dlogL1), col = "orangered")
abline(h = 0, v = beta.hatS, col = "SpringGreen3")
}
list(estimate = c(scale = beta.hat),
sd = sds,
CV = CV,
loglik = loglik,
dloglik = res2$dlogL,
cov = cov,
info = info,
cvg = Cvg)
} |
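## Illustrative usage sketch: fit a Lomax sample drawn by inverse transform,
## x = beta * (u^(-1/alpha) - 1) with u ~ U(0, 1).
# set.seed(123)
# u <- runif(500)
# x <- 2.5 * (u^(-1 / 1.8) - 1)   # shape 1.8, scale 2.5
# flomax(x)$estimate
# flomax1(x, shape = 1.8)$estimate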
context("get_params")
test_that("error messages", {
expect_error(get_params("A"), "data must be a data frame or matrix")
expect_error(get_params(iris, FALSE), "between must be a numeric or character vector")
})
test_that("defaults", {
checkiris <- get_params(iris)
irisnames <- c("n", "var", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "mean", "sd")
expect_equal(nrow(checkiris), 4)
expect_equal(ncol(checkiris), 8)
expect_equal(names(checkiris), irisnames)
})
test_that("defaults with between", {
checkiris <- get_params(iris, "Species")
irisnames <- c("Species", "n", "var", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "mean", "sd")
expect_equal(nrow(checkiris), 12)
expect_equal(ncol(checkiris), 9)
expect_equal(names(checkiris), irisnames)
})
test_that("long", {
df_long <- sim_design(within = 2, between = 2, r = 0.5,
empirical = TRUE, long = TRUE, plot = FALSE)
checklong <- get_params(df_long)
expect_equal(checklong$B1, c("B1a", "B1a", "B1b", "B1b") %>% as.factor())
expect_equal(checklong$n, c(100,100,100,100))
expect_equal(checklong$var, factor(c("W1a", "W1b", "W1a", "W1b")))
expect_equal(checklong$mean, c(0,0,0,0))
expect_equal(checklong$sd, c(1,1,1,1))
expect_equal(checklong$W1a, c(1,.5,1,.5))
expect_equal(checklong$W1b, c(.5,1,.5,1))
})
test_that("is_pos_def", {
expect_equal(is_pos_def(matrix(c(1, .5, .5, 1), 2)), TRUE)
bad_matrix <- matrix(c(1, .9, .9,
.9, 1, -.2,
.9, -.2, 1), 3)
expect_equal(is_pos_def(bad_matrix), FALSE)
})
test_that("order", {
x <- sim_design(
within = list(time = c("pre", "post"),
condition = c("ctl", "exp"))
)
p <- get_params(x)
expect_equal(as.character(p$var), names(p)[3:6])
x <- sim_design(
between = list(grp = c("B", "A")),
within = list(time = c("pre", "post"),
condition = c("ctl", "exp"))
)
p <- get_params(x, between = "grp")
expect_equal(as.character(p$grp), rep(LETTERS[2:1], each = 4))
expect_equal(as.character(p$var), rep(names(p)[4:7], 2))
})
test_that("from design", {
x <- sim_design(
between = list(grp = c("B", "A")),
within = list(time = c("pre", "post"),
condition = c("ctl", "exp"))
)
p <- get_params(x)
expect_equal(as.character(p$grp), rep(LETTERS[2:1], each = 4))
expect_equal(as.character(p$var), rep(names(p)[4:7], 2))
p <- get_params(x, between = 0)
expect_true(!"grp" %in% names(p))
expect_equal(as.character(p$var), names(p)[3:6])
p <- get_params(x, dv = c("pre_exp", "post_ctl"))
expect_equal(as.character(p$grp), rep(LETTERS[2:1], each = 2))
expect_equal(as.character(p$var), rep(c("pre_exp", "post_ctl"), 2))
x <- sim_design(
between = list(grp = c("B", "A")),
within = list(time = c("pre", "post"),
condition = c("ctl", "exp")),
long = TRUE
)
p <- get_params(x)
expect_equal(as.character(p$grp), rep(LETTERS[2:1], each = 4))
expect_equal(as.character(p$var), rep(names(p)[4:7], 2))
})
|
sscor.test <- function(x,y,rho0=0,alternative=c("two.sided","less","greater"),conf.level=0.95,...) {
	if(length(alternative)>1) alternative <- alternative[1]
	if(!(alternative %in% c("two.sided","less","greater"))) {
		warning("Alternative is not implemented. The two-sided test will be applied.")
		alternative <- "two.sided"
	}
data <- cbind(x,y)
n <- length(y)
rho <- sscor(data,...)[1,2]
rhotrafo <- trafofisher(rho)
if (alternative=="two.sided") {
konfb2<- c(max(-1,trafofisherinv(qnorm((1-conf.level)/2,rhotrafo,sd=1/sqrt(n)))),min(1,trafofisherinv(qnorm(1-(1-conf.level)/2,rhotrafo,sd=1/sqrt(n)))))
dif <- sqrt(n)*abs(trafofisher(rho0)-rhotrafo)
pwert <- (pnorm(-dif))*2
}
if (alternative=="less") {
konfb2 <- c(-1,min(1,trafofisherinv(qnorm(conf.level,rhotrafo,sd=1/sqrt(n)))))
dif <- sqrt(n)*(trafofisher(rho0)-rhotrafo)
pwert <- pnorm(-dif)
}
if (alternative=="greater") {
konfb2 <- c(max(-1,trafofisherinv(qnorm(1-conf.level,rhotrafo,sd=1/sqrt(n)))),1)
dif <- sqrt(n)*(trafofisher(rho0)-rhotrafo)
pwert <- pnorm(dif)
}
data.name <- paste(deparse(substitute(x)),deparse(substitute(y)),sep=" and ")
names(dif) <- "norm"
names(rho) <- "cor"
names(rho0) <- "correlation"
attributes(konfb2) <- list(conf.level=conf.level)
erg <- list(statistic=dif,p.value=pwert,estimate=rho,null.value=rho0,
alternative=alternative,method="Spatial sign correlation",conf.int=konfb2)
class(erg) <- "htest"
return(erg)
} |
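## Illustrative usage sketch (assumes sscor() and the Fisher-transformation
## helpers trafofisher()/trafofisherinv() from the surrounding package):
# set.seed(1)
# x <- rnorm(100); y <- 0.5 * x + rnorm(100)
# sscor.test(x, y, rho0 = 0, alternative = "two.sided")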
hhsmmdata<-function(x,N=NULL){
if(is.null(N)) N = nrow(x)
if(nrow(x)!=sum(N)) stop("nrow of x != sum(N) !")
data <- list(x = x, N = N)
class(data) <- "hhsmmdata"
data
} |
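## Illustrative usage sketch: bundle two sequences of lengths 60 and 40.
# x <- matrix(rnorm(200), ncol = 2)
# d <- hhsmmdata(x, N = c(60, 40))
# class(d)  # "hhsmmdata"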
library(class)
train <- rbind(iris3[1:25,,1], iris3[1:25,,2], iris3[1:25,,3])
test <- rbind(iris3[26:50,,1], iris3[26:50,,2], iris3[26:50,,3])
cl <- factor(c(rep("s",25), rep("c",25), rep("v",25)))
knn(train, test, cl, k = 3, prob=TRUE)
attributes(.Last.value) |
test_that("Action button accepts class arguments", {
make_button <- function(class) {
if (missing(class)) {
actionButton("id", "label")
} else {
actionButton("id", "label", class = class)
}
}
act <- make_button()
get_class <- function(act) {
act_html <- format(act)
    regmatches(act_html, regexec("class=\"[^\"]*\"", act_html))[[1]]
}
act_class <- get_class(act)
expect_equal(
get_class(make_button(NULL)), act_class
)
expect_equal(
get_class(make_button(NA)), act_class
)
expect_equal(
get_class(make_button("extra")), sub("\"$", " extra\"", act_class)
)
expect_equal(
get_class(make_button("extra extra2")), sub("\"$", " extra extra2\"", act_class)
)
})
test_that("Action link accepts class arguments", {
make_link <- function(class) {
if (missing(class)) {
actionLink("id", "label")
} else {
actionLink("id", "label", class = class)
}
}
act <- make_link()
get_class <- function(act) {
act_html <- format(act)
    regmatches(act_html, regexec("class=\"[^\"]*\"", act_html))[[1]]
}
act_class <- get_class(act)
expect_equal(
get_class(make_link(NULL)), act_class
)
expect_equal(
get_class(make_link(NA)), act_class
)
expect_equal(
get_class(make_link("extra")), sub("\"$", " extra\"", act_class)
)
expect_equal(
get_class(make_link("extra extra2")), sub("\"$", " extra extra2\"", act_class)
)
}) |
aac_pssm <- function(pssm_name){
  # read the PSI-BLAST PSSM file, skipping its two header lines
  x<-read.delim(pssm_name,skip = 2,sep = "",header = FALSE)
  # drop the first row plus the position column and the trailing
  # percentage/information/weight columns
  x<-x[-1,-c(1,23:44)]
  # cut everything from the "Lambda" footer (when present) downwards
  d<-which(x=="Lambda")
  if(length(d)!=0){
    x<-x[-c(d:dim(x)[1]),]
  }
  # drop the residue-letter column, leaving the 20 log-odds score columns
  x<-x[,-1]
  colnames(x)<-NULL
  rownames(x)<-NULL
  x<-as.matrix(x)
  mode(x)<-"integer"
  m2<-x
  L<-dim(m2)[1]  # sequence length (number of PSSM rows)
  # AAC feature: column means of the 20 PSSM scores
  AAC<-apply(m2,2,mean)
  names(AAC)<-NULL
  AAC<-round(AAC,digits = 4)
  return(AAC)
}
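## Illustrative usage sketch (file name hypothetical; expects a PSI-BLAST
## PSSM file):
# aac <- aac_pssm("protein.pssm")
# length(aac)  # 20 mean scores, one per PSSM column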
idesc_get_built <- function(self, private) {
built <- unname(self$get("Built"))
if (is.na(built)) stop("No ", sQuote('Built'), " field found")
built <- as.list(strsplit(built, "; ")[[1L]])
if (length(built) != 4L) {
stop(sQuote('Built'), " field is corrupted")
}
names(built) <- c("R", "Platform", "Date", "OStype")
built[["R"]] <- R_system_version(sub("^R ([0-9.]+)", "\\1", built[["R"]]))
built
} |
context("rbind_fill")
test_that("rbind_fill works with built in datasets", {
skip_on_cran()
df1 <- data.frame(a = 1:4, b = 5:8)
df2 <- data.frame(a = 1:4, c = 5:8)
aa <- geojsonio:::rbind_fill(df1, df2)
expect_is(df1, "data.frame")
expect_is(df2, "data.frame")
expect_is(aa, "data.frame")
expect_named(df1, c('a', 'b'))
expect_named(df2, c('a', 'c'))
expect_named(aa, c('a', 'b', 'c'))
}) |
library(testthat)
library(superheat)
test_check("superheat") |
context("readOptimCriteria")
sapply(studyPathS, function(studyPath){
opts <- setSimulationPath(studyPath, 1)
if(!isH5Opts(opts))
{
describe("readOptimCriteria", {
it("returns an antaresDataTable", {
optimCrit <- readOptimCriteria(opts)
expect_is(optimCrit, "antaresDataTable")
expect_equal(attr(optimCrit, "type"), "optimCriteria")
expect_equal(attr(optimCrit, "synthesis"), FALSE)
expect_equal(attr(optimCrit, "timeStep"), "weekly")
})
})
}
}) |
`predcoca.simpls` <- function(y, x, R0 = NULL, n.axes = NULL,
nam.dat = NULL) {
if(is.null(nam.dat)) {
namY <- deparse(substitute(y))
namX <- deparse(substitute(x))
} else {
namY <- nam.dat$namY
namX <- nam.dat$namX
}
nam.dat = list(namY = namY, namX = namX)
site.names1 <- rownames(y)
site.names2 <- rownames(x)
spp.names1 <- colnames(y)
spp.names2 <- colnames(x)
y <- as.matrix(y)
x <- as.matrix(x)
dimnames(y) <- dimnames(x) <- NULL
Yrsum <- rowSums(y)
Ycsum <- colSums(y)
Ytot <- sum(Yrsum)
Xrsum <- rowSums(x)
Xcsum <- colSums(x)
Xtot <- sum(Xrsum)
if (is.null(R0))
R0 <- Yrsum
p <- ncol(y)
q <- ncol(x)
n.row <- nrow(y)
max.axes <- min(p, q, n.row, nrow(x)) - 1
if(is.null(n.axes)) {
n.axes <- max.axes
} else {
if(n.axes > max.axes) {
n.axes <- max.axes
warning("n.axes greater than min(n,p,q)-1, reset to min(n,p,q)-1")
}
}
R0.scaled <- R0 / sum(R0)
Ycsum.scaled <- Ycsum / Ytot
Xcsum.scaled <- Xcsum / Xtot
Ychi1 <- mcChi(y, R0)
Ychi2 <- mcChi(x, R0)
pls.res <- simpls(Ychi2$Ychi, Ychi1$Ychi, n.axes)
U2 <- diag(1 / sqrt(Xcsum.scaled)) %*% pls.res$projection
X2 <- diag(1 / Xrsum) %*% x %*% U2
U1 <- diag(1 / sqrt(Ycsum.scaled)) %*% pls.res$Yloadings
X1 <- diag(1 / Yrsum) %*% y %*% U1
loadings1 <- U1
loadings2 <- diag(1 / sqrt(Xcsum.scaled)) %*% pls.res$loadings
Yhat <- Ychi2$Ychi %*% pls.res$coefficients[, , n.axes]
Yhat1 <- diag(1 / sqrt(R0.scaled)) %*% Yhat %*% diag(1 / sqrt(Ycsum.scaled))
Yhat1 <- Yhat1 + matrix(1, n.row, 1) %*% matrix(1, 1, p)
Yhat1 <- diag(Yrsum) %*% Yhat1 %*% diag(Ycsum / Ytot)
rownames(U1) <- spp.names1
rownames(U2) <- spp.names2
rownames(X1) <- site.names1
rownames(X2) <- site.names2
rownames(loadings1) <- spp.names1
rownames(loadings2) <- spp.names2
rownames(Yhat1) <- rownames(Yhat) <- site.names1
colnames(Yhat1) <- colnames(Yhat) <- spp.names1
retval <- list(nam.dat = nam.dat, call = match.call(), method = "simpls",
scores = list(species = list(Y = U1, X = U2),
site = list(Y = X1, X = X2)),
loadings = list(Y = loadings1, X = loadings2),
fitted = list(Yhat = Yhat, Yhat1 = Yhat1),
varianceExp = list(Xblock = pls.res$Xvar,
Yblock = pls.res$Yvar),
totalVar = list(Xblock = pls.res$Xtotvar,
Yblock = pls.res$Ytotvar),
lambda = NULL, n.axes = n.axes,
Ychi = list(Ychi1 = Ychi1$Ychi, Ychi2 = Ychi2$Ychi),
R0 = R0)
class(retval) <- c("predcoca", "coca", "list")
retval
} |
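## Illustrative usage sketch (matrix names hypothetical; assumes mcChi()
## and simpls() from the surrounding package):
# res <- predcoca.simpls(y = spp_abund_Y, x = spp_abund_X, n.axes = 3)
# res$varianceExp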
plot.cv.cocktail=function(x,sign.lambda=1,...){
cvobj=x
xlab="log(Lambda)"
if(sign.lambda<0)xlab=paste("-",xlab,sep="")
plot.args=list(x=sign.lambda*log(cvobj$lambda),y=cvobj$cvm,ylim=range(cvobj$cvup,cvobj$cvlo),xlab=xlab,ylab=cvobj$name,type="n")
new.args=list(...)
if(length(new.args))plot.args[names(new.args)]=new.args
do.call("plot",plot.args)
error.bars(sign.lambda*log(cvobj$lambda),cvobj$cvup,cvobj$cvlo,width=0.01,col="darkgrey")
points(sign.lambda*log(cvobj$lambda),cvobj$cvm,pch=20,col="red")
axis(side=3,at=sign.lambda*log(cvobj$lambda),labels=paste(cvobj$nz),tick=FALSE,line=0)
abline(v=sign.lambda*log(cvobj$lambda.min),lty=3)
abline(v=sign.lambda*log(cvobj$lambda.1se),lty=3)
invisible()
}
plot.cocktail <- function(x, xvar = c("norm", "lambda"), color = FALSE,
label = FALSE, ...) {
beta <- x$beta
lambda <- x$lambda
xvar <- match.arg(xvar)
which <- nonzeroCoef(beta)
beta <- as.matrix(beta[which, ])
xvar <- match.arg(xvar)
switch(xvar, norm = {
index <- apply(abs(beta), 2, sum)
iname <- "L1 Norm"
}, lambda = {
index <- log(lambda)
iname <- "Log Lambda"
})
xlab <- iname
ylab <- "Coefficients"
dotlist <- list(...)
type <- dotlist$type
if (is.null(type)) {
if (color == FALSE)
matplot(index, t(beta), lty = 1, xlab = xlab, ylab = ylab, type = "l",
pch = 200, col = gray.colors(12, start = 0.05, end = 0.7, gamma = 2.2),
...) else matplot(index, t(beta), lty = 1, xlab = xlab, ylab = ylab, type = "l",
pch = 500, ...)
} else matplot(index, t(beta), lty = 1, xlab = xlab, ylab = ylab, ...)
if (label) {
nnz <- length(which)
xpos <- max(index)
pos <- 4
if (xvar == "lambda") {
xpos <- min(index)
pos <- 2
}
xpos <- rep(xpos, nnz)
ypos <- beta[, ncol(beta)]
text(xpos, ypos, paste(which), cex = 0.5, pos = pos)
}
}
predict.cocktail <- function(object, newx, s = NULL, type = c("link",
"response", "coefficients", "nonzero"), ...) {
type <- match.arg(type)
if (missing(newx)) {
if (!match(type, c("coefficients", "nonzero"), FALSE))
stop("You need to supply a value for 'newx'")
}
nbeta <- object$beta
if (!is.null(s)) {
vnames <- dimnames(nbeta)[[1]]
dimnames(nbeta) <- list(NULL, NULL)
lambda <- object$lambda
lamlist <- lambda.interp(lambda, s)
        nbeta=nbeta[,lamlist$left,drop=FALSE]%*%Diagonal(x=lamlist$frac) +
            nbeta[,lamlist$right,drop=FALSE]%*%Diagonal(x=1-lamlist$frac)
dimnames(nbeta) <- list(vnames, paste(seq(along = s)))
}
if (type == "coefficients")
return(nbeta)
if (type == "nonzero")
return(nonzeroCoef(nbeta, bystep = TRUE))
nfit <- as.matrix(newx %*% nbeta)
switch(type, response = exp(nfit), link = nfit)
}
print.cocktail <- function(x, digits = max(3, getOption("digits") -
3), ...) {
cat("\nCall: ", deparse(x$call), "\n\n")
print(cbind(Df = x$df, Lambda = signif(x$lambda, digits)))
}
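## Illustrative usage sketch (assumption: these are the S3 methods for
## fits from the fastcox package's cocktail(), with its FHT example data):
# library(fastcox)
# data(FHT)
# fit <- cocktail(x = FHT$x, y = FHT$y, d = FHT$status, alpha = 0.5)
# plot(fit, xvar = "lambda")
# predict(fit, newx = FHT$x[1:5, ], s = 0.1, type = "link")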
|
ezglm <- function(y, x1, x2, thr = 1, family=c("gaussian","binomial")) {
this.call <- match.call()
family <- match.arg(family)
thr <- as.double(thr)
x1 <- as.double(x1)
x2 <- as.double(x2)
y <- as.double(y)
no <- as.integer(length(x1))
fit <- switch(family,
binomial = .Fortran("logr",no, x1, x2, y, thr, res = double(4*3)),
gaussian = .Fortran("lsr",no, x1, x2, y, thr, res = double(4*3)))
res = matrix(fit$res, ncol = 3)
rownames(res) = c("(Intercept)", "x1", "x2", "x1*x2")
colnames(res) = c("Estimate", "Std. Error", "Pr(>|t|)")
res
} |
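## Illustrative usage sketch: screen the x1*x2 interaction on simulated data.
# set.seed(1)
# x1 <- rnorm(200); x2 <- rnorm(200)
# y <- 1 + x1 + 0.5 * x1 * x2 + rnorm(200)
# ezglm(y, x1, x2, family = "gaussian")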
waterUptakeRatio = function(microbeNames, stoichiom, Rtype, numPaths) {
    # ratio of water uptake ("Sw") to total substrate uptake for each
    # microbe/metabolic-path combination
    Lp = max(numPaths)
    pathNames = paste("path", seq(1, Lp), sep = "")
    water.ratio = matrix(0, nrow = length(microbeNames), ncol = max(numPaths),
        dimnames = list(microbeNames, pathNames))
    for (gname in microbeNames) {
        for (p in 1:numPaths[gname]) {
            path.name = pathNames[p]
            if ("Sw" %in% Rtype[gname, , path.name]) {
                # mean uptake over substitutable ("S") resources, if any
                if (any(Rtype[gname, , path.name] == "S")) {
                  upS = mean(stoichiom[gname, Rtype[gname, , path.name] == "S",
                    path.name], na.rm = TRUE)
                } else {
                  upS = 0
                }
                # total uptake across the S, Se, Sb and Sm resource types
                up = upS +
                  sum(stoichiom[gname,Rtype[gname, ,path.name]=="Se", path.name]) +
                  sum(stoichiom[gname,Rtype[gname, ,path.name]=="Sb", path.name]) +
                  sum(stoichiom[gname,Rtype[gname, ,path.name]=="Sm", path.name])
                water.ratio[gname, path.name] = sum(stoichiom[gname,
                  Rtype[gname, , path.name] == "Sw", path.name])/up
            }
        }
    }
    return(water.ratio)
}
HatchingSuccess.MHmcmc_p<-function(result=NULL, parameters=NULL, fixed.parameters=NULL,
accept=FALSE) {
  if (is.null(result) && is.null(parameters)) {
    stop("Either result or parameters must be provided")
  }
if (is.null(parameters)) parameters <- result$par
if (is.null(fixed.parameters)) fixed.parameters <- result$fixed.parameters
par <- parameters
allpar <- c(parameters, fixed.parameters)
P.low <- abs(allpar["P.low"])
deltaP <- abs(allpar["deltaP"])
P.high <- abs(allpar["P.high"])
S.low <- allpar["S.low"]
S.high <- allpar["S.high"]
K1.low <- allpar["K1.low"]
K1.high <- allpar["K1.high"]
K2.low <- allpar["K2.low"]
K2.high <- allpar["K2.high"]
if (is.na(P.low)) P.low <- P.high - deltaP
if (is.na(P.high)) P.high <- P.low + deltaP
if (is.na(deltaP)) deltaP <- P.high - P.low
S.low <- c("dunif", 0, max(allpar["S.low"]*2, 10), 2, 0, max(allpar["S.low"]*2, 10), par["S.low"])
S.high <- c("dunif", 0, max(allpar["S.high"]*2, 10), 2, 0, max(par["S.high"]*2, 10), par["S.high"])
P.low <- c("dunif", 0, max(P.low * 2, 30), 2, 0, max(P.low * 2, 30), P.low)
P.high <- c("dunif", 0, max(P.high * 2, 40), 2, 0, max(P.high * 2, 40), P.high)
deltaP <- c("dunif", 0, max(deltaP * 2, 10), 2, 0, max(deltaP * 2, 10), deltaP)
MaxHS <- c("dunif", 0, 1, 2, 0, 1, par["MaxHS"])
K1.low <- c("dunif", min(K1.low * 2, -10), max(K1.low * 2, 10), 2, min(K1.low * 2, -10), max(K1.low * 2, 10), K1.low)
K1.high <- c("dunif", min(K1.high * 2, -10), max(K1.high * 2, 10), 2, min(K1.high * 2, -10), max(K1.high * 2, 10), K1.high)
K2.low <- c("dunif", min(K2.low * 2, -10), max(K2.low * 2, 10), 2, min(K2.low * 2, -10), max(K2.low * 2, 10), K2.low)
K2.high <- c("dunif", min(K2.high * 2, -10), max(K2.high * 2, 10), 2, min(K2.high * 2, -10), max(K2.high * 2, 10), K2.high)
priors <- list(S.low=S.low, S.high=S.high, P.low=P.low, P.high=P.high, deltaP=deltaP, MaxHS=MaxHS,
K1.low=K1.low, K1.high=K1.high, K2.low=K2.low, K2.high=K2.high)
prencours <- NULL
for (i in 1:length(par)) {
prencours <- c(prencours, priors[[names(par)[i]]])
}
parametersMCMC <- matrix(prencours, ncol=7, byrow=T)
colnames(parametersMCMC) <- c("Density", "Prior1", "Prior2", "SDProp", "Min", "Max", "Init")
rownames(parametersMCMC)<-names(par)
parametersMCMC <- as.data.frame(parametersMCMC, stringsAsFactors = FALSE)
for (i in c("Prior1", "Prior2", "SDProp", "Min", "Max", "Init"))
parametersMCMC[,i] <- as.numeric(parametersMCMC[,i])
parameters <- parametersMCMC
if (accept) {
return(parameters)
} else {
repeat {
cat("Proposition:\n")
print(parameters)
cat("Name of the parameter to change or Enter to quit:\n")
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)==0) f <- "q"
if (f=="q") {
return(parameters)
} else {
variable <- which(f==names(par))
if (length(variable)==0) {
cat("The parameter does not exist:\n")
} else {
print(variable)
cat(paste("Change for the parameter ",names(par)[variable],":\n",sep=""))
cat(paste("Distribution of the prior (Enter for default ",parameters[variable, "Density"], "):", sep=""))
density<-scan(nmax=1, quiet=TRUE, what=character())
if (length(density)!=0) { parameters[variable, "Density"] <- density } else { density <- parameters[variable, "Density"] }
if (density == "dunif") {
cat(paste("Distribution of the prior, Minimum (Enter for default ",parameters[variable, "Prior1"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Prior1"] <- f
cat(paste("Distribution of the prior, Maximum (Enter for default ",parameters[variable, "Prior2"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Prior2"] <- f
} else {
if (density == "dnorm") {
cat(paste("Distribution of the prior, Mean (Enter for default ",parameters[variable, "Prior1"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Prior1"] <- f
cat(paste("Distribution of the prior, Standard deviation (Enter for default ",parameters[variable, "Prior2"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Prior2"] <- f
} else {
cat(paste("Distribution of the prior, value 1 (Enter for default ",parameters[variable, "Prior1"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Prior1"] <- f
cat(paste("Distribution of the prior, value 2 (Enter for default ",parameters[variable, "Prior2"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Prior2"] <- f
}
}
cat(paste("SD of new proposition (Enter for default ",parameters[variable, "SDProp"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "SDProp"] <- f
cat(paste("Minimum for the parameter (default ",parameters[variable, "Min"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Min"] <- f
cat(paste("Maximum for the parameter (Enter for default ",parameters[variable, "Max"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Max"] <- f
cat(paste("Initial value (Enter for default ",parameters[variable, "Init"], "):", sep=""))
f<-scan(nmax=1, quiet=TRUE, what=character())
if (length(f)!=0) parameters[variable, "Init"] <- f
}
}
}
}
} |
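## Illustrative usage sketch ('fit' hypothetical; assumed to be the result
## of the companion HatchingSuccess fitting function in this package):
# pMCMC <- HatchingSuccess.MHmcmc_p(result = fit, accept = TRUE)
# pMCMC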
form_mix <- function(f_list, weights = NULL) {
assert_f_list(f_list, allow_numbers = FALSE)
assert_type(weights, is.numeric, allow_null = TRUE)
weights <- impute_weights(weights, length(f_list))
disable_asserting_locally()
f_list_meta <- compute_f_list_meta(f_list)
res_type <- f_list_meta[["type"]]
sec_col <- if (res_type == "discrete") {
"prob"
} else {
"y"
}
x_tbl_list <- lapply(seq_along(f_list), function(i) {
f_typed <- form_retype(f_list[[i]], res_type, method = "dirac")
x_tbl <- meta_x_tbl(f_typed)
x_tbl[[sec_col]] <- x_tbl[[sec_col]] * weights[i]
x_tbl
})
x_tbl <- stack_x_tbl(x_tbl_list)
new_pdqr_by_class(f_list_meta[["class"]])(x_tbl, res_type)
}
impute_weights <- function(weights, n) {
if (is.null(weights)) {
weights <- rep(1, n) / n
} else {
weights <- recycle_vec(weights, n)
}
if (any(weights < 0)) {
stop_collapse("`weights` should not have negative elements")
}
if (sum(weights) <= 0) {
stop_collapse("`weights` should have positive sum.")
}
weights / sum(weights)
}
form_smooth <- function(f, n_sample = 10000, args_new = list()) {
assert_pdqr_fun(f)
assert_type(
n_sample, is_single_number,
type_name = "single number more than 1",
min_val = 2
)
assert_type(args_new, is.list)
disable_asserting_locally()
f_x_tbl <- meta_x_tbl(f)
pdqr_fun <- new_pdqr_by_ref(f)
if (nrow(f_x_tbl) == 1) {
return(pdqr_fun(f_x_tbl[["x"]][1], "discrete"))
}
smpl <- as_r(f)(n_sample)
call_args <- c_dedupl(list(x = smpl, type = "continuous"), args_new)
con_d <- do.call(new_d, call_args)
con_d <- form_resupport(
con_d, support = meta_support(f), method = "reflect"
)
f_x_tbl[[get_x_tbl_sec_col(f_x_tbl)]] <- con_d(f_x_tbl[["x"]])
pdqr_fun(f_x_tbl, type = meta_type(f))
}
form_estimate <- function(f, stat, sample_size, ...,
n_sample = 10000, args_new = list()) {
assert_pdqr_fun(f)
assert_missing(stat, "statistic function")
assert_type(stat, is.function)
assert_missing(sample_size, "size of sample")
assert_type(
sample_size, is_single_number,
type_name = "single positive number", min_val = 1
)
assert_type(
n_sample, is_single_number,
type_name = "single positive number", min_val = 1
)
assert_type(args_new, is.list)
disable_asserting_locally()
r_f <- as_r(f)
est_smpl <- lapply(seq_len(n_sample), function(i) {
stat(r_f(sample_size), ...)
})
est_smpl_is_number <- vapply(est_smpl, is_single_number, logical(1))
est_smpl_is_bool <- vapply(
est_smpl, function(x) {
is.logical(x) && (length(x) == 1)
}, logical(1)
)
if (!all(est_smpl_is_number | est_smpl_is_bool)) {
stop_collapse(
"All outputs of `stat` should be single numeric or logical values."
)
}
est_smpl <- unlist(est_smpl)
if (is.logical(est_smpl)) {
prob_true <- mean(est_smpl, na.rm = TRUE)
return(boolean_pdqr(prob_true, meta_class(f)))
}
call_args <- c_dedupl(
list(x = est_smpl), args_new, list(type = meta_type(f))
)
do.call(new_pdqr_by_ref(f), call_args)
}
form_recenter <- function(f, to, method = "mean") {
assert_pdqr_fun(f)
assert_type(to, is_single_number, type_name = "single number")
assert_method(method, methods_center)
disable_asserting_locally()
f + (to - summ_center(f, method))
}
form_respread <- function(f, to, method = "sd", center_method = "mean") {
assert_pdqr_fun(f)
assert_type(
to, is_single_number, type_name = "single non-negative number",
min_val = 0
)
assert_method(method, methods_spread)
assert_method(center_method, methods_center)
disable_asserting_locally()
center <- summ_center(f, center_method)
if (to == 0) {
new_pdqr_by_ref(f)(center, type = meta_type(f))
} else {
cur_spread <- summ_spread(f, method)
coef <- switch(
method,
var = sqrt(to / cur_spread),
to / cur_spread
)
coef * f + center * (1 - coef)
}
} |
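## Illustrative usage sketch (relies on the surrounding pdqr-style
## constructors new_d()/as_r() and summaries summ_center()/summ_spread()):
# d1 <- new_d(rnorm(100), type = "continuous")
# d2 <- new_d(rnorm(100, mean = 3), type = "continuous")
# d_mix <- form_mix(list(d1, d2), weights = c(0.7, 0.3))
# summ_center(form_recenter(d_mix, to = 0), method = "mean")  # ~0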
conceptualStructure<-function(M,field="ID", ngrams=1, method="MCA", quali.supp=NULL, quanti.supp=NULL, minDegree=2,
clust="auto", k.max=5, stemming=FALSE, labelsize=10,documents=2, graph=TRUE,
remove.terms=NULL, synonyms=NULL){
cbPalette <- c(brewer.pal(9, 'Set1')[-6], brewer.pal(8, 'Set2')[-7], brewer.pal(12, 'Paired')[-11],brewer.pal(12, 'Set3')[-c(2,8,12)])
if (!is.null(quali.supp)){
QSUPP=data.frame(M[,quali.supp])
names(QSUPP)=names(M)[quali.supp]
row.names(QSUPP)=tolower(row.names(M))
}
if (!is.null(quanti.supp)){
SUPP=data.frame(M[,quanti.supp])
names(SUPP)=names(M)[quanti.supp]
row.names(SUPP)=tolower(row.names(M))
}
binary=FALSE
if (method=="MCA"){binary=TRUE}
switch(field,
ID={
CW <- cocMatrix(M, Field = "ID", type="matrix", sep=";",binary=binary, remove.terms = remove.terms, synonyms = synonyms)
CW=CW[,colSums(CW)>=minDegree]
CW=CW[,!(colnames(CW) %in% "NA")]
CW=CW[rowSums(CW)>0,]
},
DE={
CW <- cocMatrix(M, Field = "DE", type="matrix", sep=";",binary=binary, remove.terms = remove.terms, synonyms = synonyms)
CW=CW[,colSums(CW)>=minDegree]
CW=CW[rowSums(CW)>0,]
CW=CW[,!(colnames(CW) %in% "NA")]
},
ID_TM={
M=termExtraction(M,Field="ID",remove.numbers=TRUE, stemming=stemming, language="english", remove.terms = remove.terms, synonyms = synonyms, keep.terms=NULL, verbose=FALSE)
CW <- cocMatrix(M, Field = "ID_TM", type="matrix", sep=";",binary=binary)
CW=CW[,colSums(CW)>=minDegree]
CW=CW[,!(colnames(CW) %in% "NA")]
CW=CW[rowSums(CW)>0,]
},
DE_TM={
M=termExtraction(M,Field="DE",remove.numbers=TRUE, stemming=stemming, language="english", remove.terms = remove.terms, synonyms = synonyms,keep.terms=NULL, verbose=FALSE)
CW <- cocMatrix(M, Field = "DE_TM", type="matrix", sep=";",binary=binary)
CW=CW[,colSums(CW)>=minDegree]
CW=CW[,!(colnames(CW) %in% "NA")]
CW=CW[rowSums(CW)>0,]
},
TI={
M=termExtraction(M,Field="TI",remove.numbers=TRUE, stemming=stemming, language="english", remove.terms = remove.terms, synonyms = synonyms, keep.terms=NULL, verbose=FALSE, ngrams=ngrams)
CW <- cocMatrix(M, Field = "TI_TM", type="matrix", sep=";",binary=binary)
CW=CW[,colSums(CW)>=minDegree]
CW=CW[,!(colnames(CW) %in% "NA")]
CW=CW[rowSums(CW)>0,]
},
AB={
M=termExtraction(M,Field="AB",remove.numbers=TRUE, stemming=stemming, language="english", remove.terms = remove.terms, synonyms = synonyms, keep.terms=NULL, verbose=FALSE, ngrams=ngrams)
CW <- cocMatrix(M, Field = "AB_TM", type="matrix", sep=";",binary=binary)
CW=CW[,colSums(CW)>=minDegree]
CW=CW[rowSums(CW)>0,]
CW=CW[,!(colnames(CW) %in% "NA")]
}
)
colnames(CW)=tolower(colnames(CW))
rownames(CW)=tolower(rownames(CW))
p=dim(CW)[2]
quali=NULL
quanti=NULL
if (!is.null(quali.supp)){
ind=which(row.names(QSUPP) %in% row.names(CW))
QSUPP=as.data.frame(QSUPP[ind,])
CW=cbind(CW,QSUPP)
quali=(p+1):dim(CW)[2]
names(CW)[quali]=names(M)[quali.supp]
}
if (!is.null(quanti.supp)){
ind=which(row.names(SUPP) %in% row.names(CW))
SUPP=as.data.frame(SUPP[ind,])
CW=cbind(CW,SUPP)
quanti=(p+1+length(quali)):dim(CW)[2]
names(CW)[quanti]=names(M)[quanti.supp]
}
results <- factorial(CW,method=method,quanti=quanti,quali=quali)
res.mca <- results$res.mca
df <- results$df
docCoord <- results$docCoord
df_quali <- results$df_quali
df_quanti <- results$df_quanti
if ("TC" %in% names(M) & method!="MDS"){docCoord$TC=as.numeric(M[toupper(rownames(docCoord)),"TC"])}
km.res=hclust(dist(df),method="average")
if (clust=="auto"){
clust=min((length(km.res$height)-which.max(diff(km.res$height))+1),k.max)
}else{clust=max(2,min(as.numeric(clust),k.max))}
km.res$data=df
km.res$cluster=cutree(km.res,k=clust)
km.res$data.clust=cbind(km.res$data,km.res$cluster)
names(km.res$data.clust)[3]="clust"
centers<- km.res$data.clust %>% group_by(.data$clust) %>%
summarise("Dim.1"=mean(.data$Dim.1),"Dim.2"=mean(.data$Dim.2)) %>%
as.data.frame()
km.res$centers=centers[,c(2,3,1)]
data("logo",envir=environment())
logo <- grid::rasterGrob(logo,interpolate = TRUE)
b=fviz_cluster(km.res, stand=FALSE, data = df,labelsize=labelsize, repel = TRUE)+
theme_minimal()+
scale_color_manual(values = cbPalette[1:clust])+
scale_fill_manual(values = cbPalette[1:clust]) +
labs(title= paste("Conceptual Structure Map - method: ",method,collapse="",sep="")) +
geom_point() +
geom_hline(yintercept=0, linetype="dashed", color = adjustcolor("grey40",alpha.f = 0.7))+
geom_vline(xintercept=0, linetype="dashed", color = adjustcolor("grey40",alpha.f = 0.7))+
theme(panel.border = element_rect(fill=NA, size = 0.3, linetype = 'dashed', colour = adjustcolor("gray60",alpha.f = 0.7)),
text = element_text(size=labelsize),
axis.title=element_text(size=labelsize,face="bold"),
plot.title=element_text(size=labelsize+1,face="bold"),
panel.background = element_rect(fill = "white", colour = "white"),
panel.grid.major = element_line(size = 0.3, linetype = 'dashed', colour = adjustcolor("gray60",alpha.f = 0.7)),
panel.grid.minor = element_blank())
if (method!="MDS"){
b=b+xlab(paste("Dim 1 (",round(res.mca$eigCorr$perc[1],2),"%)",sep=""))+
ylab(paste("Dim 2 (",round(res.mca$eigCorr$perc[2],2),"%)",sep=""))
}else{b=b+xlab("Dim 1")+ylab("Dim 2")}
if (!is.null(quali.supp)){
s_df_quali=df_quali[(abs(df_quali[,1]) >= quantile(abs(df_quali[,1]),0.75) | abs(df_quali[,2]) >= quantile(abs(df_quali[,2]),0.75)),]
names(s_df_quali)=c("x","y")
s_df_quali$label=row.names(s_df_quali)
x=s_df_quali$x
y=s_df_quali$y
label=s_df_quali$label
b=b+geom_point(aes(x=x,y=y),data=s_df_quali,colour="red",size=1) +
geom_label_repel(aes(x=x,y=y,label=label,size=1),data=s_df_quali)
}
if (!is.null(quanti.supp)){
names(df_quanti)=c("x","y")
df_quanti$label=row.names(df_quanti)
x=df_quanti$x
y=df_quanti$y
label=df_quanti$label
b=b+geom_point(aes(x=x,y=y),data=df_quanti,colour="blue",size=1) +
geom_label_repel(aes(x=x,y=y,label=label,size=1),data=df_quanti) +
geom_segment(data=df_quanti,aes(x=0,y=0,xend = x, yend = y), size=1.5,arrow = arrow(length = unit(0.3,"cm")))
}
b=b + theme(legend.position="none")
coord_b <- plotCoord(b)
b <- b + annotation_custom(logo, xmin = coord_b[1], xmax = coord_b[2], ymin = coord_b[3], ymax = coord_b[4])
if (isTRUE(graph)){plot(b)}
b_dend <- fviz_dend(km.res, rect = TRUE, k=clust,
cex=labelsize/20, main="Topic Dendrogram",
k_colors = cbPalette[clust:1])+
theme(plot.title=element_text(size=labelsize+1,face="bold"),
axis.title=element_text(size=labelsize,face="bold") ,
panel.background = element_rect(fill = "white",
colour = "white"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
coord <- plotCoord(b_dend, side="u")
b_dend <- b_dend + annotation_custom(logo, xmin = coord[1], xmax = coord[2], ymin = coord[3], ymax = coord[4])
if (isTRUE(graph)){plot(b_dend)}
if (method !="MDS"){
if (documents>dim(docCoord)[1]){documents=dim(docCoord)[1]}
centers=data.frame(dim1=km.res$centers[,1],dim2=km.res$centers[,2])
centers$color=cbPalette[1:dim(centers)[1]]
row.names(centers)=paste("cluster",as.character(1:dim(centers)[1]),sep="")
A=euclDist(docCoord[,1:2],centers)
docCoord$Cluster=A$color
A$color=cbPalette[A$color]
A$contrib <- docCoord$contrib
A <- A %>%
mutate(names=row.names(A)) %>%
group_by(.data$color) %>%
top_n(.data$contrib,n=documents) %>%
select(!"contrib")%>%
as.data.frame()
row.names(A) <- A$names
A <- A[,-4]
names(centers)=names(A)
A=rbind(A,centers)
x=A$dim1
y=A$dim2
A[,4]=row.names(A)
names(A)[4]="nomi"
df_all=rbind(as.matrix(df),as.matrix(A[,1:2]))
rangex=c(min(df_all[,1]),max(df_all[,1]))
rangey=c(min(df_all[,2]),max(df_all[,2]))
b_doc <- ggplot(aes(x=.data$dim1,y=.data$dim2,label=.data$nomi),data=A)+
geom_point(size = 2, color = A$color)+
labs(title = "Factorial map of the documents with the highest contributions") +
geom_label_repel(box.padding = unit(0.5, "lines"),size=(log(labelsize*3)), fontface = "bold",
fill=adjustcolor(A$color,alpha.f=0.6), color = "white", segment.alpha=0.5, segment.color="gray")+
scale_x_continuous(limits = rangex, breaks=seq(round(rangex[1]), round(rangex[2]), 1))+
scale_y_continuous(limits = rangey, breaks=seq(round(rangey[1]), round(rangey[2]), 1))+
geom_hline(yintercept=0, linetype="dashed", color = adjustcolor("grey40",alpha.f = 0.7))+
geom_vline(xintercept=0, linetype="dashed", color = adjustcolor("grey40",alpha.f = 0.7))+
theme(plot.title=element_text(size=labelsize+1,face="bold"),
axis.title=element_text(size=labelsize,face="bold") ,
panel.border = element_rect(fill=NA, size = 0.3, linetype = 'dashed', colour = adjustcolor("gray60",alpha.f = 0.7)),
panel.background = element_rect(fill = "white",
colour = "white"),
panel.grid.major = element_line(size = 0.3, linetype = 'dashed', colour = adjustcolor("gray90",alpha.f = 0.7)),
panel.grid.minor = element_blank())
if (method!="MDS"){
b_doc=b_doc+xlab(paste("Dim 1 (",round(res.mca$eigCorr$perc[1],2),"%)",sep=""))+
ylab(paste("Dim 2 (",round(res.mca$eigCorr$perc[2],2),"%)",sep=""))
}else{b_doc=b_doc+xlab("Dim 1")+ylab("Dim 2")}
xl <- c(rangex[2]-0.02-diff(rangex)*0.125, rangex[2]-0.02)
yl <- c(rangey[1],rangey[1]+diff(rangey)*0.125)+0.02
b_doc <- b_doc + annotation_custom(logo, xmin = xl[1], xmax = xl[2], ymin = yl[1], ymax = yl[2])
if (isTRUE(graph)){(plot(b_doc))}
docCoord=docCoord[order(-docCoord$TC),]
B=euclDist(docCoord[,1:2],centers)
B$color=cbPalette[B$color]
B$TC <- docCoord$TC
B <- B %>%
mutate(names=row.names(B)) %>%
group_by(.data$color) %>%
top_n(.data$TC,n=documents) %>%
select(!"TC")%>%
as.data.frame()
row.names(B) <- B$names
B <- B[,-4]
B=rbind(B,centers)
x=B$dim1
y=B$dim2
B[,4]=row.names(B)
names(B)[4]="nomi"
df_all_TC=rbind(as.matrix(df),as.matrix(B[,1:2]))
rangex=c(min(df_all_TC[,1]),max(df_all_TC[,1]))
rangey=c(min(df_all_TC[,2]),max(df_all_TC[,2]))
b_doc_TC=ggplot(aes(x=.data$dim1,y=.data$dim2,label=.data$nomi),data=B)+
geom_point(size = 2, color = B$color)+
labs(title= "Factorial map of the most cited documents") +
geom_label_repel(box.padding = unit(0.5, "lines"),size=(log(labelsize*3)), fontface = "bold",
fill=adjustcolor(B$color,alpha.f=0.6), color = "white", segment.alpha=0.5, segment.color="gray")+
scale_x_continuous(limits = rangex, breaks=seq(round(rangex[1]), round(rangex[2]), 1))+
scale_y_continuous(limits = rangey, breaks=seq(round(rangey[1]), round(rangey[2]), 1))+
xlab(paste("Dim 1 (",round(res.mca$eigCorr$perc[1],2),"%)",sep=""))+
ylab(paste("Dim 2 (",round(res.mca$eigCorr$perc[2],2),"%)",sep=""))+
geom_hline(yintercept=0, linetype="dashed", color = adjustcolor("grey60",alpha.f = 0.7))+
geom_vline(xintercept=0, linetype="dashed", color = adjustcolor("grey60",alpha.f = 0.7))+
theme(plot.title=element_text(size=labelsize+1,face="bold"),
axis.title=element_text(size=labelsize,face="bold") ,
panel.border = element_rect(fill=NA, size = 0.3, linetype = 'dashed', colour = adjustcolor("gray60",alpha.f = 0.7)),
panel.background = element_rect(fill = "white", colour = "white"),
panel.grid.major = element_line(size = 0.3, linetype = 'dashed', colour = adjustcolor("gray90",alpha.f = 0.7)),
panel.grid.minor = element_blank())
xl <- c(rangex[2]-0.02-diff(rangex)*0.125, rangex[2]-0.02)
yl <- c(rangey[1],rangey[1]+diff(rangey)*0.125)+0.02
b_doc_TC <- b_doc_TC + annotation_custom(logo, xmin = xl[1], xmax = xl[2], ymin = yl[1], ymax = yl[2])
if (isTRUE(graph)){plot(b_doc_TC)}
semanticResults=list(net=CW,res=res.mca,km.res=km.res,graph_terms=b,graph_dendogram=b_dend,graph_documents_Contrib=b_doc,graph_documents_TC=b_doc_TC,docCoord=docCoord)
}else{
semanticResults=list(net=CW,res=res.mca,km.res=km.res,graph_terms=b,graph_dendogram=b_dend,graph_documents_Contrib=NULL,graph_documents_TC=NULL,docCoord=NULL)
}
return(semanticResults)
}
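# Internal helper: run the requested dimensionality reduction (FactoMineR CA or
# MCA, or classical MDS on an association-normalized co-occurrence matrix) and
# return two-dimensional coordinates for terms and documents.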
factorial<-function(X,method,quanti,quali){
df_quali=data.frame()
df_quanti=data.frame()
switch(method,
CA={
res.mca <- CA(X, quanti.sup=quanti, quali.sup=quali, ncp=2, graph=FALSE)
coord=get_ca_col(res.mca)
df=data.frame(coord$coord)
if (!is.null(quali)){
df_quali=data.frame(res.mca$quali.sup$coord)
}
if (!is.null(quanti)){
df_quanti=data.frame(res.mca$quanti.sup$coord)
}
coord_doc=get_ca_row(res.mca)
df_doc=data.frame(coord_doc$coord)
},
MCA={
if(length(quanti)>0){
X[,-quanti]=data.frame(apply(X[,-quanti],2,factor))} else{X=data.frame(apply(X,2,factor))}
res.mca <- MCA(X, quanti.sup=quanti, quali.sup=quali, ncp=2, graph=FALSE)
coord=get_mca_var(res.mca)
df=data.frame(coord$coord)[seq(2,dim(coord$coord)[1],by=2),]
row.names(df)=gsub("_1","",row.names(df))
if (!is.null(quali)){
df_quali=data.frame(res.mca$quali.sup$coord)[seq(1,dim(res.mca$quali.sup$coord)[1],by=2),]
row.names(df_quali)=gsub("_1","",row.names(df_quali))
}
if (!is.null(quanti)){
df_quanti=data.frame(res.mca$quanti.sup$coord)[seq(1,dim(res.mca$quanti.sup$coord)[1],by=2),]
row.names(df_quanti)=gsub("_1","",row.names(df_quanti))
}
coord_doc=get_mca_ind(res.mca)
df_doc=data.frame(coord_doc$coord)
},
MDS={
NetMatrix=Matrix::crossprod(X,X)
Net=1-normalizeSimilarity(NetMatrix, type="association")
Matrix::diag(Net)=0
res.mca <- Net %>%
cmdscale()
colnames(res.mca) <- c("Dim.1", "Dim.2")
df=data.frame(res.mca)
row.names(df)=row.names(Net)
}
)
if (method!="MDS"){
docCoord=as.data.frame(cbind(df_doc,rowSums(coord_doc$contrib)))
names(docCoord)=c("dim1","dim2","contrib")
docCoord=docCoord[order(-docCoord$contrib),]
res.mca <- eigCorrection(res.mca)
results=list(res.mca=res.mca,df=df,df_doc=df_doc,df_quali=df_quali,df_quanti=df_quanti,docCoord=docCoord)
}else{
results=list(res.mca=res.mca,df=df,df_doc=NA,df_quali=NA,df_quanti=NA,docCoord=NA)
}
return(results)
}
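# Assign each row of x to the nearest row of y (cluster centers) by Euclidean
# distance; the index of the winning center is stored in x$color.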
euclDist<-function(x,y){
df=as.data.frame(matrix(NA,dim(x)[1],dim(y)[1]))
row.names(df)=row.names(x)
colnames(df)=row.names(y)
for (i in 1:dim(y)[1]){
ref=y[i,1:2]
df[,i]=apply(x,1,function(x)sqrt(sum((x-ref)^2)))
}
x$color=apply(df,1,function(m){which(m==min(m))})
return(x)
}
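# Benzecri correction for (M)CA eigenvalues: eigenvalues below 1/n are zeroed
# and the rest rescaled before recomputing explained-variance percentages.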
eigCorrection <- function(res) {
n <- nrow(res$eig)
e <- res$eig[,1]
eigBenz <- ((n / (n - 1)) ^ 2) * ((e - (1 / n)) ^ 2)
eigBenz[e< 1/n] <- 0
perc <- eigBenz / sum(eigBenz) * 100
cumPerc = cumsum(perc)
res$eigCorr <- data.frame(eig=e, eigBenz=eigBenz, perc=perc, cumPerc=cumPerc)
return(res)
}
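# Compute the corner coordinates (xmin, xmax, ymin, ymax) of a small box inside
# the plot panel where the logo is drawn; side = "b" places it at the bottom,
# any other value at the top.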
plotCoord <- function(g, side="b"){
a <- ggplot_build(g)$data
ymin <- unlist(lapply(a, function(l){
if ("y" %in% names(l)){
min(l["y"])
}
})) %>% min(na.rm=TRUE)
ymax <- unlist(lapply(a, function(l){
if ("y" %in% names(l)){
max(l["y"])
}
})) %>% max(na.rm=TRUE)
xmin <- unlist(lapply(a, function(l){
if ("x" %in% names(l)){
min(l["x"])
}
})) %>% min(na.rm=TRUE)
xmax <- unlist(lapply(a, function(l){
if ("x" %in% names(l)){
max(l["x"])
}
})) %>% max(na.rm=TRUE)
  xl <- c(xmax - 0.02 - diff(c(xmin, xmax)) * 0.125, xmax - 0.02)
  if (side == "b") {
    yl <- c(ymin, ymin + diff(c(ymin, ymax)) * 0.125) + 0.02
  } else {
    yl <- c(ymax - 0.02 - diff(c(ymin, ymax)) * 0.125, ymax - 0.02)
  }
  c(xl, yl)
}
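# Query the COVID-19 Tracker Canada API for vaccination data by age group,
# either overall, split by province, or for selected provinces, with optional
# group/before/after query parameters.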
get_vaccine_age_groups <- function(split = c("overall", "province"),
province = NULL,
group = NULL, before = NULL, after = NULL) {
base_url <- "https://api.covid19tracker.ca/vaccines/age-groups"
province_codes <- c(
"AB", "BC", "MB", "NB", "NL", "NS", "NT", "NU", "ON",
"PE", "QC", "SK", "YT"
)
split <- match.arg(split)
if (split == "province") {
base_url <- paste0(base_url, "/split")
} else if (!is.null(province)) {
province <- match.arg(toupper(province), province_codes, several.ok = TRUE)
base_url <- paste0(base_url, "/province/", province)
}
parameters <- tibble::lst(group, before, after)
parameters <- parameters[lengths(parameters) == 1]
if (length(parameters) > 0) {
params_url <- purrr::imap_chr(
parameters,
~ paste0(.y, "=", utils::URLencode(.x, reserved = TRUE))
) %>%
paste(collapse = "&")
params_url <- paste0("?", params_url)
} else {
params_url <- ""
}
purrr::map_dfr(
base_url,
function(base_url) {
url <- paste0(base_url, params_url)
content_parsed <- get_content_parsed(url)
if (!is.null(group)) {
content_parsed$data <- purrr::discard(content_parsed$data,
~ is.null(.x$data))
}
if (!is.null(province)) {
dplyr::bind_cols(
content_parsed["province"],
dplyr::bind_rows(content_parsed$data)
)
} else {
dplyr::bind_rows(content_parsed$data)
}
}
) %>%
dplyr::mutate(
data = purrr::map(
.data$data,
~jsonlite::fromJSON(.x) %>% dplyr::bind_rows(.id = "group_code")
)
) %>%
tidyr::unnest(.data$data) %>%
dplyr::mutate(dplyr::across(tidyselect::matches("date"), as.Date))
} |
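# testthat smoke tests: retrieveFunctionArguments() and
# retrieveFunctionArgumentNames() should agree on argument counts and return
# NULL for primitives such as `$`.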
context("retrieveFunctionArguments")
o <- list(
retrieveFunctionArguments(cos),
retrieveFunctionArgumentNames(cos),
retrieveFunctionArguments(append),
retrieveFunctionArgumentNames(append),
retrieveFunctionArguments(`$`),
retrieveFunctionArgumentNames(`$`),
retrieveFunctionArgumentNames(sum)
)
test_that("retrieveFunctionArguments", {
expect_equal(length(o[[1]]), length(o[[2]]))
expect_equal(length(o[[3]]), length(o[[4]]))
expect_true(is.null(o[[5]]))
}) |
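# Build a data frame of regular expressions matching interaction coefficients
# named either "<ID>_<effect>_..." or "<effect>_<ID>_..." in posterior samples,
# with one column per effect level.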
crossRegex <- function(IDLevels, effectLevels, ID, effectName, formula) {
trms <- attr(terms(formula), "term.labels")
trms <- cleanName(trms)
idFirst <- paste0(ID, "_", effectName)
effectFirst <- paste0(effectName, "_", ID)
if (idFirst %in% trms) {
suffix <- outer(IDLevels, effectLevels, FUN = paste, sep = "_")
regexThetaID <- apply(suffix, MARGIN = c(1, 2), function(x) paste0("^", ID, "_", effectName, "_", x, "$"))
regexThetaID <- as.data.frame(regexThetaID)
colnames(regexThetaID) <- paste0(effectName, "_", effectLevels)
return(regexThetaID)
} else if (effectFirst %in% trms) {
suffix <- outer(effectLevels, IDLevels, FUN = paste, sep = "_")
suffix <- t(suffix)
regexThetaID <- apply(suffix, MARGIN = c(1, 2), function(x) paste0("^", effectName, "_", ID, "_", x, "$"))
regexThetaID <- as.data.frame(regexThetaID)
colnames(regexThetaID) <- paste0(effectName, "_", effectLevels)
return(regexThetaID)
} else {
stop("Unable to match formula elements with column names from posterior samples.")
}
} |
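# Order Sentinel-2 SAFE archives stored in the ESA Long Term Archive through
# the SciHub/API Hub OData endpoints; .s2_order() does the work and allows
# reusing a precomputed availability check.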
s2_order <- function(
s2_prodlist = NULL,
export_prodlist = TRUE,
delay = 0.5,
apihub = NA,
service = NA,
reorder = TRUE
) {
.s2_order(
s2_prodlist = s2_prodlist,
export_prodlist = export_prodlist,
delay = delay,
apihub = apihub,
service = service,
reorder = reorder,
.s2_availability = NULL
)
}
.s2_order <- function(
s2_prodlist = NULL,
export_prodlist = TRUE,
delay = 0.5,
apihub = NA,
service = NA,
reorder = TRUE,
.s2_availability = NULL,
.log_path = TRUE
) {
i <- NULL
for (a in c("s2_prodlist", "export_prodlist", "apihub")) {
if (suppressWarnings(all(is.na(get(a))))) {
assign(a,NULL)
}
}
if (length(nn(s2_prodlist)) == 0) {
return(invisible(NULL))
}
if (all(is.character(export_prodlist), length(export_prodlist) > 0)) {
if (!dir.exists(export_prodlist)) {
print_message(
type = "error",
"Argument 'export_prodlist' must be TRUE, FALSE or the path of an existing folder."
)
}
}
if (any(length(delay) == 0, !is.numeric(delay))) {
print_message(
type = "error",
"Argument 'delay' must be numeric"
)
}
s2_prodlist <- as(s2_prodlist, "safelist")
if (!service %in% c("apihub", "dhus", NA)) {
print_message(
type = "error",
"Argument 'service' can be only \"apihub\" or \"dhus\"; ",
"leaving the input URLs as are."
)
} else if (!is.na(service)) {
s2_prodlist <- gsub(
"^https://((scihub)|(apihub)).copernicus.eu/((apihub)|(dhus))/odata",
paste0("https://",ifelse(service=="dhus","scihub","apihub"),
".copernicus.eu/",service,"/odata"),
s2_prodlist
)
}
s2_scihub <- s2_prodlist[grepl("^http.+Products\\(.+\\)/\\$value$", s2_prodlist)]
if (length(s2_scihub) > 0) {
creds <- read_scihub_login(apihub)
}
s2_scihub <- s2_prodlist[grepl("^http.+Products\\(.+\\)/\\$value$", s2_prodlist)]
if (length(s2_scihub) < length(s2_prodlist)) {
print_message(
type = "message",
date = TRUE,
length(s2_prodlist) - length(s2_scihub),
" products are not from SciHub and will not considered."
)
s2_prodlist <- s2_scihub
}
s2_availability <- if (is.null(.s2_availability)) {
print_message(
type = "message",
date = TRUE,
"Check if products are already available for download..."
)
safe_is_online(s2_prodlist, verbose = FALSE, apihub = apihub)
} else {
.s2_availability
}
if (sum(s2_availability, na.rm = TRUE) > 0) {
print_message(
type = "message",
date = TRUE,
sum(s2_availability, na.rm = TRUE)," Sentinel-2 images are already online."
)
}
if (sum(!nn(s2_availability), na.rm = TRUE) > 0) {
print_message(
type = "message",
date = TRUE,
"Ordering ",sum(!nn(s2_availability), na.rm = TRUE)," Sentinel-2 images ",
"stored in the Long Term Archive..."
)
}
if (!is.null(attr(s2_prodlist, "order_status")) & reorder == FALSE) {
old_order <- which(!s2_availability & attr(s2_prodlist, "order_status") == "ordered")
to_order <- which(!s2_availability & attr(s2_prodlist, "order_status") != "ordered")
} else {
to_order <- which(!nn(s2_availability))
old_order <- NULL
}
false_invalid_safe <- FALSE
quota_exceeded <- rep(TRUE, length(to_order))
status_codes <- c()
i_cred <- 1
ordered_products <- foreach(i = seq_along(to_order), .combine = c) %do% {
if (i != 1) {
Sys.sleep(delay)
}
while (all(quota_exceeded[i], i_cred <= nrow(creds))) {
times_429 <- 10
while (times_429 > 0) {
make_order <- RETRY(
verb = "GET",
url = as.character(s2_prodlist[i]),
config = authenticate(creds[i_cred,1], creds[i_cred,2])
)
        times_429 <- if (make_order$status_code != 429) {0} else {times_429 - 1}
}
sel_ordered <- if (inherits(make_order, "response")) {
status_codes[i] <- make_order$status_code
quota_exceeded[i] <- any(grepl(
"retrieval quota exceeded",
make_order$headers$`cause-message`
))
if (quota_exceeded[i]) {
i_cred <- i_cred + 1
if (i_cred <= nrow(creds)) {
print_message(
type = "message",
"Switching to SciHub record ",i_cred," at product ",i,".",
date = TRUE
)
}
}
if (make_order$status_code == 200) {
false_invalid_safe <- TRUE
make_order$content <- NULL; gc()
}
make_order$status_code == 202
} else FALSE
}
sel_ordered
}
tempordered <- rep(FALSE, length(s2_prodlist))
if (!is.null(old_order)) {
tempordered[sort(unique(c(to_order[ordered_products], old_order)))] <- TRUE
} else {
tempordered[to_order[ordered_products]] <- TRUE
}
ordered_products <- tempordered
notordered_products <- !ordered_products & !nn(s2_availability)
out_list <- s2_prodlist[ordered_products]
attr(out_list, "available") <- s2_prodlist[s2_availability]
attr(out_list, "notordered") <- s2_prodlist[notordered_products]
attr(out_list, "order_status") <- NULL
list_towrite <- list(
ordered = as.list(out_list),
available = as.list(attr(out_list, "available")),
notordered = as.list(attr(out_list, "notordered"))
)
if (any(export_prodlist != FALSE) & length(list_towrite) > 0) {
order_time <- Sys.time()
prodlist_dir <- if (is.logical(export_prodlist)) {
file.path(dirname(attr(load_binpaths(), "path")), "lta_orders")
} else {
export_prodlist
}
dir.create(prodlist_dir, showWarnings = FALSE)
prodlist_path <- file.path(
prodlist_dir,
strftime(order_time, format = "lta_%Y%m%d_%H%M%S.json")
)
writeLines(
toJSON(as.list(list_towrite), pretty = TRUE),
prodlist_path
)
attr(out_list, "path") <- prodlist_path
}
if (sum(ordered_products) > 0) {
print_message(
type = "message",
date = TRUE,
sum(ordered_products)," of ",sum(!nn(s2_availability), na.rm = TRUE)," Sentinel-2 images ",
"were correctly ordered. ",
if (.log_path == TRUE) {paste0(
"You can check at a later time if the ordered products are available online ",
"using the command:\n",
if (is.null(attr(out_list, "path"))) {paste0(
'\u00A0\u00A0safe_is_online(c(\n "',paste(out_list, collapse = '",\n "'),'"\n))'
)} else {paste0(
'\u00A0\u00A0safe_is_online("',attr(out_list, "path"),'")'
)},
"\n"
)}
)
}
if (sum(notordered_products) > 0) {
print_message(
type = "message",
date = TRUE,
sum(notordered_products)," of ",sum(!nn(s2_availability), na.rm = TRUE)," Sentinel-2 images ",
"were not correctly ordered ",
"(HTML status code: ",unique(paste(status_codes[status_codes!=202]), collapse = ", "),")",
if (any(quota_exceeded)) {paste0(
" because user '",creds[1,1],"' offline products retrieval quota exceeded. ",
"Please retry later, otherwise use different SciHub credentials ",
"(see ?write_scihub_login or set a specific value for argument \"apihub\")."
)} else if (false_invalid_safe) {paste0(
" because some invalid SAFE products were stored on the ESA API Hub. ",
"Please retry ordering them on DHUS ",
"(set argument 'service = \"dhus\"' in function s2_order())."
)} else {
"."
},
if (.log_path == TRUE) {paste0(
" You can try ordering them at a later time ",
"using the command:\n",
if (is.null(attr(out_list, "path"))) {paste0(
'\u00A0\u00A0s2_order(c(\n "',paste(out_list, collapse = '",\n "'),'"\n))'
)} else {paste0(
'\u00A0\u00A0s2_order("',attr(out_list, "path"),'")'
)},
"\n"
)}
)
}
return(out_list)
} |
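# Summary method for InterVA5 fits: compute cause-specific mortality fractions
# (CSMF) either via the InterVA5 reporting rule or from the full probability
# distributions, or report the top causes for a single death when `id` is given.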
summary.interVA5 <- function(object, top = 5, id = NULL, InterVA.rule = TRUE, ...){
if(is.null(object$dev)){
data("causetextV5", envir = environment())
causetextV5 <- get("causetextV5", envir = environment())
causenames <- causetextV5[4:64,2]
causeindex <- 4:64
}else{
InterVA5 <- FALSE
causenames <- names(object$VA[[1]]$wholeprob)
causeindex <- 1:length(causenames)
}
out <- NULL
va <- object$VA
out$top <- top
out$N <- length(va)
out$Malaria <- object$Malaria
out$HIV <- object$HIV
dist <- NULL
for(i in 1:length(va)){
if(!is.null(va[[i]][15])){
dist <- rep(0, length(unlist(va[[i]][15])))
break
}
}
undeter <- 0
if(is.null(dist)){cat("No va probability found in input"); return()}
if(!InterVA.rule){
for(i in 1:length(va)){
if(is.null(va[[i]][15])) {undeter = undeter + 1; next}
this.dist <- unlist(va[[i]][15])
dist <- dist + this.dist
}
    if(undeter > 0){
      dist.cod <- c(dist[causeindex], undeter)
      dist.cod <- dist.cod/sum(dist.cod)
      names(dist.cod) <- c(causenames, "Undetermined")
      csmf <- dist.cod # define `csmf` on this branch too, including "Undetermined"
    }else{
csmf <- dist[causeindex]/sum(dist[causeindex])
names(csmf) <- causenames
}
}else{
csmf <- CSMF.interVA5(va)
}
csmf <- data.frame(cause = names(csmf), likelihood = csmf)
rownames(csmf) <- NULL
csmfc <- COMCAT.interVA5(va)
csmfc <- data.frame(cause = names(csmfc), likelihood = csmfc)
rownames(csmfc) <- NULL
if(!is.null(id)){
index <- which(object$ID == id)
    if(length(index) == 0){ # which() returns integer(0), not NULL, when no ID matches
      stop("Error: provided ID not found")
    }else if(is.null(va[[index]][15])){ # use the matched index, not the leftover loop variable `i`
out$undet <- TRUE
}else{
out$undet <- FALSE
probs.tmp <- object$VA[[index]][15][[1]]
out$preg <- probs.tmp[1:3]
out$probs <- probs.tmp[causeindex]
topcauses <- sort(out$probs, decreasing = TRUE)[1:top]
out$indiv.top <- data.frame(Cause = names(topcauses))
out$indiv.top$Likelihood <- topcauses
}
out$id.toprint <- id
}else{
out$csmf.ordered <- csmf[order(csmf[,2], decreasing = TRUE),]
out$comcat.ordered <- csmfc[order(csmfc[, 2], decreasing = TRUE),]
}
out$InterVA.rule <- InterVA.rule
class(out) <- "interVA5_summary"
return(out)
}
print.interVA5_summary <- function(x, ...){
if(!is.null(x$id.toprint)){
cat(paste0("InterVA5 fitted top ", x$top, " causes for death ID: ", x$id.toprint, "\n\n"))
if(x$undet){
cat("Cause of death undetermined\n")
}else{
x$indiv.top[, 2] <- round(x$indiv.top[, 2], 4)
print(x$indiv.top, row.names = FALSE, right = FALSE)
}
}else{
cat(paste("InterVA5 fitted on", x$N, "deaths\n"))
if(x$InterVA.rule){
cat("CSMF calculated using reported causes by InterVA5 only\nThe remaining probabilities are assigned to 'Undetermined'\n")
}else{
cat("CSMF calculated using distribution over all causes\nwithout 'Undetermined' category\n")
}
cat("\n")
cat(paste("Top", x$top, "CSMFs:\n"))
csmf.out.ordered <- x$csmf.ordered[1:x$top, ]
csmf.out.ordered[, 2] <- round(csmf.out.ordered[, 2], 4)
    print(csmf.out.ordered, right = FALSE, row.names = FALSE)
    cat("\n")
    cat(paste("Top", min(x$top, 6), "Circumstance of Mortality Category:\n"))
    csmf.out.ordered <- x$comcat.ordered[1:min(x$top, 6), ]
    csmf.out.ordered[, 2] <- round(csmf.out.ordered[, 2], 4)
    print(csmf.out.ordered, right = FALSE, row.names = FALSE)
}
} |
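# testthat checks for check_outliers(): returns a "nacho" object on the example
# data, and errors on missing input or a corrupted RCC_type attribute.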
test_that("Default check_outliers", {
expect_s3_class(check_outliers(GSE74821), "nacho")
})
test_that("missing object", {
expect_error(check_outliers())
})
test_that("wrong attribute", {
attr(GSE74821, "RCC_type") <- "something_wrong"
expect_error(check_outliers(GSE74821))
}) |
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
)
library(anovir)
head(recovery_data, 3)
tail(recovery_data, 3) |
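# Simulate right-censored survival data: covariates with AR(1)-like covariance
# (0.7^|j-k|), s nonzero coefficients of magnitude `mag` with random signs,
# log-logistic event times (logistic errors on the log scale), and exponential
# censoring calibrated at the `cens.quant` quantile of the event times.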
genSurvData <- function(n, p, s, mag, cens.quant = 0.6){
SigmaX <- matrix(0, nrow = p, ncol = p)
for (j in 1:p) {
for (k in 1:p) {
SigmaX[j,k] <- 0.7^(abs(j-k))
}
}
eo <- eigen(SigmaX)
SigmaXSqrt <- eo$vec%*%diag(eo$val^.5)%*%t(eo$vec)
X <- tcrossprod(matrix(rnorm(n*p), nrow=n, ncol=p),SigmaXSqrt)
beta <- sample(c(rep(0, p-s), rep(mag, s)))*sample(c(-1,1), p, replace = TRUE)
logtime <- X%*%beta + rlogis(n, location = 0, scale = 2)
temp <- quantile(exp(logtime), cens.quant)
C <- rexp(n=n, rate=1/temp)
logY <- pmin(logtime, log(C))
status <- 1*(logtime == logY)
return(list(
"beta" = beta,
"logY" = logY,
"status" = status,
"X" = X
))
} |
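# Shiny module for the heatmap tab: the UI exposes toggles for clustering
# features, displaying unselected features below the selected ones (the input
# id "hide_unselected" acts as a display toggle despite its name), and showing
# record-ID column labels.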
ui_heatmap <- function() {
ns <- NS("heatmap")
tabPanel("Heatmap",
value = "heatmap",
verticalLayout(
plotOutput(
ns("heatmap"),
width = "25cm",
height = "auto"
),
checkboxInput(ns("cluster_features"),
label = "Cluster features on heatmap?",
value = TRUE),
checkboxInput(ns("hide_unselected"),
label = "Display unselected features?",
value = TRUE),
checkboxInput(ns("show_col_names"),
label = "Display record IDs labels?",
value = FALSE)
)
)
}
server_heatmap <- function(id,
heatmap_annotation,
clusters,
nclusters,
cluster_colors,
scaled_data,
scaled_unselected_data,
scale_flag,
distance_method) {
moduleServer(id, function(input, output, session) {
output$heatmap <- renderPlot({
req(scaled_data())
top_matrix <- t(scaled_data())
if (!is.null(scaled_unselected_data()) & input$hide_unselected) {
bottom_matrix <- t(scaled_unselected_data())
} else {
bottom_matrix <- NULL
}
heatmap_clusters <- reorder_dendrograms(clusters(),
nclusters(), cluster_colors)
plot_cluster_heatmaps(
top_matrix,
bottom_matrix,
heatmap_clusters$dendrogram,
heatmap_clusters$ids,
heatmap_annotation(),
scale_flag(),
distance_method(),
input$cluster_features,
input$show_col_names
)
}, height = function() { if (ncol(scaled_data()) > 100) 900 else 700 })
})
} |
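# Plot the smoothed log hazard ratio ln HR(Z, Zref) from a fitted Cox model
# stored in an "HR" object, with a pointwise confidence band. The reference
# value Zref is the predictor's minimum (prob = 0), a quantile (0 < prob < 1,
# or the quantile closest to `pred.value`), or its maximum (prob = 1).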
plot.HR <- function(x, predictor, prob=NULL, pred.value=NULL, conf.level=0.95, round.x=NULL, ref.label=NULL, col, main, xlab, ylab, lty, xlim, ylim, xx, ...) {
object <- x
if ( !inherits(object, "HR") ) stop("Object must be of class HR")
mydata <- object$dataset
fit <- object$coxfit
if ( missing(round.x) ) round.x <- 5
if ( !missing(pred.value) ) prob <- 0.5
  if ( !missing(prob) ) if(prob < 0 | prob > 1) stop("The argument 'prob' must be between 0 and 1")
if ( missing(prob) & missing(pred.value) ) prob <- 0
if ( !missing(pred.value) & !missing(xlim) ) if ( pred.value < min(xlim) | pred.value > max(xlim) ) stop("The reference value is out of range of 'xlim'")
if ( missing(predictor) ) stop("Missing predictor")
if ( missing(col) ) col <- c("black", "black", "grey85")
if ( missing(ylab) ) ylab <- c("Ln HR(Z,Zref)")
if ( missing(lty) ) lty <- c(1,3)
ctype <- "FALSE"
qvalue <- (1+conf.level)/2
linear.predictor <- FALSE
k1 <- 9999
k <- which(names(mydata) == predictor)
k <- c(k, k1)
if (k[1] == 9999) stop ("predictor must be in data")
k <- k[1]
a <- mydata
if ( missing(xlab) ) xlab <- names(a)[k]
n.predictor <- names(a)[k]
n <- dim(a)[1]
if (prob == 0) {
eta.no.ref <- predict(fit,type = "terms")
if ( inherits(eta.no.ref, "numeric") ) {kp <- 1; eta.no.ref <- cbind(eta.no.ref,eta.no.ref);}
else {kp <- grep( predictor, colnames(eta.no.ref) );}
eta.xref <- min(eta.no.ref[,kp])
ii <- which.min(eta.no.ref[,kp])
xref <- a[ii,k]
eta.ref <- eta.no.ref[,kp]-eta.xref
indices <- grep(names(a)[k], dimnames(fit$x)[[2]])
submatriz.diseno <- fit$x[,indices]
if (is.matrix(submatriz.diseno) == FALSE) linear.predictor <- TRUE
submatriz.var <- fit$var[indices, indices]
xref1 <- rep(fit$x[ii,indices], dim(fit$x)[1])
if (linear.predictor == FALSE) {
xref1 <- matrix(xref1, nrow=dim(fit$x)[1], ncol=dim(submatriz.diseno)[2], byrow=TRUE)
}
if (linear.predictor == TRUE) {
xref1 <- matrix(xref1, nrow=dim(fit$x)[1], ncol=1, byrow=TRUE)
}
eta.ref1 <- fit$x[,indices]-xref1
var.eta.ref1 <- rep(NA, n)
for (i in 1:n) var.eta.ref1[i] <- eta.ref1[i,]%*%fit$var[indices,indices]%*%eta.ref1[i,]
se.eta.ref1 <- sqrt(var.eta.ref1)
}
if (prob > 0 & prob < 1) {
eta.no.ref <- predict(fit, type="terms")
if ( inherits(eta.no.ref, "numeric") ) {kp <- 1; eta.no.ref <- cbind(eta.no.ref, eta.no.ref);}
else {kp <- grep( predictor, colnames(eta.no.ref) )}
ord <- order(a[,k])
if ( !missing(pred.value) ) {
pp <- seq(0, 1, len=1000)
app <- quantile(a[,k], pp)
qq <- which(app<=pred.value)
qq1 <- max(qq)
prob <- qq1/1000
}
ind.prob <- trunc(prob*n)
xref <- a[,k][ord[ind.prob]]
eta.xref <- eta.no.ref[,kp][ord[ind.prob]]
eta.ref <- eta.no.ref[,kp]-eta.xref
indices <- grep(names(a)[k], dimnames(fit$x)[[2]])
submatriz.diseno <- fit$x[,indices]
if (is.matrix(submatriz.diseno) == FALSE) linear.predictor <- TRUE
submatriz.var <- fit$var[indices, indices]
xref1 <- rep(fit$x[ord[ind.prob],indices], dim(fit$x)[1])
if (linear.predictor == FALSE) {
xref1 <- matrix(xref1, nrow=dim(fit$x)[1], ncol=dim(submatriz.diseno)[2], byrow=TRUE)
}
if (linear.predictor == TRUE) {
xref1 <- matrix(xref1, nrow=dim(fit$x)[1], ncol=1, byrow=TRUE)
}
eta.ref1 <- fit$x[,indices]-xref1
var.eta.ref1 <- rep(NA,n)
for (i in 1:n) var.eta.ref1[i] <- eta.ref1[i,]%*%fit$var[indices,indices]%*%eta.ref1[i,]
se.eta.ref1 <- sqrt(var.eta.ref1)
}
if (prob == 1) {
eta.no.ref <- predict(fit, type="terms")
if ( inherits(eta.no.ref, "numeric") ) {kp <- 1; eta.no.ref <- cbind(eta.no.ref, eta.no.ref);}
else {kp <- grep( predictor, colnames(eta.no.ref) )}
eta.xref <- max(eta.no.ref[,kp])
ii <- which.max(eta.no.ref[,kp])
xref <- a[ii,k]
eta.ref <- eta.no.ref[,kp]-eta.xref
indices <- grep(names(a)[k], dimnames(fit$x)[[2]])
submatriz.diseno <- fit$x[,indices]
if (is.matrix(submatriz.diseno) == FALSE) linear.predictor <- TRUE
submatriz.var <- fit$var[indices,indices]
xref1 <- rep(fit$x[ii,indices], dim(fit$x)[1])
if (linear.predictor == FALSE) {
xref1 <- matrix(xref1, nrow=dim(fit$x)[1], ncol=dim(submatriz.diseno)[2], byrow=TRUE)
}
if (linear.predictor == TRUE) {
xref1 <- matrix(xref1, nrow=dim(fit$x)[1], ncol=1, byrow=TRUE)
}
eta.ref1 <- fit$x[,indices]-xref1
var.eta.ref1 <- rep(NA,n)
for (i in 1:n) var.eta.ref1[i] <- eta.ref1[i,]%*%fit$var[indices,indices]%*%eta.ref1[i,]
se.eta.ref1 <- sqrt(var.eta.ref1)
}
if ( missing(main) ) main <- paste("Smooth log hazard ratio for", names(a)[k])
tmat <- cbind(eta.ref, eta.ref-qnorm(qvalue)*se.eta.ref1, eta.ref+qnorm(qvalue)*se.eta.ref1)
line <- rep(0, n)
jj <- match(sort(unique(a[,k])), a[,k])
if ( missing(xlim) ) xlim <- c( min(a[,k]), max(a[,k]) )
else {
if ( missing(ylim) ) {
index1 <- which( a[jj,k] >= min(xlim) & a[jj,k] <= max(xlim) )
index <- jj[index1]
ylim <- c( min(tmat[index,2]), max(tmat[index,3]) )
}
}
if ( missing(ylim) ) ylim <- c( min(tmat[,2]), max(tmat[,3]) )
if ( xref < min(a[,k]) | xref > max(a[,k]) ) stop("The reference value is out of range of x")
if ( xref < min(xlim) | xref > max(xlim) ) stop("The reference value is out of range of 'xlim'")
matplot(a[jj,k], tmat[jj,], type="l", lty=c(1, 5, 5, 2), xaxt="n", ylim=ylim, xlim=xlim, xlab=xlab, ylab=ylab, col=c(1, 2, 2, 1), main=main, ...)
xxx <- round( seq(min(a[,k]), max(a[,k]),len=5) )
if ( missing(xx) ) xx <- c( min(a[,k]), round(xref,1), xxx[2], xxx[3], xxx[4], max(a[,k]) )
axis(1, xx, ...)
m <- length(jj)
x <- rep(NA, 2*m+1)
y <- rep(NA, 2*m+1)
for (l in 1:m) {
x[l] <- a[jj,k][l]
x[m+l] <- a[jj,k][m+1-l]
y[l] <- tmat[jj,2][l]
y[m+l] <- tmat[jj,1][m+1-l]
}
x[m+1] <- x[m]
x[2*m+1] <- x[1]
y[2*m+1] <- tmat[jj,2][1]
polygon(c(x), c(y), col=col[3], ...)
y <- rep(NA, 2*m+1)
for (l in 1:m) {
x[l] <- a[jj,k][l]
x[m+l] <- a[jj,k][m+1-l]
y[l] <- tmat[jj,3][l]
y[m+l] <- tmat[jj,1][m+1-l]
}
x[m+1] <- x[m]
x[2*m+1] <- x[1]
y[2*m+1] <- tmat[jj,2][1]
polygon(c(x), c(y), col=col[3], ...)
y <- rep(NA, 2*m+1)
for (l in 1:m) {
x[l] <- a[jj,k][l]
x[m+l] <- a[jj,k][m+1-l]
y[l] <- tmat[jj,3][l]
y[m+l] <- tmat[jj,2][m+1-l]
}
x[m+1] <- x[m]
x[2*m+1] <- x[1]
y[2*m+1] <- tmat[jj,2][1]
polygon(c(x), c(y), col=col[3], border="white", ...)
y <- rep(NA, 2*m+1)
for (l in 1:m) {
x[l] <- a[jj,k][l]
x[m+l] <- a[jj,k][m+1-l]
y[l] <- tmat[jj,3][l]
y[m+l] <- tmat[jj,2][m+1-l]
}
x[m+1] <- x[m]
x[2*m+1] <- x[1]
y[2*m+1] <- tmat[jj,2][1]
polygon(c(x), c(y), col=col[3], border=col[2], lty=lty[2], lwd=1.5, ...)
x <- rep(NA, 2*m+1)
y <- rep(NA, 2*m+1)
for (l in 1:m) {
x[l] <- a[jj,k][l]
x[m+l] <- a[jj,k][m+1-l]
y[l] <- tmat[jj,1][l]
y[m+l] <- tmat[jj,1][m+1-l]
}
x[m+1] <- x[m]
x[2*m+1] <- x[1]
y[2*m+1] <- tmat[jj,1][1]
polygon(c(x), c(y), col=col[3], border=col[1], lty=lty[1], ...)
abline(0, 0, lty=2)
abline(v=min(a[,k]), col="white")
abline(v=max(a[,k]), col="white")
if ( missing(xlim) ) {
v1 <- min(a[,k])+( max(a[,k])-min(a[,k]) )/10
v2 <- min(a[,k])+9*( max(a[,k])-min(a[,k]) )/10
} else {
v1 <- min(xlim)+( max(xlim)-min(xlim) )/10
v2 <- min(xlim)+9*( max(xlim)-min(xlim) )/10
}
if ( missing(ylim) ) {
y[1] <- max(tmat[,3])/2
y[2] <- min(tmat[,2])
} else {
y[1] <- max(ylim)/2
y[2] <- min(ylim)
}
if ( !missing(ref.label) ) n.predictor <- ref.label
if (xref > v1 & xref < v2) {
arrows(xref, y[1], xref, y[2], length=0.08)
ys <- y[1]
if (ys > 2*y[1]-(2*y[1]-y[2])/10) {
text(xref, y[1], paste(n.predictor, "=", round( xref, round.x) ), adj=c(0.5, 2.3), ...)
}
if (ys <= 2*y[1]-(2*y[1]- y[2])/10) {
text(xref, y[1], paste( n.predictor, "=", round(xref,round.x) ), adj=c(0.5, -0.7), ...)
}
}
if (xref <= v1) {
v3 <- ( max(xlim)-min(xlim) )/100
xref2 <- xref
if ( xref == min(xlim) ) xref2 <- xref+min(0.05, v3)
arrows(xref2, y[1], xref2, y[2], length=0.08)
ys <- y[1]
if (ys > 2*y[1]-(2*y[1]-y[2])/10) {
text(xref, y[1], paste( n.predictor, "=", round(xref,round.x) ), adj=c(0, 2.3), ...)
}
if (ys <= 2*y[1]-(2*y[1]-y[2])/10) {
text(xref, y[1], paste( n.predictor, "=", round(xref,round.x) ), adj=c(0, -0.7), ...)
}
}
if (xref >= v2) {
v3 <- ( max(xlim)-min(xlim) )/100
xref2 <- xref
if ( xref == max(xlim) ) xref2 <- xref-min(0.05, v3)
arrows(xref2, y[1], xref2, y[2], length=0.08)
ys <- y[1]
if (ys > 2*y[1]-(2*y[1]-y[2])/10) {
text(xref, y[1], paste( n.predictor, "=", round(xref,round.x) ), adj=c(1, 2.3), ...)
}
if (ys <= 2*y[1]-(2*y[1]-y[2])/10) {
text(xref, y[1], paste( n.predictor, "=", round(xref,round.x) ), adj=c(1, -0.7), ...)
}
}
} |
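# Check whether a haplotype name follows the MHCnuggets naming convention,
# e.g. "HLA-A02:01"; returns FALSE for NULL input or vectors of length != 1.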
is_mhcnuggets_name <- function(mhc) {
  if (is.null(mhc)) return(FALSE) # check NULL first: length(NULL) is 0, so the length test would mask it
  if (length(mhc) != 1) return(FALSE)
!is.na(
stringr::str_match(
string = mhc,
pattern = paste0(
"^",
"(BoLA|Eqca|H|H-2|HLA|Mamu|Patr|SLA)",
"-[A-Za-z]{0,3}[[:digit:]]{0,4}:?[[:digit:]]{0,4}",
"(-[A-Za-z]{1,3}[[:digit:]]{1,4}:[[:digit:]]{1,4})?",
"$"
)
)[, 1]
)
} |
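# On package attach, build the bundled C# CLR server (CLRServer.exe) once,
# using `nuget restore` plus msbuild/xbuild; skipped when the binary already
# exists, with warnings when the build tools are not on the PATH.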
.onAttach = function (libname, pkgname)
{
packagedir <- path.package("rDotNet")
server <- sprintf("%s/server/bin/Debug/CLRServer.exe", packagedir)
if (file.exists(server))
return()
packageStartupMessage ("attempting to build CLR server, one time setup")
if (Sys.which("nuget") == "")
{
warning ("could not find nuget in path; will not be able to use rDotNet unless corrected and rebuilt")
return ()
}
if (Sys.which("msbuild") == "" && Sys.which("xbuild") == "")
{
warning ("could not find msbuild or xbuild in path; will not be able to use rDotNet unless corrected and rebuilt")
return()
}
cwd <- getwd()
setwd(sprintf("%s/server", packagedir))
packageStartupMessage ("getting dependent packages")
system2 ("nuget", "restore", wait=TRUE, stderr=TRUE, stdout=TRUE)
packageStartupMessage ("building project")
system2 (ifelse(Sys.which("msbuild") != "", "msbuild", "xbuild"), wait=TRUE, stderr=TRUE, stdout=TRUE)
setwd(cwd)
} |
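# Pointwise confidence intervals for the distribution function from
# current-status data (C = inspection times, D = status indicators), using one
# of three methods: "VALID" (binomial intervals on roughly n^power neighboring
# observations), "ABA" (kernel-based adaptive neighborhoods with exact and
# mid-p binomial intervals), or "LIKELIHOOD" (inverted likelihood-ratio tests,
# Banerjee and Wellner 2001).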
CSCI<-function(C,D,times=NULL,type=c("VALID","ABA","LIKELIHOOD"),conf.level=0.95, control=controlCSCI()){
if (any(C<=0) | any(C==Inf)) stop("must have 0<C[i]<Inf for all i")
Confidence.level<- conf.level
type_CSCI<- match.arg(type)
if (is.null(times)) times<- sort(unique(C))
CSCI_Valid<-function(C,D,times,Confidence.level){
alp<- 1-Confidence.level
o<-order(C)
C<- C[o]
D<- D[o]
uTimes<- sort(unique(c(0,Inf,C)))
k<- length(uTimes)
n<- length(C)
uN<-uD<- rep(0,k)
for (i in 1:k){
I<- C==uTimes[i]
uN[i]<- length(C[I])
uD[i]<- sum(D[I])
}
Nat<-Ntb<-Yat<-Ytb<- rep(0,(k-1))
power<- control$power
m<- ceiling(n^power)
is.even<-function(x){ round(x) %% 2 ==0 }
rcumsum<-function(x){ rev(cumsum(rev(x))) }
for (i in 1:(k-1)){
AT<- uTimes<=uTimes[i]
if (sum(uN[AT])<=m){
Nat[i]<- sum(uN[AT])
Yat[i]<- sum(uD[AT])
} else {
rc<- rcumsum(uN[AT])
if (any(rc==m)){
I<- rc<=m
Nat[i]<- sum(uN[AT][I])
Yat[i]<- sum(uD[AT][I])
} else {
rc_2<-c(1:length(rc))[rc<m]
if(length(rc_2)==0){
I<-length(rc)
} else {
h<- min( rc_2 )
I<- (h-1):length(rc)
}
Nat[i]<- sum(uN[AT][I])
Yat[i]<- sum(uD[AT][I])
}
}
TB<- uTimes>uTimes[i]
if (sum(uN[TB])<=m){
Ntb[i]<- sum(uN[TB])
Ytb[i]<- sum(uD[TB])
} else {
cc<- cumsum(uN[TB])
if (any(cc==m)){
I<- cc<=m
Ntb[i]<- sum(uN[TB][I])
Ytb[i]<- sum(uD[TB][I])
} else {
cc_2<-c(1:length(cc))[cc<m]
if(length(cc_2)==0){cc_2=0}
h<- max( cc_2 )
I<- 1:(h+1)
Ntb[i]<- sum(uN[TB][I])
Ytb[i]<- sum(uD[TB][I])
}
}
}
qcl=rep(0, (k-1))
qcu=rep(0, (k-1))
qcl=qbeta((1-Confidence.level)/2, Yat, Nat-Yat+1)
qcu=qbeta(1-((1-Confidence.level)/2), Ytb+1, Ntb-Ytb)
NPzvalue=isoreg(C,D)$yf
NPT<-rep(NA, (k-1))
for(i in 1:(k-1)){
NPT[i]<-NPT_function(C, NPzvalue, uTimes[i])
}
II<-qcl>qcu
qcl[II]=NPT[II]
qcu[II]=NPT[II]
l_uTimes<-uTimes[-(length(uTimes))]
u_uTimes<-uTimes[-1]
C_intervals=noquote(paste0("[",round(l_uTimes,4),",", round(u_uTimes,4), ")"))
out_lower_upper<-data.frame(C_intervals, l_uTimes, NPT, qcl,qcu)
colnames(out_lower_upper)<-c("Intervals","times","NPMLE","Lower CL", "Upper CL")
low_ci<-rep(0,length(times))
upp_ci<-rep(0,length(times))
NPT_times<- rep(0,length(times))
for(i in 1:length(times)){
loc_t<-0
loc_t<-which(l_uTimes<=times[i] & times[i]<u_uTimes)
NPT_times[i]<- NPT_function(C, NPzvalue, times[i])
low_ci[i]<-qcl[loc_t]
upp_ci[i]<-qcu[loc_t]
}
out_times_lower_upper<-data.frame(times, NPT_times, low_ci,upp_ci)
colnames(out_times_lower_upper)<-c("times","NPMLE","Lower CL", "Upper CL")
out_cis<-list(ciTable_all=out_lower_upper, ciTable_times=out_times_lower_upper)
return(out_cis)
}
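  # Kernel helpers used by CSCI_ABA: type "n" is the Gaussian kernel and "t"
  # the triweight kernel, each provided as density, first derivative, and CDF.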
kernel_function_density <-
function(type_kernel,u)
{
if(type_kernel == "n")
{
result <- dnorm(u)
return(result)
}
else
if(type_kernel == "t")
{
result <- u
Logic0 <- (u <= -1)
Logic1 <- (u >= 1)
Logic2 <- (u > -1 & u < 1)
result[Logic0] <- 0
result[Logic1] <- 0
Uval <- result[Logic2]
result[Logic2] <- (35/32)*((1-(Uval^(2)))^(3))
return(result)
}
}
kernel_function_derivative <-
function(type_kernel,u)
{
if(type_kernel == "n")
{
result <- (-u)*dnorm(u)
return(result)
}
else
if(type_kernel == "t")
{
result <- u
Logic0 <- (u <= -1)
Logic1 <- (u >= 1)
Logic2 <- (u > -1 & u < 1)
result[Logic0] <- 0
result[Logic1] <- 0
Uval <- result[Logic2]
result[Logic2] <- (35/32)*(-6)*(Uval)*((1-(Uval^(2)))^(2))
return(result)
}
}
kernel_function_distribution <-
function(type_kernel,u)
{
if(type_kernel == "n")
{
result <- pnorm(u)
return(result)
}
else
if(type_kernel == "t")
{
result <- u
Logic0 <- (u <= -1)
Logic1 <- (u >= 1)
Logic2 <- (u > -1 & u < 1)
result[Logic0] <- 0
result[Logic1] <- 1
Uval <- result[Logic2]
result[Logic2] <- (((35/32) * Uval) -((35/32)* (Uval^3))+((21/32)* (Uval^5)))-((5/32)*(Uval^7)) + 0.5
return(result)
}
}
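  # Piecewise-linear interpolation helpers: ghat_function() interpolates a
  # density estimate and NPT_function() interpolates the isotonic NPMLE at TV.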
ghat_function<-function(x_v, y_v ,TV){
kkt=0
R_x_v<-c(-Inf, x_v, Inf)
kkt<-(0:length(x_v))[R_x_v[-(length(x_v)+2)]<=TV & TV<R_x_v[-1]]
R_y_v<-c(y_v[1], y_v, y_v[(length(y_v))])
R_x_v<-c(x_v[1], x_v, x_v[(length(x_v))])
g_hat<-0
if(kkt==(length(x_v))){
g_hat=y_v[(length(y_v))]
}else if(kkt==0){
g_hat=y_v[1]
}else if(kkt!=(length(x_v)) & kkt!=0){
g_hat=R_y_v[(kkt+1)]+(R_y_v[(kkt+2)]-R_y_v[(kkt+1)])*((TV-R_x_v[(kkt+1)])/(R_x_v[(kkt+2)]-R_x_v[(kkt+1)]))
}
return(g_hat)
}
NPT_function<-function(data, npv ,TV){
npv<-sort(npv)
data<-sort(data)
kkt=0
R_data<-c(0, data, Inf)
kkt<-(0:length(data))[R_data[-(length(data)+2)]<=TV & TV<R_data[-1]]
R_npv<-c(min(npv), npv, max(npv))
R_data<-c(min(data), data, max(data))
NP_hat<-0
if(kkt==(length(data))){
NP_hat=max(npv)
}else if(kkt==0){
NP_hat=0
}else if(kkt!=(length(data)) & kkt!=0){
NP_hat=R_npv[(kkt+1)]+(R_npv[(kkt+2)]-R_npv[(kkt+1)])*((TV-R_data[(kkt+1)])/(R_data[(kkt+2)]-R_data[(kkt+1)]))
}
return(NP_hat)
}
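  # "ABA" intervals: plug-in estimates of F, its derivative, and the inspection
  # density select a local effective sample size m per time point; exact and
  # mid-p binomial intervals are then formed from the m nearest observations
  # and monotonized across time.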
CSCI_ABA<-function(C,D,times,Confidence.level){
alp<- 1-Confidence.level
o<-order(C)
C<- C[o]
D<- D[o]
uTimes<- sort(unique(c(0,Inf,C)))
k<- length(uTimes)
n<- length(C)
uN<-uD<- rep(0,k)
for (i in 1:k){
I<- C==uTimes[i]
uN[i]<- length(C[I])
uD[i]<- sum(D[I])
}
NPzvalue=isoreg(C,D)$yf
NPT<-NULL
for(i in 1:(k-1)){
NPT[i]<-NPT_function(C, NPzvalue, uTimes[i])
}
ap=approx(C, NPzvalue, n=512, ties=mean)
dapy=c(ap$y[1], diff(ap$y))
aux<-outer(uTimes[-length(uTimes)],ap$x,"-")
bw=.9*(min(sd(C), (IQR(C)/1.34))*n^(-1/5))
aux_dv <-(1/(bw^2))* kernel_function_derivative("n", aux/bw)
fdhat<-NULL
fdhat=aux_dv%*%dapy
fdhat2=fdhat^2
fdhat2[fdhat2<=.001]<-.001
aux_D <- kernel_function_distribution("n", aux/bw)
Fhat<-NULL
Fhat=aux_D%*%dapy
Fhat_m<-Fhat
Fhat_m[Fhat_m>=.99]<-.99
Fhat_m[Fhat_m<=.01]<-.01
ghat<-NULL
dTx<-density(C, from=min(C), to=max(C))$x
dTy<-density(C, from=min(C), to=max(C))$y
for(i in 1: (k-1)){
ghat[i]=ghat_function(dTx, dTy, uTimes[i])
}
ghat[ghat<=.0001]<-.0001
c_hat=((((Fhat_m*(1-Fhat_m))/ghat)*(1/(2*sqrt(pi))))^(1/5))*((fdhat2)^(-1/5))
h_hat=(c_hat)*n^(-1/5)
Naux<-matrix(NA, nrow=(k-1), ncol=512)
for(i in 1:(k-1)){
Naux[i,]=aux[i,]/(h_hat[i,1])
}
Naux <- kernel_function_distribution("n", Naux)
NFhat=Naux%*%dapy
if((k-1)>=2){
for(i in 1:((k-1)-1)){
if(NFhat[i+1]<=NFhat[i]){
NFhat[i+1]<-NFhat[i]}
}
}
NFhat[NFhat<=.01]<-.01
NFhat[NFhat>=.99]<-.99
Naux<-matrix(NA, nrow=(k-1), ncol=512)
for(i in 1:(k-1)){
Naux[i,]=aux[i,]/(h_hat[i,1])
}
h_hat7=(c_hat)*n^(-1/5)
aux_density_p <-kernel_function_density("n", Naux)
aux_density<-matrix(NA, nrow=(k-1), ncol=512)
for(i in 1:(k-1)){
aux_density[i,]=aux_density_p[i,]/h_hat7[i,1]
}
fhat<-NULL
fhat=aux_density%*%dapy
fhat[fhat<=.0001]<-.0001
ratiofg=fhat/ghat
dfnew<-function(n, ratio.fg, N.Fhat){
aa=(1/((4*n)^2))*(ratio.fg^2)
bb=-(1/((4*n)^2))*(ratio.fg^2)
dd=((N.Fhat)^2-(N.Fhat))
zz=matrix(c(dd,0,bb,aa), ncol=1)
as=polyroot(zz)
Re(as[1])}
qcl=rep(0, (k-1))
qcu=rep(0, (k-1))
qcl_midp=rep(0, (k-1))
qcu_midp=rep(0, (k-1))
for (q in 1: (k-1)){
m=0
m=dfnew(n, ratiofg[q], NFhat[q])
if(m<=2){m=2}
m=min(m, (NFhat[q]*(4*n)/ratiofg[q]), ((1-NFhat[q])*(4*n)/ratiofg[q]), n)
if(m<=2){m=2}
N_at_T<-0
N_at_T=uN[uTimes==uTimes[q]]
D_at_T<-0
D_at_T=uD[uTimes==uTimes[q]]
if(length(N_at_T)==0){N_at_T=0}
if(length(D_at_T)==0){D_at_T=0}
AT<- uTimes<=uTimes[q]
S_Left=sum(uN[AT])
TB<- uTimes>uTimes[q]
S_Right=sum(uN[TB])
mm<-0
if(m-N_at_T<=0){
Nab=N_at_T
Dab=D_at_T
}else{
mm<-ceiling(m/2)
mm_new=min(S_Left, S_Right, mm)
if(length(uN[AT])>length(uN[TB])){uNTB<-c(uN[TB], rep(0, length(uN[AT])-length(uN[TB])))
} else {uNTB<-uN[TB]
}
if(length(uN[AT])<length(uN[TB])){ruNAT<-c(rev(uN[AT]), rep(0, length(uN[TB])-length(uN[AT])))
} else {ruNAT<-rev(uN[AT])
}
if(length(uD[AT])>length(uD[TB])){uDTB<-c(uD[TB], rep(0, length(uD[AT])-length(uD[TB])))
} else {uDTB<-uD[TB]
}
if(length(uD[AT])<length(uD[TB])){ruDAT<-c(rev(uD[AT]), rep(0, length(uD[TB])-length(uD[AT])))
} else {ruDAT<-rev(uD[AT])
}
l_m<-min(which(cumsum(ruNAT)>=mm_new))
u_m<-min(which(cumsum(uNTB)>=mm_new))
Nab<-0
Nab<-cumsum(ruNAT)[l_m]+cumsum(uNTB)[u_m]
Dab<-0
Dab<-cumsum(ruDAT)[l_m]+cumsum(uDTB)[u_m]
}
qcl[q]=qbeta((1-Confidence.level)/2, Dab, Nab-Dab+1)
qcu[q]=qbeta(1-((1-Confidence.level)/2), Dab+1, Nab-Dab)
qcl_midp[q]=binom.exact(Dab,Nab, conf.level=Confidence.level, midp=TRUE)$conf.int[1]
qcu_midp[q]=binom.exact(Dab,Nab, conf.level=Confidence.level, midp=TRUE)$conf.int[2]
}
qcl[1]<-0
qcu[(k-1)]<-1
qcl_midp[1]<-0
qcu_midp[(k-1)]<-1
Iqcu<-qcu<NPT
qcu[Iqcu]<-NPT[Iqcu]
Iqcl<-qcl>NPT
qcl[Iqcl]<-NPT[Iqcl]
IqcuM<-qcu_midp<NPT
qcu_midp[IqcuM]<-NPT[IqcuM]
IqclM<-qcl_midp>NPT
qcl_midp[IqclM]<-NPT[IqclM]
for(i in 1:(k-1)){
qcl[i]<-max(qcl[1:i])
}
for(i in 1:(k-1)){
qcu[i]<-min(qcu[i:(length(qcu))])
}
for(i in 1:(k-1)){
qcl_midp[i]<-max(qcl_midp[1:i])
}
for(i in 1:(k-1)){
qcu_midp[i]<-min(qcu_midp[i:(length(qcu_midp))])
}
l_uTimes<-uTimes[-(length(uTimes))]
u_uTimes<-uTimes[-1]
C_intervals=noquote(paste0("[",round(l_uTimes,4),",", round(u_uTimes,4), ")"))
out_lower_upper<-data.frame(C_intervals, l_uTimes, NPT, qcl,qcu, qcl_midp,qcu_midp)
colnames(out_lower_upper)<-c("Intervals","times", "NPMLE", "Lower CL", "Upper CL", "midP-Lower CL", "midP-Upper CL")
low_ci<-rep(0,length(times))
upp_ci<-rep(0,length(times))
low_ci_midp<-rep(0,length(times))
upp_ci_midp<-rep(0,length(times))
NPT_times<- rep(0,length(times))
for(i in 1:length(times)){
loc_t<-0
loc_t<-which(l_uTimes<=times[i] & times[i]<u_uTimes)
low_ci[i]<-qcl[loc_t]
upp_ci[i]<-qcu[loc_t]
low_ci_midp[i]<-qcl_midp[loc_t]
upp_ci_midp[i]<-qcu_midp[loc_t]
NPT_times[i]<-NPT_function(C, NPzvalue, times[i])
}
out_times_lower_upper<-data.frame(times, NPT_times, low_ci,upp_ci,low_ci_midp,upp_ci_midp)
colnames(out_times_lower_upper)<-c("times","NPMLE","Lower CL", "Upper CL", "midP-Lower CL", "midP-Upper CL")
out_cis<-list(ciTable_all=out_lower_upper, ciTable_times=out_times_lower_upper)
return(out_cis)
}
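  # Likelihood-ratio intervals: constrain the NPMLE to each candidate value on
  # a grid of intF points and invert 2 * (unrestricted - restricted)
  # log-likelihood against the critical value d_alpha from Banerjee and
  # Wellner (2001).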
CSCI_Likelihood<-function(C,D,times,Confidence.level){
o<-order(C)
C<- C[o]
D<- D[o]
uTimes<- sort(unique(C))
k<- length(uTimes)
n<- length(C)
uN<-uD<- rep(0,k)
for (i in 1:k){
I<- C==uTimes[i]
uN[i]<- length(C[I])
uD[i]<- sum(D[I])
}
quan_p<- control$quan_p
xp_hat<- control$xp_hat
d_alpha<-xp_hat[which(quan_p==Confidence.level)]
if (is.na(match(Confidence.level, quan_p)))
stop("Choose the Confidence level= .25,.50,.75,.80,.85,.90,.95, or.99. See Table 2 in Banerjee and Wellner (2001).")
intF<-control$intF
F=c(1:(intF-1)/intF)
isoci<-function(x,y){
out<-isoreg(x,y)
cbind(out$x,out$yf)
}
Ures=0
NP=isoci(C,D)
NPF=NP[,2]
I1<-NPF!=0 & NPF!=1
mBB1<-D[I1]
MNPF<-NPF[I1]
Ures=sum(mBB1*log(MNPF)+(1-mBB1)*log(1-MNPF))
Lj<-rep(NA, length(times))
Uj<-rep(NA, length(times))
for(j in 1:length(times)){
Res=rep(0, length(F))
I_ci<-C<=times[j]
kk<-length(C[I_ci])
for(l in 1:length(F)){
if(kk<(n-1)& kk>=2){
NP1=isoci(C[1:kk], D[1:kk])
NPF1=NP1[,2]
NPF1[1:kk][NPF1[1:kk]>F[l]]<-F[l]
NP2=isoci(C[(kk+1):n], D[(kk+1):n])
NPF2=NP2[,2]
NPF2[1:(n-kk)][NPF2[1:(n-kk)]<F[l]]<-F[l]
NNPF=c(NPF1,NPF2)
I<-NNPF!=0 & NNPF!=1
mBB2<-D[I]
MNNPF<-NNPF[I]
Res[l]=sum(mBB2*log(MNNPF)+(1-mBB2)*log(1-MNNPF))
}else if (kk>=(n-1)){
NP1=isoci(C,D)
NPF1=NP1[,2]
NPF1[NPF1>F[l]]<-F[l]
I<-NPF1!=0 & NPF1!=1
mBB2<-D[I]
MNNPF<-NPF1[I]
Res[l]=sum(mBB2*log(MNNPF)+(1-mBB2)*log(1-MNNPF))
}else if (kk<2){
NP2=isoci(C,D)
NPF2=NP2[,2]
NPF2[NPF2<F[l]]<-F[l]
I<-NPF2!=0 & NPF2!=1
mBB2<-D[I]
MNNPF<-NPF2[I]
Res[l]=sum(mBB2*log(MNNPF)+(1-mBB2)*log(1-MNNPF))
}
}
R2R=0
R2R=2*(Ures-Res)
RLow=0
RLow=min(which(R2R<d_alpha))/intF
RUpp=0
RUpp=max(which(R2R<d_alpha))/intF
RLength=RUpp-RLow
Lj[j]<-RLow
Uj[j]<-RUpp
}
NPzvalue=isoreg(C,D)$yf
NPT<-NULL
for(i in 1:(k-1)){
NPT[i]<-NPT_function(C, NPzvalue, uTimes[i])
}
NPT_times<- rep(0,length(times))
for(i in 1:length(times)){
NPT_times[i]<- NPT_function(C, NPzvalue, times[i])
}
out_lower_upper<- data.frame(times, NPT_times, Lj,Uj)
colnames(out_lower_upper)<-c("times","NPMLE","Lower CL", "Upper CL")
out_cis<-list(ciTable_all=NULL, ciTable_times=out_lower_upper)
return(out_cis)
}
if (toupper(type_CSCI)=="VALID"){
return(CSCI_Valid(C,D,times,Confidence.level))
} else if (toupper(type_CSCI)=="ABA"){
return(CSCI_ABA(C,D,times,Confidence.level))
} else if (toupper(type_CSCI)=="LIKELIHOOD"){
return(CSCI_Likelihood(C,D,times,Confidence.level))
}
}
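# Control parameters for CSCI(): `power` sets the n^power neighbor count of the
# VALID method, `quan_p`/`xp_hat` give the confidence levels and critical
# values from Table 2 of Banerjee and Wellner (2001), and `intF` the grid size.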
controlCSCI<-function(power=2/3,
quan_p=c(.25,.50,.75,.80,.85,.90,.95,.99),
xp_hat=c(.06402, .28506, .80694, .98729, 1.22756, 1.60246, 2.26916, 3.83630),
intF=1000){
if (power<0 | power>1) stop("power must be in (0,1)")
if (any(quan_p<0 | quan_p>1)) stop("quan_p must be in (0,1)")
if (intF<3) stop("intF<3")
list(power=power, quan_p=quan_p, xp_hat=xp_hat,intF=intF)
} |
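# Two-sample z-test for a difference of proportions (Spanish-language teaching
# package): data are supplied as 0/1 columns or entered interactively; returns
# the test statistic, p-value, and acceptance region, plus an optional ggplot
# of the acceptance/rejection regions.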
contraste.diferencia.proporciones <- function(x,
variable = NULL,
introducir = FALSE,
hipotesis_nula = 0,
tipo_contraste = c("bilateral","cola derecha","cola izquierda"),
alfa = 0.05,
grafico = FALSE){
tipo_contraste <- tolower(tipo_contraste)
tipo_contraste <- match.arg(tipo_contraste)
if(alfa >= 0 & alfa <=1){
if(tipo_contraste == "bilateral"){
valor_critico <- qnorm(alfa/2,lower.tail = F)
}
if(tipo_contraste == "cola izquierda"){
valor_critico <- qnorm(alfa,lower.tail = T)
}
if(tipo_contraste == "cola derecha"){
valor_critico <- qnorm(alfa,lower.tail = F)
}
valor_critico <- round(valor_critico,4)
} else{
stop("El nivel de significacion debe fijarse entre 0 y 1")
}
if(hipotesis_nula >= 0 & hipotesis_nula <=1){
H0 <- hipotesis_nula
}else{
stop("La hip\u00f3tesis nula es una proporcion y por tanto tiene que fijarse entre 0 y 1")
}
if(isFALSE(introducir)) {
x <- data.frame(x)
varnames <- names(x)
if(is.null(variable)){
if(length(x) == 2){
x <- x
} else{
warning("Para realizar el contraste hay que seleccionar dos variables")
stop("El conjunto de datos seleccionado tiene mas de 2 variables.")
}
} else if(length(variable) == 2){
if(is.numeric(variable)){
if(all(variable <= length(x))){
variable <- variable
} else{
stop("Selecci\u00f3n err\u00f3nea de variables")
}
}
if(is.character(variable)){
if(all(variable %in% varnames)){
variable = match(variable,varnames)
} else {
stop("El nombre de la variable no es v\u00e1lido")
}
}
x <- x[,variable] %>% as.data.frame()
names(x) <- varnames[variable]
} else{
warning("Para realizar el contraste hay que seleccionar dos variables")
stop("El conjunto de datos seleccionado no es adecuado.")
}
clase <- sapply(x, class)
if (!all(clase %in% c("numeric","integer"))) {
stop("No puede calcularse el contraste porque las variables seleccionadas no son cuantitativas")
}
x1 <- na.omit(x[1])
x2 <- na.omit(x[2])
if(!all((x1 == 0) | x1 ==1)){
print("Aplica a tus datos la condici\u00f3n que debe cumplir la poblaci\u00f3n para transfomar los datos en ceros (ausencia/no \u00e9xito) y unos (presencia/\u00e9xito)")
stop("Los valores en la muestra deben ser 0 y 1.")
}
if(!all((x2 == 0) | x2 ==1)){
print("Aplica a tus datos la condici\u00f3n que debe cumplir la poblaci\u00f3n para transfomar los datos en ceros (ausencia/no \u00e9xito) y unos (presencia/\u00e9xito)")
stop("Los valores en la muestra deben ser 0 y 1.")
}
n1 <- nrow(x1)
n2 <- nrow(x2)
p_mu1 <- round(sum(x1)/n1,6)
p_mu2 <- round(sum(x2)/n2,6)
} else{
print("Primero vas a introducir los datos de la muestra 1 y a continuaci\u00f3n introducir\u00e1s los datos de la muestra 2")
print("Si los datos provienen de encuestas realizadas antes y despu\u00e9s de una determinada acci\u00f3n, introduce primero los datos de la encuesta realizada despu\u00e9s de dicha acci\u00f3n")
n1 <- readline(prompt = "Introducir el tama\u00f1o de la muestra 1: ")
n1 <- as.numeric(n1)
p_mu1 <- readline(prompt = "Introducir el valor de la proporcion muestral 1: ")
p_mu1 <- as.numeric(p_mu1)
n2 <- readline(prompt = "Introducir el tama\u00f1o de la muestra 2: ")
n2 <- as.numeric(n2)
p_mu2 <- readline(prompt = "Introducir el valor de la proporcion muestral 2: ")
p_mu2 <- as.numeric(p_mu2)
}
est_proporcion <- as.numeric(readline('Selecciona el valor que quieres utilizar para el error t\u00edpico bajo la H0: \n 1. "Estimar p como media ponderada de las proporciones muestrales" \n 2. "Utilizar las proporciones muestrales" \n'))
dif_p <- p_mu1 - p_mu2
if(est_proporcion == 1){
est_p <- ((n1*p_mu1) + (n2*p_mu2))/(n1+n2)
error_tipico <- sqrt(((n1+n2)/(n1*n2)) * est_p * (1-est_p))
} else{
error_tipico <- sqrt((p_mu1 * (1-p_mu1))/n1 + (p_mu2 * (1-p_mu2))/n2)
}
estadistico.Z <- (dif_p - H0)/error_tipico
estadistico.Z <- round(estadistico.Z,5)
if(tipo_contraste == "bilateral"){
estadistico.Z2 <- abs(estadistico.Z)
pvalor <- 2*pnorm(estadistico.Z2,lower.tail=FALSE)
media_inf <- H0 - valor_critico * error_tipico
media_sup <- H0 + valor_critico * error_tipico
if(estadistico.Z >= -valor_critico & estadistico.Z <= valor_critico){
print(paste("No se rechaza la hip\u00f3tesis nula. La regi\u00f3n de aceptaci\u00f3n viene dada por el intervalo [", -valor_critico," , ",valor_critico,"]",sep=""))
print("El valor del estad\u00edstico de prueba (o valor experimental) se encuentra dentro de la regi\u00f3n de aceptaci\u00f3n")
} else{
print(paste("Se rechaza la hip\u00f3tesis nula. La regi\u00f3n de aceptaci\u00f3n viene dada por el intervalo [", -valor_critico," , ",valor_critico,"]",sep=""))
print("El valor del estad\u00edstico de prueba (o valor experimental) no se encuentra dentro de la regi\u00f3n de aceptaci\u00f3n")
}
if(isTRUE(grafico)){
plot <- ggplot(NULL, aes(c(-4,4))) +
geom_area(stat = "function", fun = dnorm, fill = "red", xlim = c(-4, -valor_critico)) +
geom_area(stat = "function", fun = dnorm, fill = "darkgreen", xlim = c(-valor_critico, valor_critico)) +
geom_area(stat = "function", fun = dnorm, fill = "red", xlim = c(valor_critico, 4)) +
geom_vline(xintercept = -estadistico.Z2, linetype = "dashed") +
geom_vline(xintercept = estadistico.Z2, linetype = "dashed") +
labs(x = "", y = "",title="Regi\u00f3n de aceptaci\u00f3n-rechazo para\nla diferencia de proporciones") +
scale_y_continuous(breaks = NULL) +
scale_x_continuous(breaks = c(estadistico.Z2,-estadistico.Z2,-valor_critico,valor_critico)) +
theme(axis.text.x = element_text(angle = 45))
}
} else if(tipo_contraste == "cola derecha"){
media_inf <- -Inf
media_sup <- H0 + valor_critico * error_tipico
pvalor <- pnorm(estadistico.Z,lower.tail=FALSE)
if(estadistico.Z > valor_critico){
print(paste("Se rechaza la hip\u00f3tesis nula. La regi\u00f3n de aceptaci\u00f3n viene dada por el intervalo ]-Inf , ", valor_critico,"]",sep=""))
print("El valor del estad\u00edstico de prueba (o valor experimental) no se encuentra dentro de la regi\u00f3n de aceptaci\u00f3n")
} else{
print(paste("No se rechaza la hip\u00f3tesis nula. La regi\u00f3n de aceptaci\u00f3n viene dada por el intervalo ]-Inf , ", valor_critico,"]",sep=""))
print("El valor del estad\u00edstico de prueba (o valor experimental) se encuentra dentro de la regi\u00f3n de aceptaci\u00f3n")
}
if(isTRUE(grafico)){
plot <- ggplot(NULL, aes(c(-4,4))) +
geom_area(stat = "function", fun = dnorm, fill = "darkgreen", xlim = c(-4,valor_critico)) +
geom_area(stat = "function", fun = dnorm, fill = "red", xlim = c(valor_critico, 4)) +
geom_vline(xintercept = estadistico.Z, linetype = "dashed") +
labs(x = "", y = "",title="Regi\u00f3n de aceptaci\u00f3n-rechazo para\nla diferencia de proporciones") +
scale_y_continuous(breaks = NULL) +
scale_x_continuous(breaks = c(estadistico.Z,valor_critico)) +
theme(axis.text.x = element_text(angle = 45))
}
} else{
media_inf <- H0 + valor_critico * error_tipico
media_sup <- Inf
pvalor <- pnorm(estadistico.Z,lower.tail=TRUE)
if(estadistico.Z < valor_critico){
print(paste("Se rechaza la hip\u00f3tesis nula. La regi\u00f3n de aceptaci\u00f3n viene dada por el intervalo [ ",valor_critico," , inf[",sep=""))
print("El valor del estad\u00edstico de prueba (o valor experimental) no se encuentra dentro de la regi\u00f3n de aceptaci\u00f3n")
} else{
print(paste("No se rechaza la hip\u00f3tesis nula. La regi\u00f3n de aceptaci\u00f3n viene dada por el intervalo [ ",valor_critico," , inf[",sep=""))
print("El valor del estad\u00edstico de prueba (o valor experimental) se encuentra dentro de la regi\u00f3n de aceptaci\u00f3n")
}
if(isTRUE(grafico)){
plot <- ggplot(NULL, aes(c(-4,4))) +
geom_area(stat = "function", fun = dnorm, fill = "red", xlim = c(-4, -valor_critico)) +
geom_area(stat = "function", fun = dnorm, fill = "darkgreen", xlim = c(-valor_critico, 4)) +
geom_vline(xintercept = estadistico.Z, linetype = "dashed") +
labs(x = "", y = "",title="Regi\u00f3n de aceptaci\u00f3n-rechazo para\nla diferencia de proporciones") +
scale_y_continuous(breaks = NULL) +
scale_x_continuous(breaks = c(estadistico.Z,-valor_critico)) +
theme(axis.text.x = element_text(angle = 45))
}
}
CH <- cbind(H0,estadistico.Z,pvalor)
CH <- as.data.frame(CH)
names(CH) <- c("Hip\u00f3tesis nula", "estad\u00edstico de prueba", "p-valor")
row.names(CH) <- NULL
Idifpro <- cbind(`limite_inferior`=media_inf,`limite_superior`=media_sup)
if(grafico){
return(list(`Statistic`=CH,`Interval for the sample proportion (assuming H0 true)`= Idifpro,`Plots`= plot))
} else{
return(list(`Statistic`=CH,`Interval for the sample proportion (assuming H0 true)`= Idifpro))
}
} |
print.tidylda <- function(x, digits = max(3L, getOption("digits") - 3L), n = 5, ...) {
s <- x$summary
cat(
"A Latent Dirichlet Allocation Model of", nrow(x$beta), "topics,",
nrow(x$theta), "documents, and", ncol(x$beta), "tokens:\n"
)
print(x$call)
cat("\n")
if ("r2" %in% names(x)) {
cat("The model's R-squared is ", round(x$r2, digits = digits), "\n")
}
cat("The ", n, " most prevalent topics are:\n")
print(s[order(s$prevalence, decreasing = TRUE), ], n = n)
cat("\n")
cat("The ", n, " most coherent topics are:\n")
print(s[order(s$coherence, decreasing = TRUE), ], n = n)
invisible(x)
} |
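# Hedged usage sketch for the print method above. Assumes the tidylda package
# is installed and exports tidylda() and the nih_sample_dtm document-term
# matrix, as in its documentation; k and iterations are illustrative values.
if (requireNamespace("tidylda", quietly = TRUE)) {
  fit <- tidylda::tidylda(data = tidylda::nih_sample_dtm, k = 10, iterations = 100)
  print(fit, n = 3)
}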
rtableICC.RxC.engine <-
function(R,C,T,M,p,N,cluster.size,theta){
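# Simulates an RxC table of N individuals spread over M clusters under an
# intra-cluster correlation model: with probability theta[n], all n members
# of a cluster land in the same cell; otherwise cell memberships are
# independent draws with cell probabilities p. Returns the table, raw
# per-individual indicators, and cluster-concentration counts (g.t, g.tilde).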
if (max(cluster.size)>(length(theta))){
stop(c("Maximum number of individuals in one of the clusters is ", paste(max(cluster.size)),", which is greater than maximum allowed cluster size. (1) Re-run the function,
(2) increase maximum allowed cluster size by increasing the number of elements of theta,
(3) increase total number of clusters, or
(4) decrease total number of individuals!"))
}
rTable.raw=array(0,dim=c(R,C,N))
dat=array(0,dim=c(max(cluster.size),R,C,M))
rTable=array(0,dim=c(R,C))
g.t=array(0,dim=c(R,C,(T-1)))
g.tilde=array(0,dim=(T-1))
pp=as.vector(t(p))
bsl=1
say=1
selT=0
if (R==1){
p=t(as.matrix(p))
}else if (C==1){
p=as.matrix(p)
}
sumCounts=array(0,R*C)
for (r in 1:M){
if (cluster.size[r]>1){
counts=cbind(t(compositions(cluster.size[r],R*C,include.zero=TRUE)),0)
cl=ncol(counts)
for (j in 1:nrow(counts)){
if (sum(counts[j,]==0)==(R*C)){
for (i in 1:R){
for (k in 1:C){
if (counts[j,((i-1)*C+k)]>0){
# probability that all cluster members fall in the same cell (i,k):
# theta * p + (1 - theta) * p^n, with n the cluster size
counts[j,cl]=theta[cluster.size[r]]*p[i,k]+(1-theta[cluster.size[r]])*p[i,k]^cluster.size[r]
}
}
}
} else {
counts[j,cl]=(1-theta[cluster.size[r]])*prod(pp^counts[j,1:(R*C)])
}
}
counts[,cl]=counts[,cl]/sum(counts[,cl])
ind=rDiscrete(1,counts[,cl])$rDiscrete
sel=counts[ind,1:(R*C)]
sumCounts=sumCounts+sel
if (sum(sel==0)==((R*C)-1)){
for (i in 1:R){
for (k in 1:C){
if (sel[((i-1)*C+k)]>0){
g.t[i,k,(cluster.size[r]-1)]=g.t[i,k,(cluster.size[r]-1)]+1
rTable.raw[i,k,say:(say+sel[((i-1)*C+k)]-1)]=1
say=say+sel[((i-1)*C+k)]
}
}
}
} else {
g.tilde[(cluster.size[r]-1)]=g.tilde[(cluster.size[r]-1)]+1
for (i in 1:R){
for (k in 1:C){
if (sel[((i-1)*C+k)]>0){
rTable.raw[i,k,say:(say+sel[((i-1)*C+k)]-1)]=1
say=say+sel[((i-1)*C+k)]
}
}
}
}
} else if (cluster.size[r]==1){
ind=rDiscrete(1,pp)$rDiscrete
sumCounts[ind]=sumCounts[ind]+1
for (i in 1:R){
for (k in 1:C){
if (ind==((i-1)*C+k)){
rTable.raw[i,k,say]=1
say=say+1
}
}
}
}
}
rTable=sumCounts
list(rTable=rTable,rTable.raw=rTable.raw,g.t=g.t,g.tilde=g.tilde)
} |
library(gapmap)
data("sample_tcga")
library(RColorBrewer)
RdBu = rev(brewer.pal(11, name="RdBu"))
RdYlBu = rev(brewer.pal(11, name="RdYlBu"))
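# Cluster rows and columns of the transposed TCGA sample matrix with
# complete linkage on Pearson-correlation distances (1 - r)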
dataTable <- t(sample_tcga)
row_dist <- as.dist(1-cor(t(dataTable), method = "pearson"))
col_dist <- as.dist(1-cor(dataTable, method = "pearson"))
col_hc <- hclust(col_dist, method = "complete")
row_hc <- hclust(row_dist, method = "complete")
col_d <- as.dendrogram(col_hc)
row_d <- as.dendrogram(row_hc)
gapmap(m = as.matrix(dataTable), d_row = rev(row_d), d_col = col_d, ratio = 0, verbose=FALSE, col=RdBu,
label_size=2, v_ratio= c(0.1,0.8,0.1), h_ratio=c(0.1,0.8,0.1))
gapmap(m = as.matrix(dataTable), d_row = rev(row_d), d_col = col_d, mode = "quantitative", mapping="exponential", col=RdBu,
ratio = 0.3, verbose=FALSE, scale = 0.5, label_size=2, v_ratio= c(0.1,0.8,0.1), h_ratio=c(0.1,0.8,0.1))
gapmap(m = as.matrix(dataTable), d_row = rev(row_d), d_col = col_d, mode = "quantitative", mapping="exponential", col=RdYlBu,
ratio = 0.3, verbose=FALSE, scale = 0.5, label_size=2, v_ratio= c(0.1,0.8,0.1), h_ratio=c(0.1,0.8,0.1))
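# dendsort() reorders dendrogram branches (here by average distance) before
# drawing the gapped heatmap with a legend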
library(dendsort)
gapmap(m = as.matrix(dataTable), d_row = rev(dendsort(row_d, type = "average")), d_col = dendsort(col_d, type = "average"),
mode = "quantitative", mapping="exponential", ratio = 0.3, verbose=FALSE, scale = 0.5, v_ratio= c(0.1,0.8,0.1),
h_ratio=c(0.1,0.8,0.1), label_size=2, show_legend=TRUE, col=RdBu)
row_data <- gap_data(d= row_d, mode = "quantitative", mapping="exponential", ratio=0.3, scale= 0.5)
dend <- gap_dendrogram(data = row_data, leaf_labels = TRUE, rotate_label = TRUE)
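# note: axis.ticks.margin is deprecated in recent ggplot2; newer code sets
# tick label margins via element_text(margin = ...) on axis.text instead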
dend + theme(axis.ticks.length= grid::unit(0,"lines") )+ theme(axis.ticks.margin = grid::unit(-0.8, "lines"))
row_data <- gap_data(d= dendsort(row_d, type = "average"), mode = "quantitative", mapping="exponential", ratio=0.3, scale= 0.5)
dend <- gap_dendrogram(data = row_data, leaf_labels = TRUE, rotate_label = TRUE)
dend + theme(axis.ticks.length= grid::unit(0,"lines") )+ theme(axis.ticks.margin = grid::unit(-0.8, "lines")) |
umap.small = function(d, config) {
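# Fallback layout for inputs too small for the UMAP algorithm proper:
# returns a zero-filled embedding, placing the two rows at +5 and -5 in
# every component when the input has exactly two rows.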
warning("constructing layout for a very small input dataset", call.=FALSE)
embedding = matrix(0, ncol=config$n_components, nrow=nrow(d))
if (nrow(d)==2) {
embedding[1,] = 5
embedding[2,] = -5
}
rownames(embedding) = rownames(d)
list(layout=embedding, config=config)
} |
tam_pv_mcmc_likelihood_R <- function( probs, resp1, resp_ind_bool, nitems)
{
nstud <- nrow(resp1)
loglike <- rep(1,nstud)
probs_index_ii <- cbind( 1:nstud, 0 )
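# probs_index_ii is a two-column (person, response category) matrix used to
# extract each person's probability for item ii via matrix indexing; missing
# responses contribute a factor of 1. Despite its name, loglike accumulates
# the product likelihood, not its logarithm.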
for (ii in 1:nitems){
probs_ii <- probs[, ii, ]
probs_index_ii[,2] <- resp1[,ii]
incr <- ifelse( resp_ind_bool[,ii], probs_ii[ probs_index_ii ], 1 )
loglike <- loglike * incr
}
return(loglike)
} |
skip_if_srcs_missing(c("mimic_demo", "eicu_demo"))
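# Round-trip checks for the low-level loaders: load_src() must match the raw
# source table, load_difftime() must return id_tbl objects with times in
# minutes (adding the id column back if it was dropped from the selection),
# and load_id() must honor the requested time interval.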
test_that("load_src()", {
expect_fsetequal(
load_src("d_labitems", "mimic_demo"),
as_src_tbl("d_labitems", "mimic_demo")[]
)
expect_fsetequal(
load_src("hospital", "eicu_demo"),
as_src_tbl("hospital", "eicu_demo")[]
)
})
test_that("mimic load_difftime()", {
cols <- c("hadm_id", "charttime", "value")
alb1 <- load_difftime("labevents", "mimic_demo", is_val(itemid, 50862L),
cols)
expect_s3_class(alb1, "id_tbl")
expect_named(alb1, cols)
expect_s3_class(alb1[["charttime"]], "difftime")
expect_identical(units(alb1[["charttime"]]), "mins")
alb2 <- load_difftime("labevents", "mimic_demo", is_val(itemid, 50862L),
cols[-2L])
expect_fsetequal(alb1[, cols[-2L], with = FALSE], alb2)
alb2 <- load_difftime("labevents", "mimic_demo", is_val(itemid, 50862L),
cols[-1L])
expect_fsetequal(alb1, alb2)
expect_s3_class(alb2, "id_tbl")
expect_named(alb2, cols)
expect_identical(units(alb1[["charttime"]]), "mins")
expect_error(
load_difftime("labevents", "mimic_demo", is_val(itemid, 50862L),
c("icustay_id", "charttime", "value")),
class = "vctrs_error_subscript_oob"
)
})
test_that("eicu load_difftime()", {
cols <- c("patientunitstayid", "labresultoffset", "labresult")
alb1 <- load_difftime("lab", "eicu_demo", is_val(labname, "albumin"), cols)
expect_s3_class(alb1, "id_tbl")
expect_named(alb1, cols)
expect_s3_class(alb1[["labresultoffset"]], "difftime")
expect_identical(units(alb1[["labresultoffset"]]), "mins")
alb2 <- load_difftime("lab", "eicu_demo", is_val(labname, "albumin"),
cols[-2L])
expect_fsetequal(alb1[, c("patientunitstayid", "labresult"), with = FALSE],
alb2)
alb2 <- load_difftime("lab", "eicu_demo", is_val(labname, "albumin"),
cols[-1L])
expect_fsetequal(alb1, alb2)
expect_s3_class(alb2, "id_tbl")
expect_named(alb2, cols)
expect_identical(units(alb1[["labresultoffset"]]), "mins")
expect_error(
load_difftime("lab", "eicu_demo", is_val(labname, "albumin"),
c("patienthealthsystemstayid", "labresultoffset", "labresult")
),
class = "vctrs_error_subscript_oob"
)
})
test_that("mimic load_id()", {
cols <- c("charttime", "value")
alb1 <- load_id("labevents", "mimic_demo", is_val(itemid, 50862L), cols)
expect_s3_class(alb1, "id_tbl")
expect_identical(units(alb1[["charttime"]]), "hours")
alb2 <- load_id("labevents", "mimic_demo", is_val(itemid, 50862L), cols,
interval = mins(60L))
expect_identical(units(alb2[["charttime"]]), "mins")
units(alb2[["charttime"]]) <- "hours"
expect_fsetequal(alb1, alb2)
})
test_that("eicu load_id()", {
cols <- c("labresultoffset", "labresult")
alb1 <- load_id("lab", "eicu_demo", is_val(labname, "albumin"), cols)
expect_s3_class(alb1, "id_tbl")
expect_identical(units(alb1[["labresultoffset"]]), "hours")
alb2 <- load_id("lab", "eicu_demo", is_val(labname, "albumin"), cols,
interval = mins(60L))
expect_identical(units(alb2[["labresultoffset"]]), "mins")
units(alb2[["labresultoffset"]]) <- "hours"
expect_fsetequal(alb1, alb2)
}) |
as.data.frame.uncertainty <-
function(x, row.names = NULL, optional = FALSE, sort, ...)
{
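# Convert the object's summary() to a data frame, forwarding row.names,
# optional, and sort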
as.data.frame(summary(x), row.names=row.names, optional=optional,
sort=sort, ...)
} |
resampling_gauss <-
function(h, n, X)
{
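# Smoothed (Gaussian-kernel) bootstrap: resample X with replacement and add
# N(0, h^2) noise, i.e. draw n points from the kernel density estimate of X
# with bandwidth h; the resample is returned sorted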
ind = sample(n, size = n, replace = TRUE)
Z = rnorm(n, mean = 0, sd = 1)
XB = h*Z + X[ind]
XB = sort(XB)
return(XB)
} |
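# Usage sketch for resampling_gauss() as defined above; the data and
# bandwidth value are illustrative.
set.seed(42)
x <- rnorm(100)
xb <- resampling_gauss(h = 0.3, n = length(x), X = x)
head(xb)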