rename_sequences = function(fasta_files,
df = NULL,
filename = NULL,
marker_names = NULL,
out = NULL,
format = "fasta",
excel.sheet = 1,
unalign = FALSE,
exclude){
if (missing(fasta_files)) {
fasta_files <- find_fasta(dir = getwd(), exclude = exclude)
}
dir_name <- unique(sapply(fasta_files, dirname))
if (length(dir_name) > 1) {
stop("All fasta files should be located in the same directory.")
}
fasta_cols <- sapply(fasta_files, basename)
if (anyDuplicated(fasta_cols)) {
stop("Some files share the same name (",
fasta_cols[anyDuplicated(fasta_cols)], ").")
}
if (is.null(df) & is.null(filename)) {
stop("Either `filename` or `df` must be provided.")
}
if (!is.null(df) & !is.null(filename)) {
stop("Only one of `filename` or `df` must be provided, not both.")
}
if (!is.null(filename)) {
if(grepl(".txt$",filename)==TRUE){
df=read.table(filename,header=T,sep="\t",check.names=F)
} else if(grepl(".xlsx$",filename)==TRUE){
df=readxl::read_xlsx(path=filename, sheet=excel.sheet, col_names=TRUE)
colnames(df)=unlist(lapply(colnames(df),.colname.clean))
df=as.data.frame(apply(df,2,.clean.NA))
} else {
stop("Input file format not recognized. `filename` must end with \".txt\" or \".xlsx\".")
}
} else {
stopifnot(!is.null(df))
df <- as.data.frame(df)
}
df = df[,which(colnames(df)=="name"):ncol(df)]
l=list()
maxlen=0
for (i in 1:length(fasta_files)){
dataset=ape::read.FASTA(fasta_files[i])
a = sd(unlist(lapply(dataset, length)))
if (!is.na(a) && a != 0) {
cat("ATTENTION! In file ", fasta_files[i], " not all sequences have the same length\n")
}
l[[i]] = dataset
if(length(names(dataset))>maxlen){maxlen=length(names(dataset))}
}
names(l)=fasta_cols
alignments = l
lR=alignments[names(alignments) %in% colnames(df)[-1]]
new_names = get_genbank_table(df)
for (i in 2:ncol(new_names)){
new_names[,i] = paste0(unlist(new_names[,i]),"_",unlist(new_names[,1]),"_",rep(marker_names[i-1],length(unlist(new_names[,1]))))
new_names[,i] = unlist(lapply(new_names[,i],.clean.names))
}
for (j in 1:length(lR)){
align=names(lR[j])
lR[[j]]=.rename.seqs(lR[[j]],table=data.frame(name = new_names[,colnames(new_names)==align],df[,2:ncol(df)]),align=align)
lR[[j]]=lR[[j]][!is.na(names(lR[[j]]))]
if (unalign){lR[[j]] = ape::del.gaps(lR[[j]])}
}
for (i in 2:ncol(new_names)){
sequences_in_alignments = new_names[,i] %in% names(lR[[match(colnames(new_names)[i],names(lR))]])
new_names[!sequences_in_alignments,i] = ""
}
if(!is.null(out)){dir_name = out}
if(is.null(out)){dir_name = "renamed"}
base_dir_name = dir_name
N = 0
while (dir.exists(dir_name)) {
N = N+1
dir_name = paste0(base_dir_name,"_",N)
}
dir.create(dir_name)
for (i in 1:length(lR)){
original_alignment_name = names(lR)[i]
write.alignment(lR[[i]], name=paste0(dir_name, "/renamed_", stringr::str_remove(names(lR)[[i]], "\\.fas.*")), format=format)
colnames(new_names)[match(original_alignment_name, colnames(new_names))] = paste0("renamed_", stringr::str_remove(names(lR)[[i]], "\\.fas.*"), ".fasta")
}
writexl::write_xlsx(new_names,paste0(dir_name,"/renamed_correspondence_table.xlsx"))
}
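# Usage sketch (hypothetical call, not from the original source): the
# function depends on package-internal helpers referenced above
# (find_fasta, get_genbank_table, .rename.seqs, write.alignment).
# File and marker names are illustrative only:
# rename_sequences(fasta_files  = c("COI.fasta", "16S.fasta"),
#                  filename     = "voucher_table.xlsx",
#                  marker_names = c("COI", "16S"),
#                  out          = "renamed")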
curve_srvf_align <- function(beta, mode="O", rotated=T, scale = F, maxit=20, ms = "mean"){
if (mode=="C"){
isclosed = TRUE
}
tmp = dim(beta)
n = tmp[1]
T1 = tmp[2]
N = tmp[3]
out = curve_karcher_mean(beta, mode, rotated, scale, maxit, ms)
beta<-out$beta
mu = out$mu
betamean = out$betamean
v = out$v
q = out$q
qn = array(0, c(n,T1,N))
betan = array(0, c(n,T1,N))
for (ii in 1:N){
q1 = q[,,ii]
beta1 = beta[,,ii]
out = find_rotation_seed_unqiue(mu,q1,mode)  # sic: helper name is spelled this way in the source
beta1 = out$Rbest%*%beta1
beta1n = group_action_by_gamma_coord(beta1, out$gambest)
q1n = curve_to_q(beta1n)$q
out = find_best_rotation(mu, q1n)
qn[,,ii] = out$q2new
betan[,,ii] = out$R%*%beta1n
}
return(list(betan=betan, qn=qn, betamean=betamean, q_mu=mu))
}
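# Usage sketch, assuming the fdasrvf-style helpers called above
# (curve_karcher_mean, curve_to_q, group_action_by_gamma_coord, ...) are
# loaded; aligns N noisy copies of the unit circle in open mode:
# n <- 2; T1 <- 100; N <- 5
# tt <- seq(0, 2 * pi, length.out = T1)
# beta <- array(0, c(n, T1, N))
# for (k in seq_len(N)) {
#   beta[, , k] <- rbind(cos(tt), sin(tt)) + matrix(rnorm(n * T1, sd = 0.01), n, T1)
# }
# res <- curve_srvf_align(beta, mode = "O")
# str(res$betan)  # aligned curves, same n x T1 x N layout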
msummary(lm(Salary ~ Gender + PhD + Age, data = SalaryGender))
geom_balance <- function(node, fill="steelblue", color='white', alpha=.5, extend=0, extendto=NULL) {
data = NULL
stat = "balance"
position = "identity"
show.legend = NA
na.rm = TRUE
inherit.aes = FALSE
default_aes <- aes_(x=~x, y=~y, node=~node, parent=~parent, branch.length=~branch.length)
mapping <- default_aes
layer(
stat=StatBalance,
data = data,
mapping = mapping,
geom = GeomRect,
position = position,
show.legend=show.legend,
inherit.aes = inherit.aes,
params = list(node=node,
fill=fill,
color=color,
alpha=alpha,
extend=extend,
extendto=extendto,
na.rm = na.rm),
check.aes = FALSE
)
}
StatBalance <- ggproto("StatBalance", Stat,
compute_group = function(self, data, scales, params, node, extend, extendto) {
df <- get_balance_position(data, node)
df$xmax <- df$xmax + extend
if (!is.null(extendto) && !is.na(extendto)) {
if (extendto < df$xmax) {
warning("extendto is too small, keep the original xmax value...")
} else {
df$xmax <- extendto
}
}
return(df)
},
required_aes = c("x", "y", "branch.length")
)
get_balance_position <- function(data, node) {
purrr::map_df(c(1, 2), get_balance_position_, data=data, node=node)
}
get_balance_position_ <- function(data, node, direction) {
ch <- tryCatch(tidytree:::child.tbl_tree(data, node)$node, error=function(e) NULL)
if (length(ch) < 2 || is.null(ch)){
stop('balance cannot be a tip')
} else if (length(ch) > 2){
stop('balance has more than 2 direct child nodes; use ape::multi2di to convert to a binary tree')
}
i <- match(node, data$node)
sp <- tryCatch(offspring.tbl_tree(data, ch[direction])$node,error=function(e) ch[direction])
if (length(sp) == 0) {
sp <- ch[direction]
}
sp.all <- offspring.tbl_tree(data, i)$node
sp.df <- data[match(sp, data$node),]
sp.all.df <- data[match(sp.all, data$node),]
n.df <- data[i,]
x <- sp.all.df$x
y <- sp.df$y
if ("branch.length" %in% colnames(data)) {
xmin <- min(x)-data[i, "branch.length"]/2
} else {
xmin <- min(sp.df$branch)
}
data.frame(xmin=xmin,
xmax = max(x),
ymin=min(y)-0.5,
ymax=max(y)+0.5)
}
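# Usage sketch: the layer above assumes ggplot2 (layer, ggproto, aes_,
# GeomRect) and tidytree are attached; with ggtree it highlights a clade
# split at an internal node (node id illustrative):
# library(ggplot2); library(ggtree)
# tr <- ape::rtree(10)  # tips are nodes 1:10, so 12 is internal
# ggtree(tr) + geom_balance(node = 12, fill = "steelblue", alpha = 0.4)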
library(monocle)
library(HSMMSingleCell)
context("markerDiffTable is functioning properly")
pd <- new("AnnotatedDataFrame", data = HSMM_sample_sheet)
fd <- new("AnnotatedDataFrame", data = HSMM_gene_annotation)
HSMM <- newCellDataSet(as.matrix(HSMM_expr_matrix), phenoData = pd, featureData = fd)
HSMM <- newCellDataSet(as(umi_matrix, "sparseMatrix"),
phenoData = pd,
featureData = fd,
lowerDetectionLimit = 0.5,
expressionFamily = negbinomial.size())
cellranger_pipestance_path <- "/path/to/your/pipeline/output/directory"
gbm <- load_cellranger_matrix(cellranger_pipestance_path)
gbm_cds <- newCellDataSet(exprs(gbm),
phenoData = new("AnnotatedDataFrame", data = pData(gbm)),
phenoData = new("AnnotatedDataFrame", data = fData(gbm)),
lowerDetectionLimit = 0.5,
expressionFamily = negbinomial.size())
HSMM <- detectGenes(HSMM, min_expr = 0.1)
expressed_genes <- row.names(subset(fData(HSMM), num_cells_expressed >= 10))
valid_cells <- row.names(subset(pData(HSMM),
Cells.in.Well == 1 &
Control == FALSE &
Clump == FALSE &
Debris == FALSE &
Mapped.Fragments > 1000000))
HSMM <- HSMM[,valid_cells]
pData(HSMM)$Total_mRNAs <- Matrix::colSums(exprs(HSMM))
HSMM <- HSMM[,pData(HSMM)$Total_mRNAs < 1e6]
upper_bound <- 10^(mean(log10(pData(HSMM)$Total_mRNAs)) +
2*sd(log10(pData(HSMM)$Total_mRNAs)))
lower_bound <- 10^(mean(log10(pData(HSMM)$Total_mRNAs)) -
2*sd(log10(pData(HSMM)$Total_mRNAs)))
HSMM <- HSMM[,pData(HSMM)$Total_mRNAs > lower_bound &
pData(HSMM)$Total_mRNAs < upper_bound]
HSMM <- detectGenes(HSMM, min_expr = 0.1)
L <- log(exprs(HSMM[expressed_genes,]))
melted_dens_df <- melt(Matrix::t(scale(Matrix::t(L))))
qplot(value, geom = "density", data = melted_dens_df) +
stat_function(fun = dnorm, size = 0.5, color = 'red') +
xlab("Standardized log(FPKM)") +
ylab("Density")
MYF5_id <- row.names(subset(fData(HSMM), gene_short_name == "MYF5"))
ANPEP_id <- row.names(subset(fData(HSMM), gene_short_name == "ANPEP"))
cth <- newCellTypeHierarchy()
cth <- addCellType(cth, "Myoblast", classify_func = function(x) { x[MYF5_id,] >= 1 })
cth <- addCellType(cth, "Fibroblast", classify_func = function(x)
{ x[MYF5_id,] < 1 & x[ANPEP_id,] > 1 })
HSMM <- classifyCells(HSMM, cth, 0.1)
test_that("markerDiffTable works properly in vignette",
expect_error(markerDiffTable(HSMM[expressed_genes,],
cth,
residualModelFormulaStr = "~Media + num_genes_expressed",
cores = 1), NA))
test_that("markerDiffTable works when 'balanced set to true'",
expect_error(markerDiffTable(HSMM[expressed_genes,],
cth,
balanced = TRUE,
residualModelFormulaStr = "~Media + num_genes_expressed",
cores = 1), NA))
test_that("markerDiffTable works when 'verbose set to true'",
expect_error(markerDiffTable(HSMM[expressed_genes,],
cth,
verbose = TRUE,
residualModelFormulaStr = "~Media + num_genes_expressed",
cores = 1), NA))
test_that("markerDiffTable throws error if cds is not of type 'CellDataSet'",
expect_error(markerDiffTable(cth,
cth,
balanced = TRUE,
residualModelFormulaStr = "~Media + num_genes_expressed",
cores = 1), "Error cds is not of type 'CellDataSet'"))
test_that("markerDiffTable throws error if cds is not of type 'CellDataSet'",
expect_error(markerDiffTable(HSMM[expressed_genes,],
HSMM[expressed_genes],
balanced = TRUE,
residualModelFormulaStr = "~Media + num_genes_expressed",
cores = 1), "Error cth is not of type 'CellTypeHierarchy'")) |
source("build_lesson.R")
get_stage("before_install") %>%
add_code_step(install.packages("git2r")) %>%
add_code_step(update.packages(ask = FALSE))
get_stage("install") %>%
add_code_step(system("python -m pip install update-copyright")) %>%
add_code_step(remotes::install_deps(dependencies = TRUE)) %>%
add_step(step_install_github("fmichonneau/checker"))
get_stage("deploy") %>%
add_step(build_lesson()) %>%
add_step(check_links())
if (Sys.getenv("id_rsa") != "") {
get_stage("before_deploy") %>%
add_step(step_setup_ssh())
if (ci()$get_branch() == "main" || ci()$is_tag()) {
get_stage("deploy") %>%
add_step(step_push_deploy(path = "_site", branch = "gh-pages"))
}
if (ci()$get_branch() == "tidyverse-first") {
get_stage("deploy") %>%
add_step(step_push_deploy(path = "_site", branch = "development"))
}
}
longdat2 <- function(R){
N <- ncol(R)
D <- max(apply(R, 1, function(x) max(tabulate(x))))
X <- list()
for (d in seq_len(D)){
comb <- combn(seq_len(N), d)
A <- matrix(0, nrow = ncol(comb), ncol = N)
A[cbind(rep(seq_len(ncol(comb)), each = nrow(comb)), c(comb))] <- 1/d
B <- matrix(0, nrow = ncol(comb), ncol = D - 1)
if (ncol(B)) B[, d - 1] <- 1
X[[d]] <- cbind(A, B)
}
resX <- resY <- resZ <- list()
k <- 1
for (i in seq_len(nrow(R))){
J <- max(R[i,])
J <- J - (sum(R[i,] == J) == 1)
resX[[i]] <- resY[[i]] <- resZ[[i]] <- list()
for (j in seq_len(J)){
id <- which(R[i,] < j)
Xij <- list()
for (d in seq_len(min(D, N - length(id)))){
keep <- rowSums(X[[d]][, id, drop = FALSE]) == 0
Xij[[d]] <- X[[d]][keep,]
}
resX[[i]][[j]] <- do.call("rbind", Xij)
resZ[[i]][[j]] <- rep(k, nrow(resX[[i]][[j]]))
resY[[i]][[j]] <- numeric(nrow(resX[[i]][[j]]))
id <- colSums(t(resX[[i]][[j]][, seq_len(N)]) == (R[i,] == j)/
sum(R[i,] == j)) == N
resY[[i]][[j]][id] <- 1
k <- k + 1
}
}
res <- data.frame(y = unlist(resY))
res$X <- do.call("rbind", unlist(resX, recursive = FALSE))
res$z <- factor(unlist(resZ))
res
}
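# Worked example for longdat2(): rows are rankers, columns are items,
# rank 1 = best, ties allowed. Each ranking is expanded into choice sets
# (factor z), a design matrix X, and a 0/1 response y that marks the item
# (or tied set of items) chosen within each set.
R <- matrix(c(1, 2, 3,
              1, 1, 2,
              2, 1, 3), nrow = 3, byrow = TRUE)
long <- longdat2(R)
head(long)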
histbackback <-
function(x, y, brks = NULL, xlab = NULL, axes = TRUE, probability = FALSE,
xlim = NULL, ylab='', ...)
{
if(length(xlab))
xlab <- rep(xlab, length = 2)
if(is.list(x))
{
namx <- names(x)
y <- x[[2]]
if(!length(xlab))
{
if(length(namx))
xlab <- namx[1:2]
else
{
xlab <- deparse(substitute(x))
xlab <- paste(xlab, c("x", "y"), sep = "$")
}
}
x <- x[[1]]
}
else if(!length(xlab))
xlab <- c(deparse(substitute(x)), deparse(substitute(y)))
if(!length(brks))
brks <- hist(c(x, y), plot = FALSE)$breaks
ll <- hist(x, breaks = brks, plot = FALSE)
rr <- hist(y, breaks = brks, plot = FALSE)
if(probability)
{
ll$counts <- ll$density
rr$counts <- rr$density
}
if(length(xlim) == 2)
xl <- xlim
else
{
xl <- pretty(range(c( - ll$counts, rr$counts)))
xl <- c(xl[1], xl[length(xl)])
}
if(length(ll$counts) > 0)
{
barplot(-ll$counts, xlim=xl, space=0,
horiz=TRUE, axes=FALSE, col=0, ...)
par(new = TRUE)
}
if(length(rr$counts) > 0)
barplot(rr$counts, xlim=xl, space=0,
horiz=TRUE, axes=FALSE, col=0, ...)
if(axes)
{
mgp.axis(1, at=pretty(xl), labels=format(abs(pretty(xl))))
del <- (brks[2]-brks[1] - (brks[3]-brks[2]))/2
brks[1] <- brks[1] + del
brks[-1] <- brks[-1] - del
at <- 0 : (length(brks) - 1)
pb <- pretty(brks)
atpb <- approxExtrap(brks, at, xout=pb)$y
mgp.axis(2, at=atpb, labels=format(pb))
title(xlab = xlab[1], adj = (-0.5 * xl[1])/( - xl[1] + xl[2]))
title(xlab = xlab[2], adj = (-xl[1] + 0.5 * xl[2])/(-xl[1] + xl[2]))
if(ylab!='') title(ylab=ylab)
}
abline(v = 0)
box()
invisible(list(left = ll$counts, right = rr$counts, breaks = brks))
}
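# Usage sketch: histbackback() relies on Hmisc internals referenced above
# (mgp.axis, approxExtrap), so attach Hmisc first. Draws two samples back
# to back on shared breaks:
# library(Hmisc)
# set.seed(1)
# out <- histbackback(rnorm(200), rnorm(200, mean = 0.5),
#                     probability = TRUE, xlab = c("control", "treated"))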
microaggregation <- function(obj, variables=NULL, aggr=3, strata_variables=NULL,
method="mdav", weights=NULL, nc=8, clustermethod="clara",
measure="mean", trim=0, varsort=1, transf="log") {
if (!is.data.frame(obj) & !is.null(strata_variables)) {
message("Argument 'strata_variables' is ignored. Only variables specified in slot 'strataVar' (if any) of the input object are used!\n")
}
microaggregationX(obj=obj, variables=variables, aggr=aggr, strata_variables=strata_variables,
method=method, weights=weights, nc=nc, clustermethod=clustermethod,
measure=measure, trim=trim, varsort=varsort, transf=transf)
}
setGeneric("microaggregationX", function(obj, variables=NULL, aggr=3, strata_variables=NULL,
method="mdav", weights=NULL, nc=8, clustermethod="clara",
measure="mean", trim=0, varsort=1, transf="log") {
standardGeneric("microaggregationX")
})
setMethod(f="microaggregationX", signature=c("sdcMicroObj"), definition=function(obj,
variables=NULL, aggr=3, method="mdav", nc=8, clustermethod="clara",
measure="mean", trim=0, varsort=1, transf="log") {
x <- get.sdcMicroObj(obj, type="manipNumVars")
if (is.null(variables)) {
variables <- colnames(x)
}
strataVars <- get.sdcMicroObj(obj, type="strataVar")
if (length(strataVars) > 0) {
sx <- get.sdcMicroObj(obj, type="origData")[, strataVars, drop=FALSE]
x <- cbind(x, sx)
strataVars <- utils::tail(colnames(x), 1)
}
weights <- get.sdcMicroObj(obj, type="weightVar")
if (!is.null(weights)) {
weights <- get.sdcMicroObj(obj, type="origData")[, weights]
}
if (any(weights < 0)) {
warnMsg <- "negative weights have been detected!\n"
obj <- addWarning(obj, warnMsg=warnMsg, method="microaggregation", variable=NA)
}
res <- microaggregationWORK(x, variables=variables, aggr=aggr, strata_variables=strataVars,
method=method, weights=weights, nc=nc, clustermethod=clustermethod,
measure=measure, trim=trim, varsort=varsort, transf=transf)
obj <- nextSdcObj(obj)
x[, variables] <- res$mx[, variables]
obj <- set.sdcMicroObj(obj, type="manipNumVars", input=list(as.data.frame(x[,
colnames(obj@origData)[obj@numVars], drop=FALSE])))
obj <- dRisk(obj)
obj <- dUtility(obj)
obj
})
setMethod(f="microaggregationX", signature=c("data.frame"), definition=function(obj,
variables=NULL, aggr=3, strata_variables=NULL, method="mdav", weights=NULL,
nc=8, clustermethod="clara", measure="mean", trim=0, varsort=1,
transf="log") {
if (is.null(variables)) {
variables <- colnames(obj)
}
microaggregationWORK(x=obj, variables=variables, aggr=aggr, strata_variables=strata_variables,
method=method, weights=weights, nc=nc, clustermethod=clustermethod,
measure=measure, trim=trim, varsort=varsort, transf=transf)
})
microaggregationWORK <- function(x, variables=colnames(x), method="mdav", aggr=3,
weights=NULL, nc=8, clustermethod="clara", measure="mean",
trim=0, varsort=1, transf="log", strata_variables=NULL) {
factorOfTotals <- function(x, aggr) {
n <- dim(x)[1]
abgerundet <- floor(n/aggr)
fot <- n/abgerundet
return(fot)
}
weightedQuantile <- function(x, weights=NULL, probs=seq(0, 1, 0.25), sorted=FALSE, na.rm=FALSE) {
if (!is.numeric(x)) {
stop("'x' must be a numeric vector")
}
n <- length(x)
if (n == 0 || (!isTRUE(na.rm) && any(is.na(x)))) {
return(rep.int(NA, length(probs)))
}
if (!is.null(weights)) {
if (!is.numeric(weights)) {
stop("'weights' must be a numeric vector")
} else if (length(weights) != n) {
stop("'weights' must have the same length as 'x'")
} else if (!all(is.finite(weights))) {
stop("missing or infinite weights")
}
if (any(weights < 0)) {
warnMsg <- "negative weights have been detected!\n"
warning(warnMsg)
}
if (!is.numeric(probs) || all(is.na(probs)) || isTRUE(any(probs < 0 | probs > 1))) {
stop("'probs' must be a numeric vector with values in [0,1]")
}
if (all(weights == 0)) {
warnMsg <- "all weights equal 0!\n"
warning(warnMsg)
return(rep.int(0, length(probs)))
}
}
if (isTRUE(na.rm)) {
indices <- !is.na(x)
x <- x[indices]
if (!is.null(weights))
weights <- weights[indices]
n <- length(x)  # refresh after dropping NAs so x[n] and (1:n)/n stay valid
}
if (!isTRUE(sorted)) {
order <- order(x)
x <- x[order]
weights <- weights[order]
}
if (is.null(weights)) {
rw <- (1:n)/n
} else {
rw <- cumsum(weights)/sum(weights)
}
q <- sapply(probs, function(p) {
if (p == 0) return(x[1])
if (p == 1) return(x[n])
select <- min(which(rw >= p))
if (rw[select] == p) mean(x[select:(select + 1)]) else x[select]
})
return(unname(q))
}
weightedMedian <- function(x, weights=NULL, sorted=FALSE, na.rm=FALSE) {
weightedQuantile(x, weights, probs=0.5, sorted=sorted, na.rm=na.rm)
}
indexMicro <- function(x, aggr) {
n <- dim(x)[1]
if (n < 2 * aggr) {
stop(paste0("Too less observations (", n, ") for aggregate =", aggr,"\n"))
}
aa <- seq(1, n, aggr)
j <- 1
teiler <- n/aggr
d1 <- 1:n
index <- list()
if (teiler %in% 1:n) {
for (i in 1:length(aa)) {
index[[i]] <- d1[j:(j + aggr - 1)]
j <- j + aggr
}
} else {
for (i in 1:(length(aa) - 2)) {
index[[i]] <- d1[j:(j + aggr - 1)]
j <- j + aggr
}
index[[i + 1]] <- d1[(j):n]
}
index
}
means <- function(x, index, measure, trim=0) {
m <- matrix(ncol=ncol(x), nrow=length(index))
if (measure == "mean" & is.null(weights)) {
for (i in 1:length(index)) {
m[i, ] <- colMeans(x[index[[i]], ])
}
}
if (measure == "median" & is.null(weights)) {
for (i in 1:length(index)) {
m[i, ] <- apply(x[index[[i]], ], 2, median)
}
}
if (measure == "mean" & !is.null(weights)) {
for (i in 1:length(index)) {
m[i, ] <- apply(x[index[[i]], ], 2, function(x) stats::weighted.mean(x,
w=weights[index[[i]]]))
}
}
if (measure == "median" & !is.null(weights)) {
for (i in 1:length(index)) {
m[i, ] <- apply(x[index[[i]], ], 2, function(x) weightedMedian(x,
weights=weights[index[[i]]]))
}
}
if (measure == "trim") {
for (i in 1:length(index)) {
for (j in 1:length(index[[i]])) {
m[i, ] <- apply(x[index[[i]], ], 2, mean, trim=trim)
}
}
}
if (measure == "onestep") {
y <- x
constant <- 3/1.486
for (i in 1:length(index)) {
m1 <- apply(x[index[[i]], ], 2, median)
m2 <- apply(x[index[[i]], ], 2, mad)
limit1 <- m1 + constant * m2
limit2 <- m1 - constant * m2
for (ii in 1:length(index[[i]])) {
if (any(x[index[[i]][ii], ] > limit1)) {
w <- which(x[index[[i]][ii], ] > limit1)
le <- length(w)
y[index[[i]][ii], w] <- limit1[w]
}
if (any(x[index[[i]][ii], ] < limit2)) {
w <- which(x[index[[i]][ii], ] < limit2)
le <- length(w)
y[index[[i]][ii], w] <- limit2[w]
}
m[i, ] <- colMeans(y[index[[i]], ])
}
}
}
colnames(m) <- colnames(x)
return(m)
}
blowup <- function(x, mr, aggr) {
n <- dim(x)[1]
aa <- seq(1, n, aggr)
j <- 1
teiler <- n/aggr
d1 <- 1:n
xx <- matrix(0, ncol=ncol(x), nrow=nrow(x))
if (teiler %in% 1:n) {
for (i in 1:length(aa)) {
for (s in j:(j + aggr - 1)) {
xx[s, ] <- as.matrix(mr[i, , drop=FALSE])
}
j <- j + aggr
}
} else {
for (i in 1:(length(aa) - 2)) {
for (s in j:(j + aggr - 1)) {
xx[s, ] <- as.matrix(mr[i, , drop=FALSE])
}
j <- j + aggr
}
for (s in j:n) {
xx[s, ] <- mr[i + 1, ]
}
}
rownames(xx) <- rownames(x)
xx
}
clust <- function(x, nc, clustermethod="clara", transf="log") {
if (transf == "none") {
y <- x
}
if (transf == "log") {
y <- scale(log(x))
}
if (transf == "boxcox") {
lambda <- car::powerTransform(x)$lambda
y <- scale(car::bcPower(x, lambda))
}
if (clustermethod == "clara") {
a <- clara(x, nc)
clustresult <- a$clust
centers <- a$med
size <- a$clusinfo[, 1]
}
if (clustermethod == "pam") {
a <- pam(x, nc)
clustresult <- a$clust
centers <- a$med
size <- a$clusinfo[, 1]
}
if (clustermethod == "kmeans") {
a <- stats::kmeans(x, nc)
centers <- a$centers
clustresult <- a$cluster
size <- a$size
}
if (clustermethod == "cmeans") {
a <- e1071::cmeans(x, nc)
centers <- a$centers
clustresult <- a$cluster
size <- a$size
# note: the fuzzy memberships (a$membership) are not propagated to the result
}
if (clustermethod == "bclust") {
a <- e1071::bclust(x, nc)
centers <- a$centers
groesse <- rep(0, nc)
for (i in seq(nc)) {
groesse[i] <- length(which(a$cluster == i))
}
size <- groesse
clustresult <- a$cluster
}
list(centers=centers, clustresult=clustresult, nc=nc)
}
prcompRob <- function(X, k=0, sca="mad", scores=TRUE) {
n <- nrow(X)
p <- ncol(X)
if (k == 0) {
p1 <- min(n, p)
} else {
p1 <- k
}
S <- rep(1, p1)
V <- matrix(1:(p * p1), ncol=p1, nrow=p)
P <- diag(p)
m <- apply(X, 2, median)
Xcentr <- scale(X, center=m, scale=FALSE)
for (k in 1:p1) {
B <- Xcentr %*% P
Bnorm <- sqrt(apply(B^2, 1, sum))
A <- diag(1/Bnorm) %*% B
Y <- A %*% P %*% t(X)
if (sca == "mad")
s <- apply(Y, 1, mad)
j <- order(s)[n]
S[k] <- s[j]
V[, k] <- A[j, ]
if (V[1, k] < 0)
V[, k] <- (-1) * V[, k]
P <- P - (V[, k] %*% t(V[, k]))
}
if (scores) {
list(scale=S, loadings=V, scores=Xcentr %*% V)
} else list(scale=S, loadings=V)
}
micro_simple <- function(x, aggr, measure, trim) {
index <- indexMicro(x, aggr)
m <- means(x=x, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
return(list(x=x, method="simple", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_single <- function(x, aggr, measure, trim, varsort) {
sortvec <- sort(x[, varsort], index.return=TRUE)$ix
xx <- x[sortvec, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="single", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=varsort, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_onedims <- function(x, aggr, measure, trim) {
i <- dim(x)[2]
xx <- sapply(1:i, function(i) {
x[order(x[, i]), i]
})
xxx <- sapply(1:i, function(i) {
rank(x[, i], ties.method="first")
})
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
b <- blowup(x, m, aggr)
y <- x
for (i in 1:dim(x)[2]) {
y[, i] <- b[xxx[, i], i]
}
return(list(x=x, method="onedims", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=y, fot=0))
}
micro_pca <- function(x, aggr, measure, trim) {
p <- stats::princomp(scale(x))
s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
xx <- x[s1, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="pca", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_mcdpca <- function(x, aggr, measure, trim) {
x.mcd <- cov.mcd(x, cor=TRUE)
x.scale <- scale(x, x.mcd$center, sqrt(diag(x.mcd$cor)))
p <- stats::princomp(x.scale, covmat=x.mcd)
s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
xx <- x[s1, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="mcdpca", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_pppca <- function(x, aggr, measure, trim) {
p <- prcompRob(x)
s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
xx <- x[s1, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="pppca", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_influence <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
j <- matrix(ncol=1, nrow=nc)
vmax <- matrix(ncol=1, nrow=nc)
for (i in 1:nc) {
j[i, ] <- max(cent[, i])
vmax[i, ] <- which(cent[, i] == j[i, ])
}
ncols <- c(1:ncol(x))
xx <- list()
for (i in 1:nc) {
w <- which(ac.scale$clustresult == i)
s <- x[w, , drop=FALSE]
xx[[i]] <- s[order(s[, vmax[i]]), ]
}
yy <- NULL
for (i in 1:nc) {
yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
colnames(xx[[i]]))))
}
xx <- yy
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(yy)
return(list(x=x, method="influence", clustering=TRUE, aggr=aggr,
nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
blowxm=blowxm, fot=0))
}
micro_clustpca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
xx <- list()
for (i in 1:nc) {
w <- which(ac.scale$clustresult == i)
if (length(w) < dim(x)[2]) {
y <- x[w, , drop=FALSE]
xx[[i]] <- y[order(y[, varsort]), ]
} else {
p <- stats::princomp(scale(x[w, , drop=FALSE]))$scores[, 1]
psortind <- sort(p, index.return=TRUE)$ix
y <- x[w, , drop=FALSE]
xx[[i]] <- y[psortind, ]
}
}
yy <- NULL
for (i in 1:nc) {
yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
colnames(xx[[i]]))))
}
xx <- yy
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="clustpca", clustering=TRUE, aggr=aggr, nc=ac.scale$nc,
xm=m, roundxm=mr, clustermethod=clustermethod, measure=measure,
trim=trim, varsort=NULL, transf=transf, blowup=TRUE, blowxm=blowxm,
fot=0))
}
micro_clustmcdpca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
xx <- list()
for (i in 1:nc) {
w <- which(ac.scale$clustresult == i)
if (length(w) < dim(x)[2]) {
y <- x[w, , drop=FALSE]
xx[[i]] <- y[order(y[, varsort]), ]
} else {
message("length(w):", length(w), "\n")
x.mcd <- cov.mcd(x[w, ], cor=TRUE)
x.scale <- scale(x[w, ], x.mcd$center, sqrt(diag(x.mcd$cor)))
p <- stats::princomp(x.scale, covmat=x.mcd)$scores[, 1]
psortind <- sort(p, index.return=TRUE)$ix
y <- x[w, , drop=FALSE]
xx[[i]] <- y[psortind, ]
}
}
yy <- NULL
for (i in 1:nc) {
yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
colnames(xx[[i]]))))
}
xx <- yy
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="clustmcdpca", clustering=TRUE, aggr=aggr,
nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
blowxm=blowxm, fot=0))
}
micro_clustpppca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
xx <- list()
for (i in 1:nc) {
w <- which(ac.scale$clustresult == i)
if (length(w) < dim(x)[2]) {
y <- x[w, , drop=FALSE]
xx[[i]] <- y[order(y[, varsort]), ]
} else {
p <- prcompRob(x[w, , drop=FALSE], 1)$scores
psortind <- sort(p, index.return=TRUE)$ix
y <- x[w, , drop=FALSE]
xx[[i]] <- y[psortind, ]
}
}
yy <- NULL
for (i in 1:nc) {
yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
colnames(xx[[i]]))))
}
xx <- yy
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="clustpppca", clustering=TRUE, aggr=aggr,
nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
blowxm=blowxm, fot=0))
}
micro_rmd <- function(x, aggr) {
kn <- function(ds, aggr) {
w <- rep(0, aggr)
for (i in 1:aggr) {
w[i] <- which.min(ds)
ds[w[i]] <- NA
}
return(w)
}
y <- x
cm <- colMeans(x, na.rm=TRUE)
csd <- apply(x, 2, sd, na.rm=TRUE)
len <- nrow(y)
y <- apply(y, 2, function(x) (x - mean(x, na.rm=TRUE))/sd(x, na.rm=TRUE))
d <- as.matrix(stats::dist(y))
set.seed(123)
rr <- covMcd(y)
md <- stats::mahalanobis(y, center=rr$center, cov=rr$cov)
diag(d) <- 0
for (i in 1:(floor(dim(x)[1]/aggr) - 1)) {
s <- which.max(md)
w <- kn(d[, s], aggr)
d[w, ] <- NA
md[w] <- NA
y[w, ] <- rep(colMeans(y[w, ]), each=aggr)
}
w <- which(!is.na(d[, 1]))
y[w, ] <- rep(colMeans(y[w, ]), each=length(w))
for (i in 1:dim(x)[2]) {
y[, i] <- as.numeric((y[, i] * csd[i]) + cm[i])
}
return(list(x=x, method="rmd", clustering=FALSE, aggr=aggr, nc=NULL,
xm=y, roundxm=round(y), clustermethod=NULL, measure=NULL, trim=NULL,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=y, fot=0))
}
micro_mdav <- function(x, aggr) {
resX <- mdav(x, variables=NULL, weights=NULL, K=aggr, missing=-999)
return(list(x=x, method="mdav", clustering=FALSE, aggr=aggr, nc=NULL,
xm=NULL, roundxm=NULL, clustermethod=NULL, measure="mean", trim=NULL,
varsort=NULL, transf=NULL, blowup=FALSE, blowxm=resX, fot=0))
}
stopifnot(method %in% c("simple", "single", "onedims", "pca", "mcdpca", "pppca",
"clustmcdpca", "clustpppca", "clustpca", "rmd", "mdav", "influence"))
rownames(x) <- 1:nrow(x)
if (length(variables) == 1) {
res <- list()
res$mx <- mafast(x, variables=variables, by=strata_variables, aggr=aggr,
measure=eval(parse(text=measure)))
res$x <- x
res$method <- "mafast"
res$aggr <- aggr
res$measure <- measure
res$fot <- factorOfTotals(x, aggr)
class(res) <- "micro"
return(res)
}
xall <- x
if (!is.null(strata_variables)) {
if (!all(strata_variables %in% colnames(x))) {
stop("strata_variables are not found in the data set!")
}
byvar <- rep("", nrow(x))
for (i in 1:length(strata_variables)) {
byvar <- paste(byvar, x[, strata_variables[i]], sep="-")
}
xsp <- split(x, as.factor(byvar))
} else {
xsp <- list(dataset=x)
}
reslist <- list()
for (spind in 1:length(xsp)) {
x <- xsp[[spind]][, variables, drop=FALSE]
if (method == "simple") {
res <- micro_simple(x=x, aggr=aggr, measure=measure, trim=trim)
}
if (method == "single") {
res <- micro_single(x=x, aggr=aggr, measure=measure, trim=trim, varsort=varsort)
}
if (method == "onedims") {
res <- micro_onedims(x=x, aggr=aggr, measure=measure, trim=trim)
}
if (method == "pca") {
res <- micro_pca(x=x, aggr=aggr, measure=measure, trim=trim)
}
if (method == "mcdpca") {
res <- micro_mcdpca(x=x, aggr=aggr, measure=measure, trim=trim)
}
if (method == "pppca") {
res <- micro_pppca(x=x, aggr=aggr, measure=measure, trim=trim)
}
if (method == "influence") {
res <- micro_influence(x=x, aggr=aggr, measure=measure, trim=trim,
clustermethod=clustermethod, transf=transf, nc=nc)
}
if (method == "clustpca") {
res <- micro_clustpca(x=x, aggr=aggr, measure=measure, trim=trim,
clustermethod=clustermethod, transf=transf, nc=nc)
}
if (method == "clustmcdpca") {
res <- micro_clustmcdpca(x=x, aggr=aggr, measure=measure, trim=trim,
clustermethod=clustermethod, transf=transf, nc=nc)
}
if (method == "clustpppca") {
res <- micro_clustpppca(x=x, aggr=aggr, measure=measure, trim=trim,
clustermethod=clustermethod, transf=transf, nc=nc)
}
if (method == "rmd") {
res <- micro_rmd(x=x, aggr=aggr)
}
if (method == "mdav") {
res <- micro_mdav(x, aggr)
}
res$fot <- factorOfTotals(x, aggr)
reslist[[spind]] <- res
}
res <- reslist[[1]]
if (length(reslist) > 1) {
blowxm <- vector()
fot <- vector()
for (i in 1:length(reslist)) {
blowxm <- rbind(blowxm, reslist[[i]]$blowxm)
fot <- c(fot, reslist[[i]]$fot)
}
res$x <- xall
res$blowxm <- blowxm
names(fot) <- substring(names(xsp), 2)
res$fot <- fot
}
res$x <- res$x[order(as.numeric(rownames(res$x))), ]
res$blowxm <- res$blowxm[order(as.numeric(rownames(res$blowxm))), ]
res$blowxm <- res$blowxm[1:nrow(xall), ]
class(res) <- "micro"
res$mx <- as.data.frame(res$blowxm)
colnames(res$mx) <- variables
resv <- c("x", "mx", "method", "aggr", "measure")
res1 <- list()
for (v in resv) {
res1[[v]] <- res[[v]]
}
class(res1) <- "micro"
return(res1)
}
print.micro <- function(x, ...) {
message(paste("\n Object created with method", x$method, "and aggregation level",
x$aggr))
message("\n -------------------------\n")
message("x ... original values \n")
print(summary(x$x))
message("\n -------------------------\n")
message("mx ... microaggregated values\n")
print(summary(x$mx))
message("\n -------------------------\n")
message("Try names(your object from class micro) for more details")
message("\n")
}
summary.micro <- function(object, ...) {
prcompRob <- function(X, k=0, sca="mad", scores=TRUE) {
n <- nrow(X)
p <- ncol(X)
if (k == 0) {
p1 <- min(n, p)
} else {
p1 <- k
}
S <- rep(1, p1)
V <- matrix(1:(p * p1), ncol=p1, nrow=p)
P <- diag(p)
m <- apply(X, 2, median)
Xcentr <- scale(X, center=m, scale=FALSE)
for (k in 1:p1) {
B <- Xcentr %*% P
Bnorm <- sqrt(apply(B^2, 1, sum))
A <- diag(1/Bnorm) %*% B
Y <- A %*% P %*% t(X)
if (sca == "mad")
s <- apply(Y, 1, mad)
j <- order(s)[n]
S[k] <- s[j]
V[, k] <- A[j, ]
if (V[1, k] < 0)
V[, k] <- (-1) * V[, k]
P <- P - (V[, k] %*% t(V[, k]))
}
if (scores) {
list(scale=S, loadings=V, scores=Xcentr %*% V)
} else list(scale=S, loadings=V)
}
x1 <- as.data.frame(object$x)
x2 <- as.data.frame(object$mx)
colnames(x2) <- colnames(x1)
amx <- mapply(mean, x1)
amxn <- mapply(mean, x2)
amean <- sum(abs(amx - amxn)/(abs(amx)))
meds1 <- mapply(median, x1)
meds2 <- mapply(median, x2)
amedian <- sum(abs(meds1 - meds2) / abs(meds1), na.rm = TRUE)
onestep <- function(x) {
y <- x
constant <- 3/1.486
m1 <- mapply(median, x)
m2 <- mapply(mad, x)
limit1 <- m1 + constant * m2
limit2 <- m1 - constant * m2
for (i in 1:dim(x)[2]) {
if (any(x[, i] > limit1[i])) {
w <- which(x[, i] > limit1[i])
le <- length(w)
y[w, i] <- limit1[i]
}
if (any(x[, i] < limit2[i])) {
w <- which(x[, i] < limit2[i])
le <- length(w)
y[w, i] <- limit2[i]
}
}
y
}
aox <- onestep(x1)
aox <- mapply(mean, aox)
aoxm <- onestep(x2)
aoxm <- mapply(mean, aoxm)
aonestep <- sum(abs(aox - aoxm) / abs(aox), na.rm = TRUE)
devvar <- sum(abs(var(x1) - var(x2))/abs(var(x1)))/length(x1)
amx <- mapply(mad, x1)
amxn <- mapply(mad, x2)
amad <- sum(abs(amx - amxn) / (abs(amx)), na.rm = TRUE)
acov <- sum(abs(cov(x1) - cov(x2))/abs(cov(x1)))/(2 * length(x1))
arcov <- NA
acor <- sum(abs(cor(x1) - cor(x2))/abs(cor(x1)))/(2 * length(x2))
arcor <- NA
acors <- sum(abs(cor(x1, method = "spearman") - cor(x2, method = "spearman")) /
abs(cor(x1, method = "spearman"))) / (2 * length(x1))
l1 <- lm(as.matrix(x1[, 1]) ~ as.matrix(x1[, -1]))$coeff
l2 <- lm(as.matrix(x2[, 1]) ~ as.matrix(x2[, -1]))$coeff
adlm <- sum(abs(l1[2:length(l1)] - l2[2:length(l2)]), na.rm = TRUE)
adlts <- NA
if (dim(x1)[1] > dim(x1)[2] && dim(x2)[1] > dim(x2)[2]) {
p1 <- stats::princomp(x1)
p2 <- stats::princomp(x2)
cp1 <- colMeans(p1$load)
cp2 <- colMeans(p2$load)
apcaload <- sum(abs(cp1 - cp2)/abs(cp1))
} else {
apcaload <- "too less observations"
}
if (dim(x1)[1] > dim(x1)[2] && dim(x2)[1] > dim(x2)[2]) {
p1 <- prcompRob(x1)
p2 <- prcompRob(x2)
cp1 <- colMeans(p1$load)
cp2 <- colMeans(p2$load)
apppcaload <- sum(abs(cp1 - cp2)/abs(cp1))
} else {
apppcaload <- "too less observations"
}
cmx1 <- apply(x1, 2, sum)
cmx2 <- apply(x2, 2, sum) * object$fot
atotals <- sum(abs((cmx1 - cmx2)/cmx1))
pmtotals <- sum((cmx2 - cmx1)/cmx1)
util1 <- dUtility(x1, x2)
deigenvalues <- dUtility(x1, x2, method = "eigen")
risk0 <- dRisk(x1, x2)
r <- dRiskRMD(x1, x2, k = 0.7)
risk1 <- r$risk1
risk2 <- r$risk2
wrisk1 <- r$wrisk1
wrisk2 <- r$wrisk2
list(
meansx = summary(x1),
meansxm = summary(x2),
amean = amean,
amedian = amedian,
aonestep = aonestep,
devvar = devvar,
amad = amad,
acov = acov,
arcov = arcov,
acor = acor,
arcor = arcor,
acors = acors,
adlm = adlm,
adlts = adlts,
apcaload = apcaload,
apppcaload = apppcaload,
totalsOrig = cmx1,
totalsMicro = cmx2,
atotals = atotals,
pmtotals = pmtotals,
util1 = util1,
deigenvalues = deigenvalues,
risk0 = risk0,
risk1 = risk1,
risk2 = risk2,
wrisk1 = wrisk1,
wrisk2 = wrisk2)
}
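# Usage sketch for the data.frame method above; the helpers it calls
# (mdav, mafast, dRisk, dUtility, addWarning) are assumed to come from
# the sdcMicro package. Synthetic data, illustrative only:
# library(sdcMicro)
# set.seed(42)
# dat <- data.frame(income  = rlnorm(90, meanlog = 10),
#                   expend  = rlnorm(90, meanlog = 9),
#                   savings = rlnorm(90, meanlog = 8))
# m1 <- microaggregation(dat, method = "mdav", aggr = 3)
# summary(m1)  # prints the information-loss and risk measures computed above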
library(LearnBayes)
data(iowagpa)
rlabels = c("91-99", "81-90", "71-80", "61-70", "51-60", "41-50",
"31-40", "21-30")
clabels = c("16-18", "19-21", "22-24", "25-27", "28-30")
gpa = matrix(iowagpa[, 1], nrow = 8, ncol = 5, byrow = T)
dimnames(gpa) = list(HSR = rlabels, ACTC = clabels)
gpa
samplesizes = matrix(iowagpa[, 2], nrow = 8, ncol = 5, byrow = T)
dimnames(samplesizes) = list(HSR = rlabels, ACTC = clabels)
samplesizes
act = seq(17, 29, by = 3)
matplot(act, t(gpa), type = "l", lwd = 3,
xlim = c(17, 34), col=1:8, lty=1:8)
legend(30, 3, lty = 1:8, lwd = 3, legend = c("HSR=9", "HSR=8",
"HSR=7", "HSR=6", "HSR=5", "HSR=4", "HSR=3", "HSR=2"), col=1:8)
S=readline(prompt="Type <Return> to continue : ")
MU = ordergibbs(iowagpa, 5000)
postmeans = apply(MU, 2, mean)
postmeans = matrix(postmeans, nrow = 8, ncol = 5)
postmeans=postmeans[seq(8,1,-1),]
dimnames(postmeans)=list(HSR=rlabels,ACTC=clabels)
round(postmeans,2)
windows()  # Windows-only graphics device; use dev.new() on other platforms
matplot(act, t(postmeans), type = "l", lty=1:8, lwd = 3, col = 1, xlim = c(17, 34))
legend(30, 3, lty = 1:8, lwd = 2, legend = c("HSR=9", "HSR=8",
"HSR=7", "HSR=6", "HSR=5", "HSR=4", "HSR=3", "HSR=2"))
postsds = apply(MU, 2, sd)
postsds = matrix(postsds, nrow = 8, ncol = 5)
postsds=postsds[seq(8,1,-1),]
dimnames(postsds)=list(HSR=rlabels,ACTC=clabels)
round(postsds,3)
s=.65
se=s/sqrt(samplesizes)
round(postsds/se,2)
S=readline(prompt="Type <Return> to continue : ")
FIT=hiergibbs(iowagpa,5000)
windows()
par(mfrow=c(2,1))
plot(density(FIT$beta[,2]),xlab=expression(beta[2]),
main="HIGH SCHOOL RANK")
plot(density(FIT$beta[,3]),xlab=expression(beta[3]),
main="ACT SCORE")
quantile(FIT$beta[,2],c(.025,.25,.5,.75,.975))
quantile(FIT$beta[,3],c(.025,.25,.5,.75,.975))
quantile(FIT$var,c(.025,.25,.5,.75,.975))
posterior.means = apply(FIT$mu, 2, mean)
posterior.means = matrix(posterior.means, nrow = 8, ncol = 5,
byrow = T)
S=readline(prompt="Type <Return> to continue : ")
windows()
par(mfrow=c(1,1))
matplot(act, t(posterior.means), type = "l", lwd = 3, lty=1:8, col=1,
xlim = c(17, 34))
legend(30, 3, lty = 1:8, lwd = 2, legend = c("HSR=9", "HSR=8",
"HSR=7", "HSR=6", "HSR=5", "HSR=4", "HSR=3", "HSR=2"))
p=1-pnorm((2.5-FIT$mu)/.65)
prob.success=apply(p,2,mean)
prob.success=matrix(prob.success,nrow=8,ncol=5,byrow=T)
dimnames(prob.success)=list(HSR=rlabels,ACTC=clabels)
round(prob.success,3)
setClass("Distribution.of.probability"
) |
cngr <-
function (S, PO = NULL, uniq)
{
flgnum <- FALSE
if (isTRUE(attr(S, "class")[1] == "Semigroup") == FALSE) {
s <- semigroup(S, type = "numerical")
}
else if (isTRUE(attr(S, "class")[2] == "symbolic") == TRUE) {
s <- as.semigroup(S, numerical = TRUE)
flgnum <- TRUE
}
else {
s <- S
}
ns <- s$ord
if (isTRUE(ns > 1L) == TRUE) {
mat <- matrix(0L, nrow = ns, ncol = ns, dimnames = list(seq_len(ns),
seq_len(ns)))
for (i in seq_len(ns)) {
mat[which(s$S == i)[1]] <- i
}
rm(i)
inc <- levels(factor(trnf(dichot(mat, c = 1), tolist = TRUE,
sep = ", ")))
clus <- data.frame(matrix(ncol = ns, nrow = 0))
if (isTRUE(nlevels(factor(as.matrix(s$S))) != 1) == TRUE) {
for (k in as.vector(mat)[which(as.vector(mat) > 0)]) {
for (i in seq_along(inc)) {
clus[i, ] <- as.vector(sprt(s$S, as.numeric(dhc(inc[i])[1]),
as.numeric(dhc(inc[i])[2])))
}
rm(i)
}
rm(k)
ifelse(missing(uniq) == FALSE && isTRUE(uniq == TRUE) ==
TRUE, clus <- unique(clus), NA)
colnames(clus) <- rownames(clus) <- NULL
cls <- data.matrix(clus)
cg <- list()
for (i in seq_len(nrow(cls))) {
cg[[i]] <- as.vector(cls[i, ])
ifelse(isTRUE(flgnum == TRUE) == TRUE, attr(cg[[i]],
"names") <- S$st, attr(cg[[i]], "names") <- dimnames(S)[[1]])
}
rm(i)
}
else {
cg <- rep(1, s$dim)
}
ifelse(isTRUE(flgnum == TRUE) == TRUE, sS <- S$S, sS <- s$S)
ifelse(isTRUE(is.null(PO)) == FALSE, lst <- list(S = sS,
PO = PO, clu = cg), lst <- list(S = sS, clu = cg))
ifelse(isTRUE(flgnum == TRUE) == TRUE, Sclss <- "symbolic",
Sclss <- attr(s, "class")[2])
ifelse(isTRUE(is.null(PO)) == FALSE, class(lst) <- c("Congruence",
"PO.Semigroup", Sclss), class(lst) <- c("Congruence",
"A.Semigroup", Sclss))
return(lst)
}
else {
s
}
}
resource("hello", engine = "utility1") |
output.best <- function(result, save.pdf = FALSE){
if(!save.pdf[1] %in% c(0, 1) | length(save.pdf) > 1){save.pdf <- FALSE}
expected <- c("method", "weight", "direction", "mu", "sd", "GEBV.value",
"parental.lines", "suggested.subset")
if(!identical(names(result), expected)){
stop("Input data error, please input the original output data of simu.GEBVO, simu.GDO, or simu.GEBVGD.",
call. = FALSE)
}
GEBV <- result$GEBV.value
t.n <- colnames(GEBV[[1]][[1]])
method <- result$method
weight <- result$weight
direction <- result$direction
mu <- result$mu
sd <- result$sd
nrep <- length(GEBV)
ngen <- length(GEBV[[1]])-1
nt <- ncol(GEBV[[1]][[1]])
datatry <- try(weight*direction*mu*sd, silent = TRUE)
if(class(datatry)[1] == "try-error" | NA %in% datatry){
stop("Input data error, please input the original output data of simu.GEBVO, simu.GDO, or simu.GEBVGD.",
call. = FALSE)
}
GEBV.max <- list()
for(i in 1:nrep){
max.gvalue0 <- matrix(0, ngen+1, nt)
for(j in 1:(ngen+1)){
max.gvalue <- GEBV[[i]][[j]]
datatry <- try(max.gvalue*max.gvalue, silent = TRUE)
if(class(datatry)[1] == "try-error" | NA %in% max.gvalue){
stop("Input data error, please input the original output data of simu.GEBVO, simu.GDO, or simu.GEBVGD.",
call. = FALSE)
}
sele <- c()
for(k in 1:nt){
if(direction[k] == Inf){
sele0 <- max.gvalue[,k]/sd[k]*weight[k]
} else if (direction[k] == -Inf){
sele0 <- max.gvalue[,k]/sd[k]*weight[k]
sele0 <- -sele0
} else {
sele0 <- ((max.gvalue[,k]-direction[k])/sd[k])*weight[k]
sele0 <- -abs(sele0)
}
sele <- cbind(sele, sele0)
}
sele <- apply(sele, 1, sum)
max.gvalue0[j,] <- max.gvalue[which.max(sele),]
}
GEBV.max[[i]] <- max.gvalue0
}
GEBV.max.ave <- list()
for(i in 1:nt){
ave0 <- matrix(0, (ngen+1), nrep)
for(j in 1:nrep){
ave0[, j] <- GEBV.max[[j]][, i]
}
GEBV.max.ave[[i]] <- ave0
}
GEBV.all <- list()
if(save.pdf){grDevices::pdf("IPLGP.GEBVplot.pdf", width = 8, height = 5)}
for(i in 1:nt){
GEBV.all0 <- data.frame(
generation = 0:ngen,
mean = apply(GEBV.max.ave[[i]], 1, mean),
sd = apply(GEBV.max.ave[[i]], 1, sd)
)
plot0 <- ggplot2::ggplot(GEBV.all0, ggplot2::aes(x = generation, y = mean)) +
ggplot2::geom_errorbar(ggplot2::aes(ymin = mean-sd, ymax = mean+sd),
width = .1) +
ggplot2::geom_line() +
ggplot2::geom_point(size = 1.5)+
ggplot2::labs(title = paste(t.n[i], " (", method, ")", sep = ""))+
ggplot2::scale_x_continuous(breaks = seq(0, ngen, 1), labels = c("P", paste("F", 1:ngen, sep = "")))+
ggplot2::theme_bw()
print(plot0)
colnames(GEBV.all0) <- c("generation", "mean", "standard deviation")
GEBV.all0[, 1] <- c("P", paste("F", 1:ngen, sep = ""))
GEBV.all[[i]] <- GEBV.all0
}
if(save.pdf){grDevices::dev.off()}
names(GEBV.all) <- t.n
return(GEBV.all)
}
contour_image=function(img,plot=TRUE){
Contorno2=function(img2,imagem=TRUE){ # "Contorno" = "contour" (Portuguese); imagem is unused
t=img2
c=ncol(t)
t2=(t[,-1]+t[,-c])/2
t3=(t2!=0) & (t2!=1)
t[,-1]=t3
return(t)
}
img=as.matrix(img)
m2=Contorno2(img2 = img)
m3=Contorno2(img2 = t(img))
m4=m2+t(m3)
m5=1*(m4!=0)
m5=EBImage::as.Image(m5)
if(plot==TRUE) {plot_image(m5)}
m5
}
print.contour_image=function(x,...){
if(EBImage::is.Image(x)){cat("Is an image object","\n")}
if(is.matrix(x)){cat("Is an matrix object","\n")}
cat("Dimensions of Object:",dim([email protected]),"\n")
}
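# Self-contained check, assuming Bioconductor's EBImage (used above) is
# installed: a filled square in a binary matrix should reduce to its outline.
# img <- matrix(0, 20, 20)
# img[6:15, 6:15] <- 1
# edge <- contour_image(img, plot = FALSE)
# sum(edge)  # count of boundary pixels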
npfGr <- function(PVector,A,H){
P <- matrix(PVector, nrow=nrow(A),byrow=FALSE)
n <- nrow(P)
r <- ncol(P)
Gradient <- c()
for(i in 1:n){
Ind <- rep(0,r)
for(j in 1:n){
if(i == j){
Ind <- Ind + 0
}else{
Ind <- Ind + 2*(H[i,j]^2)*(2*(A[i,j] - sum((P[i,]-P[j,])^2))*(-2*(P[i,]-P[j,])))
}
}
Gradient <- rbind(Gradient, Ind)
}
return(as.vector(Gradient))
}
npfGrWrap <- function(P){
A <- get("A")
H <- get("H")
Gradient <- npfGr(P,A,H)
return(Gradient)
}
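# Finite-difference check of npfGr() against the objective it appears to
# differentiate (inferred from the gradient above, not stated in the source):
#   f(P) = sum over ordered pairs i != j of H[i,j]^2 * (A[i,j] - ||P_i - P_j||^2)^2
# Requires the numDeriv package.
npfObj <- function(PVector, A, H) {
P <- matrix(PVector, nrow = nrow(A), byrow = FALSE)
s <- 0
for (i in seq_len(nrow(P))) for (j in seq_len(nrow(P)))
if (i != j) s <- s + (H[i, j]^2) * (A[i, j] - sum((P[i, ] - P[j, ])^2))^2
s
}
set.seed(1)
n <- 4; r <- 2
P <- matrix(rnorm(n * r), n, r)
A <- as.matrix(dist(P))^2 + 0.1  # perturbed squared distances, so the gradient is nonzero
H <- matrix(1, n, n); diag(H) <- 0
max(abs(npfGr(as.vector(P), A, H) - numDeriv::grad(npfObj, as.vector(P), A = A, H = H)))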
brainGraph_permute <- function(densities, resids, N=5e3, perms=NULL, auc=FALSE,
level=c('graph', 'vertex', 'other'),
measure=c('btwn.cent', 'coreness', 'degree', 'eccentricity',
'clo.cent', 'communicability', 'ev.cent', 'lev.cent',
'pagerank', 'subg.cent', 'E.local', 'E.nodal',
'knn', 'Lp', 'transitivity', 'vulnerability'),
.function=NULL) {
stopifnot(inherits(resids, 'brainGraph_resids'))
gID <- getOption('bg.group')
measure <- match.arg(measure)
level <- match.arg(level)
if (level == 'other') {
if (!is.function(.function)) stop('".function" must be a function!')
measure <- 'other'
} else if (level == 'graph') {
measure <- NULL
}
if (is.null(perms)) perms <- shuffleSet(n=nobs(resids), nset=N)
dims <- dim(perms)
N <- dims[1L]
perms <- rbind(perms, seq_len(dims[2L]))
grps <- as.numeric(resids$resids.all[, get(gID)])
if (!getDoParRegistered()) {
cl <- makeCluster(getOption('bg.ncpus'))
registerDoParallel(cl)
}
res.perm <- switch(level,
vertex=permute_vertex_foreach(perms, densities, resids, grps, auc, measure),
other=permute_other_foreach(perms, densities, resids, grps, .function),
graph=permute_graph_foreach(perms, densities, resids, grps, auc))
if (length(densities) == 1L) res.perm <- cbind(densities=densities, res.perm)
if (level == 'vertex') {
res.perm <- as.data.table(res.perm)
start <- if (isTRUE(auc)) 1L else 2L
setnames(res.perm, seq.int(start, dim(res.perm)[2L]), region.names(resids))
}
obs.ind <- if (isTRUE(auc)) N + 1L else (N + 1L) * seq_along(densities)
obs.diff <- res.perm[obs.ind]
res.perm <- res.perm[-obs.ind]
out <- list(atlas=resids$atlas, auc=auc, N=N, level=level, measure=measure, densities=densities,
resids=resids, DT=res.perm, obs.diff=obs.diff, Group=resids$Group)
class(out) <- c('brainGraph_permute', class(out))
return(out)
}
make_graphs_perm <- function(densities, resids, inds, grps) {
corrs <- lapply(unique(grps), function(x)
corr.matrix(resids[which(grps[inds] == x)],
densities=densities, rand=TRUE))
sapply(corrs, lapply, function(x)
apply(x$r.thresh, 3L, graph_from_adjacency_matrix, mode='undirected', diag=FALSE))
}
graph_attr_perm <- function(g, densities) {
mod <- sapply(g, sapply, function(x) modularity(cluster_louvain(x)))
Cp <- sapply(g, sapply, function(x) transitivity(x, type='localaverage'))
Lp <- sapply(g, sapply, mean_distance)
assort <- sapply(g, sapply, assortativity_degree)
E.global <- sapply(g, sapply, efficiency, 'global')
list(mod=mod, Cp=Cp, Lp=Lp, assort=assort, E.global=E.global)
}
permute_graph_foreach <- function(perms, densities, resids, grps, auc) {
i <- NULL
N <- dim(perms)[1L]
if (isTRUE(auc)) {
res.perm <- foreach(i=seq_len(N), .combine='rbind') %dopar% {
g <- make_graphs_perm(densities, resids, perms[i, ], grps)
meas.list <- graph_attr_perm(g, densities)
t(vapply(meas.list, function(y) auc_diff_perm(densities, y), numeric(1L)))
}
res.perm <- as.data.table(res.perm)
} else {
res.perm <- foreach(i=seq_len(N), .combine=function(a, b) Map(rbind, a, b)) %dopar% {
g <- make_graphs_perm(densities, resids, perms[i, ], grps)
meas.list <- graph_attr_perm(g, densities)
}
res.perm <- data.table(densities=rep.int(densities, N),
sapply(res.perm, function(x) as.numeric(-diff(t(x)))))
setkey(res.perm, densities)
}
return(res.perm)
}
vertex_attr_funs <- function(measure) {
switch(measure,
coreness=coreness,
degree=degree,
eccentricity=eccentricity,
clo.cent=function(x) centr_clo(x)$res,
communicability=centr_betw_comm,
ev.cent=function(x) centr_eigen(x)$vector,
lev.cent=function(x) centr_lev(x)$res,
pagerank=function(x) page_rank(x, weights=NA)$vector,
subg.cent=subgraph_centrality,
E.local=function(x) efficiency(x, type='local', weights=NA, use.parallel=FALSE),
E.nodal=function(x) efficiency(x, type='nodal', weights=NA),
knn=function(x) knn(x)$knn,
Lp=function(x) mean_distance_wt(x, level='vertex', weights=NA),
transitivity=function(x) transitivity(x, type='local', isolates='zero'),
vulnerability=function(x) vulnerability(x, use.parallel=FALSE),
function(x) centr_betw(x)$res)
}
permute_vertex_foreach <- function(perms, densities, resids, grps, auc, measure) {
i <- NULL
if (isTRUE(auc)) {
diffFun <- function(densities, meas.list) {
sapply(seq_len(dim(meas.list[[1L]])[2L]), function(x)
auc_diff(densities, cbind(meas.list[[1L]][, x], meas.list[[2L]][, x])))
}
} else {
diffFun <- function(densities, meas.list) cbind(densities, meas.list[[1L]] - meas.list[[2L]])
}
fun <- vertex_attr_funs(measure)
res.perm <- foreach(i=seq_len(dim(perms)[1L]), .combine='rbind') %dopar% {
g <- make_graphs_perm(densities, resids, perms[i, ], grps)
meas.list <- lapply(g, function(x) t(sapply(x, fun)))
diffFun(densities, meas.list)
}
}
permute_other_foreach <- function(perms, densities, resids, grps, .function) {
i <- NULL
res.perm <- foreach(i=seq_len(dim(perms)[1L]), .combine='rbind') %dopar% {
g <- make_graphs_perm(densities, resids, perms[i, ], grps)
.function(g, densities)
}
}
summary.brainGraph_permute <- function(object, measure=object$measure,
alternative=c('two.sided', 'less', 'greater'),
alpha=0.05, p.sig=c('p', 'p.fdr'), ...) {
perm.diff <- p <- p.fdr <- region <- obs.diff <- ..measure <- NULL
gID <- getOption('bg.group')
if (object$level == 'other') {
object$level <- if (dim(object$DT)[2L] > 6L) 'vertex' else 'graph'
}
if (object$level == 'graph' && is.null(measure)) measure <- 'mod'
group_str <- paste0(measure, '.', object$Group)
permDT <- copy(object$DT)
g <- with(object, make_graphs_perm(densities, resids, seq_len(nobs(resids)),
resids$resids.all[, as.numeric(get(gID))]))
densities <- object$densities
if (object$level == 'vertex') {
obsDT <- copy(object$obs.diff)
fun <- vertex_attr_funs(measure)
obs <- lapply(g, function(x) t(sapply(x, fun)))
if (isTRUE(object$auc)) {
obs <- lapply(obs, apply, 2L, function(y) -auc_diff(densities, y))
permDT[, densities := 1]
obsDT[, densities := 1]
densities <- 1
}
sum.dt <- data.table(densities=densities,
region=rep(V(g[[1L]][[1L]])$name, each=length(densities)),
g1=c(obs[[1L]]), g2=c(obs[[2L]]),
key=c('densities', 'region'))
setnames(sum.dt, c('g1', 'g2'), group_str)
obs.m <- melt(obsDT, id.vars='densities', variable.name='region', value.name='obs.diff')
sum.dt <- merge(sum.dt, obs.m)
permDT <- melt(permDT, id.vars='densities', variable.name='region', value.name=measure)
setkeyv(permDT, key(sum.dt))
permdiff <- permDT[, list(perm.diff=mean(get(measure))), by=key(sum.dt)]
sum.dt <- merge(sum.dt, permdiff, by=key(sum.dt))
} else if (object$level == 'graph') {
stopifnot(hasName(permDT, measure))
permDT[, region := 'graph']
obs <- graph_attr_perm(g, densities)[[measure]]
if (isTRUE(object$auc)) {
obs <- t(apply(obs, 2L, function(y) -auc_diff(densities, y)))
permDT[, densities := 1]
densities <- 1
}
sum.dt <- data.table(densities=densities, region='graph', obs)
setnames(sum.dt, c('V1', 'V2'), group_str)
sum.dt[, obs.diff := object$obs.diff[[measure]]]
sum.dt[, perm.diff := permDT[, mean(get(measure)), by=densities]$V1]
}
result.dt <- merge(permDT[, c('densities', 'region', ..measure)],
sum.dt[, c('densities', 'region', 'obs.diff')],
by=c('densities', 'region'))
alt <- match.arg(alternative)
compfun <- switch(alt,
two.sided=function(perm, obs) sum(abs(perm) >= abs(unique(obs))),
less=function(perm, obs) sum(perm <= unique(obs)),
greater=function(perm, obs) sum(perm >= unique(obs)))
result.dt[, p := (compfun(get(measure), obs.diff) + 1L) / (.N + 1L), by=key(result.dt)]
CI <- switch(alt, two.sided=c(alpha / 2, 1 - (alpha / 2)), less=c(alpha, 1), greater=c(1 / object$N, 1 - alpha))
result.dt[, c('ci.low', 'ci.high') := as.list(sort(get(measure))[ceiling(.N * CI)]), by=key(result.dt)]
result.dt <- result.dt[, .SD[1L], by=key(result.dt)]
sum.dt <- merge(sum.dt, result.dt[, !c(measure, 'obs.diff'), with=FALSE], by=key(result.dt))
setcolorder(sum.dt, c('densities', 'region', group_str, 'obs.diff', 'perm.diff', 'ci.low', 'ci.high', 'p'))
if (isFALSE(object$auc)) {
groupby <- if (object$level == 'graph') NULL else 'densities'
sum.dt[, p.fdr := p.adjust(p, 'fdr'), by=groupby]
}
meas.full <- print_full_metric(measure)
p.sig <- match.arg(p.sig)
perm.sum <- c(object, list(DT.sum=sum.dt, meas.full=meas.full, alt=alt, alpha=alpha, p.sig=p.sig))
perm.sum$measure <- measure
class(perm.sum) <- c('summary.brainGraph_permute', class(perm.sum))
perm.sum
}
print.summary.brainGraph_permute <- function(x, ...) {
print_title_summary('Permutation analysis')
cat('# of permutations: ', prettyNum(x$N, ','), '\n')
cat('Level: ', x$level, '\n')
cat('Graph metric: ', x$meas.full, '\n')
if (isTRUE(x$auc)) cat('Area-under-the-curve (AUC) calculated across', length(x$densities), 'densities:\n', x$densities, '\n')
symb <- switch(x$alt, two.sided='!=', greater='>', less='<')
alt <- sprintf('%s - %s %s 0', x$Group[1L], x$Group[2L], symb)
cat('Alternative hypothesis: ', '\t', alt, '\n')
cat('Alpha: ', x$alpha, '\n\n')
if (with(x, dim(DT.sum[get(p.sig) < alpha])[1L]) == 0L) {
cat('No significant results!\n')
} else {
clp <- 100 * (1 - x$alpha)
setnames(x$DT.sum, c('ci.low', 'ci.high'), paste0(clp, '% CI ', c('low', 'high')))
with(x, print(DT.sum[get(p.sig) < alpha]))
}
invisible(x)
}
plot.brainGraph_permute <- function(x, measure=x$measure,
alternative=c('two.sided', 'less', 'greater'),
alpha=0.05, p.sig=c('p', 'p.fdr'), ptitle=NULL, ...) {
densities <- Group <- sig <- trend <- yloc <- obs <- mylty <- ci.low <- ci.high <-
variable <- value <- reg.num <- region <- perm.diff <- obs.diff <- plot2 <- reg.num2 <- NULL
p.sig <- match.arg(p.sig)
perm.sum <- summary(x, measure=measure, alternative=alternative, alpha=alpha)
measure <- perm.sum$measure
sum.dt <- perm.sum$DT.sum
if (is.null(ptitle)) ptitle <- perm.sum$meas.full
ylabel2 <- sprintf('Observed and permutation difference (%s - %s)', x$Group[1L], x$Group[2L])
if (x$level == 'graph') {
plot.dt <- melt(sum.dt, id.vars=setdiff(names(sum.dt), paste0(measure, '.', x$Group)),
variable.name='Group', value.name='obs')
plot.dt[, Group := factor(Group, labels=x$Group)]
idvars <- c('densities', 'region', 'p', 'Group', 'obs')
if (isFALSE(x$auc)) idvars <- c(idvars, 'p.fdr')
plot.dt <- melt(plot.dt, id.vars=idvars)
plot.dt[, c('sig', 'trend') := '']
plot.dt[get(p.sig) < alpha, sig := '*']
plot.dt[get(p.sig) >= alpha & get(p.sig) < 2 * alpha, trend := '*']
plot.dt[, yloc := extendrange(obs, f=0.07)[1L]]
plot.dt[, mylty := 0]
plot.dt[variable == 'obs.diff', mylty := 1]
plot.dt[variable %in% c('ci.low', 'ci.high'), mylty := 2]
plot.dt[variable == 'perm.diff', mylty := 3]
if (!requireNamespace('ggplot2', quietly=TRUE)) {
gID <- getOption('bg.group')
grps <- paste(perm.sum$measure, x$Group, sep='.')
ymin <- plot.dt[1L, yloc]
ymax <- plot.dt[, max(obs)]
par(mar=c(8.6, 4.1, 4.1, 2.1), xpd=TRUE)
plot.dt[variable == 'obs.diff' & get(gID) == grps[1L],
plot(densities, obs, type='l', col=plot.cols[1L],
main=perm.sum$meas.full, xlab='Density', ylab='Observed values')]
plot.dt[variable == 'obs.diff' & get(gID) == grps[2L],
lines(densities, obs, col=plot.cols[2L])]
plot.dt[variable == 'obs.diff' & sig == '*',
text(densities, yloc, sig, cex=1.75, col='red')]
plot.dt[variable == 'obs.diff' & trend == '*',
text(densities, yloc, trend, cex=1.75, col='blue')]
legend('bottom', title=gID, sub(paste0(perm.sum$measure, '.'), '', grps),
fill=plot.cols[1L:2L], inset=c(0, -0.35), horiz=TRUE)
ymin <- plot.dt[variable == 'ci.low', min(value)]
ymax <- plot.dt[variable == 'ci.high', max(value)]
par(mar=c(9.1, 4.1, 4.1, 2.1), xpd=TRUE)
plot.dt[variable == 'obs.diff' & get(gID) == grps[1L],
plot(densities, value, type='l', col='red', ylim=c(ymin, ymax),
main=perm.sum$meas.full, xlab='Density', ylab=ylabel2)]
plot.dt[variable == 'obs.diff' & get(gID) == grps[1L],
points(densities, value, col='red', pch=19)]
plot.dt[variable == 'ci.high' & get(gID) == grps[1L],
lines(densities, value, lty=2)]
plot.dt[variable == 'ci.low' & get(gID) == grps[1L],
lines(densities, value, lty=2)]
plot.dt[variable == 'perm.diff' & get(gID) == grps[1L],
lines(densities, value, lty=3)]
legend('bottom', c('Observed diff.', '95% CI', 'Mean permutation diff.'),
lty=1L:3L, inset=c(0, -0.4))
return(invisible(x))
} else {
p <- ggplot2::ggplot(plot.dt, ggplot2::aes(x=densities))
plot1 <- p +
ggplot2::geom_line(ggplot2::aes(y=obs, col=Group)) +
ggplot2::geom_text(ggplot2::aes(y=yloc, label=sig), col='red', size=8) +
ggplot2::geom_text(ggplot2::aes(y=yloc, label=trend), col='blue', size=8) +
ggplot2::theme(legend.position='bottom')
plot2 <- p +
ggplot2::geom_line(data=plot.dt[variable =='obs.diff'], ggplot2::aes(y=value, lty=factor(mylty)), col='red') +
ggplot2::geom_point(data=plot.dt[variable == 'obs.diff'], ggplot2::aes(y=value), col='red', size=3) +
ggplot2::geom_line(data=plot.dt[variable == 'perm.diff'], ggplot2::aes(y=value, lty=factor(mylty))) +
ggplot2::geom_line(data=plot.dt[variable == 'ci.low'], ggplot2::aes(y=value, lty=factor(mylty))) +
ggplot2::geom_line(data=plot.dt[variable == 'ci.high'], ggplot2::aes(y=value, lty=factor(mylty))) +
ggplot2::scale_linetype_manual(name='',
labels=c('Observed diff.', '95% CI', 'Mean permutation diff.'),
values=1:3) +
ggplot2::theme(legend.position='bottom', legend.spacing.x=ggplot2::unit(9, 'pt'),
legend.key=ggplot2::element_rect(fill='white'),
legend.background=ggplot2::element_rect(fill='gray79'))
ylabel1 <- 'Observed values'
plot1 <- plot1 +
ggplot2::labs(title=ptitle, y=ylabel1, x='Density') +
ggplot2::theme(plot.title=ggplot2::element_text(hjust=0.5, face='bold'))
plot2 <- plot2 +
ggplot2::labs(title=ptitle, y=ylabel2, x='Density') +
ggplot2::theme(plot.title=ggplot2::element_text(hjust=0.5, face='bold'))
return(list(plot1, plot2))
}
} else {
plot.dt <- droplevels(sum.dt[get(p.sig) < alpha])
if (dim(plot.dt)[1L] == 0L) stop('No significant results!')
if (!requireNamespace('ggplot2', quietly=TRUE)) {
ymin <- plot.dt[, min(c(obs.diff, ci.low))]
ymax <- plot.dt[, max(c(obs.diff, ci.high))]
plot.dt[, reg.num2 := as.numeric(as.factor(region))]
dotplot(perm.diff ~ region | as.factor(densities),
xlab='Region', ylab=ylabel2, main=perm.sum$meas.full,
data=plot.dt, ylim=extendrange(c(ymin, ymax)),
x0=plot.dt$reg.num2, y0=plot.dt$ci.low, y1=plot.dt$ci.high, yobs=plot.dt$obs.diff,
scales=list(x=list(relation='free')),
panel=function(x, y, x0, y0, y1, yobs, subscripts) {
lpoints(x, y, pch=19)
panel.abline(h=0, lty=3)
larrows(x0=x0[subscripts], y0=y0[subscripts], x1=x0[subscripts], y1=y1[subscripts],
code=3, angle=90, length=0.1)
lsegments(x0[subscripts] - 0.1, yobs[subscripts],
x0[subscripts] + 0.1, yobs[subscripts],
col='red', lwd=3)
})
} else {
plot.dt[, reg.num := seq_len(.N), by=densities]
plot1 <- ggplot2::ggplot(plot.dt, ggplot2::aes(x=region)) +
ggplot2::geom_point(ggplot2::aes(y=perm.diff)) +
ggplot2::geom_errorbar(ggplot2::aes(ymin=ci.low, ymax=ci.high)) +
ggplot2::geom_segment(ggplot2::aes(x=reg.num - .25, xend=reg.num + .25, y=obs.diff, yend=obs.diff),
col='red', size=1.25) +
ggplot2::facet_wrap(~ densities, scales='free') +
ggplot2::labs(title=ptitle, y=ylabel2, x='Region') +
ggplot2::theme(plot.title=ggplot2::element_text(hjust=0.5, face='bold'))
      return(list(plot1))
}
}
} |
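# Hedged usage sketch for plot.brainGraph_permute(); `perms` is assumed to be
# a brainGraph_permute object returned by brainGraph::brainGraph_permute().
# For graph-level results the method returns a list of ggplot objects.
# perms <- brainGraph_permute(densities, resids, N = 1e3, level = 'graph')
# p <- plot(perms, measure = 'mod', alpha = 0.05)
# p[[1]]  # observed values per group; p[[2]] observed vs. permutation differences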
find.neib = function(i, p1, n1, zeta, A, k){
step = 1
x = find.x(i,p1)
y = find.y(i,p1)
while(TRUE){
p.range = (i - min(step, x - 1)) : (i + min(step, p1 - x))
n.range = (-min(step, y - 1)) : (min(step, n1 - y))
to.check = c(rep(p.range, length(n.range)) + rep(n.range, each = length(p.range)) * p1)
if(all(!zeta[to.check,])){
step = step + 1
}else{
return(rep(mean(A[to.check,][zeta[to.check,]]), k))
}
}
} |
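# Hedged usage sketch for find.neib(): grow a square neighbourhood around grid
# cell i (on a p1-by-n1 grid, linear index i = (y - 1) * p1 + x) until at
# least one cell flagged in `zeta` is found, then return the mean of `A` over
# those flagged cells, replicated k times. The helpers find.x()/find.y() are
# defined elsewhere; under the indexing convention above they would be:
# find.x <- function(i, p1) ((i - 1) %% p1) + 1
# find.y <- function(i, p1) ((i - 1) %/% p1) + 1
# zeta <- matrix(FALSE, 12, 1); zeta[c(2, 5), ] <- TRUE
# A <- matrix(seq_len(12), ncol = 1)
# find.neib(6, p1 = 4, n1 = 3, zeta, A, k = 2)  # c(4, 4): mean of A[2] and A[5]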
nbeats <- function(
mode = "regression",
id,
freq,
prediction_length,
lookback_length = NULL,
loss_function = NULL,
bagging_size = NULL,
num_stacks = NULL,
num_blocks = NULL,
epochs = NULL,
batch_size = NULL,
num_batches_per_epoch = NULL,
learn_rate = NULL,
learn_rate_decay_factor = NULL,
learn_rate_min = NULL,
patience = NULL,
clip_gradient = NULL,
penalty = NULL
) {
args <- list(
id = rlang::enquo(id),
freq = rlang::enquo(freq),
prediction_length = rlang::enquo(prediction_length),
lookback_length = rlang::enquo(lookback_length),
loss_function = rlang::enquo(loss_function),
bagging_size = rlang::enquo(bagging_size),
num_stacks = rlang::enquo(num_stacks),
num_blocks = rlang::enquo(num_blocks),
epochs = rlang::enquo(epochs),
batch_size = rlang::enquo(batch_size),
num_batches_per_epoch = rlang::enquo(num_batches_per_epoch),
learn_rate = rlang::enquo(learn_rate),
learn_rate_decay_factor = rlang::enquo(learn_rate_decay_factor),
learn_rate_min = rlang::enquo(learn_rate_min),
patience = rlang::enquo(patience),
clip_gradient = rlang::enquo(clip_gradient),
penalty = rlang::enquo(penalty)
)
parsnip::new_model_spec(
"nbeats",
args = args,
eng_args = NULL,
mode = mode,
method = NULL,
engine = NULL
)
}
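# Hedged usage sketch (assumes parsnip is attached and the "gluonts_nbeats"
# engine has been registered by this package on load):
# spec <- nbeats(id = "id", freq = "M", prediction_length = 24, epochs = 5)
# spec <- parsnip::set_engine(spec, "gluonts_nbeats")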
print.nbeats <- function(x, ...) {
cat("N-BEATS Model Specification (", x$mode, ")\n\n", sep = "")
parsnip::model_printer(x, ...)
if(!is.null(x$method$fit$args)) {
cat("Model fit template:\n")
print(parsnip::show_call(x))
}
invisible(x)
}
update.nbeats <- function(object, parameters = NULL,
id = NULL,
freq = NULL,
prediction_length = NULL,
lookback_length = NULL,
loss_function = NULL,
bagging_size = NULL,
num_stacks = NULL,
num_blocks = NULL,
epochs = NULL,
batch_size = NULL,
num_batches_per_epoch = NULL,
learn_rate = NULL,
learn_rate_decay_factor = NULL,
learn_rate_min = NULL,
patience = NULL,
clip_gradient = NULL,
penalty = NULL,
fresh = FALSE, ...) {
parsnip::update_dot_check(...)
if (!is.null(parameters)) {
parameters <- parsnip::check_final_param(parameters)
}
args <- list(
id = rlang::enquo(id),
freq = rlang::enquo(freq),
prediction_length = rlang::enquo(prediction_length),
lookback_length = rlang::enquo(lookback_length),
loss_function = rlang::enquo(loss_function),
bagging_size = rlang::enquo(bagging_size),
num_stacks = rlang::enquo(num_stacks),
num_blocks = rlang::enquo(num_blocks),
epochs = rlang::enquo(epochs),
batch_size = rlang::enquo(batch_size),
num_batches_per_epoch = rlang::enquo(num_batches_per_epoch),
learn_rate = rlang::enquo(learn_rate),
learn_rate_decay_factor = rlang::enquo(learn_rate_decay_factor),
learn_rate_min = rlang::enquo(learn_rate_min),
patience = rlang::enquo(patience),
clip_gradient = rlang::enquo(clip_gradient),
penalty = rlang::enquo(penalty)
)
args <- parsnip::update_main_parameters(args, parameters)
if (fresh) {
object$args <- args
} else {
null_args <- purrr::map_lgl(args, parsnip::null_value)
if (any(null_args))
args <- args[!null_args]
if (length(args) > 0)
object$args[names(args)] <- args
}
parsnip::new_model_spec(
"nbeats",
args = object$args,
eng_args = object$eng_args,
mode = object$mode,
method = NULL,
engine = object$engine
)
}
translate.nbeats <- function(x, engine = x$engine, ...) {
if (is.null(engine)) {
message("Used `engine = 'gluonts_nbeats'` for translation.")
engine <- "gluonts_nbeats"
}
x <- parsnip::translate.default(x, engine, ...)
x
}
nbeats_fit_impl <- function(x, y, freq, prediction_length, id,
epochs = 5,
batch_size = 32,
num_batches_per_epoch = 50,
learning_rate = 0.001,
learning_rate_decay_factor = 0.5,
patience = 10,
minimum_learning_rate = 5e-5,
clip_gradient = 10,
weight_decay = 1e-8,
init = "xavier",
ctx = NULL,
hybridize = TRUE,
context_length = NULL,
loss_function = "sMAPE",
num_stacks = 30,
num_blocks = list(1),
widths = list(512),
sharing = list(FALSE),
expansion_coefficient_lengths = list(32),
stack_types = list("G")
) {
validate_gluonts_required_args(x, prediction_length, freq, id)
if (length(context_length) > 1) {
rlang::abort("Only one 'lookback_length' allowed. Did you mean to use 'gluonts_nbeats_ensemble'.")
}
if (is.null(context_length)) context_length <- reticulate::py_none()
if (is.null(ctx)) ctx <- reticulate::py_none()
num_blocks <- as.list(num_blocks)
widths <- as.list(widths)
sharing <- as.list(sharing)
expansion_coefficient_lengths <- as.list(expansion_coefficient_lengths)
stack_types <- as.list(stack_types)
outcome <- y
predictor <- x
index_tbl <- modeltime::parse_index_from_data(predictor)
idx_col <- names(index_tbl)
idx <- timetk::tk_index(index_tbl)
id_tbl <- x %>% dplyr::select(dplyr::all_of(id))
value_tbl <- tibble::tibble(value = y)
constructed_tbl <- dplyr::bind_cols(id_tbl, index_tbl, value_tbl)
gluon_listdataset <- constructed_tbl %>%
to_gluon_list_dataset(
date_var = !! rlang::sym(idx_col),
value_var = value,
id_var = !! rlang::sym(id),
freq = freq
)
trainer <- pkg.env$gluonts$trainer$Trainer(
ctx = ctx,
epochs = epochs,
batch_size = batch_size,
num_batches_per_epoch = num_batches_per_epoch,
learning_rate = learning_rate,
learning_rate_decay_factor = learning_rate_decay_factor,
patience = patience,
minimum_learning_rate = minimum_learning_rate,
clip_gradient = clip_gradient,
weight_decay = weight_decay,
init = init,
hybridize = hybridize
)
model_spec <- pkg.env$gluonts$model$n_beats$NBEATSEstimator(
freq = freq,
prediction_length = prediction_length,
trainer = trainer,
context_length = context_length,
loss_function = loss_function,
num_stacks = num_stacks,
num_blocks = num_blocks,
widths = widths,
sharing = sharing,
expansion_coefficient_lengths = expansion_coefficient_lengths,
stack_types = stack_types
)
model_fit <- model_spec$train(training_data = gluon_listdataset)
class <- "nbeats_fit_impl"
models <- list(
model_1 = model_fit
)
data <- index_tbl %>%
dplyr::mutate(
.actual = y,
.fitted = NA,
.residuals = .actual - .fitted
)
extras <- list(
id = id,
idx_column = idx_col,
value_column = "value",
freq = freq,
grps = constructed_tbl %>% dplyr::pull(!! rlang::sym(id)) %>% unique(),
constructed_tbl = list(constructed_tbl)
)
desc <- "NBEATS"
modeltime::new_modeltime_bridge(
class = class,
models = models,
data = data,
extras = extras,
desc = desc
)
}
print.nbeats_fit_impl <- function(x, ...) {
cat(x$desc)
cat("\n")
cat("--------")
cat("\nModel: ")
print(x$models$model_1)
cat("\n")
print(x$models$model_1$prediction_net)
invisible(x)
}
nbeats_predict_impl <- function(object, new_data) {
model <- object$models$model_1
id <- object$extras$id
  idx_col <- object$extras$idx_column
freq <- object$extras$freq
constructed_tbl <- object$extras$constructed_tbl[[1]]
gluon_listdataset <- constructed_tbl %>%
to_gluon_list_dataset(
date_var = !! rlang::sym(idx_col),
value_var = value,
id_var = !! rlang::sym(id),
freq = freq
)
preds <- make_gluon_predictions(
model = model,
gluon_listdataset = gluon_listdataset,
new_data = new_data,
id_col = id,
idx_col = idx_col
)
return(preds)
}
predict.nbeats_fit_impl <- function(object, new_data, ...) {
nbeats_predict_impl(object, new_data, ...)
}
nbeats_ensemble_fit_impl <- function(x, y, freq, prediction_length, id,
epochs = 5,
batch_size = 32,
num_batches_per_epoch = 50,
learning_rate = 0.001,
learning_rate_decay_factor = 0.5,
patience = 10,
minimum_learning_rate = 5e-5,
clip_gradient = 10,
weight_decay = 1e-8,
init = "xavier",
ctx = NULL,
hybridize = TRUE,
meta_context_length = prediction_length * c(2, 4),
meta_loss_function = list('sMAPE'),
meta_bagging_size = 3,
num_stacks = 30,
num_blocks = list(1),
widths = list(512),
sharing = list(FALSE),
expansion_coefficient_lengths = list(32),
stack_types = list("G")
) {
validate_gluonts_required_args(x, prediction_length, freq, id)
if (is.null(meta_context_length)) {
meta_context_length <- reticulate::py_none()
} else {
meta_context_length <- as.list(meta_context_length)
}
if (is.null(meta_loss_function)) {
meta_loss_function <- reticulate::py_none()
} else {
meta_loss_function <- as.list(meta_loss_function)
}
if (is.null(ctx)) ctx <- reticulate::py_none()
  stack_types <- as.list(stack_types)
  expansion_coefficient_lengths <- as.list(expansion_coefficient_lengths)
  widths <- as.list(widths)
  num_blocks <- as.list(num_blocks)
  sharing <- as.list(sharing)
outcome <- y
predictor <- x
index_tbl <- modeltime::parse_index_from_data(predictor)
idx_col <- names(index_tbl)
idx <- timetk::tk_index(index_tbl)
id_tbl <- x %>% dplyr::select(dplyr::all_of(id))
value_tbl <- tibble::tibble(value = y)
constructed_tbl <- dplyr::bind_cols(id_tbl, index_tbl, value_tbl)
gluon_listdataset <- constructed_tbl %>%
to_gluon_list_dataset(
date_var = !! rlang::sym(idx_col),
value_var = value,
id_var = !! rlang::sym(id),
freq = freq
)
trainer <- pkg.env$gluonts$trainer$Trainer(
ctx = ctx,
epochs = epochs,
batch_size = batch_size,
num_batches_per_epoch = num_batches_per_epoch,
learning_rate = learning_rate,
learning_rate_decay_factor = learning_rate_decay_factor,
patience = patience,
minimum_learning_rate = minimum_learning_rate,
clip_gradient = clip_gradient,
weight_decay = weight_decay,
init = init,
hybridize = hybridize
)
model_spec <- pkg.env$gluonts$model$n_beats$NBEATSEnsembleEstimator(
freq = freq,
prediction_length = prediction_length,
trainer = trainer,
meta_context_length = meta_context_length,
meta_loss_function = meta_loss_function,
meta_bagging_size = meta_bagging_size,
num_stacks = num_stacks,
num_blocks = num_blocks,
widths = widths,
sharing = sharing,
expansion_coefficient_lengths = expansion_coefficient_lengths,
stack_types = stack_types
)
model_fit <- model_spec$train(training_data = gluon_listdataset)
class <- "nbeats_ensemble_fit_impl"
models <- list(
model_1 = model_fit
)
data <- index_tbl %>%
dplyr::mutate(
.actual = y,
.fitted = NA,
.residuals = .actual - .fitted
)
extras <- list(
id = id,
idx_column = idx_col,
value_column = "value",
freq = freq,
grps = constructed_tbl %>% dplyr::pull(!! rlang::sym(id)) %>% unique(),
constructed_tbl = list(constructed_tbl)
)
desc <- "NBEATS ENSEMBLE"
modeltime::new_modeltime_bridge(
class = class,
models = models,
data = data,
extras = extras,
desc = desc
)
}
print.nbeats_ensemble_fit_impl <- function(x, ...) {
cat(x$desc)
cat("\n")
cat("--------")
cat("\nModel: ")
print(x$models$model_1)
invisible(x)
}
nbeats_ensemble_predict_impl <- function(object, new_data) {
model <- object$models$model_1
id <- object$extras$id
  idx_col <- object$extras$idx_column
freq <- object$extras$freq
constructed_tbl <- object$extras$constructed_tbl[[1]]
gluon_listdataset <- constructed_tbl %>%
to_gluon_list_dataset(
date_var = !! rlang::sym(idx_col),
value_var = value,
id_var = !! rlang::sym(id),
freq = freq
)
preds <- make_gluon_predictions(
model = model,
gluon_listdataset = gluon_listdataset,
new_data = new_data,
id_col = id,
idx_col = idx_col
)
return(preds)
}
predict.nbeats_ensemble_fit_impl <- function(object, new_data, ...) {
nbeats_ensemble_predict_impl(object, new_data, ...)
} |
test_that("aggregation functions warn once if na.rm = FALSE", {
local_con(simulate_dbi())
sql_mean <- win_aggregate("MEAN")
expect_warning(sql_mean("x"), "Missing values")
expect_warning(sql_mean("x"), NA)
expect_warning(sql_mean("x", na.rm = TRUE), NA)
})
test_that("window functions without group have empty over", {
expect_equal(translate_sql(n()), sql("COUNT(*) OVER ()"))
expect_equal(translate_sql(sum(x, na.rm = TRUE)), sql("SUM(`x`) OVER ()"))
})
test_that("aggregating window functions ignore order_by", {
expect_equal(
translate_sql(n(), vars_order = "x"),
sql("COUNT(*) OVER ()")
)
expect_equal(
translate_sql(sum(x, na.rm = TRUE), vars_order = "x"),
sql("SUM(`x`) OVER ()")
)
})
test_that("order_by overrides default ordering", {
expect_equal(
translate_sql(order_by(y, cumsum(x)), vars_order = "x"),
sql("SUM(`x`) OVER (ORDER BY `y` ROWS UNBOUNDED PRECEDING)")
)
expect_equal(
translate_sql(order_by(y, cummean(x)), vars_order = "x"),
sql("AVG(`x`) OVER (ORDER BY `y` ROWS UNBOUNDED PRECEDING)")
)
expect_equal(
translate_sql(order_by(y, cummin(x)), vars_order = "x"),
sql("MIN(`x`) OVER (ORDER BY `y` ROWS UNBOUNDED PRECEDING)")
)
expect_equal(
translate_sql(order_by(y, cummax(x)), vars_order = "x"),
sql("MAX(`x`) OVER (ORDER BY `y` ROWS UNBOUNDED PRECEDING)")
)
})
test_that("cumulative windows warn if no order", {
expect_warning(translate_sql(cumsum(x)), "does not have explicit order")
expect_warning(translate_sql(cumsum(x), vars_order = "x"), NA)
})
test_that("ntile always casts to integer", {
expect_equal(
translate_sql(ntile(x, 10.5)),
sql("NTILE(10) OVER (ORDER BY `x`)")
)
})
test_that("first, last, and nth translated to _value", {
expect_equal(
translate_sql(first(x)),
sql("FIRST_VALUE(`x`) OVER ()")
)
expect_equal(
translate_sql(last(x), vars_order = "a", vars_frame = c(0, Inf)),
sql("LAST_VALUE(`x`) OVER (ORDER BY `a` ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)")
)
expect_equal(
translate_sql(nth(x, 3), vars_order = "a", vars_frame = c(-Inf, 0)),
sql("NTH_VALUE(`x`, 3) OVER (ORDER BY `a` ROWS UNBOUNDED PRECEDING)")
)
})
test_that("can override frame of recycled functions", {
expect_equal(
translate_sql(sum(x, na.rm = TRUE), vars_frame = c(-1, 0), vars_order = "y"),
sql("SUM(`x`) OVER (ORDER BY `y` ROWS 1 PRECEDING)")
)
})
test_that("frame is checked", {
expect_snapshot(
error = TRUE,
translate_sql(sum(x, na.rm = TRUE), vars_frame = c(1, 0))
)
})
test_that("win_rank works", {
local_con(simulate_dbi())
sql_row_number <- win_rank("ROW_NUMBER")
expect_equal(
sql_row_number("x"),
sql("ROW_NUMBER() OVER (ORDER BY `x`)")
)
})
test_that("win_rank works", {
local_con(simulate_dbi())
sql_cumsum <- win_cumulative("SUM")
expect_equal(
sql_cumsum(ident("x"), "y"),
sql("SUM(`x`) OVER (ORDER BY `y` ROWS UNBOUNDED PRECEDING)")
)
})
test_that("over() only requires first argument", {
local_con(simulate_dbi())
expect_equal(win_over("X"), sql("'X' OVER ()"))
})
test_that("multiple group by or order values don't have parens", {
local_con(simulate_dbi())
expect_equal(
win_over(ident("x"), order = c("x", "y")),
sql("`x` OVER (ORDER BY `x`, `y`)")
)
expect_equal(
win_over(ident("x"), partition = c("x", "y")),
sql("`x` OVER (PARTITION BY `x`, `y`)")
)
})
test_that("window_frame()", {
lf <- lazy_frame(x = runif(10), y = 1:10)
expect_snapshot(
lf %>%
window_frame(-3, 0) %>%
window_order(x) %>%
mutate(z = sum(y)) %>%
show_query()
)
})
test_that("window_frame() checks arguments", {
skip_if(getRversion() <= '3.5.0', "R too old")
lf <- lazy_frame(x = runif(10), y = 1:10)
expect_snapshot(error = TRUE, window_frame(lf, "a"))
expect_snapshot(error = TRUE, window_frame(lf, 1:2))
expect_snapshot(error = TRUE, window_frame(lf, 1, "a"))
expect_snapshot(error = TRUE, window_frame(lf, 1, 1:2))
}) |
lexicon_bing <- function(dir = NULL, delete = FALSE, return_path = FALSE,
clean = FALSE, manual_download = FALSE) {
load_dataset(data_name = "bing", name = "bing.rds", dir = dir,
delete = delete, return_path = return_path, clean = clean,
manual_download = manual_download)
}
download_bing <- function(folder_path) {
file_path_neg <- path(folder_path, "negative-words.txt")
file_path_pos <- path(folder_path, "positive-words.txt")
if (file_exists(file_path_pos) & file_exists(file_path_neg)) {
return(invisible())
}
download.file(url = "http://ptrckprry.com/course/ssd/data/negative-words.txt",
destfile = file_path_neg)
download.file(url = "http://ptrckprry.com/course/ssd/data/positive-words.txt",
destfile = file_path_pos)
}
process_bing <- function(folder_path, name_path) {
file_path_neg <- path(folder_path, "negative-words.txt")
file_path_pos <- path(folder_path, "positive-words.txt")
neg_words <- read_lines(file_path_neg, skip = 35)
pos_words <- read_lines(file_path_pos, skip = 35)
data <- tibble(word = c(neg_words, pos_words),
sentiment = rep(c("negative", "positive"),
c(length(neg_words), length(pos_words))))
write_rds(data, name_path)
} |
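# Hedged usage sketch: on first call lexicon_bing() downloads and caches the
# lexicon via load_dataset() (defined elsewhere in this package), then returns
# a tibble with one `word` and one `sentiment` ("negative"/"positive") column.
# bing <- lexicon_bing()
# table(bing$sentiment)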
library(testthat)
library(hablar)
context("could this be that")
test_that("could_chr_be_num", {
expect_equal(could_chr_be_num("a"), F)
expect_equal(could_chr_be_num("."), F)
expect_equal(could_chr_be_num(" 3"), T)
expect_equal(could_chr_be_num("3 0"), F)
expect_equal(could_chr_be_num("2018-03-01"), F)
expect_equal(could_chr_be_num("2018-10-09 19:19:26 CEST"), F)
expect_equal(could_chr_be_num("1"), T)
expect_equal(could_chr_be_num(".56"), T)
expect_equal(could_chr_be_num("7.0"), T)
expect_equal(could_chr_be_num("0003"), T)
expect_equal(could_chr_be_num("1.98"), T)
expect_equal(could_chr_be_num(as.character(c(NA, NA))), F)
expect_equal(could_chr_be_num(".98"), T)
expect_equal(could_chr_be_num(as.character(NA)), F)
expect_error(could_chr_be_num(as.numeric(1)))
expect_error(could_chr_be_num())
expect_error(could_chr_be_num(c()))
expect_error(could_chr_be_num(data.frame(a = c(1,3,4))))
expect_error(could_chr_be_num(list(a = c(1,3,4))))
})
test_that("could_chr_be_int", {
expect_equal(could_chr_be_int("a"), F)
expect_equal(could_chr_be_int("."), F)
expect_equal(could_chr_be_int(" 3"), T)
expect_equal(could_chr_be_int("3 0"), F)
expect_equal(could_chr_be_int("2018-03-01"), F)
expect_equal(could_chr_be_int("2018-10-09 19:19:26 CEST"), F)
expect_equal(could_chr_be_int("1"), T)
expect_equal(could_chr_be_int(".56"), F)
expect_equal(could_chr_be_int("7.0"), T)
expect_equal(could_chr_be_int("0003"), T)
expect_equal(could_chr_be_int("1,98"), F)
expect_equal(could_chr_be_int(as.character(c())), F)
expect_equal(could_chr_be_int(as.character(c(NA, NA))), F)
expect_equal(could_chr_be_int(",98"), F)
expect_equal(could_chr_be_int(as.character(NA)), F)
expect_error(could_chr_be_int(as.numeric(1)))
expect_error(could_chr_be_int())
expect_error(could_chr_be_int(data.frame(a = c(1,3,4))))
expect_error(could_chr_be_num(list(a = c(1,3,4))))
})
test_that("could_num_be_int", {
expect_equal(could_num_be_int(as.numeric(1)), T)
expect_equal(could_num_be_int(c(1, 2)), T)
expect_equal(could_num_be_int(c(1, 2.6)), F)
expect_equal(could_num_be_int(as.numeric(NA)), F)
expect_equal(could_num_be_int(as.numeric(c())), F)
expect_error(could_num_be_int(as.character(c())))
expect_error(could_num_be_int())
expect_error(could_num_be_int("a"))
expect_error(could_num_be_int(",98"))
expect_error(could_num_be_int(as.character(NA)))
expect_error(could_num_be_int(data.frame(a = c(1,3,4))))
expect_error(could_chr_be_num(list(a = c(1,3,4))))
})
test_that("could_chr_be_dtm", {
expect_equal(could_chr_be_dtm("a"), F)
expect_equal(could_chr_be_dtm("."), F)
expect_equal(could_chr_be_dtm(" 3"), F)
expect_equal(could_chr_be_dtm("3 0"), F)
expect_equal(could_chr_be_dtm("2018-03-01"), T)
expect_equal(could_chr_be_dtm("2018-10-09 19:19:26 CEST"), T)
expect_equal(could_chr_be_dtm(as.character(c("2018-10-09 19:19:26 CEST", "2018-10-09 19:19:27 CEST", NA))), T)
expect_equal(could_chr_be_dtm("1"), F)
expect_equal(could_chr_be_dtm("7.0"), F)
expect_equal(could_chr_be_dtm("0003"), F)
expect_equal(could_chr_be_dtm(as.character(NA)), F)
expect_error(could_chr_be_dtm(as.POSIXct(c("2018-10-09 19:19:26 CEST", "2018-10-09 19:19:27 CEST"))))
expect_error(could_chr_be_dtm(as.Date(c(NA, NA))))
expect_error(could_chr_be_dtm(as.numeric(1)))
expect_error(could_chr_be_dtm(as.factor("2018-03-01")))
expect_error(could_chr_be_dtm(data.frame(a = c(1,3,4))))
expect_error(could_chr_be_num(list(a = c(1,3,4))))
})
test_that("could_dtm_be_dte", {
expect_equal(could_dtm_be_dte(as.POSIXct("2018-03-01")), T)
expect_equal(could_dtm_be_dte(as.POSIXct(c("2018-03-01", "2018-03-03"))), T)
expect_equal(could_dtm_be_dte(as.POSIXct(c("2018-10-09 19:19:26 CEST", "2018-10-09 19:19:27 CEST"))), F)
expect_equal(could_dtm_be_dte(as.POSIXct(c("2018-10-09 19:19:26 CEST", "2018-10-09 19:19:27 CEST", NA))), F)
expect_equal(could_dtm_be_dte(as.POSIXct(NA)), F)
expect_error(could_dtm_be_dte("a"))
expect_error(could_dtm_be_dte(as.Date(c(NA, NA))))
expect_error(could_dtm_be_dte(as.numeric(1)))
expect_error(could_dtm_be_dte(as.factor("2018-03-01")))
expect_error(could_dtm_be_dte(data.frame(a = c(1,3,4))))
expect_error(could_chr_be_num(list(a = c(1,3,4))))
}) |
setConstructorS3("OpticalBackgroundCorrection", function(..., minimum=1) {
extend(BackgroundCorrection(..., typesToUpdate="pmmm"),
"OpticalBackgroundCorrection",
.minimum = minimum
)
})
setMethodS3("getParameters", "OpticalBackgroundCorrection", function(this, ...) {
params <- NextMethod("getParameters")
params2 <- list(
minimum = this$.minimum
)
params <- c(params, params2)
params
}, protected=TRUE)
setMethodS3("process", "OpticalBackgroundCorrection", function(this, ..., force=FALSE, verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
verbose && enter(verbose, "Background correcting data set")
if (!force && isDone(this)) {
verbose && cat(verbose, "Already background corrected for \"optical\" effects")
verbose && exit(verbose)
outputDataSet <- getOutputDataSet(this)
return(outputDataSet)
}
ds <- getInputDataSet(this)
outputPath <- getPath(this)
cdf <- getCdf(ds)
params <- getParameters(this)
subsetToUpdate <- params$subsetToUpdate
typesToUpdate <- params$typesToUpdate
minimum <- params$minimum
params <- NULL
hasSubsetToUpdate <- getFraction <- FALSE
nbrOfCells <- nbrOfCells(cdf)
nbrOfArrays <- length(ds)
verbose && cat(verbose, "Number of arrays: ", nbrOfArrays)
res <- listenv()
for (ii in seq_along(ds)) {
df <- ds[[ii]]
verbose && enter(verbose, sprintf("Array
filename <- basename(getPathname(df))
filename <- gsub("[.]cel$", ".CEL", filename);
pathname <- Arguments$getWritablePathname(filename, path=outputPath,
mustNotExist=FALSE)
pathname <- AffymetrixFile$renameToUpperCaseExt(pathname)
if (!force && isFile(pathname)) {
verbose && cat(verbose, "Already processed. Skipping.")
dfOut <- newInstance(df, pathname)
setCdf(dfOut, cdf)
res[[ii]] <- pathname
verbose && exit(verbose)
next
}
if (!hasSubsetToUpdate) {
verbose && enter(verbose, "Identifying cells to be updated")
subsetToUpdate <- identifyCells(cdf, indices=subsetToUpdate,
types=typesToUpdate)
verbose && cat(verbose, "Number of cells: ", length(subsetToUpdate))
verbose && str(verbose, subsetToUpdate)
hasSubsetToUpdate <- TRUE
verbose && exit(verbose)
}
res[[ii]] %<-% {
verbose && enter(verbose, "Reading probe intensities")
x <- readRawData(df, fields="intensities", verbose=less(verbose,2))
x <- x$intensities
verbose && exit(verbose)
verbose && enter(verbose, "Adjusting background for optical effect")
arrayMinimum <- min(x[subsetToUpdate], na.rm=TRUE)
verbose && printf(verbose, "Array minimum: %.2f\n", arrayMinimum)
xdiff <- (arrayMinimum - minimum)
verbose && printf(verbose, "Correction: -(%.2f-%.2f) = %+.2f\n",
arrayMinimum, minimum, -xdiff)
x[subsetToUpdate] <- x[subsetToUpdate] - xdiff
verbose && exit(verbose)
verbose && enter(verbose, "Writing adjusted probe signals")
isFile <- (force && isFile(pathname))
pathnameT <- pushTemporaryFile(pathname, isFile=isFile, verbose=verbose)
verbose && enter(verbose, "Creating CEL file for results, if missing")
createFrom(df, filename=pathnameT, path=NULL, verbose=less(verbose))
verbose && exit(verbose)
verbose && enter(verbose, "Writing adjusted intensities")
.updateCel(pathnameT, intensities=x)
verbose && exit(verbose)
popTemporaryFile(pathnameT, verbose=verbose)
dfOut <- newInstance(df, pathname)
setCdf(dfOut, cdf)
dfZ <- getChecksumFile(dfOut)
verbose && exit(verbose)
pathname
}
verbose && exit(verbose)
}
subsetToUpdate <- NULL
res <- as.list(res)
res <- NULL
gc <- gc()
verbose && print(verbose, gc)
outputDataSet <- getOutputDataSet(this)
verbose && exit(verbose)
outputDataSet
}) |
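# Hedged usage sketch (assumes an AffymetrixCelSet `csR` already loaded with
# aroma.affymetrix; process() writes corrected CEL files and returns the
# output data set):
# obc <- OpticalBackgroundCorrection(csR, minimum = 1)
# csB <- process(obc, verbose = TRUE)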
stat_bin <- function(mapping = NULL, data = NULL,
geom = "bar", position = "stack",
...,
binwidth = NULL,
bins = NULL,
center = NULL,
boundary = NULL,
breaks = NULL,
closed = c("right", "left"),
pad = FALSE,
na.rm = FALSE,
orientation = NA,
show.legend = NA,
inherit.aes = TRUE) {
layer(
data = data,
mapping = mapping,
stat = StatBin,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
binwidth = binwidth,
bins = bins,
center = center,
boundary = boundary,
breaks = breaks,
closed = closed,
pad = pad,
na.rm = na.rm,
orientation = orientation,
...
)
)
}
StatBin <- ggproto("StatBin", Stat,
setup_params = function(data, params) {
params$flipped_aes <- has_flipped_aes(data, params, main_is_orthogonal = FALSE)
has_x <- !(is.null(data$x) && is.null(params$x))
has_y <- !(is.null(data$y) && is.null(params$y))
if (!has_x && !has_y) {
abort("stat_bin() requires an x or y aesthetic.")
}
if (has_x && has_y) {
abort("stat_bin() can only have an x or y aesthetic.")
}
x <- flipped_names(params$flipped_aes)$x
if (is.integer(data[[x]])) {
abort(glue("StatBin requires a continuous {x} variable: the {x} variable is discrete.",
"Perhaps you want stat=\"count\"?"))
}
if (!is.null(params$drop)) {
warn("`drop` is deprecated. Please use `pad` instead.")
params$drop <- NULL
}
if (!is.null(params$origin)) {
warn("`origin` is deprecated. Please use `boundary` instead.")
params$boundary <- params$origin
params$origin <- NULL
}
if (!is.null(params$right)) {
warn("`right` is deprecated. Please use `closed` instead.")
params$closed <- if (params$right) "right" else "left"
params$right <- NULL
}
if (!is.null(params$width)) {
abort("`width` is deprecated. Do you want `geom_bar()`?")
}
if (!is.null(params$boundary) && !is.null(params$center)) {
abort("Only one of `boundary` and `center` may be specified.")
}
if (is.null(params$breaks) && is.null(params$binwidth) && is.null(params$bins)) {
message_wrap("`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.")
params$bins <- 30
}
params
},
extra_params = c("na.rm", "orientation"),
compute_group = function(data, scales, binwidth = NULL, bins = NULL,
center = NULL, boundary = NULL,
closed = c("right", "left"), pad = FALSE,
breaks = NULL, flipped_aes = FALSE,
origin = NULL, right = NULL, drop = NULL,
width = NULL) {
x <- flipped_names(flipped_aes)$x
if (!is.null(breaks)) {
if (!scales[[x]]$is_discrete()) {
breaks <- scales[[x]]$transform(breaks)
}
bins <- bin_breaks(breaks, closed)
} else if (!is.null(binwidth)) {
if (is.function(binwidth)) {
binwidth <- binwidth(data[[x]])
}
bins <- bin_breaks_width(scales[[x]]$dimension(), binwidth,
center = center, boundary = boundary, closed = closed)
} else {
bins <- bin_breaks_bins(scales[[x]]$dimension(), bins, center = center,
boundary = boundary, closed = closed)
}
bins <- bin_vector(data[[x]], bins, weight = data$weight, pad = pad)
bins$flipped_aes <- flipped_aes
flip_data(bins, flipped_aes)
},
default_aes = aes(x = after_stat(count), y = after_stat(count), weight = 1),
required_aes = "x|y"
) |
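# Minimal usage sketch for the stat_bin() layer defined above (assumes
# ggplot2 is attached so ggplot()/aes() resolve); bins mtcars$mpg into 10 bins:
# library(ggplot2)
# ggplot(mtcars, aes(mpg)) + stat_bin(bins = 10)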
make.dmu.deta <- function(linkstr) switch(linkstr,
"logit" = {
logit_link <- make.link("logit")
function(eta) logit_link$mu.eta(eta) * (1 - 2 * logit_link$linkinv(eta))
},
"probit" = function(eta) -eta * pmax(dnorm(eta), .Machine$double.eps),
"cauchit" = function(eta) -2 * pi * eta * pmax(dcauchy(eta)^2, .Machine$double.eps),
"cloglog" = function(eta) pmax((1 - exp(eta)) * exp(eta - exp(eta)), .Machine$double.eps),
"loglog" = function(eta) pmax(exp(-exp(-eta) - eta) * expm1(-eta), .Machine$double.eps),
"identity" = function(eta) rep.int(0, length(eta)),
"log" = function(eta) pmax(exp(eta), .Machine$double.eps),
"sqrt" = function(eta) rep.int(2, length(eta)),
"1/mu^2" = function(eta) 3/(4 * eta^2.5),
"inverse" = function(eta) 2/(eta^3)
) |
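# Sanity check (a sketch, not part of the original sources): the analytic
# second derivative d^2 mu / d eta^2 for the logit link should agree with a
# central finite difference of make.link("logit")$mu.eta.
local({
  dmu.deta <- make.dmu.deta("logit")
  mu.eta <- make.link("logit")$mu.eta
  eta <- seq(-3, 3, by = 0.5)
  h <- 1e-5
  fd <- (mu.eta(eta + h) - mu.eta(eta - h)) / (2 * h)
  stopifnot(all.equal(dmu.deta(eta), fd, tolerance = 1e-6))
})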
Lognormal <- R6Class("Lognormal",
inherit = SDistribution, lock_objects = F,
public = list(
name = "Lognormal",
short_name = "Lnorm",
description = "Lognormal Probability Distribution.",
packages = "stats",
initialize = function(meanlog = NULL, varlog = NULL, sdlog = NULL, preclog = NULL,
mean = NULL, var = NULL, sd = NULL, prec = NULL,
decorators = NULL) {
super$initialize(
decorators = decorators,
support = PosReals$new(),
type = PosReals$new()
)
},
mean = function(...) {
unlist(self$getParameterValue("mean"))
},
mode = function(which = "all") {
exp(unlist(self$getParameterValue("meanlog")) - unlist(self$getParameterValue("varlog")))
},
median = function() {
exp(unlist(self$getParameterValue("meanlog")))
},
variance = function(...) {
unlist(self$getParameterValue("var"))
},
skewness = function(...) {
varlog <- unlist(self$getParameterValue("varlog"))
return(sqrt(exp(varlog) - 1) * (exp(varlog) + 2))
},
kurtosis = function(excess = TRUE, ...) {
varlog <- unlist(self$getParameterValue("varlog"))
if (excess) {
return((exp(4 * varlog) + 2 * exp(3 * varlog) + 3 * exp(2 * varlog) - 6))
} else {
return((exp(4 * varlog) + 2 * exp(3 * varlog) + 3 * exp(2 * varlog) - 3))
}
},
entropy = function(base = 2, ...) {
log(sqrt(2 * pi) * unlist(self$getParameterValue("sdlog")) *
exp(unlist(self$getParameterValue("meanlog")) + 0.5), base)
},
mgf = function(t, ...) {
return(NaN)
},
pgf = function(z, ...) {
return(NaN)
}
),
private = list(
.pdf = function(x, log = FALSE) {
meanlog <- self$getParameterValue("meanlog")
sdlog <- self$getParameterValue("sdlog")
call_C_base_pdqr(
fun = "dlnorm",
x = x,
args = list(
meanlog = unlist(meanlog),
sdlog = unlist(sdlog)
),
log = log,
vec = test_list(meanlog)
)
},
.cdf = function(x, lower.tail = TRUE, log.p = FALSE) {
meanlog <- self$getParameterValue("meanlog")
sdlog <- self$getParameterValue("sdlog")
call_C_base_pdqr(
fun = "plnorm",
x = x,
args = list(
meanlog = unlist(meanlog),
sdlog = unlist(sdlog)
),
lower.tail = lower.tail,
log = log.p,
vec = test_list(meanlog)
)
},
.quantile = function(p, lower.tail = TRUE, log.p = FALSE) {
meanlog <- self$getParameterValue("meanlog")
sdlog <- self$getParameterValue("sdlog")
call_C_base_pdqr(
fun = "qlnorm",
x = p,
args = list(
meanlog = unlist(meanlog),
sdlog = unlist(sdlog)
),
lower.tail = lower.tail,
log = log.p,
vec = test_list(meanlog)
)
},
.rand = function(n) {
meanlog <- self$getParameterValue("meanlog")
sdlog <- self$getParameterValue("sdlog")
call_C_base_pdqr(
fun = "rlnorm",
x = n,
args = list(
meanlog = unlist(meanlog),
sdlog = unlist(sdlog)
),
vec = test_list(meanlog)
)
},
.traits = list(valueSupport = "continuous", variateForm = "univariate")
)
)
.distr6$distributions <- rbind(
.distr6$distributions,
data.table::data.table(
ShortName = "Lnorm", ClassName = "Lognormal",
Type = "\u211D+", ValueSupport = "continuous",
VariateForm = "univariate",
Package = "stats", Tags = ""
)
) |
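# Sketch of a numerical check (not from the original sources) of the moment
# identities used above: for X ~ Lognormal(meanlog, sdlog),
# E[X] = exp(meanlog + sdlog^2 / 2) and the median is exp(meanlog).
local({
  meanlog <- 0.3; sdlog <- 0.8
  m <- integrate(function(x) x * dlnorm(x, meanlog, sdlog), 0, Inf,
                 rel.tol = 1e-8)$value
  stopifnot(all.equal(m, exp(meanlog + sdlog^2 / 2), tolerance = 1e-6))
  stopifnot(all.equal(qlnorm(0.5, meanlog, sdlog), exp(meanlog)))
})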
tryCatch <- function(expr, ..., finally) {
tryCatchList <- function(expr, names, parentenv, handlers) {
nh <- length(names)
if (nh > 1L)
tryCatchOne(tryCatchList(expr, names[-nh], parentenv,
handlers[-nh]),
names[nh], parentenv, handlers[[nh]])
else if (nh == 1L)
tryCatchOne(expr, names, parentenv, handlers[[1L]])
else expr
}
tryCatchOne <- function(expr, name, parentenv, handler) {
doTryCatch <- function(expr, name, parentenv, handler) {
.Internal(.addCondHands(name, list(handler), parentenv,
environment(), FALSE))
expr
}
value <- doTryCatch(return(expr), name, parentenv, handler)
if (is.null(value[[1L]])) {
msg <- .Internal(geterrmessage())
call <- value[[2L]]
cond <- simpleError(msg, call)
}
else cond <- value[[1L]]
value[[3L]](cond)
}
if (! missing(finally))
on.exit(finally)
handlers <- list(...)
classes <- names(handlers)
parentenv <- parent.frame()
if (length(classes) != length(handlers))
stop("bad handler specification")
tryCatchList(expr, classes, parentenv, handlers)
}
withCallingHandlers <- function(expr, ...) {
handlers <- list(...)
classes <- names(handlers)
parentenv <- parent.frame()
if (length(classes) != length(handlers))
stop("bad handler specification")
.Internal(.addCondHands(classes, handlers, parentenv, NULL, TRUE))
expr
}
suppressWarnings <- function(expr) {
ops <- options(warn = -1)
on.exit(options(ops))
withCallingHandlers(expr,
warning=function(w)
invokeRestart("muffleWarning"))
}
simpleCondition <- function(message, call = NULL) {
class <- c("simpleCondition", "condition")
structure(list(message=as.character(message), call = call), class=class)
}
simpleError <- function(message, call = NULL) {
class <- c("simpleError", "error", "condition")
structure(list(message=as.character(message), call = call), class=class)
}
simpleWarning <- function(message, call = NULL) {
class <- c("simpleWarning", "warning", "condition")
structure(list(message=as.character(message), call = call), class=class)
}
conditionMessage <- function(c) UseMethod("conditionMessage")
conditionCall <- function(c) UseMethod("conditionCall")
conditionMessage.condition <- function(c) c$message
conditionCall.condition <- function(c) c$call
print.condition <- function(x, ...) {
msg <- conditionMessage(x)
call <- conditionCall(x)
cl <- class(x)[1L]
if (! is.null(call))
cat("<", cl, " in ", deparse(call), ": ", msg, ">\n", sep="")
else
cat("<", cl, ": ", msg, ">\n", sep="")
invisible(x)
}
as.character.condition <- function(x, ...) {
msg <- conditionMessage(x)
call <- conditionCall(x)
cl <- class(x)[1L]
if (! is.null(call))
paste(cl, " in ", deparse(call)[1L], ": ", msg, "\n", sep="")
else
paste(cl, ": ", msg, "\n", sep="")
}
as.character.error <- function(x, ...) {
msg <- conditionMessage(x)
call <- conditionCall(x)
if (! is.null(call))
paste("Error in ", deparse(call)[1L], ": ", msg, "\n", sep="")
else
paste("Error: ", msg, "\n", sep="")
}
signalCondition <- function(cond) {
if (! inherits(cond, "condition"))
cond <- simpleCondition(cond)
msg <- conditionMessage(cond)
call <- conditionCall(cond)
.Internal(.signalCondition(cond, msg, call))
}
restartDescription <- function(r) r$description
restartFormals <- function(r) formals(r$handler)
print.restart <- function(x, ...) {
cat(paste("<restart:", x[[1L]], ">\n"))
invisible(x)
}
isRestart <- function(x) inherits(x, "restart")
findRestart <- function(name, cond = NULL) {
i <- 1L
repeat {
r <- .Internal(.getRestart(i))
if (is.null(r))
return(NULL)
else if (name == r[[1L]] &&
(is.null(cond) || is.null(r$test) || r$test(cond)))
return(r)
else i <- i + 1L
}
}
computeRestarts <- function(cond = NULL) {
val <- NULL
i <- 1L
repeat {
r <- .Internal(.getRestart(i))
if (is.null(r))
return(val)
else if (is.null(cond) || is.null(r$test) || r$test(cond))
val <- c(val, list(r))
i <- i + 1L
}
}
invokeRestart <- function(r, ...) {
if (! isRestart(r)) {
res <- findRestart(r)
if (is.null(res))
stop(gettextf("no 'restart' '%s' found", as.character(r)),
domain = NA)
r <- res
}
.Internal(.invokeRestart(r, list(...)))
}
invokeRestartInteractively <- function(r) {
if (! interactive())
stop("not an interactive session")
if (! isRestart(r)) {
res <- findRestart(r)
if (is.null(res))
stop(gettextf("no 'restart' '%s' found", as.character(r)),
domain = NA)
r <- res
}
if (is.null(r$interactive)) {
pars <- names(restartFormals(r))
args <- NULL
if (length(pars)) {
cat("Enter values for restart arguments:\n\n")
for (p in pars) {
if (p == "...") {
prompt <- "... (a list): "
args <- c(args, eval(parse(prompt = prompt)))
}
else {
prompt <- paste(p, ": ", sep="")
args <- c(args, list(eval(parse(prompt = prompt))))
}
}
}
}
else args <- r$interactive()
.Internal(.invokeRestart(r, args))
}
withRestarts <- function(expr, ...) {
docall <- function(fun, args) {
if ((is.character(fun) && length(fun) == 1L) || is.name(fun))
fun <- get(as.character(fun), envir = parent.frame(),
mode = "function")
do.call("fun", lapply(args, enquote))
}
makeRestart <- function(name = "",
handler = function(...) NULL,
description = "",
test = function(c) TRUE,
interactive = NULL) {
structure(list(name = name, exit = NULL, handler = handler,
description = description, test = test,
interactive = interactive),
class = "restart")
}
makeRestartList <- function(...) {
specs <- list(...)
names <- names(specs)
restarts <- vector("list", length(specs))
for (i in seq_along(specs)) {
spec <- specs[[i]]
name <- names[i]
if (is.function(spec))
restarts[[i]] <- makeRestart(handler = spec)
else if (is.character(spec))
restarts[[i]] <- makeRestart(description = spec)
else if (is.list(spec))
restarts[[i]] <- docall("makeRestart", spec)
else
stop("not a valid restart specification")
restarts[[i]]$name <- name
}
restarts
}
withOneRestart <- function(expr, restart) {
doWithOneRestart <- function(expr, restart) {
restart$exit <- environment()
.Internal(.addRestart(restart))
expr
}
restartArgs <- doWithOneRestart(return(expr), restart)
docall(restart$handler, restartArgs)
}
withRestartList <- function(expr, restarts) {
nr <- length(restarts)
if (nr > 1L)
withOneRestart(withRestartList(expr, restarts[-nr]),
restarts[[nr]])
else if (nr == 1L)
withOneRestart(expr, restarts[[1L]])
else expr
}
restarts <- makeRestartList(...)
if (length(restarts) == 0L)
expr
else if (length(restarts) == 1L)
withOneRestart(expr, restarts[[1L]])
else withRestartList(expr, restarts)
}
.signalSimpleWarning <- function(msg, call, immediate = FALSE)
withRestarts({
.Internal(.signalCondition(simpleWarning(msg, call), msg, call))
.Internal(.dfltWarn(msg, call, immediate))
}, muffleWarning = function() NULL)
.handleSimpleError <- function(h, msg, call)
h(simpleError(msg, call)) |
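# Illustration (not part of the sources above) of the restart machinery:
# establish a restart with withRestarts() and transfer control to it with
# invokeRestart(), skipping the code that follows the invocation.
local({
  res <- withRestarts({
    invokeRestart("skipRest", 42)
    stop("never reached")
  }, skipRest = function(value) value)
  stopifnot(identical(res, 42))
})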
popsamp <-
function (n, pop,...)
{
if (n > nrow(pop)) stop("Sample size cannot exceed population size")
poprows <- 1:nrow(pop)
popcols <- 1:ncol(pop)
rsamp <- sample(poprows, size = n,...)
"["(pop,rsamp,popcols,drop=FALSE)
} |
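# Example: draw a reproducible 3-row simple random sample from a population
# data frame (iris ships with base R's datasets package).
set.seed(1)
popsamp(3, iris)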
expected <- eval(parse(text="list(character(0), \"c0\")"));
test(id=0, code={
argv <- eval(parse(text="list(structure(list(c0 = structure(integer(0), .Label = character(0), class = \"factor\")), .Names = \"c0\", row.names = character(0), class = \"data.frame\"))"));
do.call(`dimnames`, argv);
}, o=expected); |
nPV <-
function(se, sp, prev, NPV0, PPV0, NPVpower=0.8, PPVpower=0.8, rangeP=c(0.05,0.95), nsteps=20, alpha=0.05, setnames=NULL)
{
arglist<-list(se=se, sp=sp, prev=prev, NPV0=NPV0, PPV0=PPV0, NPVpower=NPVpower, PPVpower=PPVpower)
arglength<-unlist(lapply(arglist, length))
maxlength<-max(arglength)
if(is.null(setnames)){setnames<-paste("Setting ", 1:maxlength, sep="")}
ARGLIST <- lapply(X=arglist, FUN=function(x){rep(x, length.out=maxlength)})
inDAT <- as.data.frame(ARGLIST)
rownames(inDAT) <- make.unique(rep(setnames, length.out=maxlength))
inPPV<-apply(X=inDAT, MARGIN=1, FUN=function(x){ppv(p=x["prev"], se=x["se"], sp=x["sp"])})
inNPV<-apply(X=inDAT, MARGIN=1, FUN=function(x){npv(p=x["prev"], se=x["se"], sp=x["sp"])})
CONF.LEVEL=1-alpha
Pseq <- seq(from=min(rangeP), to=max(rangeP), length.out=nsteps)
nlist<-list()
for (i in 1:maxlength)
{
PPV <- nPPV(propP=Pseq, se=ARGLIST$se[i], sp=ARGLIST$sp[i], prev=ARGLIST$prev[i],
PPV0=ARGLIST$PPV0[i], power=ARGLIST$PPVpower[i], conf.level=CONF.LEVEL )
NPV <- nNPV(propP=Pseq, se=ARGLIST$se[i], sp=ARGLIST$sp[i], prev=ARGLIST$prev[i],
NPV0=ARGLIST$NPV0[i], power=ARGLIST$NPVpower[i], conf.level=CONF.LEVEL )
nlist[[i]]<-list(NPV=NPV, PPV=PPV)
}
outDAT<-cbind(inDAT, trueNPV=inNPV, truePPV=inPPV)
RES<-list(outDAT=outDAT, nlist=nlist, NSETS=maxlength, nsteps=nsteps, rangeP=rangeP, propP=Pseq)
class(RES) <- "nPV"
return(RES)
} |
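# Hedged usage sketch (the helpers ppv(), npv(), nPPV(), nNPV() are defined
# elsewhere in this package); one diagnostic setting with target NPV0/PPV0:
# res <- nPV(se = 0.76, sp = 0.93, prev = 0.0625, NPV0 = 0.98, PPV0 = 0.4)
# res$outDAT  # true NPV/PPV per setting; res$nlist holds the sample sizes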
set_pushover_app <- function(token = NULL, ask = is_interactive()) {
if (is.null(token)) {
if (ask && is_interactive()) {
cli::cli_alert_info("{.envvar PUSHOVER_APP} is not set, and application token not provided (see {.code ?pushoverr} for details)")
in_token <- readline("Please enter your application token: ")
Sys.setenv("PUSHOVER_APP" = in_token)
} else {
cli::cli_abort("Set Pushover application token by providing value for argument {.arg app} or setting {.envvar PUSHOVER_APP}. See {.code pushoverr} for details.")
}
} else {
if (identical(Sys.getenv("PUSHOVER_APP"), token)) {
cli::cli_alert_info("Pushover app was already set to {.code {token}}")
}
Sys.setenv("PUSHOVER_APP" = token)
}
}
get_pushover_app <- function(ask = is_interactive()) {
if (!pushover_app.isset()) {
set_pushover_app(ask = ask)
}
Sys.getenv("PUSHOVER_APP")
}
unset_pushover_app <- function() {
if (!pushover_app.isset()) {
cli::cli_alert_info("{.envvar PUSHOVER_APP} is not set")
}
Sys.unsetenv("PUSHOVER_APP")
}
pushover_app.isset <- function() {
!is.na(Sys.getenv("PUSHOVER_APP", NA_character_))
} |
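# Hedged round trip (the token below is a dummy placeholder, not a real key):
# set_pushover_app(token = "azGDORePK8gMaC0QOYAMyEEuzJnyUi")
# stopifnot(pushover_app.isset())
# unset_pushover_app()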
{
library("spinifex")
library("testthat")
r_idx <- c(1:3, (nrow(wine) - 2):nrow(wine))
sub <- wine[r_idx, ]
dat_std <- scale_sd(sub[, 2:6])
clas <- sub$Type
bas <- basis_pca(dat_std)
mv <- manip_var_of(bas)
}
rb <- tourr::basis_random(ncol(dat_std), 2)
ib <- tourr::basis_init(n = 4, 2)
b_pca <- basis_pca(dat_std)
b_guide <- basis_guided(data = dat_std, index_f = tourr::holes())
diag4 <- diag(4)
not_orth <- matrix(sample(1:16, 16), ncol=4)
test_that("is_orthonormal: bases functions & diag() are orthonormal, rand matrix isn't", {
expect_true(is_orthonormal(rb))
expect_true(is_orthonormal(ib))
expect_true(is_orthonormal(b_pca))
expect_true(is_orthonormal(b_guide))
expect_true(is_orthonormal(diag4))
expect_false(is_orthonormal(not_orth))
})
ret_t <- is_orthonormal(rb)
ret_f <- is_orthonormal(not_orth)
test_that("is_orthonormal: returns are logical.", {
expect_equal(class(ret_t), "logical")
expect_equal(class(ret_f), "logical")
})
array_single <- array(bas, dim = c(dim(bas), 1))
attr(array_single, "manip_var") <- mv
ret_single <- array2df(basis_array = array_single)
mt_array <- manual_tour(basis = bas, manip_var = mv)
ret_mt <- array2df(basis_array = mt_array, data = dat_std,
basis_label = paste0("MyLabs", 1:nrow(bas)),
data_label = paste0("obs
gt_array <- save_history(data = dat_std, max_bases = 10)
class(gt_array) <- "array"
ret_gt <- array2df(basis_array = gt_array, data = dat_std,
basis_label = paste0("MyLabs", 1:nrow(bas)),
data_label = paste0("obs
test_that("array2df: class", {
expect_equal(class(ret_single), "list")
expect_equal(class(ret_mt ), "list")
expect_equal(class(ret_gt ), "list")
})
test_that("array2df: length", {
expect_equal(length(ret_single), 1)
expect_equal(length(ret_mt) , 2)
expect_equal(length(ret_gt) , 2)
})
ret <- map_relative(x = bas, position = "bottomleft")
ret_to <- map_relative(x = bas, position = "topright", to = wine[, 2:3])
test_that("map_relative: class and dim", {
expect_equal(class(ret ), c("matrix" ,"array"))
expect_equal(class(ret_to), c("matrix" ,"array"))
expect_equal(dim(ret), c(5, 2))
expect_equal(dim(ret_to), c(5, 2))
})
ret_mat <- map_absolute(x = bas, offset = c(-1, 0), scale = c(2/3, 2/3))
ret_df <- map_absolute(x = mtcars[,1:2], offset = c(0, 100), scale = c(.1, .1))
test_that("map_absolute: class and dim", {
expect_equal(class(ret_mat), c("matrix" ,"array"))
expect_equal(class(ret_df ), "data.frame")
expect_equal(dim(ret_mat), c(5, 2))
expect_equal(dim(ret_df), c(32, 2))
})
t <- theme_spinifex()
g <- ggplot2::ggplot() + t
test_that("theme_spinifex: class", {
expect_equal(class(t), "list")
expect_equal(class(g), c("gg", "ggplot"))
})
b1 <- basis_pca(dat_std)
b2 <- basis_half_circle(dat_std)
b3 <- basis_odp(dat_std, clas)
b4 <- basis_olda(wine[, 2:6], wine$Type)
test_that("basis_*", {
expect_equal(class(b1), c("matrix", "array"))
expect_equal(class(b2), c("matrix", "array"))
expect_equal(class(b3), c("matrix", "array"))
expect_equal(class(b4), c("matrix", "array"))
expect_equal(dim(b1), c(5, 2))
expect_equal(dim(b2), c(5, 2))
expect_equal(dim(b3), c(5, 2))
expect_equal(dim(b4), c(5, 2))
expect_equal(is_orthonormal(b1), TRUE)
expect_equal(is_orthonormal(b2), TRUE)
expect_equal(is_orthonormal(b3), TRUE)
expect_equal(is_orthonormal(b4), TRUE)
})
ret_holes <- basis_guided(data = dat_std, index_f = tourr::holes())
ret_cmass <- basis_guided(data = dat_std, index_f = tourr::cmass(),
alpha = .4, cooling = .9, max.tries = 30)
test_that("basis_guided: class and dim", {
expect_equal(class(ret_holes), c("matrix", "array"))
expect_equal(class(ret_cmass), c("matrix", "array"))
expect_equal(dim(ret_holes), c(5, 2))
expect_equal(dim(ret_cmass), c(5, 2))
expect_equal(is_orthonormal(ret_holes), TRUE)
expect_equal(is_orthonormal(ret_cmass), TRUE)
})
ret <- manip_var_of(bas)
test_that("manip_var_of: class and dim", {
expect_equal(class(ret), "integer")
expect_equal(length(ret), 1)
expect_warning(manip_var_of(not_orth))
})
s1 <- scale_sd(mtcars)
s2 <- scale_01(dat_std)
s3 <- scale_01(as.matrix(mtcars))
test_that("scale, class, bounds, dim", {
expect_equal(class(s1), c("matrix", "array"))
expect_equal(class(s2), c("matrix", "array"))
expect_equal(class(s3), c("matrix", "array"))
expect_equal(min(s2), 0)
expect_equal(max(s2), 1)
expect_equal(min(s3), 0)
expect_equal(max(s3), 1)
expect_equal(dim(s1), dim(mtcars))
expect_equal(dim(s2), dim(dat_std))
expect_equal(dim(s3), dim(mtcars))
})
test_that("other basis_* class, orth, ", {
expect_warning(sa <- scale_axes(mtcars))
expect_equal(sa, map_relative(mtcars))
expect_warning(pz <- pan_zoom(mtcars))
expect_equal(pz, map_absolute(mtcars))
}) |
library(civis)
library(future)
start <- proc.time()
cat("setting up models...", fill = TRUE)
plan("multisession", workers = 20)
data("iris")
tmp <- tempfile()
write.csv(iris, file = tmp)
iris_id <- write_civis_file(tmp, name = "iris.csv")
do_class <- function(algo, id) {
cv <- if (grepl("perceptron", algo)) "hyperband" else NULL
future(civis_ml(civis_file(id), dependent_variable = "Species",
model_type = algo, cross_validation_parameters = cv))
}
binary_iris <- iris[with(iris, Species %in% c('virginica', 'setosa')), ]
binary_fut <- future(civis_ml(binary_iris, dependent_variable = "Species",
model_type = "sparse_logistic"))
data("ChickWeight")
tmp <- tempfile()
write.csv(ChickWeight, file = tmp)
chick_id <- write_civis_file(tmp, name = "chicweight.csv")
do_regr <- function(algo, id) {
cv <- if (grepl("perceptron", algo)) "hyperband" else NULL
future(civis_ml(civis_file(id), "weight", algo,
cross_validation_parameters = cv))
}
unlink(tmp)
cat("submitting models...", fill = T)
mclass_fut <- lapply(CIVIS_ML_CLASSIFIERS, do_class, id = iris_id)
mreg_fut <- lapply(CIVIS_ML_REGRESSORS, do_regr, id = chick_id)
multi_output_fut <- future(civis_ml(civis_file(chick_id), dependent_variable = c("weight", "Time"),
model_type = "random_forest_regressor"))
multi_class_output_fut <- future(civis_ml(civis_file(chick_id), dependent_variable = c("Time", "Diet"),
model_type = "random_forest_classifier"))
no_val_reg_fut <- future(civis_ml(civis_file(chick_id), dependent_variable = c("weight", "Time"),
model_type = "random_forest_regressor", validation_data = "skip"))
no_val_class_fut <- future(civis_ml(civis_file(iris_id), dependent_variable = c("Species"),
model_type = "sparse_logistic", validation_data = "skip"))
cat("waiting for models to complete...", fill = TRUE)
mclass_list <- lapply(c(mclass_fut, binary_fut), value)
cat(" ...classification models completed", fill = TRUE)
mreg_list <- lapply(mreg_fut, value)
cat(" ...regression models completed", fill = TRUE)
multi_output <- list(value(multi_output_fut), value(multi_class_output_fut))
cat(" ...multi output completed", fill = TRUE)
no_val <- list(value(no_val_reg_fut), value(no_val_class_fut))
ms <- c(mclass_list, mreg_list, multi_output, no_val)
cat(" ...no validation completed", fill = TRUE)
cat("writing tests/testthat/data/civis_ml_models.rds", fill = TRUE)
saveRDS(ms, "../tests/testthat/data/civis_ml_models.rds")
end <- proc.time()
tot <- end - start
cat("total time: ", tot[3], "s", fill = TRUE) |
setClass("DSObject", "VIRTUAL")
setGeneric("dsGetInfo",
def = function(dsObj, ...) standardGeneric("dsGetInfo"),
valueClass = "list") |
msc.matrix <- function(files, samples, groups) {
  if (sum(file.exists(files)) != length(files)) stop("ERROR: One or more files do not exist")
  if (!identical(samples, sort(samples))) stop("ERROR: Make sure to order your sample vector. Adapt your groups factor if necessary.")
if (length(samples) != length(groups)) stop("ERROR: The sample and groups vector are not of equal length")
if (!is.factor(groups)) stop("ERROR: The groups vector should be a factor")
id <- as.numeric(regmatches( gsub(".*/", "", files), gregexpr("[[:digit:]]+", gsub(".*/", "", files))))
clust_mat_ids <- list()
for (n in 1:length(id)) {
cat(paste0("Calculating cluster matrix with id", id[n], "...\n"))
uc_H <- read.uc(files[n])$hits
uc_C <- read.uc(files[n])$clusters
uc_C2 <- read.uc(files[n])$clustnumbers
clusters <- vector(mode = 'list', length = length(uc_C))
for (i in 1:length(uc_C2)) {
if (uc_C[uc_C$V2==uc_C2[i], 3] == 1) {
clusters[[i]] <- as.character(uc_C[uc_C$V2==uc_C2[i], 9])
} else {
clusters[[i]] <- unique(c(as.character(uc_H[uc_H$V2==uc_C2[i], 9]), as.character(uc_H[uc_H$V2==uc_C2[i],10])))
}
}
clust_mat <- matrix(nrow=length(clusters), ncol = length(samples))
colnames(clust_mat) <- samples
rownames(clust_mat) <- paste('C', uc_C[,2], sep = '')
for (t in 1:length(samples)) {
temp <- lapply(lapply(clusters, function(x) table(gsub('_contig.*','',x))),
function(x) x[which(names(x) == samples[t])])
for (ct in 1:length(temp)) {
if (length(temp[[ct]]) > 0) {
clust_mat[ct,t] <- temp[[ct]]
} else if (length(temp[[ct]]) == 0) {
clust_mat[ct,t] <- 0
}
}
}
clust_mat_ids[[n]] <- clust_mat[,order(colnames(clust_mat))]
}
names(clust_mat_ids) <- paste0("id", id)
return(clust_mat_ids)
} |
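# Hedged usage sketch (read.uc() is defined elsewhere in this package; the
# file names below are hypothetical .uc cluster files whose percent identity
# is encoded in the name and recovered as the matrix id):
# files <- c("out/all.id70.uc", "out/all.id80.uc")
# samples <- sort(c("sampleA", "sampleB"))
# groups <- factor(c("case", "control"))
# mats <- msc.matrix(files, samples, groups)
# mats$id70  # clusters x samples count matrix at 70% identity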
NULL
"worldEnergyUse"
globalVariables(c("mod", "multiple", "name", "type")) |
condition <- function(subclass, message, call = sys.call(-1), ...) {
structure(
class = c(subclass, "condition"),
list(message = message, call = call, ...)
)
}
rate_limit_exception <- function(wait_time) {
condition("rate_limit_exception",
message = "",
call = NULL,
wait_time = wait_time)
} |
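# Example (a sketch, not from the original sources): signalling the custom
# condition and recovering the wait time with a class-specific handler.
local({
  wait <- tryCatch(
    stop(rate_limit_exception(wait_time = 30)),
    rate_limit_exception = function(e) e$wait_time
  )
  stopifnot(wait == 30)
})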
test_that("interactive_charcoal",
{
skip_on_cran()
iniv <- c(0,0)
names(iniv) <- c("radius", "sigma")
expected <- image_charcoal(img, iniv[1], iniv[2])
expect_equal(expected, interactive_charcoal(img))
expect_equal(iniv, interactive_charcoal(img, return_param = TRUE))
expect_equal(expected, interactive_charcoal(img, scale = scale1))
}) |
context("gls objects")
set.seed(20190513)
library(nlme, quietly=TRUE, warn.conflicts=FALSE)
data(Ovary, package = "nlme")
Ovary$time_int <- 1:nrow(Ovary)
lm_hom <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary)
lm_power <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary,
weights = varPower())
lm_AR1 <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = Ovary,
correlation = corAR1(form = ~ time_int | Mare))
lm_AR1_power <- update(lm_AR1, weights = varPower())
test_that("bread works", {
expect_true(check_bread(lm_hom, cluster = Ovary$Mare, y = Ovary$follicles))
expect_true(check_bread(lm_power, cluster = Ovary$Mare, y = Ovary$follicles))
expect_true(check_bread(lm_AR1, cluster = Ovary$Mare, y = Ovary$follicles))
expect_true(check_bread(lm_AR1_power, cluster = Ovary$Mare, y = Ovary$follicles))
expect_equal(vcov(lm_hom), lm_hom$sigma^2 * bread(lm_hom) / v_scale(lm_hom))
expect_equal(vcov(lm_power), lm_power$sigma^2 * bread(lm_power) / v_scale(lm_power))
expect_equal(vcov(lm_AR1), lm_AR1$sigma^2 * bread(lm_AR1) / v_scale(lm_AR1))
expect_equal(vcov(lm_AR1_power), lm_AR1_power$sigma^2 * bread(lm_AR1_power) / v_scale(lm_AR1_power))
})
test_that("vcovCR options work for CR2", {
CR2_AR1 <- vcovCR(lm_AR1, type = "CR2")
expect_equal(vcovCR(lm_AR1, cluster = Ovary$Mare, type = "CR2"), CR2_AR1)
expect_equal(vcovCR(lm_AR1, type = "CR2", inverse_var = TRUE), CR2_AR1)
expect_false(identical(vcovCR(lm_AR1, type = "CR2", inverse_var = FALSE), CR2_AR1))
target <- targetVariance(lm_AR1)
expect_equal(vcovCR(lm_AR1, type = "CR2", target = target, inverse_var = TRUE), CR2_AR1)
attr(CR2_AR1, "inverse_var") <- FALSE
expect_equal(vcovCR(lm_AR1, type = "CR2", target = target, inverse_var = FALSE), CR2_AR1)
CR2_power <- vcovCR(lm_AR1_power, type = "CR2")
expect_equal(vcovCR(lm_AR1_power, cluster = Ovary$Mare, type = "CR2"), CR2_power)
expect_equal(vcovCR(lm_AR1_power, type = "CR2", inverse_var = TRUE), CR2_power)
expect_false(identical(vcovCR(lm_AR1_power, type = "CR2", inverse_var = FALSE), CR2_power))
target <- targetVariance(lm_AR1_power, cluster = Ovary$Mare)
expect_equal(vcovCR(lm_AR1_power, type = "CR2", target = target, inverse_var = TRUE), CR2_power)
attr(CR2_power, "inverse_var") <- FALSE
expect_equal(vcovCR(lm_AR1_power, type = "CR2", target = target, inverse_var = FALSE), CR2_power)
})
test_that("vcovCR options work for CR4", {
CR4_AR1 <- vcovCR(lm_AR1, type = "CR4")
expect_equal(vcovCR(lm_AR1, cluster = Ovary$Mare, type = "CR4"), CR4_AR1)
expect_equal(vcovCR(lm_AR1, type = "CR4", inverse_var = TRUE), CR4_AR1)
expect_false(identical(vcovCR(lm_AR1, type = "CR4", inverse_var = FALSE), CR4_AR1))
target <- targetVariance(lm_AR1)
expect_equal(vcovCR(lm_AR1, type = "CR4", target = target, inverse_var = TRUE), CR4_AR1)
attr(CR4_AR1, "inverse_var") <- FALSE
expect_equal(vcovCR(lm_AR1, type = "CR4", target = target, inverse_var = FALSE), CR4_AR1)
CR4_power <- vcovCR(lm_AR1_power, type = "CR4")
expect_equal(vcovCR(lm_AR1_power, cluster = Ovary$Mare, type = "CR4"), CR4_power)
expect_equal(vcovCR(lm_AR1_power, type = "CR4", inverse_var = TRUE), CR4_power)
expect_false(identical(vcovCR(lm_AR1_power, type = "CR4", inverse_var = FALSE), CR4_power))
target <- targetVariance(lm_AR1_power)
expect_equal(vcovCR(lm_AR1_power, type = "CR4", target = target, inverse_var = TRUE), CR4_power)
attr(CR4_power, "inverse_var") <- FALSE
expect_equal(vcovCR(lm_AR1_power, type = "CR4", target = target, inverse_var = FALSE), CR4_power)
})
test_that("CR2 and CR4 are target-unbiased", {
expect_true(check_CR(lm_AR1, vcov = "CR2"))
expect_true(check_CR(lm_AR1_power, vcov = "CR2"))
expect_true(check_CR(lm_AR1, vcov = "CR4"))
expect_true(check_CR(lm_AR1_power, vcov = "CR4"))
})
test_that("getData works.", {
re_order <- sample(nrow(Ovary))
egg_scramble <- Ovary[re_order,]
gls_scramble <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time),
data = egg_scramble)
scramble_dat <- getData(gls_scramble)
expect_equal(egg_scramble, scramble_dat)
})
CR_types <- paste0("CR",0:4)
test_that("Order doesn't matter.", {
check_sort_order(lm_AR1_power, dat = Ovary,
tol = 10^-4, tol2 = 10^-3, tol3 = 10^-3)
})
test_that("clubSandwich works with dropped observations", {
dat_miss <- Ovary
dat_miss$follicles[sample.int(nrow(Ovary), size = round(nrow(Ovary) / 10))] <- NA
lm_dropped <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), data = dat_miss,
correlation = corAR1(form = ~ 1 | Mare), na.action = na.omit)
lm_complete <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time),
data = dat_miss, subset = !is.na(follicles),
correlation = corAR1(form = ~ 1 | Mare))
CR_drop <- lapply(CR_types, function(x) vcovCR(lm_dropped, type = x))
CR_complete <- lapply(CR_types, function(x) vcovCR(lm_complete, type = x))
expect_equal(CR_drop, CR_complete)
test_drop <- lapply(CR_types, function(x) coef_test(lm_dropped, vcov = x, test = "All", p_values = FALSE))
test_complete <- lapply(CR_types, function(x) coef_test(lm_complete, vcov = x, test = "All", p_values = FALSE))
expect_equal(test_drop, test_complete)
})
test_that("Possible to cluster at higher level than random effects", {
pair_id <- rep(1:nlevels(Ovary$Mare), each = 3, length.out = nlevels(Ovary$Mare))[Ovary$Mare]
re_order <- sample(nrow(Ovary))
dat_scramble <- Ovary[re_order,]
pair_scramble <- pair_id[re_order]
expect_is(vcovCR(lm_hom, type = "CR2", cluster = pair_id), "vcovCR")
expect_is(vcovCR(lm_power, type = "CR2", cluster = pair_id), "vcovCR")
expect_is(vcovCR(lm_AR1, type = "CR2", cluster = pair_id), "vcovCR")
V <- vcovCR(lm_AR1_power, type = "CR2", cluster = pair_id)
expect_is(V, "vcovCR")
expect_error(vcovCR(lm_AR1, type = "CR2", cluster = pair_scramble))
expect_error(vcovCR(lm_AR1_power, type = "CR2", cluster = pair_scramble))
V_scramble <- vcovCR(update(lm_AR1_power, data = dat_scramble),
type = "CR2", cluster = pair_scramble)
expect_equal(diag(V), diag(V_scramble), tol = 10^-6)
}) |
context("sjmisc, to_value")
library(sjmisc)
test_that("to_value", {
expect_equal(to_value(factor(c(0,1,2)), keep.labels = FALSE), c(0,1,2))
expect_equal(to_value(factor(c(2,3,4)), keep.labels = FALSE), c(2,3,4))
expect_equal(to_value(factor(c("a", "b", "c")), keep.labels = FALSE), c(1,2,3))
expect_equal(to_value(factor(c("d", "e", "f")), keep.labels = FALSE), c(1,2,3))
})
test_that("to_value", {
expect_equal(to_value(factor(c(0,1,2)), start.at = 4, keep.labels = FALSE), c(4,5,6))
expect_equal(to_value(factor(c(2,3,4)), start.at = 4, keep.labels = FALSE), c(4,5,6))
expect_equal(to_value(factor(c("a", "b", "c")), start.at = 4, keep.labels = FALSE), c(4,5,6))
expect_equal(to_value(factor(c("d", "e", "f")), start.at = 4, keep.labels = FALSE), c(4,5,6))
}) |
infcrit.dof <-
function (modplsR, naive = FALSE)
{
if (!(is.null(modplsR$weights) | identical(modplsR$weights,
rep(1L, modplsR$nr)) | identical(modplsR$weights, rep(1,
modplsR$nr)))) {
naive = TRUE
}
if (modplsR$na.miss.X | modplsR$na.miss.Y) {
naive = TRUE
}
if (!naive) {
tempmodplsR_dof <- plsR.dof(modplsR, naive = FALSE)
tempAIC.dof <- aic.dof(modplsR$RSS, modplsR$nr, tempmodplsR_dof$DoF,
tempmodplsR_dof$sigmahat)
tempBIC.dof <- bic.dof(modplsR$RSS, modplsR$nr, tempmodplsR_dof$DoF,
tempmodplsR_dof$sigmahat)
tempGMDL.dof <- gmdl.dof(tempmodplsR_dof$sigmahat, modplsR$nr,
tempmodplsR_dof$DoF, tempmodplsR_dof$yhat)
tempmodplsR_naive <- plsR.dof(modplsR, naive = TRUE)
tempAIC.naive <- aic.dof(modplsR$RSS, modplsR$nr, tempmodplsR_naive$DoF,
tempmodplsR_naive$sigmahat)
tempBIC.naive <- bic.dof(modplsR$RSS, modplsR$nr, tempmodplsR_naive$DoF,
tempmodplsR_naive$sigmahat)
tempGMDL.naive <- gmdl.dof(tempmodplsR_naive$sigmahat,
modplsR$nr, tempmodplsR_naive$DoF, tempmodplsR_naive$yhat)
InfCrit.dof <- t(rbind(tempmodplsR_dof$DoF, tempmodplsR_dof$sigmahat,
tempAIC.dof, tempBIC.dof, tempGMDL.dof, tempmodplsR_naive$DoF,
tempmodplsR_naive$sigmahat, tempAIC.naive, tempBIC.naive,
tempGMDL.naive))
dimnames(InfCrit.dof) <- list(paste("Nb_Comp_", 0:modplsR$computed_nt,
sep = ""), c("DoF.dof", "sigmahat.dof", "AIC.dof",
"BIC.dof", "GMDL.dof", "DoF.naive", "sigmahat.naive",
"AIC.naive", "BIC.naive", "GMDL.naive"))
}
else {
tempmodplsR_naive <- plsR.dof(modplsR, naive = TRUE)
tempAIC.naive <- aic.dof(modplsR$RSS, modplsR$nr, tempmodplsR_naive$DoF,
tempmodplsR_naive$sigmahat)
tempBIC.naive <- bic.dof(modplsR$RSS, modplsR$nr, tempmodplsR_naive$DoF,
tempmodplsR_naive$sigmahat)
tempGMDL.naive <- gmdl.dof(tempmodplsR_naive$sigmahat,
modplsR$nr, tempmodplsR_naive$DoF, tempmodplsR_naive$yhat)
InfCrit.dof <- t(rbind(NA, NA, NA, NA, NA, tempmodplsR_naive$DoF,
tempmodplsR_naive$sigmahat, tempAIC.naive, tempBIC.naive,
tempGMDL.naive))
dimnames(InfCrit.dof) <- list(paste("Nb_Comp_", 0:modplsR$computed_nt,
sep = ""), c("DoF.dof", "sigmahat.dof", "AIC.dof",
"BIC.dof", "GMDL.dof", "DoF.naive", "sigmahat.naive",
"AIC.naive", "BIC.naive", "GMDL.naive"))
}
return(InfCrit.dof)
} |
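# Illustrative use (object names assumed, plsRglm-style API): compare the
# DoF-corrected and naive information criteria of a fitted plsR model.
# mod <- plsR(yCornell, XCornell, nt = 4)
# infcrit.dof(mod)                # both ".dof" and ".naive" columns
# infcrit.dof(mod, naive = TRUE)  # naive columns only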
transform_gibbs_samples = function(gibbs_samples, R, B, Q, normalization) {
if(!inherits(normalization, "RprobitB_normalization"))
stop("'normalization' must be of class 'RprobitB_normalization'.")
scaling = function(samples, factor){
if(is.null(samples)) NULL else samples * factor
}
scale = normalization$scale
s_n = scaling(gibbs_samples$s, 1)
if(scale$parameter == "a"){
factor = scale$value / gibbs_samples$alpha[,scale$index]
alpha_n = scaling(gibbs_samples$alpha, factor)
b_n = scaling(gibbs_samples$b, factor)
Omega_n = scaling(gibbs_samples$Omega, factor^2)
Sigma_n = scaling(gibbs_samples$Sigma, factor^2)
}
if(scale$parameter == "s"){
factor = scale$value / gibbs_samples$Sigma[,paste0(scale$index,",",scale$index)]
alpha_n = scaling(gibbs_samples$alpha, sqrt(factor))
b_n = scaling(gibbs_samples$b, sqrt(factor))
Omega_n = scaling(gibbs_samples$Omega, factor)
Sigma_n = scaling(gibbs_samples$Sigma, factor)
}
gibbs_samples_n = list("s" = s_n,
"alpha" = alpha_n,
"b" = b_n,
"Omega" = Omega_n,
"Sigma" = Sigma_n)
gibbs_samples_n = gibbs_samples_n[lengths(gibbs_samples_n) != 0]
burn = function(samples){
if(is.null(samples)) NULL else samples[(B+1):R,,drop=FALSE]
}
s_nb = burn(s_n)
alpha_nb = burn(alpha_n)
b_nb = burn(b_n)
Omega_nb = burn(Omega_n)
Sigma_nb = burn(Sigma_n)
gibbs_samples_nb = list("s" = s_nb,
"alpha" = alpha_nb,
"b" = b_nb,
"Omega" = Omega_nb,
"Sigma" = Sigma_nb)
gibbs_samples_nb = gibbs_samples_nb[lengths(gibbs_samples_nb) != 0]
thin = function(samples, end){
    if(is.null(samples)) NULL else samples[seq(1,end,Q),,drop=FALSE]
}
s_nt = thin(s_n,R)
alpha_nt = thin(alpha_n,R)
b_nt = thin(b_n,R)
Omega_nt = thin(Omega_n,R)
Sigma_nt = thin(Sigma_n,R)
gibbs_samples_nt = list("s" = s_nt,
"alpha" = alpha_nt,
"b" = b_nt,
"Omega" = Omega_nt,
"Sigma" = Sigma_nt)
gibbs_samples_nt = gibbs_samples_nt[lengths(gibbs_samples_nt) != 0]
s_nbt = thin(s_nb,R-B)
alpha_nbt = thin(alpha_nb,R-B)
b_nbt = thin(b_nb,R-B)
Omega_nbt = thin(Omega_nb,R-B)
Sigma_nbt = thin(Sigma_nb,R-B)
gibbs_samples_nbt = list("s" = s_nbt,
"alpha" = alpha_nbt,
"b" = b_nbt,
"Omega" = Omega_nbt,
"Sigma" = Sigma_nbt)
gibbs_samples_nbt = gibbs_samples_nbt[lengths(gibbs_samples_nbt) != 0]
gibbs_samples = list("gibbs_samples" = gibbs_samples,
"gibbs_samples_n" = gibbs_samples_n,
"gibbs_samples_nb" = gibbs_samples_nb,
"gibbs_samples_nt" = gibbs_samples_nt,
"gibbs_samples_nbt" = gibbs_samples_nbt)
class(gibbs_samples) = "RprobitB_gibbs_samples"
return(gibbs_samples)
} |
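# Minimal sketch of the burn-in and thinning logic applied above: from
# R_draws Gibbs draws, drop the first B_burn ("_b"), then keep every
# Q_thin-th of the remaining draws ("_bt").
R_draws <- 10; B_burn <- 4; Q_thin <- 2
draws <- matrix(rnorm(R_draws), ncol = 1, dimnames = list(NULL, "alpha"))
draws_b  <- draws[(B_burn + 1):R_draws, , drop = FALSE]
draws_bt <- draws_b[seq(1, R_draws - B_burn, Q_thin), , drop = FALSE]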
context("Testing function estimate_partition_function")
test_that(
"estimate_partition_function fails for wrong asymptotic metrics",
{
for(metric in c("cayley", "hamming", "kendall", "ulam")){
expect_error(
estimate_partition_function(method = "asymptotic",
alpha_vector = seq(from = 1, to = 2, by = .1),
n_items = 10,
metric = metric,
n_iterations = 50,
K = 20, degree = 5)
)
}
}
)
test_that(
"estimate_partition_function asymptotic gives correct values",
{
expect_equal(
estimate_partition_function(method = "asymptotic",
alpha_vector = seq(from = 1, to = 2, by = .1),
n_items = 10, metric = "footrule",
n_iterations = 50, K = 20, degree = 5),
c(`(Intercept)` = 15.1041186472153, `I(alpha^1)` = -3.32366499344578,
`I(alpha^2)` = 0.221067735142993, `I(alpha^3)` = 0.00983874213603723,
`I(alpha^4)` = -0.00400646965806447, `I(alpha^5)` = 0.000290662453972935
)
)
expect_equal(
estimate_partition_function(method = "asymptotic",
alpha_vector = seq(from = 1, to = 2, by = .2),
n_items = 20, metric = "spearman",
n_iterations = 55, K = 21, degree = 4),
c(`(Intercept)` = 34.3498359623487, `I(alpha^1)` = -22.0673491727874,
`I(alpha^2)` = 10.8140117937552, `I(alpha^3)` = -3.18806119268205,
`I(alpha^4)` = 0.394939624914785)
)
}
) |
ComparacaoMedias=function(y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL) {
lastC=function (x)
{
y <- sub(" +$", "", x)
p1 <- nchar(y)
cc <- substr(y, p1, p1)
return(cc)
}
tapply.stat=function (y, x, stat = "mean")
{
cx <- deparse(substitute(x))
cy <- deparse(substitute(y))
x <- data.frame(c1 = 1, x)
y <- data.frame(v1 = 1, y)
nx <- ncol(x)
ny <- ncol(y)
namex <- names(x)
namey <- names(y)
if (nx == 2)
namex <- c("c1", cx)
if (ny == 2)
namey <- c("v1", cy)
namexy <- c(namex, namey)
for (i in 1:nx) {
x[, i] <- as.character(x[, i])
}
z <- NULL
for (i in 1:nx) {
z <- paste(z, x[, i], sep = "&")
}
w <- NULL
for (i in 1:ny) {
m <- tapply(y[, i], z, stat)
m <- as.matrix(m)
w <- cbind(w, m)
}
nw <- nrow(w)
c <- rownames(w)
v <- rep("", nw * nx)
dim(v) <- c(nw, nx)
for (i in 1:nw) {
for (j in 1:nx) {
v[i, j] <- strsplit(c[i], "&")[[1]][j + 1]
}
}
rownames(w) <- NULL
junto <- data.frame(v[, -1], w)
junto <- junto[, -nx]
names(junto) <- namexy[c(-1, -(nx + 1))]
return(junto)
}
order.group= function (trt, means, N, MSerror, Tprob, std.err, parameter = 1)
{
N <- rep(1/mean(1/N), length(N))
n <- length(means)
letras <- letters
if (n > 26) {
l <- floor(n/26)
for (i in 1:l) letras <- c(letras, paste(letters, i,
sep = ""))
}
z <- data.frame(trt, means, N, std.err)
w <- z[order(z[, 2], decreasing = TRUE), ]
M <- rep("", n)
k <- 1
j <- 1
k <- 1
cambio <- n
cambio1 <- 0
chequeo = 0
M[1] <- letras[k]
while (j < n) {
chequeo <- chequeo + 1
if (chequeo > n)
break
for (i in j:n) {
minimo <- Tprob * sqrt(parameter * MSerror * (1/N[i] +
1/N[j]))
s <- abs(w[i, 2] - w[j, 2]) <= minimo
if (s) {
if (lastC(M[i]) != letras[k])
M[i] <- paste(M[i], letras[k], sep = "")
}
else {
k <- k + 1
cambio <- i
cambio1 <- 0
ja <- j
for (jj in cambio:n) M[jj] <- paste(M[jj], " ",
sep = "")
M[cambio] <- paste(M[cambio], letras[k], sep = "")
for (v in ja:cambio) {
if (abs(w[v, 2] - w[cambio, 2]) > minimo) {
j <- j + 1
cambio1 <- 1
}
else break
}
break
}
}
if (cambio1 == 0)
j <- j + 1
}
w <- data.frame(w, stat = M)
trt <- as.character(w$trt)
means <- as.numeric(w$means)
N <- as.numeric(w$N)
std.err <- as.numeric(w$std.err)
output <- data.frame(Tratamentos=trt, Medias=means, M, N, std.err)
return(output)
}
order.stat.SNK=function (treatment, means, minimum)
{
n <- length(means)
z <- data.frame(treatment, means)
w <- z[order(z[, 2], decreasing = TRUE), ]
M <- rep("", n)
k <- 1
k1 <- 0
j <- 1
i <- 1
r <- 1
cambio <- n
cambio1 <- 0
chequeo = 0
M[1] <- letters[k]
while (j < n) {
chequeo <- chequeo + 1
if (chequeo > n)
break
for (i in j:n) {
if (abs(j - i) == 0) {
r <- 1
}
else {
r <- abs(j - i)
}
s <- abs(w[i, 2] - w[j, 2]) <= minimum[r]
if (s) {
if (lastC(M[i]) != letters[k])
M[i] <- paste(M[i], letters[k], sep = "")
}
else {
k <- k + 1
cambio <- i
cambio1 <- 0
ja <- j
for (jj in cambio:n) M[jj] <- paste(M[jj], " ",
sep = "")
M[cambio] <- paste(M[cambio], letters[k], sep = "")
for (v in ja:cambio) {
if (abs(v - cambio) == 0) {
r <- 1
}
else {
r <- abs(v - cambio)
}
if (abs(w[v, 2] - w[cambio, 2]) > minimum[r]) {
j <- j + 1
cambio1 <- 1
}
else break
}
break
}
}
if (cambio1 == 0)
j <- j + 1
}
w <- data.frame(w, stat = M)
trt <- as.character(w$treatment)
means <- as.numeric(w$means)
output <- data.frame(trt, means, M)
return(output)
}
TesteT=function (y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL){
SSerror <- MSerror*DFerror
name.y <- paste(deparse(substitute(y)))
name.t <- paste(deparse(substitute(trt)))
junto <- subset(data.frame(y, trt), is.na(y) == FALSE)
means <- tapply.stat(junto[, 1], junto[, 2], stat = "mean")
sds <- tapply.stat(junto[, 1], junto[, 2], stat = "sd")
nn <- tapply.stat(junto[, 1], junto[, 2], stat = "length")
means <- data.frame(means, std.err = sds[, 2]/sqrt(nn[, 2]),
replication = nn[, 2])
names(means)[1:2] <- c(name.t, name.y)
ntr <- nrow(means)
Tprob <- qt(1 - (alpha/2), DFerror) * sqrt(2)
nr <- unique(nn[, 2])
nfila <- c("Alpha", "Error Degrees of Freedom", "Error Mean Square",
"Critical Value of Studentized Range")
nvalor <- c(alpha, DFerror, MSerror, Tprob)
xtabla <- data.frame(...... = nvalor)
row.names(xtabla) <- nfila
if (group) {
if (length(nr) == 1) {
HSD <- Tprob * sqrt(MSerror/nr)
}
else {
nr1 <- 1/mean(1/nn[, 2])
HSD <- Tprob * sqrt(MSerror/nr1)
}
output= order.group(means[, 1], means[, 2], means[,
4], MSerror, Tprob, means[, 3], parameter = 0.5)
}
if (!group) {
comb <- combn(ntr, 2)
nn <- ncol(comb)
dif <- rep(0, nn)
pvalue <- rep(0, nn)
for (k in 1:nn) {
i <- comb[1, k]
j <- comb[2, k]
dif[k] <- abs(means[i, 2] - means[j, 2])
sdtdif <- sqrt(MSerror * (1/means[i, 4] + 1/means[j,
4]))
pvalue[k] <- round(1 - ptukey(dif[k] * sqrt(2)/sdtdif,
ntr, DFerror), 4)
}
tr.i <- comb[1, ]
tr.j <- comb[2, ]
output <- data.frame(trt = means[, 1], means = means[,
2], M = "", N = means[, 4], std.err = means[, 3])
}
return(output[,1:3])
}
TesteTProtegido=function (y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL){
SSerror <- MSerror*DFerror
name.y <- paste(deparse(substitute(y)))
name.t <- paste(deparse(substitute(trt)))
junto <- subset(data.frame(y, trt), is.na(y) == FALSE)
means <- tapply.stat(junto[, 1], junto[, 2], stat = "mean")
sds <- tapply.stat(junto[, 1], junto[, 2], stat = "sd")
nn <- tapply.stat(junto[, 1], junto[, 2], stat = "length")
means <- data.frame(means, std.err = sds[, 2]/sqrt(nn[, 2]),
replication = nn[, 2])
names(means)[1:2] <- c(name.t, name.y)
ntr <- nrow(means)
alphap = (2 * alpha)/(ntr * (ntr - 1))
Tprob <- qt(1 - (alphap/2), DFerror) * sqrt(2)
nr <- unique(nn[, 2])
nfila <- c("Alpha", "Error Degrees of Freedom", "Error Mean Square",
"Critical Value of Studentized Range")
nvalor <- c(alpha, DFerror, MSerror, Tprob)
xtabla <- data.frame(...... = nvalor)
row.names(xtabla) <- nfila
if (group) {
if (length(nr) == 1) {
HSD <- Tprob * sqrt(MSerror/nr)
}
else {
nr1 <- 1/mean(1/nn[, 2])
HSD <- Tprob * sqrt(MSerror/nr1)
}
output <- order.group(means[, 1], means[, 2], means[,
4], MSerror, Tprob, means[, 3], parameter = 0.5)
}
if (!group) {
comb <- combn(ntr, 2)
nn <- ncol(comb)
dif <- rep(0, nn)
pvalue <- rep(0, nn)
for (k in 1:nn) {
i <- comb[1, k]
j <- comb[2, k]
dif[k] <- abs(means[i, 2] - means[j, 2])
sdtdif <- sqrt(MSerror * (1/means[i, 4] + 1/means[j,
4]))
pvalue[k] <- round(1 - ptukey(dif[k] * sqrt(2)/sdtdif,
ntr, DFerror), 4)
}
tr.i <- comb[1, ]
tr.j <- comb[2, ]
output <- data.frame(trt = means[, 1], means = means[,
2], M = "", N = means[, 4], std.err = means[, 3])
}
return(output[,1:3])
}
Duncan=function (y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL){
SSerror <- MSerror*DFerror
name.y <- paste(deparse(substitute(y)))
name.t <- paste(deparse(substitute(trt)))
junto <- subset(data.frame(y, trt), is.na(y) == FALSE)
means <- tapply.stat(junto[, 1], junto[, 2], stat = "mean")
sds <- tapply.stat(junto[, 1], junto[, 2], stat = "sd")
nn <- tapply.stat(junto[, 1], junto[, 2], stat = "length")
means <- data.frame(means, std.err = sds[, 2]/sqrt(nn[, 2]),
replication = nn[, 2])
names(means)[1:2] <- c(name.t, name.y)
ntr <- nrow(means)
k.snk <- ntr - 1
Tprob <- vector(mode = "integer", k.snk)
kk <- 1
for (kk in 1:k.snk) {
alphap = 1 - (1 - alpha)^((kk + 1) - 1)
xxxx=suppressWarnings(qtukey(1 - alphap, kk + 1, DFerror))
if(is.na(xxxx)){xxxx=4}
Tprob[kk] <- xxxx
}
p.nan <- as.vector(na.action(na.omit(Tprob)))[1]
ult <- p.nan - 1
if(!is.null(p.nan)){
if (ntr == 50)
Tprob[p.nan:length(Tprob)] <- seq(Tprob[ult], 3.61, length = length(Tprob) -
ult)
if (ntr == 100)
Tprob[p.nan:length(Tprob)] <- seq(Tprob[ult], 3.67, length = length(Tprob) -
ult)
}
nr <- unique(nn[, 2])
nfila <- c("Alpha", "Error Degrees of Freedom", "Error Mean Square")
nfila1 <- c("Distances between averages", "Critical Value of Studentized Range")
nvalor <- c(alpha, DFerror, MSerror)
nvalor1 <- rbind(t(seq(2, ntr)), t(Tprob))
xtabla <- data.frame(...... = nvalor)
xtabla1 <- data.frame(...... = nvalor1)
row.names(xtabla) <- nfila
row.names(xtabla1) <- nfila1
HSD <- vector(mode = "integer", k.snk)
if (group) {
if (length(nr) == 1) {
kk <- 1
for (kk in 1:k.snk) {
HSD[kk] <- Tprob[kk] * sqrt(MSerror/nr)
}
}
else {
nr1 <- 1/mean(1/nn[, 2])
kk <- 1
for (kk in 1:k.snk) {
HSD[kk] <- Tprob[kk] * sqrt(MSerror/nr1)
}
}
output <- order.stat.SNK(means[, 1], means[, 2], HSD)
}
return(output)
}
SNK=function (y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL){
SSerror <- MSerror*DFerror
name.y <- paste(deparse(substitute(y)))
name.t <- paste(deparse(substitute(trt)))
junto <- subset(data.frame(y, trt), is.na(y) == FALSE)
means <- tapply.stat(junto[, 1], junto[, 2], stat = "mean")
sds <- tapply.stat(junto[, 1], junto[, 2], stat = "sd")
nn <- tapply.stat(junto[, 1], junto[, 2], stat = "length")
means <- data.frame(means, std.err = sds[, 2]/sqrt(nn[, 2]),
replication = nn[, 2])
names(means)[1:2] <- c(name.t, name.y)
ntr <- nrow(means)
k.snk <- ntr - 1
Tprob <- vector(mode = "integer", k.snk)
kk <- 1
for (kk in 1:k.snk) {
Tprob[kk] <- qtukey(1 - alpha, kk + 1, DFerror)
}
nr <- unique(nn[, 2])
nfila <- c("Alpha", "Error Degrees of Freedom", "Error Mean Square")
nfila1 <- c("Distances between averages", "Critical Value of Studentized Range")
nvalor <- c(alpha, DFerror, MSerror)
nvalor1 <- rbind(t(seq(2, ntr)), t(Tprob))
xtabla <- data.frame(...... = nvalor)
xtabla1 <- data.frame(...... = nvalor1)
row.names(xtabla) <- nfila
row.names(xtabla1) <- nfila1
HSD <- vector(mode = "integer", k.snk)
if (group) {
if (length(nr) == 1) {
kk <- 1
for (kk in 1:k.snk) {
HSD[kk] <- Tprob[kk] * sqrt(MSerror/nr)
}
}
else {
nr1 <- 1/mean(1/nn[, 2])
kk <- 1
for (kk in 1:k.snk) {
HSD[kk] <- Tprob[kk] * sqrt(MSerror/nr1)
}
}
output <- order.stat.SNK(means[, 1], means[, 2], HSD)
}
return(output[,1:3])
}
tukey=function (y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL){
SSerror <- MSerror*DFerror
name.y <- paste(deparse(substitute(y)))
name.t <- paste(deparse(substitute(trt)))
junto <- subset(data.frame(y, trt), is.na(y) == FALSE)
means <- tapply.stat(junto[, 1], junto[, 2], stat = "mean")
sds <- tapply.stat(junto[, 1], junto[, 2], stat = "sd")
nn <- tapply.stat(junto[, 1], junto[, 2], stat = "length")
means <- data.frame(means, std.err = sds[, 2]/sqrt(nn[, 2]),
replication = nn[, 2])
names(means)[1:2] <- c(name.t, name.y)
ntr <- nrow(means)
Tprob <- qtukey(1 - alpha, ntr, DFerror)
nr <- unique(nn[, 2])
nfila <- c("Alpha", "Error Degrees of Freedom", "Error Mean Square",
"Critical Value of Studentized Range")
nvalor <- c(alpha, DFerror, MSerror, Tprob)
xtabla <- data.frame(...... = nvalor)
row.names(xtabla) <- nfila
if (group) {
if (length(nr) == 1) {
HSD <- Tprob * sqrt(MSerror/nr)
}
else {
nr1 <- 1/mean(1/nn[, 2])
HSD <- Tprob * sqrt(MSerror/nr1)
}
output <- order.group(means[, 1], means[, 2], means[,
4], MSerror, Tprob, means[, 3], parameter = 0.5)
}
if (!group) {
comb <- combn(ntr, 2)
nn <- ncol(comb)
dif <- rep(0, nn)
pvalue <- rep(0, nn)
for (k in 1:nn) {
i <- comb[1, k]
j <- comb[2, k]
dif[k] <- abs(means[i, 2] - means[j, 2])
sdtdif <- sqrt(MSerror * (1/means[i, 4] + 1/means[j,
4]))
pvalue[k] <- round(1 - ptukey(dif[k] * sqrt(2)/sdtdif,
ntr, DFerror), 4)
}
tr.i <- comb[1, ]
tr.j <- comb[2, ]
output <- data.frame(trt = means[, 1], means = means[,
2], M = "", N = means[, 4], std.err = means[, 3])
}
return(output[,1:3])
}
ScottKnott= function (y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,
main = NULL)
{
SSerror=MSerror*DFerror
sk <- function(medias, s2, dfr, prob) {
bo <- 0
si2 <- s2
defr <- dfr
parou <- 1
np <- length(medias) - 1
for (i in 1:np) {
g1 <- medias[1:i]
g2 <- medias[(i + 1):length(medias)]
B0 <- sum(g1)^2/length(g1) + sum(g2)^2/length(g2) -
(sum(g1) + sum(g2))^2/length(c(g1, g2))
if (B0 > bo) {
bo <- B0
parou <- i
}
}
g1 <- medias[1:parou]
g2 <- medias[(parou + 1):length(medias)]
teste <- c(g1, g2)
sigm2 <- (sum(teste^2) - sum(teste)^2/length(teste) +
defr * si2)/(length(teste) + defr)
lamb <- pi * bo/(2 * sigm2 * (pi - 2))
v0 <- length(teste)/(pi - 2)
p <- pchisq(lamb, v0, lower.tail = FALSE)
if (p < prob) {
for (i in 1:length(g1)) {
cat(names(g1[i]), "\n", file = "skresult", append = TRUE)
}
cat("*", "\n", file = "skresult", append = TRUE)
}
if (length(g1) > 1) {
sk(g1, s2, dfr, prob)
}
if (length(g2) > 1) {
sk(g2, s2, dfr, prob)
}
}
medias <- sort(tapply(y, trt, mean), decreasing = TRUE)
dfr <- DFerror
rep <- tapply(y, trt, length)
s0 <- MSerror <- SSerror/DFerror
s2 <- s0/rep[1]
prob <- alpha
sk(medias, s2, dfr, prob)
f <- names(medias)
names(medias) <- 1:length(medias)
resultado <- data.frame(r = 0, f = f, m = medias)
if (file.exists("skresult") == FALSE) {
stop
}
else {
xx <- read.table("skresult")
file.remove("skresult")
x <- xx[[1]]
x <- as.vector(x)
z <- 1
for (j in 1:length(x)) {
if (x[j] == "*") {
z <- z + 1
}
for (i in 1:length(resultado$f)) {
if (resultado$f[i] == x[j]) {
resultado$r[i] <- z
}
}
}
}
letras <- letters
if (length(resultado$r) > 26) {
l <- floor(length(resultado$r)/26)
for (i in 1:l) letras <- c(letras, paste(letters, i,
sep = ""))
}
res <- 1
for (i in 1:(length(resultado$r) - 1)) {
if (resultado$r[i] != resultado$r[i + 1]) {
resultado$r[i] <- letras[res]
res <- res + 1
if (i == (length(resultado$r) - 1)) {
resultado$r[i + 1] <- letras[res]
}
}
else {
resultado$r[i] <- letras[res]
if (i == (length(resultado$r) - 1)) {
resultado$r[i + 1] <- letras[res]
}
}
}
names(resultado) <- c("Grupos", "Tratamentos", "Medias")
return(resultado[,c(2,3,1)])
}
Resultado=cbind(
TesteT(y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL)[,1:2],
TesteT=TesteT(y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL)[,3],
TesteT_Bonferroni=TesteTProtegido(y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL)[,3],
Duncan=Duncan(y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE,main = NULL)[,3],
SNK=SNK(y, trt, DFerror, MSerror,alpha = 0.05, group = TRUE,main = NULL)[,3],
Tukey=tukey(y, trt, DFerror, MSerror,alpha = 0.05, group = TRUE,main = NULL)[,3],
ScottKnott=ScottKnott(y, trt, DFerror, MSerror, alpha = 0.05)[,3] )
return(Resultado)} |
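# Illustrative call (simulated data, names assumed): comparison letters from
# several mean-separation tests for a one-way ANOVA; DFerror and MSerror come
# from the anova table of the fitted model.
# set.seed(1)
# y   <- rnorm(15, mean = rep(c(10, 12, 15), each = 5))
# trt <- gl(3, 5, labels = c("A", "B", "C"))
# fit <- aov(y ~ trt)
# ComparacaoMedias(y, trt, DFerror = df.residual(fit),
#                  MSerror = deviance(fit) / df.residual(fit))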
khb <- function(X,y,z){
X <- as.data.frame(X)
glmR <- glm(y ~ -1 + ., family=binomial(link="probit"),data=X[,-which(names(X)==z)])
glmF <- glm(y ~ -1 + ., family=binomial(link="probit"),data=X)
  # KHB step: regress the omitted variable z on the retained covariates
  lmA <- lm(X[, z] ~ -1 + ., data = X[, -which(names(X) == z), drop = FALSE])
glmFs <- glm(y ~ -1 + . + lmA$resid, family=binomial(link="probit"),data=X[,-which(names(X)==z)])
b.yx.zt <- glmFs$coef[!names(glmFs$coef)%in%c("(Intercept)","lmA$resid")]
b.yx.z <- glmF$coef[!names(glmF$coef)%in%c("(Intercept)",z)]
b.yz.x <- glmF$coef[names(glmF$coef)==z]
sigma.b.yz.x <- diag(vcov(glmF))[names(diag(vcov(glmF)))==z]
if(sum(X[,1])!=dim(X)[1]){
t.zx <- lmA$coef
sigma.t.zx <- diag(vcov(lmA))
} else{
t.zx <- lmA$coef[-1]
sigma.t.zx <- diag(vcov(lmA))[-1]
}
  # sanity check of the KHB decomposition (value not used further):
  # the coefficient change should equal b.yz.x * t.zx
  round(b.yx.zt - b.yx.z,5) == round(b.yz.x * t.zx,5)
Z <- (b.yx.zt - b.yx.z)/sqrt((b.yz.x^2 * sigma.t.zx^2 + t.zx^2 * sigma.b.yz.x^2))
p.val <- round((1-pnorm(Z)),4)
cat("\nKarlson-Holm-Breen method\nNull hypothesis: Change in coefficient is not attributable to confounding by z.\n\n")
data.frame(p.value=ifelse(p.val<1e-04,"<1e-04",p.val))
} |
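# Illustrative call (simulated data, names assumed): test whether adding z
# changes the probit coefficient of x1 beyond what confounding by z explains.
# set.seed(1)
# X <- data.frame(x1 = rnorm(200), z = rnorm(200))
# y <- rbinom(200, 1, pnorm(X$x1 + 0.5 * X$z))
# khb(X, y, z = "z")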
dcircpurka <- function(x, m, a, rads = FALSE, logden = FALSE) {
if ( !rads ) x <- x * pi/180
x <- cbind( cos(x), sin(x) )
m <- c( cos(m), sin(m) )
den <- log(a) - log(2) - log(1 - exp(-a * pi)) - a * acos( x %*% m)
if ( !logden ) den <- exp(den)
den
} |
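# Example: Purkayastha density at 45/90/135 degrees around mean direction
# pi/2 with concentration a = 2. Note that, as written, only x is converted
# from degrees to radians, so m is always taken in radians.
dcircpurka(c(45, 90, 135), m = pi / 2, a = 2)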
context("dropdown")
test_that("test dropdown_input", {
expect_is(dropdown_input("a", c(1,2,3)), "shiny.tag")
expect_error(dropdown_input("a"),
"argument \"choices\" is missing, with no default")
si_str <- as.character(dropdown_input("a", c(1,2,3)))
expect_true(any(grepl("<div class=\"item\" data-value=\"1\">1</div>",
si_str, fixed = TRUE)))
expect_true(any(grepl("<div class=\"item\" data-value=\"2\">2</div>",
si_str, fixed = TRUE)))
expect_true(any(grepl("<div class=\"item\" data-value=\"3\">3</div>",
si_str, fixed = TRUE)))
expect_false(any(grepl("<div class=\"item\" data-value=\"0\">0</div>",
si_str, fixed = TRUE)))
}) |
test_that("parsing works properly", {
expr <- parse_safe(c("gamma", "", "alpha"))
expect_identical(class(expr), "expression")
expect_equal(expr, expression(gamma, NA, alpha))
expect_type(expr, "expression")
}) |
irls.nb.1 = function(y, s, x, phi,
beta0=rep(NA,p),
mustart=NULL,
maxit=50, tol.mu=1e-3/length(y), print.level=0) {
nobs = as.integer(dim(x)[1]);
p = as.integer(dim(x)[2]);
id1 = (1:p)[is.na(beta0)];
id0 = (1:p)[!is.na(beta0)];
q = length(id0);
nvars = p - q;
beta = beta0;
offset = matrix(x[, id0], nobs, q) %*% beta[id0];
if (is.null(mustart)) {
mu = y + (y==0)/6;
} else {
mu = mustart;
}
eta = log(mu/s);
conv = FALSE;
for (iter in 1L:maxit) {
varmu = mu + phi * mu^2;
if (any(is.na(varmu)))
stop("NAs in V(mu)")
if (any(varmu == 0))
stop("0s in V(mu)")
z = eta - offset + (y - mu)/mu;
w = drop(mu/sqrt(varmu));
epsilon = 1e-7;
fit = .Call(Cdqrls, x[, id1, drop=FALSE] * w, w * z, epsilon);
if (any(!is.finite(fit$coefficients))) {
warning(gettextf("non-finite coefficients at iteration %d", iter), domain = NA);
break
}
if (nobs < fit$rank)
stop(gettextf("X matrix has rank %d, but only %d observations",
fit$rank, nobs), domain = NA)
muold = mu;
beta[id1[fit$pivot]] = fit$coefficients;
eta = drop(x %*% beta);
mu = s*exp(eta);
if (max(abs(mu - muold)) < tol.mu) {
conv = TRUE;
break
}
}
zero = any(mu < tol.mu * 2);
list(mu=mu, beta=beta, iter=iter, conv=conv, zero=zero);
}
irls.nb = function(y, s, x, phi, beta0, mustart=NULL, ..., print.level=0) {
m = dim(y)[1];
n = dim(y)[2];
p = dim(x)[2];
phi = matrix(phi, m, n);
if (print.level > 0)
print("Estimating NB regression coefficients using IRLS.");
res=list(mu=matrix(NA, m, n), beta=matrix(NA, m, p),
conv=logical(m), iter=numeric(m));
if (print.level > 1) {
pb=txtProgressBar(style=3);
}
for (i in 1:m) {
if (print.level > 1) {
setTxtProgressBar(pb, i/m);
}
if (is.null(mustart)) {
res0 = irls.nb.1(y[i,], s, x, phi[i,], beta0,
...,
print.level=print.level-1);
} else {
res0 = irls.nb.1(y[i,], s, x, phi[i,], beta0,
mustart=mustart[i,],
...,
print.level=print.level-1);
}
res$mu[i,] = res0$mu;
res$beta[i,] = res0$beta;
res$conv[i] = res0$conv;
res$iter[i] = res0$iter;
}
if (print.level>1) close(pb);
res;
}
fit.nb.regression = function(nb.data, dispersion, x, beta0=rep(NA, dim(x)[2]), ...) {
    ## argument order must match irls.nb(y, s, x, phi, beta0, ...):
    ## the design matrix is x, the per-gene dispersions are phi
    res = irls.nb(nb.data$counts, nb.data$eff.lib.sizes, x, dispersion$estimates, beta0, ...);
res
} |
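# Illustrative call (NBPSeq-style inputs assumed): `nb.data` holds a count
# matrix and effective library sizes, `dispersion$estimates` the per-gene NB
# dispersions, and `x` the design matrix shared by all genes.
# x <- model.matrix(~ group)
# fit <- fit.nb.regression(nb.data, dispersion, x)
# head(fit$beta)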
NULL
MaxPooling1D <- function(pool_size = 2, strides = NULL, padding = 'valid',
input_shape = NULL) {
if (!is.null(strides))
strides <- int32(strides)
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$MaxPooling1D(
pool_size = int32(pool_size),
strides = strides,
padding = padding)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$MaxPooling1D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
input_shape = input_shape)
}
return(res)
}
MaxPooling2D <- function(pool_size = c(2, 2), strides = NULL,
padding = 'valid', data_format = NULL,
input_shape = NULL) {
if (!is.null(strides))
strides <- int32(strides)
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$MaxPooling2D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$MaxPooling2D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format,
input_shape = input_shape)
}
return(res)
}
MaxPooling3D <- function(pool_size = c(2, 2, 2), strides = NULL,
padding = 'valid',
data_format = NULL, input_shape = NULL) {
if (!is.null(strides))
strides <- int32(strides)
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$MaxPooling3D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$MaxPooling3D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format,
input_shape = input_shape)
}
return(res)
}
NULL
AveragePooling1D <- function(pool_size = 2, strides = NULL,
padding = 'valid', input_shape = NULL) {
if (!is.null(strides))
strides <- int32(strides)
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$AveragePooling1D(
pool_size = int32(pool_size),
strides = strides,
padding = padding)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$AveragePooling1D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
input_shape = input_shape)
}
return(res)
}
AveragePooling2D <- function(pool_size = c(2, 2), strides = NULL,
padding = 'valid',
data_format = NULL,
input_shape = NULL) {
if (!is.null(strides))
strides <- int32(strides)
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$AveragePooling2D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$AveragePooling2D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format,
input_shape = input_shape)
}
return(res)
}
AveragePooling3D <- function(pool_size = c(2, 2, 2), strides = NULL,
padding = 'valid',
data_format = NULL,
input_shape = NULL) {
if (!is.null(strides))
strides <- int32(strides)
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$AveragePooling3D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$AveragePooling3D(
pool_size = int32(pool_size),
strides = strides,
padding = padding,
data_format = data_format,
input_shape = input_shape)
}
return(res)
}
NULL
GlobalMaxPooling1D <- function(input_shape = NULL) {
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$GlobalMaxPooling1D()
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$GlobalMaxPooling1D(
input_shape = input_shape)
}
return(res)
}
GlobalAveragePooling1D <- function(input_shape = NULL) {
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$GlobalAveragePooling1D()
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$GlobalAveragePooling1D(
input_shape = input_shape)
}
return(res)
}
GlobalMaxPooling2D <- function(data_format = NULL, input_shape = NULL) {
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$GlobalMaxPooling2D(
data_format = data_format)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$GlobalMaxPooling2D(
data_format = data_format,
input_shape = input_shape)
}
return(res)
}
GlobalAveragePooling2D <- function(data_format = NULL, input_shape = NULL) {
if (is.null(input_shape)) {
res <- modules$keras.layers.pooling$GlobalAveragePooling2D(
data_format = data_format)
} else {
input_shape <- as.list(input_shape)
input_shape <- modules$builtin$tuple(int32(input_shape))
res <- modules$keras.layers.pooling$GlobalAveragePooling2D(
data_format = data_format,
input_shape = input_shape)
}
return(res)
} |
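# Illustrative use (kerasR-style bindings assumed; `modules` must already be
# initialised by the package's keras setup): a 2x2 max-pooling layer as the
# first layer of a model for 32x32 RGB input.
# pool <- MaxPooling2D(pool_size = c(2, 2), input_shape = c(32, 32, 3))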
idx2cref <- function(x, absRow = TRUE, absCol = TRUE) {
if(!is.numeric(x)) stop("x must be a numeric matrix or vector of indices!")
if(!is.matrix(x)) x <- matrix(x, ncol = 2, byrow = TRUE)
apply(x, 1, function(xx) {
cf <- new(J("org.apache.poi.ss.util.CellReference"), as.integer(xx[1] - 1), as.integer(xx[2] - 1), absRow, absCol)
cf$formatAsString()
})
} |
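# Example (requires the rJava/Apache POI classes used above): convert
# (row, column) index pairs to spreadsheet cell references.
# idx2cref(c(1, 1, 3, 2), absRow = FALSE, absCol = FALSE)  # "A1" "B3"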
expected <- eval(parse(text="structure(c(0.996759595651485, 0.982714508399566, 0.932630193386819, 0.993236290056531, 0.96419666630608, 0.864295998535007, 0.985909253030328, 0.926595788340938, 0.73715919564636, 0.970762623112779, 0.852643843272687, 0.528531215128914, 0.93983881933649, 0.716531310573769, 0.263597138115719, 0.878323725051003, 0.498077283739148, 0.0615441729240406, 0.762398079151112, 0.232834562975273, 0.00293893375721675, 0.567077114659808, 0.0474784101238388, 5.08141506069197e-06, 0.305397625390859, 0.00170825768891125, 8.51556634078889e-12), .Dim = c(3L, 9L), .Dimnames = list(c(\"x\", \"x\", \"\"), NULL))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(-0.00324566582797463, -0.0174366299092001, -0.0697465196367962, -0.00678668749718479, -0.0364599944879883, -0.145839977951944, -0.014190964081224, -0.0762378512927396, -0.304951405170939, -0.0296733069908004, -0.159413352946301, -0.637653411785165, -0.0620468872115091, -0.333333333333361, -1.33333333333336, -0.129740045955487, -0.697000025766712, -2.78800010306667, -0.271286446121824, -1.45742710775627, -5.8297084310247, -0.567259979811165, -3.04748019497741, -12.1899207799089, -1.18614066163432, -6.37228132326786, -25.4891252930698), .Dim = c(3L, 9L), .Dimnames = list(c(\"x\", \"x\", \"\"), NULL)))"));
do.call(`exp`, argv);
}, o=expected); |
visnetwork <- function(graph) {
nodes <- graph %>% get_node_df()
edges <- graph %>% get_edge_df()
if (graph %>% is_graph_empty()) {
nodes <- create_node_df(n = 1)
nodes <- nodes[-1, ]
edges <- create_edge_df(from = 1, to = 1)
edges <- edges[-1, ]
}
if ("pos" %in% colnames(nodes)) {
nodes <- nodes[, -(which(colnames(nodes) %in% "pos"))]
}
colnames(nodes)[which(colnames(nodes) == "nodes")] <- "id"
colnames(nodes)[which(colnames(nodes) == "type")] <- "group"
colnames(nodes)[which(colnames(nodes) == "tooltip")] <- "title"
colnames(nodes)[which(colnames(nodes) == "fillcolor")] <- "color"
colnames(edges)[which(colnames(edges) == "rel")] <- "label"
colnames(edges)[which(colnames(edges) == "tooltip")] <- "title"
colnames(edges)[which(colnames(edges) == "penwidth")] <- "width"
if ("fontcolor" %in% colnames(edges)) {
fontcolor <- edges[, -(which(colnames(edges) %in% "fontcolor"))]
}
if (all(c("x", "y") %in% colnames(nodes)) == FALSE) {
if (nrow(graph$edges_df) == 0) {
vn_obj <- visNetwork(nodes = nodes)
}
if (nrow(graph$edges_df) > 0) {
vn_obj <- visNetwork(nodes = nodes, edges = edges)
if (is_graph_directed(graph)) {
vn_obj <-
visEdges(
graph = vn_obj,
arrows = list(
to = list(
enabled = TRUE,
scaleFactor = 1)))
}
if (is_graph_undirected(graph)) {
vn_obj <-
visEdges(
graph = vn_obj,
arrows = list(
to = list(
enabled = FALSE,
scaleFactor = 1)))
}
}
vn_obj <-
visPhysics(
graph = vn_obj,
solver = "barnesHut",
stabilization = list(
enabled = TRUE,
onlyDynamicEdges = FALSE,
fit = TRUE))
vn_obj <-
visLayout(
graph = vn_obj,
improvedLayout = TRUE)
}
if (all(c("x", "y") %in% colnames(nodes))) {
nodes$y <- -as.numeric(nodes$y)
if (is.null(graph$edges_df)) {
vn_obj <- visNetwork(nodes = nodes)
vn_obj <-
visNodes(
graph = vn_obj,
physics = FALSE,
fixed = FALSE)
vn_obj <-
visPhysics(
graph = vn_obj,
stabilization = list(
enabled = FALSE,
onlyDynamicEdges = FALSE,
fit = TRUE))
vn_obj <-
visInteraction(
graph = vn_obj,
dragNodes = FALSE)
}
if (nrow(graph$edges_df) > 0) {
if ("arrow" %in% colnames(edges)) {
if (all(edges[which(colnames(edges) %in% "arrow")] == FALSE)) {
arrows_for_edges <- FALSE
} else {
          arrows_for_edges <- TRUE  # at least one edge requests an arrow
}
} else {
arrows_for_edges <- FALSE
}
vn_obj <-
visNetwork(
nodes = nodes,
edges = edges)
vn_obj <-
visNodes(
graph = vn_obj,
physics = FALSE,
fixed = FALSE)
vn_obj <-
visEdges(
graph = vn_obj,
arrows = list(
to =
list(
enabled = ifelse(arrows_for_edges, TRUE, FALSE),
scaleFactor = 1)),
smooth = FALSE,
font = list(
color = "
size = 14,
face = "arial",
background = NULL,
strokeWidth = 2,
strokeColor = "
align = "middle"))
vn_obj <-
visPhysics(
graph = vn_obj,
stabilization = list(
enabled = TRUE,
onlyDynamicEdges = FALSE,
fit = TRUE))
vn_obj <-
visInteraction(
graph = vn_obj,
dragNodes = FALSE)
}
}
vn_obj
} |
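# Illustrative call (DiagrammeR-style API assumed): build a small directed
# graph and render it with visNetwork.
# graph <- create_graph() %>%
#   add_n_nodes(n = 3) %>%
#   add_edges_w_string(edges = "1->2 2->3")
# visnetwork(graph)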
.cov.shrink.tawny <-
function(returns, sample = NULL, prior.fun = .cov.prior.cc, ...)
{
if (is.null(sample)) { S <- .cov.sample.tawny(returns) }
else { S <- sample }
T <- nrow(returns)
F <- prior.fun(S, ...)
k <- .shrinkage.intensity(returns, F, S)
d <- max(0, min(k/T, 1))
if (.loglevel.tawny() > 0) cat("Got intensity k =", k,
"and coefficient d =",d,"\n")
S.hat <- d * F + (1 - d) * S
S.hat
}
.getCorFilter.Shrinkage <-
function(prior.fun = .cov.prior.cc, ...)
{
function(h) return(cov2cor(.cov.shrink.tawny(h, prior.fun=prior.fun, ...)))
}
.cov.sample.tawny <-
function(returns)
{
T <- nrow(returns)
X <- t(returns)
ones <- rep(1,T)
S <- (1/T) * X %*% (diag(T) - 1/T * (ones %o% ones) ) %*% t(X)
S
}
.cov.prior.cc <-
function(S)
{
r.bar <- .cor.mean.tawny(S)
vars <- diag(S) %o% diag(S)
F <- r.bar * (vars)^0.5
diag(F) <- diag(S)
return(F)
}
.cov.prior.identity <-
function(S)
{
return(diag(nrow(S)))
}
.cor.mean.tawny <-
function(S)
{
N <- ncol(S)
cors <- cov2cor(S)
2 * sum(cors[lower.tri(cors)], na.rm=TRUE) / (N^2 - N)
}
.shrinkage.intensity <-
function(returns, prior, sample)
{
p <- .shrinkage.p(returns, sample)
r <- .shrinkage.r(returns, sample, p)
c <- .shrinkage.c(prior, sample)
(p$sum - r) / c
}
.shrinkage.p <-
function(returns, sample)
{
T <- nrow(returns)
N <- ncol(returns)
ones <- rep(1,T)
means <- t(returns) %*% ones / T
z <- returns - matrix(rep(t(means), T), ncol=N, byrow=TRUE)
term.1 <- t(z^2) %*% z^2
term.2 <- 2 * sample * (t(z) %*% z)
term.3 <- sample^2
phi.mat <- (term.1 - term.2 + term.3) / T
phi <- list()
phi$sum <- sum(phi.mat)
phi$diags <- diag(phi.mat)
phi
}
.shrinkage.r <-
function(returns, sample, pi.est)
{
N <- ncol(returns)
T <- nrow(returns)
ones <- rep(1,T)
means <- t(returns) %*% ones / T
z <- returns - matrix(rep(t(means), T), ncol=N, byrow=TRUE)
r.bar <- .cor.mean.tawny(sample)
term.1 <- t(z^3) %*% z
term.2 <- diag(sample) * (t(z) %*% z)
term.3 <- sample * (t(z^2) %*% matrix(rep(1,N*T), ncol=N))
term.4 <- (diag(sample) %o% rep(1,N)) * sample
script.is <- (term.1 - term.2 - term.3 + term.4) / T
ratios <- (diag(sample) %o% diag(sample)^-1)^0.5
rhos <- 0.5 * r.bar * (ratios * script.is + t(ratios) * t(script.is))
  # a leading "+" would split these into three separate statements, so only
  # the last term would be returned; trailing operators keep it one expression
  sum(pi.est$diags, na.rm = TRUE) +
    sum(rhos[lower.tri(rhos)], na.rm = TRUE) +
    sum(rhos[upper.tri(rhos)], na.rm = TRUE)
}
.shrinkage.c <-
function(prior, sample)
{
squares <- (prior - sample)^2
sum(squares, na.rm = TRUE)
}
.loglevel.tawny <-
function (new.level = NULL)
{
if (!is.null(new.level)) {
options(log.level = new.level)
}
if (is.null(getOption("log.level"))) {
return(0)
}
return(getOption("log.level"))
} |
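# Example: shrink the sample covariance of simulated daily returns
# (60 observations, 4 assets) toward the constant-correlation prior.
set.seed(1)
returns <- matrix(rnorm(60 * 4, sd = 0.01), nrow = 60)
S.hat <- .cov.shrink.tawny(returns)
round(cov2cor(S.hat), 2)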
model.matrix.binaryChoice <- function (object, ...) {
if (n_match <- match("x", names(object), 0))
object[[n_match]]
else {
data <- model.frame(object, xlev = object$xlevels, ...)
NextMethod("model.matrix", data = data, contrasts = object$contrasts)
}
} |
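# Illustrative use (sampleSelection-style API assumed): recover the design
# matrix from a fitted binary-choice (probit) model object.
# fit <- probit(y ~ x1 + x2, data = dat)
# head(model.matrix(fit))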
rollFun =
function(x, n, trim = TRUE, na.rm = FALSE, FUN, ...)
{
x.orig = x
if (is.timeSeries(x)) {
stopifnot(isUnivariate(x))
TS = TRUE
} else {
TS = FALSE
}
if (TS) {
positions = x.orig@positions
x = series(x.orig)[, 1]
} else {
x = as.vector(x.orig)
names(x) = NULL
}
if (na.rm) {
if (TS) positions = positions[!is.na(x)]
x = as.vector(na.omit(x))
}
start = 1
end = length(x)-n+1
m = x[start:end]
if (n > 1) {
for (i in 2:n) {
start = start + 1
end = end + 1
m = cbind(m, x[start:end])
}
} else {
m = matrix(m)
}
ans = apply(m, MARGIN = 1, FUN = FUN, ...)
if (!trim)
ans = c(rep(NA, (n-1)), ans)
if (trim & TS)
positions = positions[-(1:(n-1))]
if (TS) {
ans = timeSeries(as.matrix(ans), positions, recordIDs = data.frame(),
units = x.orig@units, FinCenter = x.orig@FinCenter)
}
ans
}
rollVar =
function(x, n = 9, trim = TRUE, unbiased = TRUE, na.rm = FALSE)
{
if (is.timeSeries(x)) TS = TRUE else TS = FALSE
rvar = rollFun(x = x, n = n, trim = trim, na.rm = na.rm, FUN = var)
if (!unbiased) {
if (TS) {
series(rvar) = (series(rvar) * (n-1))/n
} else {
rvar = (rvar * (n-1))/n
}
}
rvar
} |
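# Example (assumes the timeSeries helpers used above are available):
# 5-observation rolling standard deviation and variance of a random walk.
# set.seed(42)
# x <- cumsum(rnorm(20))
# rollFun(x, n = 5, FUN = sd)      # trimmed: length(x) - 4 values
# rollVar(x, n = 5, trim = FALSE)  # padded with 4 leading NAs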
tuneGenSA = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) {
requirePackages("GenSA", why = "tuneGenSA", default.method = "load")
low = getLower(par.set)
upp = getUpper(par.set)
start = control$start %??% sampleValue(par.set, trafo = FALSE)
start = convertStartToNumeric(start, par.set)
ctrl.gensa = control$extra.args
res = GenSA::GenSA(par = start, fn = tunerFitnFun, lower = low, upper = upp, control = ctrl.gensa,
learner = learner, task = task, resampling = resampling, measures = measures,
par.set = par.set, ctrl = control, opt.path = opt.path, show.info = show.info,
convertx = convertXNumeric, remove.nas = FALSE, resample.fun = resample.fun)
if (!is.null(control$budget) && res$counts > control$budget) {
warningf("GenSA used %i function calls, exceededing the given budget of %i evaluations.",
res$counts, control$budget)
}
makeTuneResultFromOptPath(learner, par.set, measures, resampling, control, opt.path)
} |
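# Illustrative mlr-style entry point that dispatches to tuneGenSA
# (learner, task, and par.set `ps` assumed to be defined):
# ctrl <- makeTuneControlGenSA(budget = 100L)
# tuneParams("classif.ksvm", iris.task, cv3, par.set = ps, control = ctrl)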
rho.file <- function(
x, col1, col2,
OcSBaserate = NULL,
testSetBaserateInflation = 0,
OcSLength = 10000,
replicates = 800,
ScSKappaThreshold = 0.9,
ScSKappaMin = .40,
ScSPrecisionMin = 0.6,
ScSPrecisionMax = 1
) {
setFile = utils::read.csv(x)
set = as.matrix(setFile[,c(col1, col2)])
if (!any(is.na(set))) {
rhoSet(
set,
OcSBaserate,
testSetBaserateInflation,
OcSLength, replicates,
ScSKappaThreshold, ScSKappaMin,
ScSPrecisionMin, ScSPrecisionMax
)
}
else {
stop("The columns provided for col1 and col2 must not contain NA")
}
} |
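# Illustrative call (file and column names assumed): rho for two raters'
# codes stored in a CSV file with one column per rater.
# rho.file("codes.csv", col1 = "rater1", col2 = "rater2")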
knitr::opts_chunk$set(collapse = T, comment = "#>")
library(pander)
library(tables)
panderOptions('knitr.auto.asis', FALSE)
panderOptions('plain.ascii', TRUE)
pander(head(iris))
pander(head(mtcars[1:5]))
pander(tabular( (Species + 1) ~ (n=1) + Format(digits=2)*
(Sepal.Length + Sepal.Width)*(mean + sd), data=iris ))
methods(pander)
evals('1:10')
str(Pandoc.brew(text ='Pi equals to `<%= pi %>`. And here are some random data: `<%= runif(10)%>`'))
pots <- panderOptions("table.style")
panderOptions("table.style", "simple")
pander(mtcars[1:3, 1:4])
pander(head(iris))
panderOptions("table.style", "grid")
pander(head(iris))
panderOptions("table.style", pots) |
write_file <- function(text, path) {
text <- as.character(text)
if (length(text) > 1) {
stop("`text` cannot have more than one element", call. = FALSE)
}
if (length(path) != 1) {
stop("`path` must be a single element", call. = FALSE)
}
text <- enc2utf8(text)
path <- normalizePath(path, mustWork = FALSE)
invisible(.Call(brio_write_file, text, path))
} |
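# Example (requires brio's compiled routine registered above):
# path <- tempfile(fileext = ".txt")
# write_file("first line\nsecond line\n", path)
# readLines(path)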
omegaMultiB <- function(data, ns, n.iter, n.burnin, n.chains, thin, model, pairwise, callback) {
n <- nrow(data)
k <- ncol(data)
mod_opts <- indexMatrix(model, k, ns, colnames(data))
idex <- mod_opts$idex
imat <- mod_opts$imat
inds <- which(is.na(data), arr.ind = TRUE)
imputed <- array(0, c(n.chains, n.iter, nrow(inds)))
pars <- list(H0k = rep(1, ns), a0k = 2, b0k = 1, l0k = matrix(0, k, ns),
H0kw = 2.5, a0kw = 2, b0kw = 1, beta0k = numeric(ns),
R0w = diag(rep(1 / k, ns + 1)), p0w = ns^2)
omsh <- matrix(0, n.chains, n.iter)
omst <- matrix(0, n.chains, n.iter)
impl_covs <- array(0, c(n.chains, n.iter, k, k))
for (ai in 1:n.chains) {
starts <- drawStartMulti(n, k, ns, pars, imat)
wi <- starts$wi
phiw <- starts$phiw
if (pairwise) {
dat_filled <- data
dat_filled[inds] <- colMeans(data, na.rm = TRUE)[inds[, 2]]
ms <- rep(0, k)
for (i in 1:n.iter) {
params <- sampleSecoParams(dat_filled, pars, wi, phiw, ns, idex)
wi <- params$wi
phiw <- params$phiw
Lm <- cbind(0, params$lambda)
Bm <- matrix(0, ns + 1, ns + 1)
Bm[2:(ns + 1), 1] <- params$beta
oms <- omegasSeco(Lm, Bm, diag(params$psi), diag(c(1, params$psiw)))
omsh[ai, i] <- oms[1]
omst[ai, i] <- oms[2]
cc <- implCovMulti(Lm, Bm, theta = diag(params$psi), psi = diag(c(1, params$psiw)))
impl_covs[ai, i, , ] <- cc
cols <- unique(inds[, 2])
for (ccc in cols) {
rows <- inds[which(inds[, 2] == ccc), 1]
mu1 <- ms[ccc]
mu2 <- ms[-ccc]
cc11 <- cc[ccc, ccc]
cc21 <- cc[-ccc, ccc]
cc12 <- cc[ccc, -ccc]
cc22 <- cc[-ccc, -ccc]
          cc22_inv <- try(solve(cc22))  # invert once; reused for mean and variance
          ccq <- cc11 - cc12 %*% cc22_inv %*% cc21
          for (r in rows) {
            muq <- mu1 + cc12 %*% cc22_inv %*% (as.numeric(dat_filled[r, -ccc]) - mu2)
            dat_filled[r, ccc] <- rnorm(1, muq, sqrt(ccq))
          }
}
imputed[ai, i, ] <- dat_filled[inds]
}
} else {
for (i in 1:n.iter) {
params <- sampleSecoParams(data, pars, wi, phiw, ns, idex)
wi <- params$wi
phiw <- params$phiw
Lm <- cbind(0, params$lambda)
Bm <- matrix(0, ns + 1, ns + 1)
Bm[2:(ns + 1), 1] <- params$beta
oms <- omegasSeco(Lm, Bm, diag(params$psi), diag(c(1, params$psiw)))
omsh[ai, i] <- oms[1]
omst[ai, i] <- oms[2]
impl_covs[ai, i, , ] <- implCovMulti(Lm, Bm, theta = diag(params$psi), psi = diag(c(1, params$psiw)))
}
}
}
omh_burn <- omsh[, (n.burnin + 1):n.iter, drop = F]
omt_burn <- omst[, (n.burnin + 1):n.iter, drop = F]
impl_covs_burn <- impl_covs[, (n.burnin + 1):n.iter, , , drop = F]
omh_out <- omh_burn[, seq(1, dim(omh_burn)[2], thin), drop = F]
omt_out <- omt_burn[, seq(1, dim(omt_burn)[2], thin), drop = F]
impl_covs_out <- impl_covs_burn[, seq(1, dim(omt_burn)[2], thin), , , drop = F]
return(list(omh = omh_out, omt = omt_out, impl_covs = impl_covs_out, imputed_values = imputed,
modfile = mod_opts))
}
sampleSecoParams <- function(data, pars, wi, phiw, ns, idex) {
n <- nrow(data)
k <- ncol(data)
H0k <- pars$H0k
l0k <- pars$l0k
a0k <- pars$a0k
b0k <- pars$b0k
H0kw <- pars$H0kw
beta0k <- pars$beta0k
a0kw <- pars$a0kw
b0kw <- pars$b0kw
R0w <- pars$R0w
p0w <- pars$p0w
ll <- matrix(0, k, ns)
pp <- numeric(k)
for (ii in 1:ns) {
ids <- idex[[ii]]
Ak <- solve(1 / H0k[ii] + t(wi[, ii + 1]) %*% wi[, ii + 1])
ak <- Ak %*% (c(1 / H0k[ii]) %*% t(l0k[ids, ii]) + wi[, ii + 1] %*% data[, ids])
bekk <- b0k + 0.5 * (t(data[, ids]) %*% data[, ids]
- t(ak) %*% solve(Ak) %*% ak
+ (l0k[ids, ii] * 1 / H0k[ii]) %*% t(l0k[ids, ii]))
bek <- diag(bekk)
invpsi <- rgamma(length(ids), n / 2 + a0k, bek)
psi <- 1 / invpsi
lambda <- rnorm(length(ids), ak, sqrt(psi * as.vector(Ak)))
if (mean(lambda) < 0) {
lambda <- -lambda
}
ll[ids, ii] <- lambda
pp[ids] <- psi
}
Akw <- 1 / (1 / H0kw + c(t(wi[, 1]) %*% wi[, 1]))
akw <- Akw * (1 / H0kw * beta0k + t(wi[, 1]) %*% wi[, 2:(ns + 1)])
bekkw <- b0kw + 0.5 * (t(wi[, 2:(ns + 1)]) %*% wi[, 2:(ns + 1)]
- ((t(akw) * (1 / Akw)) %*% akw)
+ (beta0k * (1 / H0kw)) %*% t(beta0k))
bekw <- diag(bekkw)
invpsiw <- rgamma(ns, n / 2 + a0kw, bekw)
psiw <- 1 / invpsiw
beta <- rnorm(ns, akw * sqrt(diag(phiw)[1]), sqrt(psiw * Akw))
if (mean(beta) < 0) {
beta <- -beta
}
betaMat <- matrix(0, ns + 1, ns + 1)
betaMat[2:(ns + 1), 1] <- beta
ident <- diag(ns + 1)
identInv <- solve(ident - betaMat)
psid <- diag(c(1, psiw))
sigW <- identInv %*% psid %*% t(identInv)
invsigW <- solve(sigW)
lll <- cbind(0, ll)
impM <- t(lll) %*% solve(diag(pp)) %*% lll
mw <- solve(invsigW + impM) %*% t(lll) %*% solve(diag(pp)) %*% t(data)
Vw <- solve(invsigW + impM)
wi <- genNormDataTweak(n, t(mw), Vw)
wi <- apply(wi, 2, function(x) x / sd(x))
phiw <- LaplacesDemon::rinvwishart(nu = n + p0w, S = t(wi) %*% (wi) + solve(R0w))
return(list(psi = pp, lambda = ll, psiw = psiw, beta = beta, wi = wi, phiw = phiw))
}
drawStartMulti <- function(n, k, ns, pars, imat) {
invpsi <- rgamma(k, pars$a0k, pars$b0k)
psi <- 1 / invpsi
invpsiw <- rgamma(ns, pars$a0kw, pars$b0kw)
psiw <- 1 / invpsiw
phiw <- LaplacesDemon::rinvwishart(nu = pars$p0w, S = (pars$R0w))
wi <- MASS::mvrnorm(n, numeric(ns + 1), phiw)
wi <- apply(wi, 2, function(x) x / sd(x))
return(list(wi = wi, phiw = phiw))
} |
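# Minimal sketch of the conditional-normal imputation used in omegaMultiB:
# for a bivariate normal with covariance cc, x1 | x2 is normal with mean
# mu1 + c12/c22 * (x2 - mu2) and variance c11 - c12^2/c22.
cc <- matrix(c(1, 0.6, 0.6, 1), 2, 2)
mu <- c(0, 0); x2 <- 1.5
mu.cond  <- mu[1] + cc[1, 2] / cc[2, 2] * (x2 - mu[2])
var.cond <- cc[1, 1] - cc[1, 2]^2 / cc[2, 2]
rnorm(1, mu.cond, sqrt(var.cond))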
library(act)
\dontrun{
mysearch <- act::search_new(examplecorpus, pattern="yo")
test <- act::search_cuts(x=examplecorpus, s=mysearch)
# the three slot accessors below were mangled to "[email protected]" in this
# copy; they are reconstructed from the act::search_cuts() documentation
cat(test@cuts.printtranscripts)
cat(test@cuts.cutlist.mac)
cat(test@results[, mysearch@cuts.column.srt])
} |
source("ESEUR_config.r")
fails=read.csv(paste0(ESEUR_dir, "reliability/2014-04-13.csv.xz"), as.is=TRUE)
fails$SW_f_per_t=c(0, diff(fails$cum.SW.fail))
fails$use_f_per_t=c(0, diff(fails$cum.usage.fail))
plot(~ Date+SW_f_per_t+use_f_per_t+cum.SW.fail+cum.usage.fail+ Site, data=fails)
SW_f_mod=glm(SW_f_per_t ~ Date+Site, data=fails, family=poisson)
summary(SW_f_mod) |
krscvNOMAD <- function(xz,
y,
degree.max=10,
segments.max=10,
degree.min=0,
segments.min=1,
cv.df.min=1,
complexity=c("degree-knots","degree","knots"),
knots=c("quantiles","uniform", "auto"),
basis=c("additive","tensor","glp","auto"),
cv.func=c("cv.ls","cv.gcv","cv.aic"),
degree=degree,
segments=segments,
lambda=lambda,
lambda.discrete=FALSE,
lambda.discrete.num=100,
random.seed=42,
max.bb.eval=10000,
initial.mesh.size.real="r0.1",
initial.mesh.size.integer="1",
min.mesh.size.real=paste("r",sqrt(.Machine$double.eps),sep=""),
min.mesh.size.integer="1",
min.poll.size.real="1",
min.poll.size.integer="1",
opts=list(),
nmulti=0,
tau=NULL,
weights=NULL,
singular.ok=FALSE) {
complexity <- match.arg(complexity)
knots <- match.arg(knots)
basis <- match.arg(basis)
cv.func <- match.arg(cv.func)
if( missing(lambda) || is.null(lambda)){
lambda <- NULL
}
if(degree.min < 0 ) degree.min <- 0
if(segments.min < 1 ) segments.min <- 1
if(degree.max < degree.min) degree.max <- (degree.min + 1)
if(segments.max < segments.min) segments.max <- (segments.min + 1)
if(missing(degree)) degree <- NULL
if(missing(segments)) segments <- NULL
if(lambda.discrete && !is.null(lambda.discrete.num)){
lambda.discrete.num <- as.integer(lambda.discrete.num)
if(lambda.discrete.num < 1) lambda.discrete.num <- 10
}
if(!options('crs.messages')$crs.messages && is.null(opts[["DISPLAY_DEGREE"]])) opts$"DISPLAY_DEGREE"=0
t1 <- Sys.time()
cv.nomad <- function(x,
y,
z,
degree.max=degree.max,
segments.max=segments.max,
degree.min=degree.min,
segments.min=segments.min,
z.unique,
ind,
ind.vals,
nrow.z.unique,
is.ordered.z,
complexity=complexity,
knots=knots,
basis=basis,
segments=segments,
degree=degree,
lambda=lambda,
lambda.discrete=lambda.discrete,
lambda.discrete.num=lambda.discrete.num,
cv.func=cv.func,
opts=opts,
print.output=print.output,
nmulti=nmulti,
cv.df.min=cv.df.min,
tau=tau,
weights=weights,
singular.ok=singular.ok) {
if( missing(x) || missing(y) ) stop(" you must provide input, x, y")
n <- length(y)
num.x <- NCOL(x)
if(NROW(y) != NROW(x)) stop(" x and y have differing numbers of observations")
eval.cv <- function(input, params){
complexity <- params$complexity
segments <- params$segments
degree <- params$degree
x <- params$x
y <- params$y
z <- params$z
knots <- params$knots
cv.func <- params$cv.func
basis <- params$basis
z.unique <- params$z.unique
ind <- params$ind
ind.vals <- params$ind.vals
nrow.z.unique <- params$nrow.z.unique
is.ordered.z <- params$is.ordered.z
lambda.discrete.num <- params$lambda.discrete.num
lambda.discrete <- params$lambda.discrete
cv.df.min <- params$cv.df.min
tau <- params$tau
weights <- params$weights
singular.ok <- params$singular.ok
num.x <- NCOL(x)
num.z <- NCOL(z)
if(complexity=="degree-knots") {
K <- round(cbind(input[1:num.x],input[(num.x+1):(2*num.x)]))
lambda <- input[(2*num.x+1):(2*num.x+num.z)]
}
else if(complexity=="degree") {
K<-round(cbind(input[1:num.x],segments))
lambda <- input[(num.x+1):(num.x+num.z)]
}
else if(complexity=="knots")
{
K<-round(cbind(degree, input[1:num.x]))
lambda <- input[(num.x+1):(num.x+num.z)]
}
if(lambda.discrete)
lambda <- lambda/lambda.discrete.num
lambda <- ifelse(lambda <= 0, .Machine$double.eps, lambda)
basis.opt <- basis;
if(basis=="auto"){
basis.opt <- "additive"
cv <- cv.kernel.spline.wrapper(x=x,
y=y,
z=z,
K=K,
lambda=lambda,
z.unique=z.unique,
ind=ind,
ind.vals=ind.vals,
nrow.z.unique=nrow.z.unique,
is.ordered.z=is.ordered.z,
knots=knots,
basis=basis.opt,
cv.df.min=cv.df.min,
cv.func=cv.func,
tau=tau,
weights=weights,
singular.ok=singular.ok)
cv.tensor <- cv.kernel.spline.wrapper(x=x,
y=y,
z=z,
K=K,
lambda=lambda,
z.unique=z.unique,
ind=ind,
ind.vals=ind.vals,
nrow.z.unique=nrow.z.unique,
is.ordered.z=is.ordered.z,
knots=knots,
basis="tensor",
cv.func=cv.func,
cv.df.min=cv.df.min,
tau=tau,
weights=weights,
singular.ok=singular.ok)
if(cv > cv.tensor){
cv <- cv.tensor
basis.opt <-"tensor"
}
cv.glp <- cv.kernel.spline.wrapper(x=x,
y=y,
z=z,
K=K,
lambda=lambda,
z.unique=z.unique,
ind=ind,
ind.vals=ind.vals,
nrow.z.unique=nrow.z.unique,
is.ordered.z=is.ordered.z,
knots=knots,
basis="glp",
cv.func=cv.func,
cv.df.min=cv.df.min,
tau=tau,
weights=weights,
singular.ok=singular.ok)
if(cv > cv.glp){
cv <- cv.glp
basis.opt <-"glp"
}
} else {
cv <- cv.kernel.spline.wrapper(x=x,
y=y,
z=z,
K=K,
lambda=lambda,
z.unique=z.unique,
ind=ind,
ind.vals=ind.vals,
nrow.z.unique=nrow.z.unique,
is.ordered.z=is.ordered.z,
knots=knots,
basis=basis.opt,
cv.func=cv.func,
cv.df.min=cv.df.min,
tau=tau,
weights=weights,
singular.ok=singular.ok)
}
attr(cv, "basis.opt")<-basis.opt
console <- newLineConsole()
console <- printClear(console)
console <- printPop(console)
console <- printPush("\r ",console = console)
console <- printPush(paste("\rfv = ",format(cv)," ", sep=""),console = console)
return(cv)
}
params <- list()
params$complexity <- complexity
params$segments <- segments
params$degree <- degree
params$x <- x
params$y <- y
params$z <- z
params$knots <- knots
params$cv.func <- cv.func
params$basis <- basis
params$z.unique <- z.unique
params$ind <- ind
params$ind.vals <- ind.vals
params$nrow.z.unique <- nrow.z.unique
params$is.ordered.z <- is.ordered.z
params$lambda.discrete.num <- lambda.discrete.num
params$lambda.discrete <- lambda.discrete
params$tau <- tau
params$weights <- weights
params$singular.ok <- singular.ok
params$cv.df.min <- cv.df.min
num.z <- NCOL(z)
xsegments <- segments
xdegree <- degree
xlambda <- lambda
lambda.flag <- 0L
lambda.ub <- 1.0
if(lambda.discrete){
if(!is.null(xlambda))
xlambda <- round(lambda*lambda.discrete.num)
lambda.flag <- 1L
lambda.ub <- lambda.discrete.num
lambda.ub <- as.integer(lambda.ub)
}
if(exists(".Random.seed", .GlobalEnv)) {
save.seed <- get(".Random.seed", .GlobalEnv)
exists.seed = TRUE
} else {
exists.seed = FALSE
}
set.seed(random.seed)
if(is.null(xdegree)) xdegree <- rep(max(1,degree.min),num.x)
if(is.null(xsegments)) xsegments <- rep(max(1,segments.min),num.x)
if(is.null(xlambda)) {
xlambda <- runif(num.z)
if(lambda.discrete)
xlambda <- rep(round(0.5*lambda.discrete.num), num.z)
}
if(lambda.discrete)
xlambda <- as.integer(xlambda)
if(complexity =="degree-knots") {
x0 <- c(xdegree, xsegments, xlambda)
bbin <-c(rep(1, num.x*2),rep(lambda.flag, num.z))
lb <- c(rep(degree.min,num.x), rep(segments.min,num.x), rep(0, num.z))
ub <- c(rep(degree.max,num.x), rep(segments.max,num.x), rep(lambda.ub, num.z))
}
else if(complexity=="degree") {
x0 <- c(xdegree, xlambda)
bbin <-c(rep(1, num.x),rep(lambda.flag, num.z))
lb <- c(rep(degree.min,num.x), rep(0, num.z) )
ub <- c(rep(degree.max,num.x), rep(lambda.ub, num.z))
}
else if(complexity=="knots")
{
x0 <- c(xsegments, xlambda)
bbin <-c(rep(1, num.x),rep(lambda.flag, num.z))
lb <- c(rep(segments.min,num.x), rep(0, num.z))
ub <- c(rep(segments.max,num.x), rep(lambda.ub, num.z))
}
ill.conditioned <- check.max.spline.degree(x,rep(degree.max,num.x),issue.warning=FALSE)
degree.max.vec <- attr(ill.conditioned, "degree.max.vec")
if(complexity != "knots") {
ub[1:num.x] <- ifelse(ub[1:num.x] > degree.max.vec, degree.max.vec, ub[1:num.x])
x0[1:num.x] <- ifelse(x0[1:num.x] > degree.max.vec, degree.max.vec, x0[1:num.x])
}
if(length(x0) != length(lb)) stop(" x0 and bounds have differing numbers of variables")
bbout <-c(0)
solution<-snomadr(eval.f=eval.cv,
n=length(x0),
x0=as.numeric(x0),
bbin=bbin,
bbout=bbout,
lb=lb,
ub=ub,
nmulti=as.integer(nmulti),
random.seed=random.seed,
opts=opts,
print.output=print.output,
params=params);
if(basis == "auto"){
cv.basis <- eval.cv(solution$solution, params)
attr(solution, "basis.opt") <- attributes(cv.basis)$basis.opt
if(knots == "auto")
attr(solution, "knots.opt") <- attributes(cv.basis)$knots.opt
}
else if (knots == "auto")
attr(solution, "knots.opt") <- attributes(eval.cv(solution$solution, params))$knots.opt
solution$degree.max.vec <- degree.max.vec
if(exists.seed) assign(".Random.seed", save.seed, .GlobalEnv)
return(solution)
}
xztmp <- splitFrame(xz,factor.to.numeric=TRUE)
x <- xztmp$x
z <- xztmp$z
if(is.null(z))
stop(" categorical kernel smoothing requires ordinal/nominal predictors")
z <- as.matrix(xztmp$z)
num.z <- NCOL(z)
is.ordered.z <- xztmp$is.ordered.z
z.unique <- uniquecombs(z)
ind <- attr(z.unique,"index")
ind.vals <- unique(ind)
nrow.z.unique <- NROW(z.unique)
num.x <- NCOL(x)
n <- NROW(x)
if(!is.null(lambda) ) {
if(length(lambda)!=num.z){
warning(paste(" the length of lambda (", length(lambda),") is not the same as the length of z (", num.z, ")",sep=""))
lambda <- NULL
}
else if (any(lambda < 0) || any(lambda > 1) ) {
lambda <- NULL
}
}
if(complexity=="degree") {
if(missing(segments) || is.null(segments)) stop(" segments missing for cross-validation of spline degree")
if(length(segments)!=num.x) stop(" segments vector must be the same length as x")
if(!is.null(degree) && length(degree) == num.x) {
if(any(degree < degree.min)||any(degree>degree.max)) {
warning(paste(" The provided initial values for the degree are not in the bounds.", sep=""))
degree <- NULL
}
}
else
degree <- NULL
} else if(complexity=="knots") {
if(missing(degree) || is.null(degree)) stop(" degree missing for cross-validation of number of spline knots")
if(length(degree)!=num.x) stop(" degree vector must be the same length as x")
if(!is.null(segments) && length(segments) == num.x) {
if(any(segments < segments.min)||any(segments > segments.max)) {
warning(paste(" The provided initial values for the segments are not in the bounds.", sep=""))
segments <- NULL
}
}
else
segments <- NULL
}
else {
if(!is.null(degree) && length(degree) == num.x) {
if(any(degree < degree.min)||any(degree>degree.max)) {
warning(paste(" The provided initial values for the degree are not in the bounds.", sep=""))
degree <- NULL
}
}
else {
degree <- NULL
}
if(!is.null(segments) && length(segments) == num.x) {
if(any(segments < segments.min)||any(segments > segments.max)) {
warning(paste(" The provided initial values for the segments are not in the bounds.", sep=""))
segments <- NULL
}
}
else
segments <- NULL
}
INITIAL.MESH.SIZE <- list()
MIN.MESH.SIZE <- list()
MIN.POLL.SIZE <- list()
if(complexity=="degree-knots") {
for(i in 1:(2*num.x)) {
INITIAL.MESH.SIZE[[i]] <- initial.mesh.size.integer
MIN.MESH.SIZE[[i]] <- min.mesh.size.integer
MIN.POLL.SIZE[[i]] <- min.poll.size.integer
}
for(i in (2*num.x+1):(2*num.x+num.z)) {
INITIAL.MESH.SIZE[[i]] <- initial.mesh.size.real
MIN.MESH.SIZE[[i]] <- min.mesh.size.real
MIN.POLL.SIZE[[i]] <- min.poll.size.real
}
}
else if(complexity=="degree"|complexity=="knots") {
for(i in 1:num.x) {
INITIAL.MESH.SIZE[[i]] <- initial.mesh.size.integer
MIN.MESH.SIZE[[i]] <- min.mesh.size.integer
MIN.POLL.SIZE[[i]] <- min.poll.size.integer
}
for(i in (num.x+1):(num.x+num.z)) {
INITIAL.MESH.SIZE[[i]] <- initial.mesh.size.real
MIN.MESH.SIZE[[i]] <- min.mesh.size.real
MIN.POLL.SIZE[[i]] <- min.poll.size.real
}
}
opts$"EPSILON" <- .Machine$double.eps
opts$"MAX_BB_EVAL" <- max.bb.eval
opts$"INITIAL_MESH_SIZE" <- INITIAL.MESH.SIZE
opts$"MIN_MESH_SIZE" <- MIN.MESH.SIZE
opts$"MIN_POLL_SIZE" <- MIN.POLL.SIZE
if((num.x==1) && (basis == "auto")) basis <- "additive"
if(degree.max < 1 || segments.max < 1 ) stop(" degree.max or segments.max must be greater than or equal to 1")
print.output <- FALSE
console <- newLineConsole()
if(!is.null(opts$DISPLAY_DEGREE)){
if(opts$DISPLAY_DEGREE>0){
print.output <-TRUE
console <- printPush("Calling NOMAD (Nonsmooth Optimization by Mesh Adaptive Direct Search)\n",console = console)
}
}
else {
print.output <-TRUE
console <- printPush("Calling NOMAD (Nonsmooth Optimization by Mesh Adaptive Direct Search)\n",console = console)
}
nomad.solution<-cv.nomad(x,
y,
z,
degree.max=degree.max,
segments.max=segments.max,
degree.min=degree.min,
segments.min=segments.min,
z.unique=z.unique,
ind=ind,
ind.vals=ind.vals,
nrow.z.unique=nrow.z.unique,
is.ordered.z=is.ordered.z,
complexity=complexity,
knots=knots,
basis=basis,
segments=segments,
degree=degree,
lambda=lambda,
lambda.discrete=lambda.discrete,
lambda.discrete.num=lambda.discrete.num,
cv.func=cv.func,
opts=opts,
print.output=print.output,
nmulti=nmulti,
cv.df.min=cv.df.min,
tau=tau,
weights=weights,
singular.ok=singular.ok)
t2 <- Sys.time()
cv.min <- nomad.solution$objective
if(isTRUE(all.equal(cv.min,sqrt(.Machine$double.xmax)))) stop(" Search failed: restart with larger nmulti or smaller degree.max")
if(complexity=="degree-knots") {
K.opt <- as.integer(nomad.solution$solution[1:(2*num.x)])
lambda.opt <- as.numeric(nomad.solution$solution[(2*num.x+1):(2*num.x+num.z)])
degree <- K.opt[1:num.x]
segments <- K.opt[(num.x+1):(2*num.x)]
}
else if(complexity=="degree") {
degree <- as.integer(nomad.solution$solution[1:num.x])
lambda.opt <- as.numeric(nomad.solution$solution[(num.x+1):(num.x+num.z)])
K.opt <-cbind(degree, segments)
}
else if(complexity=="knots")
{
segments <- as.integer(nomad.solution$solution[1:num.x])
lambda.opt <- as.numeric(nomad.solution$solution[(num.x+1):(num.x+num.z)])
K.opt <-cbind(degree, segments)
}
if(lambda.discrete)
lambda.opt <- lambda.opt/lambda.discrete.num
basis.opt <- basis
if(basis == "auto") basis.opt <- attributes(nomad.solution)$basis.opt
knots.opt <- knots
if(knots == "auto") knots.opt <- attributes(nomad.solution)$knots.opt
lambda.opt <- ifelse(lambda.opt <= 0, .Machine$double.eps, lambda.opt)
console <- printClear(console)
console <- printPop(console)
segments[degree==0] <- 1
if(any(degree==nomad.solution$degree.max.vec)) warning(paste(" optimal degree equals search maximum (", nomad.solution$degree.max.vec,"): rerun with larger degree.max",sep=""))
if(any(segments==segments.max)) warning(paste(" optimal segment equals search maximum (", segments.max,"): rerun with larger segments.max",sep=""))
if(!is.null(opts$MAX_BB_EVAL)){
if(nmulti>0) {if(nmulti*opts$MAX_BB_EVAL <= nomad.solution$bbe) warning(paste(" MAX_BB_EVAL reached in NOMAD: perhaps use a larger value...", sep=""))}
if(nmulti==0) {if(opts$MAX_BB_EVAL <= nomad.solution$bbe) warning(paste(" MAX_BB_EVAL reached in NOMAD: perhaps use a larger value...", sep="")) }
}
cv.vec <- NULL
lambda.mat <- NULL
basis.vec <- NULL
K.mat <- NULL
crscv(K=K.opt,
I=NULL,
basis=basis.opt,
basis.vec=basis.vec,
degree.max=degree.max,
segments.max=segments.max,
degree.min=degree.min,
segments.min=segments.min,
complexity=complexity,
knots=knots.opt,
degree=degree,
segments=segments,
restarts=nmulti,
K.mat=K.mat,
lambda=lambda.opt,
lambda.mat=lambda.mat,
cv.objc=cv.min,
cv.objc.vec=cv.vec,
num.x=num.x,
cv.func=cv.func,
tau=tau)
} |
library(MiRNAQCD)
rawTrainingSet = read.table(file="real_dataset_2_training.dat", fileEncoding="UTF-8", header=FALSE)
rawTestingSet = read.table(file="real_dataset_2_testing.dat", fileEncoding="UTF-8", header=FALSE)
names(rawTrainingSet) <- c("miRNA", "Subject", "Value", "Class")
names(rawTestingSet) <- c("miRNA", "Subject", "Value", "Class")
dir.create(file.path(getwd(), "Results_real_dataset_2"), showWarnings = FALSE)
preprocTrainingSet <- miRNA_expressionPreprocessing(rawTrainingSet, multipletSize=3)
preprocTestingSet <- miRNA_expressionPreprocessing(rawTestingSet, multipletSize=3)
qualityThresholdTrainingSet <- miRNA_assessQualityThreshold(preprocTrainingSet, significanceLevel=0.05)
qualityThresholdTestingSet <- miRNA_assessQualityThreshold(preprocTestingSet, significanceLevel=0.05)
cleanedTrainingSet <- miRNA_removeOutliers(preprocTrainingSet, qualityThresholdTrainingSet)
cleanedTestingSet <- miRNA_removeOutliers(preprocTestingSet, qualityThresholdTestingSet)
Target <- c("AT","TC")
Versus <- c("AD","SQC")
outputAnalysis <- miRNA_classifierSetup(cleanedTrainingSet, inputTargetList=Target, inputVersusList=Versus, inputMiRNAList="U6", saveOutputFile=TRUE, outputFileBasename="Results_real_dataset_2/miRNAanalysis_training_dataset_norm_U6")
outputTraining <- miRNA_classifierSetup(cleanedTrainingSet, inputTargetList=Target, inputVersusList=Versus, inputMiRNAList=c("miR375","U6"), coeffList=c(-1.0, 1.0), histogramParameters="-6.0_14.0_1.0", saveOutputFile=TRUE, outputFileBasename="Results_real_dataset_2/classifier_training_dataset_-miR375_U6", scorePlotAscending=FALSE, scorePlotParameters="-5.0_15.0_5", colorComplementFlag=TRUE)
classifiedDataset <- miRNA_diagnosis(cleanedTestingSet, inputMiRNAList=c("miR375","U6"), coeffList=c(-1.0, 1.0), inputThreshold=outputTraining, inputTargetList=Target, inputVersusList=Versus, saveOutputFile=TRUE, outputFileBasename="Results_real_dataset_2/diagnosis_testing_dataset_-miR375_U6", scorePlotAscending=FALSE, scorePlotParameters="-10.0_10.0_5", colorComplementFlag=TRUE)
classifiedDataset <- miRNA_diagnosis(preprocTestingSet, inputMiRNAList=c("miR375","U6"), coeffList=c(-1.0, 1.0), inputThreshold=outputTraining, inputTargetList=Target, inputVersusList=Versus, saveOutputFile=TRUE, outputFileBasename="Results_real_dataset_2/diagnosis_testing_dataset_with_outliers_-miR375_U6", scorePlotAscending=FALSE, scorePlotParameters="-10.0_10.0_5", colorComplementFlag=TRUE) |
set.seed(1234)
suppressPackageStartupMessages(library("argparse"))
library(tidyverse)
parser = ArgumentParser()
parser$add_argument("--matrix1", required=T, nargs=1)
parser$add_argument("--infercnv_obj", required=T, nargs=1)
parser$add_argument("--log", required=F, default=FALSE, action="store_true")
parser$add_argument("--output", required=T, nargs=1, help="output filename pdf")
args = parser$parse_args()
data1 = as.matrix(read.table(args$matrix1, header=T, row.names=1))
infercnv_obj_file = args$infercnv_obj
infercnv_obj = readRDS(infercnv_obj_file)
data2 = as.matrix(infercnv_obj@expr.data[, unlist(infercnv_obj@reference_grouped_cell_indices)])
png(args$output)
if (args$log) {
data1 = log(data1+1)
data2 = log(data2+1)
}
m1_ecdf = ecdf(data1)
m2_ecdf = ecdf(data2)
val_range = range(data1, data2)
step = (val_range[2] - val_range[1])/100
vals = seq(val_range[1], val_range[2], step)
m1_cdf = m1_ecdf(vals)
m2_cdf = m2_ecdf(vals)
cdfs = data.frame(vals,
m1_cdf,
m2_cdf)
ks_point = which.max(abs(cdfs$m1_cdf - cdfs$m2_cdf))
ks_point_info = cdfs[ks_point,]
cdfs = cdfs %>% gather('m1_cdf', 'm2_cdf', key='type', value='cdf')
ggplot(cdfs, aes(x=vals, y=cdf)) +
geom_line(aes(color=type, linetype=type)) +
geom_segment(aes(x=ks_point_info$vals,
y=ks_point_info$m1_cdf,
xend=ks_point_info$vals,
yend=ks_point_info$m2_cdf), color='magenta', size=2) +
ggtitle(sprintf("%s vs. %s KS", args$matrix1, args$matrix2)) + xlab("number") + ylab("cdf") |
library(plantecophys)
context("FARAO")
f1 <- FARAO()
f2 <- FARAO(energybalance=TRUE)
f3 <- FARAO2()
f4 <- FARAO2(energybalance=TRUE)
f5 <- FARAO(C4=TRUE)
test_that("FARAO output", {
expect_named(f1)
expect_named(f2)
expect_named(f3)
  expect_named(f4)
  expect_named(f5)
})
as_messydate <- function(x) UseMethod("as_messydate")
as_messydate.Date <- function(x) {
x <- as.character(x)
new_messydate(x)
}
as_messydate.POSIXct <- function(x) {
x <- as.character(x)
new_messydate(x)
}
as_messydate.POSIXlt <- function(x) {
x <- as.character(x)
new_messydate(x)
}
as_messydate.character <- function(x) {
d <- standardise_date_separators(x)
d <- standardise_date_order(d)
d <- standardise_date_input(d)
d <- standardise_unspecifieds(d)
d <- standardise_widths(d)
d <- standardise_ranges(d)
d <- remove_imprecision(d)
new_messydate(d)
}
standardise_date_separators <- function(dates) {
  dates <- stringr::str_replace_all(dates,
                                    "([:digit:]{4})([:digit:]{2})([:digit:]{2})",
                                    "\\1-\\2-\\3")
dates <- stringr::str_replace_all(dates,
"(?<=[:digit:])\\.(?=[:digit:])",
"-")
dates <- stringr::str_replace_all(dates, "\\/", "-")
dates <- stringr::str_trim(dates, side = "both")
dates
}
standardise_date_order <- function(dates) {
  dates <- stringr::str_replace_all(dates,
                                    "([:digit:]{2})-([:digit:]{2})-([:digit:]{4})",
                                    "\\3-\\2-\\1")
dates
}
standardise_date_input <- function(dates) {
as_bc_dates <- function(dates) {
dates <- stringr::str_remove_all(dates, "(bc|BC|Bc|bC)")
dates <- stringr::str_trim(dates, side = "both")
dates <- paste0("-", dates)
dates
}
  as_ac_dates <- function(dates) {
    dates <- stringr::str_remove_all(dates, "(ad|AD|Ad|aD)")
    dates <- stringr::str_trim(dates, side = "both")
    dates
  }
dates <- ifelse(stringr::str_detect(dates, "(bc|BC|Bc|bC)"),
as_bc_dates(dates), dates)
dates <- ifelse(stringr::str_detect(dates, "(ad|AD|Ad|aD)"),
as_ac_dates(dates), dates)
dates
}
standardise_unspecifieds <- function(dates) {
dates <- stringr::str_replace_all(dates, "^NA", "XXXX")
dates <- stringr::str_replace_all(dates, "-NA", "-XX")
dates <- stringr::str_replace_all(dates, "0000", "XXXX")
dates <- stringr::str_replace_all(dates, "-00-|-0-|-0$|-00$", "-XX-")
dates <- stringr::str_replace_all(dates, "\\?\\?\\?\\?", "XXXX")
dates <- stringr::str_replace_all(dates, "-\\?\\?", "-XX")
dates
}
standardise_widths <- function(dates) {
dates <- stringr::str_replace_all(dates, "^-([:digit:]{1})$", "-000\\1")
dates <- stringr::str_replace_all(dates, "^-([:digit:]{2})$", "-00\\1")
dates <- stringr::str_replace_all(dates, "^-([:digit:]{3})$", "-0\\1")
dates <- stringr::str_replace_all(dates, "^~([:digit:]{1})$", "000\\1~")
dates <- stringr::str_replace_all(dates, "^~([:digit:]{2})$", "00\\1~")
dates <- stringr::str_replace_all(dates, "^~([:digit:]{3})$", "0\\1~")
dates <- stringr::str_replace_all(dates, "^\\?([:digit:]{1})$", "000\\1?")
dates <- stringr::str_replace_all(dates, "^\\?([:digit:]{2})$", "00\\1?")
dates <- stringr::str_replace_all(dates, "^\\?([:digit:]{3})$", "0\\1?")
dates <- ifelse(stringr::str_detect(dates, "^([:digit:]{1})~$|^([:digit:]{1})\\?$"),
paste0("000", dates), dates)
dates <- ifelse(stringr::str_detect(dates, "^([:digit:]{2})~$|^([:digit:]{2})\\?$"),
paste0("00", dates), dates)
dates <- ifelse(stringr::str_detect(dates, "^([:digit:]{3})~$|^([:digit:]{3})\\?$"),
paste0("0", dates), dates)
dates <- stringr::str_replace_all(dates, "^([:digit:]{1})$", "000\\1")
dates <- stringr::str_replace_all(dates, "^([:digit:]{2})$", "00\\1")
dates <- stringr::str_replace_all(dates, "^([:digit:]{3})$", "0\\1")
dates <- stringr::str_replace_all(dates, "-([:digit:])$", "-0\\1")
dates <- stringr::str_replace_all(dates, "-([:digit:])-", "-0\\1-")
dates <- stringr::str_replace_all(dates, "^([:digit:])-", "0\\1-")
dates
}
standardise_ranges <- function(dates) {
dates <- stringr::str_replace_all(dates, "_", "..")
dates <- stringr::str_replace_all(dates, ":", "..")
dates
}
remove_imprecision <- function(dates) {
dates <- stringr::str_replace_all(dates, "-XX$", "")
dates
} |
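# Illustrative sketch (an addition, not part of the original source): the helpers
# above compose into the as_messydate.character() pipeline, e.g.
#   standardise_date_order(standardise_date_separators("01.02.2010"))  # "2010-02-01"
#   standardise_unspecifieds("2010-NA-NA")                             # "2010-XX-XX"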
bitseq_rnaSeqData <-
function(t,trFileName) {
tu=unique(t)
nu=as.matrix(as.vector(table(t)))
time_mapping=data.frame(files=names(t),time=unname(t),repMask=match(t,tu))
tr_data=read.table(trFileName, header=TRUE)
gene_mapping=data.frame(gene_ID=as.matrix(tr_data$gene_ID),transcript_ID=as.matrix(tr_data$transcript_ID),transcript_length=as.matrix(tr_data$transcript_length))
M=length(gene_mapping$transcript_ID)
nT=length(t)
unique_genes_ID=unique(as.matrix(gene_mapping$gene_ID))
N=length(unique_genes_ID)
gene=list(logTrans=1,mean=matrix(0,N,nT),std=matrix(0,N,nT),time_mapping=time_mapping,gene_mapping=gene_mapping)
dimnames(gene$mean) = list(unique_genes_ID,time_mapping$files)
dimnames(gene$std) = list(unique_genes_ID,time_mapping$files)
abstr=list(logTrans=1,mean=matrix(0,M,nT),std=matrix(0,M,nT),time_mapping=time_mapping,gene_mapping=gene_mapping)
dimnames(abstr$mean) = list(gene_mapping$transcript_ID,time_mapping$files)
dimnames(abstr$std) = list(gene_mapping$transcript_ID,time_mapping$files)
reltr=list(logTrans=0,mean=matrix(0,M,nT),std=matrix(0,M,nT),time_mapping=time_mapping,gene_mapping=gene_mapping)
dimnames(reltr$mean) = list(gene_mapping$transcript_ID,time_mapping$files)
dimnames(reltr$std) = list(gene_mapping$transcript_ID,time_mapping$files)
gpData=list(gene=gene,abstr=abstr,reltr=reltr)
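  # Note: rowVars() is not defined in this file; it is assumed to be provided by a
  # package such as matrixStats (row-wise variances of a matrix).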
for (i in 1:nT) {
mcmcFileName=names(t)[i]
mcmc_data=as.matrix(read.table(mcmcFileName))
J=ncol(mcmc_data)
for (k in 1:N) {
my_gene=unique_genes_ID[k]
tr_inds=which(gene_mapping$gene_ID %in% my_gene)
no_tr=length(tr_inds)
tr_expr=matrix(mcmc_data[tr_inds,],no_tr,J)
gene_expr=matrix(colSums(tr_expr),1,J)
gpData$gene$mean[k,i]=rowMeans(log(gene_expr))
gpData$gene$std[k,i]=sqrt(rowVars(log(gene_expr)))
gpData$abstr$mean[tr_inds,i]=rowMeans(log(tr_expr))
gpData$abstr$std[tr_inds,i]=sqrt(rowVars(log(tr_expr)))
gpData$reltr$mean[tr_inds,i]=rowMeans(tr_expr/(repmat(gene_expr,no_tr,1)))
gpData$reltr$std[tr_inds,i]=sqrt(rowVars(tr_expr/(repmat(gene_expr,no_tr,1))))
}
}
return(gpData)
}
repmat <-
function(x,m,n){
n_rows = dim(x)[1]
n_columns = dim(x)[2]
y=matrix(t(matrix(x,n_rows,n_columns*n)),n_rows*m,n_columns*n,byrow=TRUE)
return(y)
} |
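# Illustrative sketch (an addition, not part of the original source): repmat()
# tiles a matrix m-by-n in the style of MATLAB's repmat(), e.g.
#   repmat(matrix(1:2, 1, 2), 2, 3)   # a 2 x 6 matrix of repeated (1, 2) blocks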
stratRejectionSampler_normal <- nimbleFunction(
run = function(
numPoints = integer(0),
lowerCoords = double(2),
upperCoords = double(2),
s = double(1),
windowIntensities = double(1),
sd = double(0)
) {
numWindows <- dim(lowerCoords)[1]
numDims <- 2
if(numPoints <= 0) return(matrix(nrow = 0, ncol = numDims))
sumIntensity <- sum(windowIntensities)
outCoordinates <- matrix(nrow = numPoints, ncol = numDims)
if(sumIntensity <= 0.0) {
windowIntensities <- calcWindowSizes(lowerCoords, upperCoords)
sumIntensity <- sum(windowIntensities)
if(sumIntensity <= 0.0) return(matrix(nrow = 0, ncol = numDims))
}
maxWidth <- abs(qnorm(0.0001, mean = 0.0, sd = sd))
withinMaxWidth <- numeric(length = numWindows)
for(i in 1:numWindows) {
withinMaxWidth[i] <- prod(s[1:numDims] - maxWidth < upperCoords[i, 1:numDims]) *
prod(s[1:numDims] + maxWidth > lowerCoords[i, 1:numDims])
}
correctedIntensities <- windowIntensities[1:numWindows] * withinMaxWidth[1:numWindows]
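    # For each point: draw a window with probability proportional to its truncated
    # intensity, then rejection-sample a location inside that window. The envelope
    # height is the normal kernel evaluated at the window point closest to s (its
    # maximum over the window), so acceptance compares testIntensity/maxIntensity.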
for(i in 1:numPoints) {
curInd <- rcat(1, correctedIntensities[1:numWindows])
nearestPoint <- pmin(pmax(s[1:numDims], lowerCoords[curInd, 1:numDims]), upperCoords[curInd, 1:numDims])
maxIntensity <- exp(-sum(pow(nearestPoint[1:numDims] - s[1:numDims], 2.0)) / (2.0 * sd * sd))
testCoords <- runif(numDims,
min = pmax(lowerCoords[curInd, 1:numDims], s[1:numDims] - maxWidth),
max = pmin(upperCoords[curInd, 1:numDims], s[1:numDims] + maxWidth))
testIntensity <- exp(-sum(pow(testCoords[1:numDims] - s[1:numDims], 2.0)) / (2.0 * sd * sd))
randVal <- runif(1, min = 0.0, max = 1.0)
while(randVal > (testIntensity / maxIntensity)) {
testCoords <- runif(numDims,
min = pmax(lowerCoords[curInd, 1:numDims], s[1:numDims] - maxWidth),
max = pmin(upperCoords[curInd, 1:numDims], s[1:numDims] + maxWidth))
testIntensity <- exp(-sum(pow(testCoords[1:numDims] - s[1:numDims], 2.0)) / (2.0 * sd * sd))
randVal <- runif(1, min = 0.0, max = 1.0)
}
outCoordinates[i, 1:numDims] <- testCoords[1:numDims]
}
return(outCoordinates)
returnType(double(2))
}
) |
`factor.it<-` <- function(x,value){
if (!is.data.frame(x)) stop('x must be a dataframe')
if (any(! value %in% colnames(x))) stop('value must be column names in x')
for (i in value) {
x[,i]=factor(x[,i])
}
x
}
factor.it <- function(x,value){
if (!is.data.frame(x)) stop('x must be a dataframe')
if (any(! value %in% colnames(x))) stop('value must be column names in x')
for (i in value) {
x[,i]=factor(x[,i])
}
x
}
numeric.it <- function(x,value){
if (!is.data.frame(x)) stop('x must be a dataframe')
if (any(! value %in% colnames(x))) stop('value must be column names in x')
for (i in value) {
x[,i]=as.numeric(as.character(x[,i]))
}
x
}
`numeric.it<-` <- function(x,value){
if (!is.data.frame(x)) stop('x must be a dataframe')
if (any(! value %in% colnames(x))) stop('value must be column names in x')
for (i in value) {
x[,i]=as.numeric(as.character(x[,i]))
}
x
} |
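# Illustrative sketch (an addition, not part of the original source): the
# replacement-function forms enable assignment syntax on a data frame `df`, e.g.
#   factor.it(df) <- c("group", "batch")   # same as df <- factor.it(df, c("group", "batch"))
#   numeric.it(df) <- "dose"               # converts factor levels back to numbers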
setGeneric("getWindEstimates", function(data, timestamps, ...) {
standardGeneric("getWindEstimates")
})
setMethod("getWindEstimates", signature = signature(data = "MoveStack", timestamps =
"missing") ,
function(data, timestamps, ...) {
l <- list(...)
if ('isFocalPoint' %in% names(l))
if (is.logical(l[['isFocalPoint']])) {
stopifnot(length(l[['isFocalPoint']]) == sum(n.locs(data)))
l[['isFocalPoint']] <- which(l[['isFocalPoint']])
}
slist <- move::split(data)
x <-
mapply(
function(...) {
tryCatch(
getWindEstimates(...),
error = function(e) {
if (e$message == 'No sample locations selected, either through the focal function or large windowSize') {
NULL
} else{
stop(e)
}
}
)
},
slist
,
focalSampleBefore = head(c(0, cumsum(n.locs(
data
))),-1),
MoreArgs = l,
SIMPLIFY = F
)
if (all(s <- unlist(lapply(x, is.null))))
{
stop('No sample locations selected, either through the focal function or large windowSize')
}
if (unique(unlist(lapply(x[!s], class))) != "Move")
{
return(unlist(recursive = F, x))
}
res <- moveStack(ifelse(s, slist, x))
return(res)
})
setMethod("getWindEstimates", signature = signature(data = "Move", timestamps =
"missing") ,
function(data, timestamps, groundSpeedXY = NULL, ...) {
if (!is.null(groundSpeedXY))
{
stopifnot(is.character(groundSpeedXY))
stopifnot(length(groundSpeedXY) == 2)
stopifnot(all(groundSpeedXY %in% names(data)))
dataDf <- data.frame(data[, groundSpeedXY])[, groundSpeedXY]
} else{
spd <- speed(data)
ang <- angle(data)
dataDf <-
data.frame(vx = spd * sin(ang / 180 * pi),
vy = spd * cos(ang / 180 * pi))
dataDf <- rbind(dataDf, NA)
}
res <-
callGeneric(data = dataDf,
timestamps = move::timestamps(data),
...)
if(!is.data.frame(res))
{
return(res)
}
slot(data, "data") <- cbind(slot(data, 'data'), res)
return(data)
})
setMethod("getWindEstimates", signature = signature(data = "data.frame", timestamps =
'POSIXct') ,
function(data,
timestamps,
windowSize = 29,
isFocalPoint = function(i, ts) {
TRUE
},
isSamplingRegular = 1,
focalSampleBefore = 0,
returnSegmentList=F, referenceGroundSpeed=NULL,
...)
{
l <- nrow(data)
segList <-
getTrackSegments(
data,
windowSize = windowSize,
isFocalPoint = isFocalPoint,
isSamplingRegular = isSamplingRegular,
focalSampleBefore=focalSampleBefore, timestamps=timestamps
)
data <- segList
if(returnSegmentList){
return(data)
}
w <- getWindowSizeLR(windowSize)
if (is.null(referenceGroundSpeed)) {
referenceGroundSpeed <- which(((-w[1]):w[2]) == 0)
}
res <-
callGeneric(data = data, referenceGroundSpeed = referenceGroundSpeed, ...)
resFull<-res[F,][1:l,]
resFull[as.numeric(names(segList)),]<-res
return(resFull)
})
setMethod("getWindEstimates", signature = signature(data = "list", timestamps =
'ANY') ,
function(data,
timestamps,
phi = 0,
isThermallingFunction = getDefaultIsThermallingFunction(360, 4),
columnNamesWind = c(
"estimationSuccessful",
"residualVarAirspeed",
"windX",
"windY",
"windVarX",
"windVarY",
"windCovarXY",
"windVarMax",
'airX',
'airY'
),
referenceGroundSpeed=NULL,
...)
{
n<-length(data)
windEst <- mclapply(data, getWindEstimate, phi = phi)
res <- data.frame(matrix(NA, ncol = length(columnNamesWind), nrow = n))
names(res) <- columnNamesWind
estimationSuccessful <- !unlist(lapply(windEst, is.null))
airVectors <-
mapply('-',
lapply(data[estimationSuccessful], t),
lapply(windEst[estimationSuccessful], '[[', 'windEst'),
SIMPLIFY =
F)
airHeadings <- lapply(airVectors
, function(x) {
atan2(x[1, ], x[2, ]) / pi * 180
})
airSpeeds <- lapply(airVectors
, function(x) {
sqrt(colSums(x ^ 2))
})
isThermalling = rep(F, n)
isThermalling[estimationSuccessful] <-
unlist(mcmapply(
headings = airHeadings,
speeds = airSpeeds
,
isThermallingFunction
))
estimationSuccessful = estimationSuccessful & isThermalling
res[, columnNamesWind[1]] <- estimationSuccessful
if(all(!estimationSuccessful))
{ return(res)}
res[estimationSuccessful, columnNamesWind[2:4]] <-
matrix(unlist(lapply(
windEst[estimationSuccessful], '[', c("residualVarAirSpeed", "windEst")
)), ncol = 3, byrow = T)
covarList <- lapply(windEst[estimationSuccessful], '[[', 'covar')
nonNullCovar = rep(F, n)
nonNullCovar[estimationSuccessful] <- !unlist(lapply(covarList, is.null))
res[estimationSuccessful & nonNullCovar, columnNamesWind[5:7]] <-
matrix(unlist(covarList[nonNullCovar[estimationSuccessful]]), ncol = 4, byrow = T)[, c(1, 4, 2)]
nonNaNCovar = rep(F, n)
nonNaNCovar[estimationSuccessful & nonNullCovar] <-
!unlist(lapply(lapply(covarList[nonNullCovar], is.nan), any))
res[estimationSuccessful & nonNullCovar & nonNaNCovar, columnNamesWind[8]] <-
unlist(lapply(covarList[nonNullCovar[estimationSuccessful] & nonNaNCovar[estimationSuccessful]], eigen, only.values = T))[c(T, F)]
if(!is.null(referenceGroundSpeed))
{
grndS<-do.call("rbind",lapply(data,'[', referenceGroundSpeed, T))
res[, columnNamesWind[9:10]]<-
grndS-res[,c("windX","windY")]
}
rownames(res)<-names(data)
return(res)
}) |
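# Hypothetical usage sketch (an addition; assumes `trk` is a Move object from the
# move package with regular sampling):
#   trk <- getWindEstimates(trk, windowSize = 29)
#   head(trk$windX); head(trk$windY)   # estimated wind components per location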
context("test-latest.R")
test_that("latest works", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
cur <- fixer_latest(symbols = c("USD", "GBP", "JPY"))
expect_equal(length(cur), 2)
expect_true("USD" %in% cur$name)
  expect_true(tibble::is_tibble(cur))
expect_equal(nrow(cur), 3)
}) |
context("search_pv")
eps <- get_endpoints()
test_that("API returns expected df names for all endpoints", {
skip_on_cran()
skip_on_ci()
z <- vapply(eps, function(x) {
Sys.sleep(3)
j <- search_pv("{\"patent_number\":\"5116621\"}", endpoint = x)
names(j[[1]])
}, FUN.VALUE = character(1), USE.NAMES = FALSE)
expect_equal(eps, z)
})
test_that("DSL-based query returns expected results", {
skip_on_cran()
skip_on_ci()
query <- with_qfuns(
and(
or(
gte(patent_date = "2014-01-01"),
lte(patent_date = "1978-01-01")
),
text_phrase(patent_abstract = c("computer program", "dog leash"))
)
)
out <- search_pv(query)
expect_gt(out$query_results$total_patent_count, 1000)
})
test_that("search_pv can pull all fields for all endpoints except locations", {
skip_on_cran()
skip_on_ci()
eps_no_loc <- eps[eps != "locations"]
z <- lapply(eps_no_loc, function(x) {
Sys.sleep(3)
search_pv(
"{\"patent_number\":\"5116621\"}",
endpoint = x,
fields = get_fields(x)
)
})
expect_true(TRUE)
})
test_that("search_pv can return subent_cnts", {
skip_on_cran()
skip_on_ci()
out_spv <- search_pv(
"{\"patent_number\":\"5116621\"}",
fields = get_fields("patents", c("patents", "inventors")),
subent_cnts = TRUE
)
expect_true(length(out_spv$query_results) == 2)
})
test_that("Sort option works as expected", {
skip_on_cran()
skip_on_ci()
out_spv <- search_pv(
qry_funs$gt(patent_date = "2015-01-01"),
fields = get_fields("inventors", c("inventors")),
endpoint = "inventors",
sort = c("inventor_lastknown_latitude" = "desc"),
per_page = 100
)
lat <- as.numeric(out_spv$data$inventors$inventor_lastknown_latitude)
expect_true(lat[1] >= lat[100])
})
test_that("search_pv can pull all fields by group for the locations endpoint", {
skip_on_cran()
skip_on_ci()
groups <- unique(fieldsdf[fieldsdf$endpoint == "locations", "group"])
z <- lapply(groups, function(x) {
Sys.sleep(3)
search_pv(
'{"patent_number":"5116621"}',
endpoint = "inventors",
fields = get_fields("inventors", x)
)
})
expect_true(TRUE)
})
test_that("search_pv properly encodes queries", {
skip_on_cran()
skip_on_ci()
result <- search_pv(
query = with_qfuns(
begins(assignee_organization = "Johnson & Johnson")
)
)
expect_true(TRUE)
}) |
expected <- eval(parse(text="c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE)"));
test(id=0, code={
argv <- eval(parse(text="list(c(-Inf, 2.17292368994844e-311, 4.34584737989688e-311, 8.69169475979376e-311, 1.73833895195875e-310, 3.4766779039175e-310, 6.953355807835e-310, 1.390671161567e-309, 2.781342323134e-309, 5.562684646268e-309, 1.1125369292536e-308, 2.2250738585072e-308, 4.4501477170144e-308, 8.90029543402881e-308, 1.78005908680576e-307, 2.2250738585072e-303, 2.2250738585072e-298, 1.79769313486232e+298, 1.79769313486232e+303, 2.24711641857789e+307, 4.49423283715579e+307, 8.98846567431158e+307, 1.79769313486232e+308, Inf, Inf, NaN, NA))"));
do.call(`is.na`, argv);
}, o=expected); |
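# Note (an addition): is.na() on a double vector is TRUE for both NA and NaN,
# which is why only the final NaN and NA entries of the expected vector are TRUE.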
hqa <- function(base, conteos=FALSE, units=NULL,
durat=FALSE, periodos=NULL, pesos = NULL,
ilustra=NULL, ilustc = NULL,ilust.type = NULL,
nfact=5, nfcl=5, k.clust=NULL, combinat=TRUE,
vector, tableclass=FALSE, clasifica=TRUE)
{
if(!is.matrix(base) & !is.data.frame(base)) {
stop("base is not a matrix nor a dataframe\n")
}
if(is.data.frame(base) & prod(dim(base))==0) {
stop("base is an empty data frame\n")
}
if(durat==TRUE)
{
datos <- duration(base, units)
base <- datos
}
if(conteos==TRUE)
{
conteos <- durationtotableA(base, periodos)
base <- data.frame(conteos$Conteo)
}
if(combinat==TRUE)
{
AAC <- combination(base, vector, ilustra, ilustc, ilust.type, pesos)
tabla <- AAC$global.pca$ind$coord
nval.propios <- nrow(AAC$eig)
val.propios <- AAC$eig[,1]
dif.valores <- c(rep(0, nval.propios-1))
    for (i in 1:(nval.propios-1))
    { dif.valores[i] <- abs(val.propios[i+1]-val.propios[i])}
nfcl <- which(dif.valores==max(dif.valores))
} else {
AAC <- fuzzy(base, vector, nfact, pesos)
nfcl <- dim(AAC$li)[2]
tabla <- AAC$li
}
if(clasifica==TRUE)
{
if(is.null(k.clust)) {
coo <- tabla[, 1:nfcl]
W <- dist(coo)^2/nrow(coo)/2
HW <- hclust(W,method="ward.D")
coord <- as.vector(HW$height)
dif.saltos <- c(rep(0, length(coord)-1))
      for (i in 1:(length(coord)-1)) dif.saltos[i] <- abs(coord[i+1]-coord[i])
k.clust <- which(dif.saltos==max(dif.saltos))
k.clust <- length(coord) - k.clust + 1
}
if(tableclass==TRUE)
{
tablaclases <- tableclass(tabla, nfcl, k.clust)
nfcl <- tablaclases[which(tablaclases[,2]== min(tablaclases[,2])), 1]
}
clasificar <- classification(tabla, k.clust, nfcl,pesos)
if (!is.null(ilustra)) Ilust <- cluster.carac(ilustra, clasificar$cluster, tipo.v="n") else Ilust <- NULL
Active <- cluster.carac(base, clasificar$cluster, tipo.v="co")
} else {
clasificar <- NULL
Active <- NULL
Ilust <- NULL
}
return(list(HQA=AAC, Clases=clasificar, Active = Active, Ilust = Ilust))
} |
opal.sql <- function(opal, query, project = NULL, id.name = '_id') {
if (is.na(opal$version) || opal.version_compare(opal,"4.1")<0) {
stop("SQL queries are not available for opal ", opal$version, " (4.1.0 or higher is required)")
}
if (is.null(project)) {
location <- paste0(c('datasources', '_rsql'), collapse = '/')
} else {
location <- paste0(c('datasource', project, '_rsql'), collapse = '/')
}
out <- tempfile()
r <- httr::POST(
.url(opal, location),
body = list(
query = query,
id = id.name
),
encode = "form",
write_disk(out, overwrite = TRUE), accept("application/x-rdata"),
config=opal$config, handle=opal$handle, .verbose()
)
if (r$status>=300) {
.handleError(opal, r)
}
res <- readRDS(out)
unlink(out)
res
}
opal.sql_history <- function(opal, project = NULL, offset = 0, limit = 100, user = NULL, df = TRUE) {
if (is.na(opal$version) || opal.version_compare(opal,"4.1")<0) {
stop("SQL queries are not available for opal ", opal$version, " (4.1.0 or higher is required)")
}
q <- list(offset = offset, limit = limit)
if (!is.null(project))
if (is.na(project))
q$datasource <- '*'
else
q$datasource <- project
subject <- "_current"
if (!is.null(user))
if (is.na(user))
subject <- '*'
else if (user != opal$username)
subject <- user
res <- opal.get(opal, "system", "subject-profile", subject, "sql-history", query = q)
if (df) {
if (length(res)>0) {
user <- replicate(length(res), opal$username)
query <- replicate(length(res), NA)
project <- replicate(length(res), NA)
error <- replicate(length(res), NA)
start <- replicate(length(res), NA)
end <- replicate(length(res), NA)
elapsed <- replicate(length(res), NA)
for (i in 1:length(res)) {
item <- res[[i]]
user[[i]] <- item$user
query[[i]] <- item$query
if (!is.null(item$datasource))
project[[i]] <- item$datasource
if (!is.null(item$error))
error[[i]] <- item$error
start[[i]] <- item$start/1000
end[[i]] <- item$end/1000
elapsed[[i]] <- item$end - item$start
}
data.frame(user, query, project, error, start = as.POSIXct(start, origin='1970-01-01'), end = as.POSIXct(end, origin='1970-01-01'), elapsed)
} else {
data.frame()
}
} else {
res
}
} |
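# Hypothetical usage sketch (an addition; `o` is an authenticated opal connection
# and the project/table names are placeholders):
#   tbl <- opal.sql(o, "SELECT * FROM CNSIM1 LIMIT 10", project = "CNSIM")
#   hist <- opal.sql_history(o, project = "CNSIM")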
getTeamBattingDetails <- function(team,dir=".",save=FALSE,odir="."){
overs=batsman=NULL
a <- paste(dir,"/","*",team,"*",sep="")
fl <- Sys.glob(a)
battingDetails <- NULL
for(i in 1:length(fl)){
tryCatch({
load(fl[i])
match <- overs
details <- teamBattingPerfDetails(match,team,includeInfo=TRUE)
if(!is.null(dim(details))){
battingDetails <- rbind(battingDetails,details)
}else {
next
}
}, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}
if(save==TRUE){
fl <-paste(odir,"/",team,"-BattingDetails.RData",sep="")
save(battingDetails,file=fl)
}
battingDetails <- arrange(battingDetails,batsman,date)
battingDetails
} |
link.im <- function(data,r,char=NULL,oneside=NULL,twoside=NULL,trace=NULL,...){
if(is.null(twoside))twoside=TRUE
if(is.null(oneside))oneside=TRUE
if(is.null(trace))trace = FALSE
RES <- linkim(data,r,char,oneside,twoside,trace)
return(RES)
}
linkim <-
function(data,r,char=NULL,oneside=NULL,twoside=NULL,trace=NULL,...){
if(is.null(twoside))twoside=TRUE
if(is.null(oneside))oneside=TRUE
if(is.null(trace))trace = FALSE
if(is.null(char)){
cn <- ncol(data)
rn <- nrow(data)
char <- NULL
for(i in 1:cn) char <- union(data[,i],char)
id.na = which(is.na(char))
if(length(id.na)==1)char <- sort(char[-id.na])
}
if(length(char)==2){
cn <- ncol(data)
rn <- nrow(data)
homo <- TRUE
if(!(setequal(c(0,1),char))) data <- matrix(as.numeric(greplace(data,char,c(0,1))),rn,cn)
}
if(length(char)==3){
cn <- ncol(data)
rn <- nrow(data)
homo <- FALSE
if(!(setequal(c(0,1,2),char))) data <- matrix(as.numeric(greplace(data,char,c(0,1,2))),rn,cn)
}
if(length(char)!=2 & length(char)!=3)stop("The marker data should be Homozygous(2 levels) or Heterozygous(3 levels)")
if(max(r)>0.5)distance <- TRUE
else distance <- FALSE
dataNew = data
if(twoside){
for(i in 1:nrow(data)){
if(trace)cat("individual",i,"\n")
id1 = which(is.na(data[i,]))
id2 = which(!is.na(data[i,]))
if(length(id1)>0) {
a = length(id1)
NoStop = T
while(a>0 & NoStop){
if(id1[1] < id2[1]){
id1 = id1[-1]
a = length(id1)
}
if(a>0) if(id1[1] > id2[1]) NoStop = F
}
}
if(length(id1)>0) {
a = length(id1)
b = length(id2)
NoStop = T
while(a>0 & NoStop){
if(id1[a] > id2[b]){
id1 = id1[-a]
a = length(id1)
}
if(a>0) if(id1[a] < id2[b]) NoStop = F
}
}
while(length(id1)>0){
Aid = id1[1]-1
id2del = which(id2 < id1[1])
id2 = id2[-id2del]
Bid = id2[1]
id1del = which(id1 < id2[1])
id1 = id1[-id1del]
for(j in (Aid+1):(Bid-1)){
vec = c(Aid,j,Bid)
if(distance){
r0 = r[vec]
r1 = r0[-1]-r0[-3]
r2 = 0.5*(1-exp(-2*0.01*r1))
}
if(!distance)r2=r[vec[-1]]
if(homo){
ComLoci = gcomb(data,index=vec,marker=c(0,1))
comb = numeric()
for(ii in 1:4)comb[ii] = ComLoci[ii,4]+ComLoci[9-ii,4]
id = which(comb==max(comb))
LociOrder1 = t(as.matrix(ComLoci[id,1:3]))
LociOrder2 = t(as.matrix(ComLoci[9-id,1:3]))
Freq = freqmat(r2[1],r2[2],twoside=TRUE,cross=TRUE)
ConFreq = Uu(Freq[2])
if(data[i,vec[1]]==LociOrder1[1]){
if(data[i,vec[3]]==LociOrder1[3])p=ConFreq[1,1]
if(data[i,vec[3]]!=LociOrder1[3])p=ConFreq[2,1]
}
if(data[i,vec[1]]!=LociOrder1[1]){
if(data[i,vec[3]]==LociOrder1[3])p=ConFreq[3,1]
if(data[i,vec[3]]!=LociOrder1[3])p=ConFreq[4,1]
}
u = runif(1)
if(u<=p)dataNew[i,vec[2]]=LociOrder1[2]
if(u>p)dataNew[i,vec[2]]=LociOrder2[2]
}
if(!homo){
ComLoci = gcomb(data,index=vec,marker=c(0,1,2))
comb = numeric()
g3 <- c(1,3,7,9)
for(ii in 1:4)comb[ii] = ComLoci[g3[ii],4]+ComLoci[28-g3[ii],4]
id = which(comb==max(comb))
LociOrder1 = t(as.matrix(ComLoci[g3[id],1:3]))
LociOrder3 = t(as.matrix(ComLoci[28-g3[id],1:3]))
LociOrder2 <- t(as.matrix(ComLoci[14,1:3]))
Freq = freqmat(r2[1],r2[2],twoside=TRUE,cross=TRUE,homo=homo)
ConFreq = Uu(Freq[2])
if(data[i,vec[1]]==LociOrder1[1]){
if(data[i,vec[3]]==LociOrder1[3]){
p1=ConFreq[1,1]
p2=ConFreq[1,2]
}
if(data[i,vec[3]]==LociOrder2[3]){
p1=ConFreq[2,1]
p2=ConFreq[2,2]
}
if(data[i,vec[3]]==LociOrder3[3]){
p1=ConFreq[3,1]
p2=ConFreq[3,2]
}
}
if(data[i,vec[1]]==LociOrder2[1]){
if(data[i,vec[3]]==LociOrder1[3]){
p1=ConFreq[4,1]
p2=ConFreq[4,2]
}
if(data[i,vec[3]]==LociOrder2[3]){
p1=ConFreq[5,1]
p2=ConFreq[5,2]
}
if(data[i,vec[3]]==LociOrder3[3]){
p1=ConFreq[6,1]
p2=ConFreq[6,2]
}
}
if(data[i,vec[1]]==LociOrder3[1]){
if(data[i,vec[3]]==LociOrder1[3]){
p1=ConFreq[7,1]
p2=ConFreq[7,2]
}
if(data[i,vec[3]]==LociOrder2[3]){
p1=ConFreq[8,1]
p2=ConFreq[8,2]
}
if(data[i,vec[3]]==LociOrder3[3]){
p1=ConFreq[9,1]
p2=ConFreq[9,2]
}
}
u = runif(1)
if(u<=p1)dataNew[i,vec[2]]=LociOrder1[2]
if(u>p1 & u<=(p1+p2))dataNew[i,vec[2]]=LociOrder2[2]
if(u>(p1+p2))dataNew[i,vec[2]]=LociOrder3[2]
}
}
}
}
data = NULL
}
if(oneside){
data = dataNew
for(i in 1:nrow(data)){
if(trace)cat("individual",i,"\n")
id1 = which(is.na(data[i,]))
id2 = which(!is.na(data[i,]))
if(length(id1)>0){
if(id2[1]>id1[1]){
for(j in 1:(id2[1]-1)){
vec=c(j,id2[1])
if(distance){
r0 = r[vec]
r2 = r0[2]-r0[1]
r2 = 0.5*(1-exp(-2*0.01*r2))
}
if(!distance)r2=r[vec[2]]
if(homo){
ComLoci = gcomb(data,index=vec,marker=c(0,1))
comb = numeric()
for(ii in 1:2)comb[ii] = ComLoci[ii,3]+ComLoci[5-ii,3]
id = which(comb==max(comb))
LociOrder1 = t(as.matrix(ComLoci[id,1:2]))
LociOrder2 = t(as.matrix(ComLoci[5-id,1:2]))
if(data[i,vec[2]]==LociOrder1[2]) p = 1-r2
if(data[i,vec[2]]!=LociOrder1[2]) p = r2
u = runif(1)
if(u<=p)dataNew[i,vec[1]]=LociOrder1[1]
if(u>p)dataNew[i,vec[1]]=LociOrder2[1]
}
if(!homo){
ComLoci = gcomb(data,index=vec,marker=c(0,1,2))
comb = numeric()
g3 <- c(1,3)
for(ii in 1:2)comb[ii] = ComLoci[g3[ii],3]+ComLoci[10-g3[ii],3]
id = which(comb==max(comb))
LociOrder1 = t(as.matrix(ComLoci[g3[id],1:2]))
LociOrder3 = t(as.matrix(ComLoci[10-g3[id],1:2]))
LociOrder2 = t(as.matrix(ComLoci[5,1:2]))
Freq = freqmat(r2,r2,oneside=TRUE,homo=homo)
ConFreq = Uu(Freq[2])
if(data[i,vec[2]]==LociOrder1[2]){
p1 = ConFreq[4,1]
p2 = ConFreq[4,2]
}
if(data[i,vec[2]]==LociOrder2[2]){
p1 = ConFreq[5,1]
p2 = ConFreq[5,2]
}
if(data[i,vec[2]]==LociOrder3[2]){
p1 = ConFreq[6,1]
p2 = ConFreq[6,2]
}
u = runif(1)
if(u<=p1)dataNew[i,vec[1]]=LociOrder1[1]
if(u>p1 & u<=(p1+p2))dataNew[i,vec[1]]=LociOrder2[1]
if(u>(p1+p2))dataNew[i,vec[1]]=LociOrder3[1]
}
}
id1del = which(id1 < id2[1])
id1 = id1[-id1del]
}
if(length(id1)>0){
a = length(id1)
for(j in id1[1]:id1[a]){
vec = c((id1[1]-1),j)
if(distance){
r0 = r[vec]
r1 = r0[2]-r0[1]
r1 = 0.5*(1-exp(-2*0.01*r1))
}
if(!distance)r1=r[vec[2]]
if(homo){
ComLoci = gcomb(data,index=vec,marker=c(0,1))
comb = numeric()
for(ii in 1:2)comb[ii] = ComLoci[ii,3]+ComLoci[5-ii,3]
id = which(comb==max(comb))
LociOrder1 = t(as.matrix(ComLoci[id,1:2]))
LociOrder2 = t(as.matrix(ComLoci[5-id,1:2]))
if(data[i,vec[1]]==LociOrder1[1]) p = 1-r1
if(data[i,vec[1]]!=LociOrder1[1]) p = r1
u = runif(1)
if(u<=p)dataNew[i,vec[2]]=LociOrder1[2]
if(u>p)dataNew[i,vec[2]]=LociOrder2[2]
}
if(!homo){
ComLoci = gcomb(data,index=vec,marker=c(0,1,2))
comb = numeric()
g3 <- c(1,3)
for(ii in 1:2)comb[ii] = ComLoci[g3[ii],3]+ComLoci[10-g3[ii],3]
id = which(comb==max(comb))
LociOrder1 = t(as.matrix(ComLoci[g3[id],1:2]))
LociOrder3 = t(as.matrix(ComLoci[10-g3[id],1:2]))
LociOrder2 = t(as.matrix(ComLoci[5,1:2]))
Freq = freqmat(r1,r1,oneside=TRUE,homo=homo)
ConFreq = Uu(Freq[2])
if(data[i,vec[1]]==LociOrder1[1]){
p1 = ConFreq[1,1]
p2 = ConFreq[1,2]
}
if(data[i,vec[1]]==LociOrder2[1]){
p1 = ConFreq[2,1]
p2 = ConFreq[2,2]
}
if(data[i,vec[1]]==LociOrder3[1]){
p1 = ConFreq[3,1]
p2 = ConFreq[3,2]
}
u = runif(1)
if(u<=p1)dataNew[i,vec[2]]=LociOrder1[2]
if(u>p1 & u<=(p1+p2))dataNew[i,vec[2]]=LociOrder2[2]
if(u>(p1+p2))dataNew[i,vec[2]]=LociOrder3[2]
}
}
}
}
}
data = NULL
}
if(length(char)==2){
if(!(setequal(c(0,1),char))){
cn <- ncol(dataNew)
rn <- nrow(dataNew)
      dataNew <- matrix(as.numeric(greplace(dataNew,c(0,1),char)),rn,cn)
}
}
if(length(char)==3){
if(!(setequal(c(0,1,2),char))){
cn <- ncol(dataNew)
rn <- nrow(dataNew)
dataNew <- matrix(as.numeric(greplace(dataNew,c(0,1,2),char)),rn,cn)
}
}
return(dataNew)
} |
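# Hypothetical usage sketch (an addition): `geno` is a marker matrix coded 0/1
# (homozygous populations) or 0/1/2 (with heterozygotes), with NA for missing
# calls, and `rmap` holds map positions in cM (max(r) > 0.5 is read as distances):
#   imputed <- link.im(geno, rmap)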
lambda_two_compare <- function(lmd, cnames, group_name = "Latino",
cand1or2 = 1) {
  if (cand1or2 == 2) {
    cnames <- rev(cnames)
  }
n <- nrow(lmd)
labels <- sub(".*\\.", "", cnames)
p_cand_over_cand <- lmd[, cnames[1]] - lmd[, cnames[2]]
graphics::par(mfrow = c(1, 2))
graphics::hist(lmd[, cnames[1]] - lmd[, cnames[2]],
main = paste(labels[1], " - ", labels[2], sep = ""),
xlab = "Posterior Distribution"
)
dens <- stats::density(p_cand_over_cand)
plot(dens,
main = paste("Difference in Proportion ", group_name,
"\nVote for ", labels[1], " and ", labels[2],
sep = ""
),
xlab = "Difference in Posterior Distribution Sampled from "
)
graphics::points(p_cand_over_cand, rep(0, length(p_cand_over_cand)), pch = 3)
graphics::abline(v = 0, col = "grey", lty = 2)
c1g10 <- length(which(p_cand_over_cand > .10)) / n
c1g5 <- length(which(p_cand_over_cand > .05)) / n
c1g0 <- length(which(p_cand_over_cand > 0)) / n
med <- stats::median(p_cand_over_cand)
mean <- mean(p_cand_over_cand)
df <- data.frame(c1g10, c1g5, c1g0, round(med, 3), round(mean, 3))
colnames(df) <- c(
"Prob>10%",
"Prob>5%",
"Prob>0",
"Dist. Median",
"Dist. Mean"
)
return(df)
} |
hl <- half_life(0.01)
test_that("half_life returns a single numeric value", {
expect_length(hl, 1L)
expect_equal(class(hl), "numeric")
})
test_that("half_life throw an error when lambda is not numeric & > 1", {
expect_error(half_life("1"))
expect_error(half_life(c(0.01, 0.01)))
}) |
instdir <- commandArgs()[6]
target_tarball <- file.path(instdir, "libglib.tar.gz")
target_dir <- file.path(instdir, "macos")
if (getRversion() < "3.3.0") setInternet2()
download.file(
"https://github.com/PolMine/libglib/archive/master.tar.gz",
target_tarball, method = "libcurl", quiet = FALSE
)
dir.create(target_dir, showWarnings = FALSE)
untar(target_tarball, exdir = target_dir)
unlink(target_tarball)
pkgconfig_file <- file.path(target_dir, "libglib-master/pkgconfig/glib-2.0.pc")
pc <- readLines(pkgconfig_file)
pc[1] <- sprintf("prefix=%s/macos/libglib-master", getwd())
pc[3] <- sprintf("libdir=${exec_prefix}/lib/%s", Sys.info()[["machine"]])
gettext_libdir <- "/usr/local/opt/gettext/lib"
if (!dir.exists(gettext_libdir)){
libs_line <- grep("^Libs:\\s+", pc)
pc[libs_line] <- gsub(sprintf("-L%s", gettext_libdir), "", pc[libs_line])
}
writeLines(text = pc, con = pkgconfig_file) |
test_that("use_cran_comments() requires a package", {
create_local_project()
expect_usethis_error(use_cran_comments(), "not an R package")
})
test_that("use_cran_comments() creates and ignores the promised file", {
create_local_package()
use_cran_comments()
expect_proj_file("cran-comments.md")
expect_true(is_build_ignored("^cran-comments\\.md$"))
}) |
"BRSoccer2014" |
RoundViaDummy_Version_0.3.0 <- function(data, freqVar, formula = NULL, roundBase = 3, singleRandom = FALSE,
crossTable=TRUE, total = "Total", maxIterRows = 1000, maxIter = 1E7,
x = NULL, hierarchies = NULL, Version = "tull", ...) {
cat("[")
flush.console()
if (is.null(x) & is.null(formula) & is.null(hierarchies)) {
freqVarName <- names(data[1, freqVar, drop = FALSE])
hierarchies <- FindHierarchies(data[, !(names(data) %in% freqVarName)])
}
if (!is.null(hierarchies) & !is.null(formula))
stop("formula combined with hierarchies is not implemented")
if (!is.null(hierarchies) & !is.null(x))
warning("hierarchies ignored when x is supplied")
if (!is.null(hierarchies) & is.null(x)) {
x <- Hierarchies2ModelMatrix(data = data, hierarchies = hierarchies, crossTable = crossTable, total = total, ...)
crossTable <- x$crossTable
x <- x$modelMatrix
}
if(!is.null(x) & !is.null(formula))
warning("formula ignored when x is supplied")
if(is.null(x)){
if(length(total)>1){
total <- total[1]
warning("Only first element of total is used when formula input.")
}
previous_na_action <- options('na.action')
options(na.action='na.pass')
cat("{O")
flush.console()
if(crossTable){
formulaSums <- FormulaSums(formula = as.formula(formula), data = data, crossTable=TRUE,total=total,dropResponse=TRUE)
x <- formulaSums$modelMatrix
crossTab <- formulaSums$crossTable
formulaSums <- NULL
}
else
x <- ModelMatrix_Old_Version(as.formula(formula), data = data, sparse = TRUE)
cat("}")
flush.console()
options(na.action=previous_na_action$na.action)
} else {
if(!is.logical(crossTable))
crossTab <- crossTable
crossTable <- TRUE
}
if(anyNA(x))
x[is.na(x)] =0
yInner <- data[, freqVar]
yPublish <- Matrix::crossprod(x, yInner)[, 1, drop = TRUE]
a <- PlsRoundSparse_Version_0.3.0(x = x, roundBase = roundBase, yInner = yInner, yPublish = yPublish, singleRandom = singleRandom,maxIter=maxIter, maxIterRows=maxIterRows)
cat("]\n")
flush.console()
if(crossTable)
return(list(yInner = IntegerCbind(original = yInner, rounded = a[[1]]),
yPublish = cbind(original = yPublish, rounded = a[[2]][, 1, drop = TRUE]),
crossTable = crossTab))
list(yInner = IntegerCbind(original = yInner, rounded = a[[1]]),
yPublish = cbind(original = yPublish, rounded = a[[2]][, 1, drop = TRUE]))
}
PlsRoundSparse_Version_0.3.0 <- function(x, roundBase = 3, yInner, yPublish = Matrix::crossprod(x, yInner)[, 1, drop = TRUE],
singleRandom = FALSE, maxIter = 1E6, maxIterRows = 1000) {
yInnerExact <- yInner
yPublishExact <- yPublish
i = 0
while (i<maxIter) {
i = i+1
if (i == 1)
a <- PlsRoundSparseSingle_Version_0.3.0(x = x, roundBase = roundBase, yInner = yInner, yPublish = yPublish,
singleRandom = singleRandom, yInnerExact = yInnerExact, yPublishExact = yPublishExact, maxIterRows=maxIterRows)
else
a <- PlsRoundSparseSingle_Version_0.3.0(x = x, roundBase = roundBase, yInner = a[[1]], yPublish = a[[2]][, 1, drop = TRUE],
singleRandom = singleRandom, yInnerExact = yInnerExact, yPublishExact = yPublishExact, maxIterRows=maxIterRows)
suppRoundPublish <- a[[2]] < roundBase & a[[2]] > 0
if (!any(suppRoundPublish))
return(a)
}
stop("Iteration limit exceeded")
}
PlsRoundSparseSingle_Version_0.3.0 <- function(x,roundBase=3, yInner, yPublish = Matrix::crossprod(x,yInner)[,1,drop=TRUE],
singleRandom = FALSE,
suppPublish = yPublish < roundBase & yPublish > 0,
yInnerExact = yInner,
yPublishExact = yPublish,
maxIterRows = 1000) {
Pls1RoundHere <- get0("Pls1RoundFromUser", ifnotfound = Pls1Round_Version_0.3.0)
roundBase = as.integer(roundBase)
suppInput <- yInner < roundBase & yInner > 0
supRows <- Matrix::rowSums(x[, suppPublish, drop = FALSE]) > 0 & suppInput
printInc <- TRUE
if(!singleRandom)
if(sum(supRows)>maxIterRows){
randInd = sample.int(sum(supRows),maxIterRows)
supInds = which(supRows)
supRows[supRows] = FALSE
supRows[supInds[randInd]] = TRUE
printInc <- FALSE
{cat("
}
bSupA <- x[supRows, , drop = FALSE]
ySupp <- yInner[supRows]
colSumsbSupA <- Matrix::colSums(bSupA)
cols2 <- (colSumsbSupA > 0) & ((NROW(bSupA)-colSumsbSupA) > 0)
bSup <- bSupA[, cols2, drop = FALSE]
yPublishCorrection <- yPublishExact[cols2] - yPublish[cols2]
yPls <- t(as.matrix(Matrix::crossprod(bSup, Matrix(ySupp, ncol = 1))))
correction <- TRUE
if (correction) {
yPls <- yPls + yPublishCorrection
nR <- round((sum(ySupp) + sum(yInnerExact) - sum(yInner))/roundBase)
} else nR <- round(sum(ySupp)/roundBase)
if(length(yPls)==0)
singleRandom = TRUE
if (nR == 0 | singleRandom) {
yR <- ySupp * 0L
if (singleRandom)
yR[sample.int(length(ySupp), nR)] <- roundBase
} else yR <- Pls1RoundHere(bSup, ySupp, roundBase = roundBase, yPls = yPls, nR = nR, printInc=printInc)
roundInner <- yInner
roundInner[supRows] <- yR
roundPublish <- yPublish + Matrix::crossprod(bSupA, yR - ySupp)
list(roundInner = roundInner, roundPublish = roundPublish)
}
Pls1Round_Version_0.3.0 <- function(x, y, roundBase = 3L, removeOneCols = FALSE, printInc = TRUE, yPls = NULL, nR = NULL, random = TRUE,dgT=TRUE, wD=TRUE) {
if(printInc) {cat("-"); flush.console()}
if (is.matrix(x))
x <- Matrix(x)
if (removeOneCols)
x <- x[, (colSums(x) > 1), drop = FALSE]
if (is.null(yPls))
yPls <- t(as.matrix(Matrix::crossprod(x, Matrix(y, ncol = 1))))
yR <- rep(0L, length(y))
if (random)
ind <- as.list(sample.int(length(y)))
else ind <- as.list(seq_len(length(y)))
indInv = vector("list",0)
if (is.null(nR))
nR <- round(sum(y)/roundBase)
if(nR==0)
return(yR)
if(nR==length(y))
return(rep(roundBase , length(y)))
if(printInc) {cat("*"); flush.console()}
if(dgT){
dgTBase <- as(roundBase * Matrix::tcrossprod(x),"dgTMatrix")
dgTi <- dgTBase@i +1L
dgTj <- dgTBase@j +1L
dgTx <- dgTBase@x
rm(dgTBase)
if(wD){
dd = diff(dgTj)
if(length(dd)>0){
if(max(dd)>1 | min(dd)<0){
warning("Not required sorting in dgTMatrix. Manual sorting will be done.")
ord <- order(dgTj)
dgTj <- dgTj[ord]
dgTi <- dgTi[ord]
dgTx <- dgTx[ord]
dd = diff(dgTj)
}
wd <- c(1L,1L+which(dd==1L),length(dgTj)+1L)
} else
wd <- c(1L,2L)
GetInd <- function(i,x){matlabColon(x[i],x[i+1L]-1L)}
}
}
else
roundBasecrossprod <- as.matrix(roundBase * Matrix::tcrossprod(x))
if(printInc) {cat("*"); flush.console()}
for (i in 1:nR) {
if (printInc)
if (i%%max(1, round(nR/10)) == 0) {
cat(".")
flush.console()
}
if (i > 1){
ii = GetInd(ik,wd)
ix = dgTi[ii]
coe[ix] <- coe[ix] - dgTx[ii]
}
else
coe <- Matrix::tcrossprod(x, yPls)
k <- which.max(coe[as.integer(ind)])
ik <- ind[[k]]
yR[ik] <- roundBase
indInv <- c(ind[k],indInv)
ind[k] <- NULL
}
absminmaxA = Inf
absminmaxB = Inf
for (i in 1:(nR+100)) {
if (printInc)
if (i%%max(1, round(nR/10)) == 0) {
cat(":")
flush.console()
}
ii = GetInd(ik,wd)
ix = dgTi[ii]
coe[ix] <- coe[ix] - dgTx[ii]
k <- which.min(coe[as.integer(indInv)])
if(k==1){
if(printInc) {cat("="); flush.console()}
return(yR)
}
ik <- indInv[[k]]
yR[ik] <- 0
ind <- c(indInv[k],ind)
indInv[k] <- NULL
ii = GetInd(ik,wd)
ix = dgTi[ii]
coe[ix] <- coe[ix] + dgTx[ii]
k <- which.max(coe[as.integer(ind)])
ik <- ind[[k]]
yR[ik] <- roundBase
indInv <- c(ind[k],indInv)
ind[k] <- NULL
}
if(printInc) {cat("="); flush.console()}
yR
}
ModelMatrix_Old_Version <- function(formula, data = NULL, mf = model.frame(formula, data = data), allFactor = TRUE, sparse = FALSE,
formulaSums=FALSE, printInc = FALSE) {
if(formulaSums)
return(formula = FormulaSums(formula, data = data,
makeNames=TRUE, crossTable=FALSE, total = "Total", printInc=printInc,
dropResponse = TRUE))
for (i in 1:length(mf)) {
if (allFactor)
mf[[i]] <- as.factor(mf[[i]])
if (is.factor(mf[[i]]))
mf[[i]] <- AddEmptyLevel(mf[[i]])
}
if (sparse)
return(sparse.model.matrix(formula, data = mf))
model.matrix(formula, data = mf)
}
AddEmptyLevel <- function(x) factor(x, levels = c("tullnull", levels(x))) |
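# Illustrative note (an addition): AddEmptyLevel() prepends an unused "tullnull"
# level, which becomes the reference level in model.matrix(), so every observed
# level receives its own dummy column, e.g.
#   levels(AddEmptyLevel(factor(c("a", "b"))))   # "tullnull" "a" "b"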
elementwise <- function(x, fun, ...) {
chunks <- chunk(x)
result <- NULL
for (c in chunks) {
d <- as_rvec(lget(x, range = c))
r <- fun(d, ...)
r <- as_lvec(r)
if (is.null(result)) {
result <- r
length(result) <- length(x)
} else {
if (lvec_type(r) == 'character' && strlen(r) > strlen(result)) {
warning("Changing maximum string length")
strlen(result) <- strlen(r)
}
lset(result, range = c, r)
}
}
result
} |
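# Hypothetical usage sketch (an addition; chunk(), lget(), lset(), as_rvec() and
# as_lvec() are assumed to come from the lvec package):
#   v <- as_lvec(1:10)
#   doubled <- elementwise(v, function(d) d * 2)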
plot.pocrepath<-function(x, which=1:3, cex=.5, lwd=1, ...)
{
if( any(which==1) )
plotbetanzbeta(ppojb=x, cex=.5, lwd=1, ...)
if( any(which==2) )
plotbetarsq(ppojb=x, cex=.5, lwd=1, ...)
if( any(which==3) )
plotrsqnzbeta(ppojb=x, cex=.5, lwd=1, ...)
}
plotbetanzbeta<-function(ppojb, cex=.5, lwd=1, ...)
{
eps <- .Machine$double.eps
nlambda <- length(ppojb)
temp <- NULL
for(i in 1:nlambda){
temp <- c(temp,ppojb[[i]]$lambda)
}
lambda <- temp
p <- dim(ppojb[[1]]$beta)[1]
ny <- dim(ppojb[[1]]$beta)[2]
nzbeta <-NULL
plot <- ifelse(ny==1,1,2)
pl<-list()
ip<-list()
betanzbeta<-function(k){
beta<-matrix(0,p,nlambda)
for(j in 1:nlambda){
beta[,j] <- ppojb[[j]]$beta[,k]
nzbeta[j] <- sum(abs(ppojb[[j]]$beta[,k])>eps)
}
beta_C <- matrix(t(beta),dim(beta)[1]*dim(beta)[2],1)
lambda_C <- rep(lambda,dim(beta)[1])
group_C <- seq(1,dim(beta)[1])
group_C <- rep(group_C,each=dim(beta)[2])
group_C <- as.character(group_C)
plotdata <- data.frame(lambda_C,beta_C,group_C)
pl<-ggplot(...) +
geom_line(aes(x=lambda_C,y=beta_C,group=group_C),linetype = "dashed",color=group_C,size=lwd) +
geom_point(aes(x=lambda_C,y=beta_C,group=group_C),color=group_C,shape=4,size=cex) +
      geom_line(aes(x=lambda,y=nzbeta/(max(nzbeta)/max(beta_C)),color='#Nonzero'),size=lwd) +
      geom_point(aes(x=lambda,y=nzbeta/(max(nzbeta)/max(beta_C)),color='#Nonzero'),size=cex) +
      labs(y="beta",x="lambda",title=paste('Y_',k,sep="")) +
      scale_y_continuous(sec.axis=sec_axis(~.*max(nzbeta)/max(beta_C),name="#Nonzero")) +
scale_color_discrete(name=NULL)+
theme(legend.justification=c(0,0),legend.position=c(0,0),plot.title = element_text(hjust=0.5))
return(pl)
}
pp<-lapply(1:ny,betanzbeta)
for(i in 1:ny) print(pp[[i]])
}
plotbetarsq<-function(ppojb, cex=.5, lwd=1, ...)
{
eps <- .Machine$double.eps
nlambda <- length(ppojb)
temp <- NULL
for(i in 1:nlambda){
temp <- c(temp,ppojb[[i]]$lambda)
}
lambda <- temp
p <- dim(ppojb[[1]]$beta)[1]
ny <- dim(ppojb[[1]]$beta)[2]
rsq <- NULL
for(i in 1:nlambda){
rsq <- cbind(rsq,ppojb[[i]]$rsq)
}
rsq <- t(matrix(rsq,ny,nlambda))
plot <- ifelse(ny==1,1,2)
betarsq<-function(k){
beta <- matrix(0,p,nlambda)
for(j in 1:nlambda){
beta[,j] <- ppojb[[j]]$beta[,k]
}
temprsq <- (rsq[,k])
beta_C <- matrix(t(beta),dim(beta)[1]*dim(beta)[2],1)
lambda_C <- rep(lambda,dim(beta)[1])
group_C <- seq(1,dim(beta)[1])
group_C <- rep(group_C,each=dim(beta)[2])
group_C <- as.character(group_C)
plotdata <- data.frame(lambda_C,beta_C,group_C)
pl<-ggplot(...) +
geom_line(aes(x=lambda_C,y=beta_C,group=group_C),linetype = "dashed",color=group_C,size=lwd) +
geom_point(aes(x=lambda_C,y=beta_C,group=group_C),color=group_C,shape=4,size=cex) +
geom_line(aes(x=lambda,y=temprsq/(max(temprsq)/max(beta_C)),color='R^2'),size=lwd) +
geom_point(aes(x=lambda,y=temprsq/(max(temprsq)/max(beta_C)),color='R^2'),size=cex) +
labs(y = "beta", x = "lambda",title=paste('Y_', k,sep=""))+
scale_y_continuous(sec.axis = sec_axis(~.*max(temprsq)/max(beta_C),name="R^2"))+
scale_color_discrete(name=NULL)+
theme(legend.justification=c(0,0),legend.position=c(0,0),plot.title = element_text(hjust=0.5))
return(pl)
}
pp<-lapply(1:ny,betarsq)
for(i in 1:ny) print(pp[[i]])
}
plotrsqnzbeta<-function(ppojb, cex=.5, lwd=1, ...)
{
eps <- .Machine$double.eps
nlambda <- length(ppojb)
temp <- NULL
for(i in 1:nlambda){
temp <- c(temp,ppojb[[i]]$lambda)
}
lambda <- temp
p <- dim(ppojb[[1]]$beta)[1]
ny <- dim(ppojb[[1]]$beta)[2]
nzbeta <- NULL
plot <- ifelse(ny==1,1,2)
rsq <- NULL
for(i in 1:nlambda){
rsq <- cbind(rsq,ppojb[[i]]$rsq)
}
rsq <- t(matrix(rsq,ny,nlambda))
rsqnzbeta<-function(k){
beta <- matrix(0,p,nlambda)
for(j in 1:nlambda){
nzbeta[j] <- sum(abs(ppojb[[j]]$beta[,k])>eps)
}
temprsq <-rsq[,k]
pl<-ggplot(...) +
geom_line(aes(x=lambda,y=temprsq,color='R^2'),size=lwd) +
geom_point(aes(x=lambda,y=temprsq,color='R^2'),size=cex) +
labs(title=paste('Y_',k,sep=""),y="R^2",x="lambda") +
      geom_line(aes(x=lambda,y=nzbeta*max(temprsq)/max(nzbeta),color='#Nonzero'),size=lwd) +
      geom_point(aes(x=lambda,y=nzbeta*max(temprsq)/max(nzbeta),color='#Nonzero'),size=cex) +
      scale_y_continuous(sec.axis = sec_axis(~.*max(nzbeta)/max(temprsq),name="#Nonzero")) +
scale_color_discrete(name=NULL)+
theme(legend.justification=c(0,0),legend.position=c(0,0),plot.title=element_text(hjust=0.5))
return(pl)
}
pp<-lapply(1:ny,rsqnzbeta)
for(i in 1:ny) print(pp[[i]])
} |
context("gglegend")
expect_print <- function(p, ...) {
testthat::expect_silent(print(p))
}
test_that("examples", {
library(ggplot2)
histPlot <- ggplot(diamonds, aes(price, fill = cut)) +
geom_histogram(binwidth = 500)
(right <- histPlot)
(bottom <- histPlot + theme(legend.position = "bottom"))
(top <- histPlot + theme(legend.position = "top"))
(left <- histPlot + theme(legend.position = "left"))
expect_legend <- function(p) {
plotLegend <- grab_legend(p)
expect_true(inherits(plotLegend, "gtable"))
expect_true(inherits(plotLegend, "gTree"))
expect_true(inherits(plotLegend, "grob"))
expect_print(plotLegend)
}
expect_legend(right)
expect_legend(bottom)
expect_legend(top)
expect_legend(left)
})
test_that("legend", {
expect_print(
ggally_points(iris, ggplot2::aes(Sepal.Length, Sepal.Width, color = Species))
)
points_legend <- gglegend(ggally_points)
expect_print(points_legend(
iris, ggplot2::aes(Sepal.Length, Sepal.Width, color = Species)
))
same_points_legend <- gglegend("points")
expect_identical(
attr(attr(points_legend, "fn"), "original_fn"),
attr(attr(same_points_legend, "fn"), "original_fn")
)
custom_legend <- wrap(gglegend("points"), size = 6)
p <- custom_legend(
iris, ggplot2::aes(Sepal.Length, Sepal.Width, color = Species)
)
expect_print(p)
expect_true(inherits(p, "gtable"))
expect_true(inherits(p, "gTree"))
expect_true(inherits(p, "grob"))
expect_silent({
pm <- ggpairs(
iris, 1:2,
mapping = ggplot2::aes(color = Species),
upper = list(continuous = gglegend("points"))
)
print(pm)
})
expect_silent({
pm <- ggpairs(
iris, 1:2,
mapping = ggplot2::aes(color = Species)
)
pm[1, 2] <- points_legend(iris, ggplot2::aes(Sepal.Width, Sepal.Length, color = Species))
print(pm)
})
})
test_that("plotNew", {
points_legend <- gglegend(ggally_points)
expect_print(points_legend(
iris, ggplot2::aes(Sepal.Length, Sepal.Width, color = Species)
))
expect_print(points_legend(
iris, ggplot2::aes(Sepal.Length, Sepal.Width, color = Species)
), plotNew = TRUE)
}) |
CART_R <- function(train, test, maxDepth=90){
alg <- RKEEL::R6_CART_R$new()
alg$setParameters(train, test, maxDepth)
return (alg)
}
R6_CART_R <- R6::R6Class("R6_CART_R",
inherit = RegressionAlgorithm,
public = list(
maxDepth = 90,
setParameters = function(train, test, maxDepth=90){
super$setParameters(train, test)
self$maxDepth <- maxDepth
}
),
private = list(
jarName = "Regr-CART.jar",
algorithmName = "CART-R",
algorithmString = "Regr-CART",
getParametersText = function(){
text <- ""
text <- paste0(text, "maxDepth = ", self$maxDepth, "\n")
return(text)
}
)
) |
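# Hypothetical usage sketch (an addition; assumes `train` and `test` hold
# KEEL-format regression datasets already loaded via RKEEL):
#   alg <- CART_R(train, test, maxDepth = 10)
#   alg$run()   # run() is inherited from the RKEEL algorithm base class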
CvM_test <- function(x, nullname, ...) UseMethod('CvM_test')
CvM_test.fmx_QLMDe <- function(x, nullname = deparse1(substitute(x)), ...) {
cvm.test(x = x@data, null = function(q) pfmx(q, dist = x), nullname = nullname, ...)
}
CvM_test.fitdist <- function(x, nullname = deparse1(substitute(x)), ...) {
if (!length(x$data)) stop('Re-run ?fitdistrplus::fitdist with option `keepdata = TRUE`')
cvm.test(x = x$data, null = function(q) do.call(paste0('p', x$distname), args = c(list(q = q), as.list.default(x$estimate))), nullname = nullname, ...)
} |
plot.copyright <- function(){
this.year <- strftime(Sys.time(), format = "%Y")
copyr.message <- paste("Copyright \u00A9", this.year,
"by Wegener Center")
mtext(copyr.message, side=1, outer = TRUE, adj = 1, cex = 0.5)
invisible()
} |
dr.pds <- function(TS, Qdr=0.2, WinSize=30) {
doy <- as.factor(TS$doy)
temp <- TS
myVarThresh <- mqt(TS, Qdr, WinSize)
for (i in 1:length(temp$Flow)) {
temp$Thresh[i] <- myVarThresh[doy[i]]
if (temp$Flow[i] < myVarThresh[doy[i]]) {
temp$BelowThresh[i] <- TRUE}
else {temp$BelowThresh[i]<-FALSE}
}
return(temp)
} |
isStrictlyNegativeNumberOrNaOrInfVector <- function(argument, default = NULL, stopIfNot = FALSE, n = NA, message = NULL, argumentName = NULL) {
  checkarg(argument, "N", default = default, stopIfNot = stopIfNot,
           nullAllowed = FALSE, n = NA, zeroAllowed = FALSE,
           negativeAllowed = TRUE, positiveAllowed = FALSE,
           nonIntegerAllowed = TRUE, naAllowed = TRUE, nanAllowed = FALSE,
           infAllowed = TRUE, message = message, argumentName = argumentName)
} |
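# Hedged usage sketch (not from the original source): checkarg()'s exact return
# behaviour is inferred from its argument names, not verified here.
# isStrictlyNegativeNumberOrNaOrInfVector(c(-1.5, NA, -Inf))          # accepted
# isStrictlyNegativeNumberOrNaOrInfVector(c(-1, 2), stopIfNot = TRUE) # should error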
UCestim = function(sys){
sys$table = NA
sys$hidden$constPar = NA
rubbish = c(sys$hidden$d_t, sys$hidden$innVariance, sys$hidden$objFunValue, TRUE,
sys$outlier, sys$arma, sys$iter, sys$hidden$seas)
rubbish2 = cbind(sys$grad, sys$hidden$constPar, sys$hidden$typePar)
rubbish3 = cbind(sys$hidden$ns, sys$hidden$nPar)
if (is.ts(sys$u)){
u = as.numeric(sys$u)
} else {
u = sys$u
}
nu = dim(u)[2]
kInitial = dim(u)[1]
if (nu == 2){
nu = length(sys$y) + sys$h
kInitial = 0
}
if (is.ts(sys$y)){
output = UCompC("estimate", as.numeric(sys$y), u, sys$model, sys$periods, sys$rhos,
sys$h, sys$tTest, sys$criterion, sys$hidden$truePar, rubbish2, rubbish, sys$verbose,
sys$stepwise, sys$hidden$estimOk, sys$p0, sys$v, sys$yFitV,
sys$hidden$nonStationaryTerms, rubbish3, sys$hidden$harmonics,
as.vector(sys$criteria), sys$hidden$cycleLimits,
cbind(sys$hidden$beta, sys$hidden$betaV), sys$hidden$typeOutliers)
fY = frequency(sys$y)
sY = start(sys$y, frequency = fY)
aux = ts(matrix(NA, length(sys$y) + 1, 1), sY, frequency = fY)
        if (length(output$yFor) > 0){
sys$yFor = ts(output$yFor, end(aux), frequency = fY)
sys$yForV = ts(output$yForV, end(aux), frequency = fY)
}
} else {
output = UCompC("estimate", sys$y, u, sys$model, sys$periods, sys$rhos,
sys$h, sys$tTest, sys$criterion, sys$hidden$truePar, rubbish2, rubbish, sys$verbose,
sys$stepwise, sys$hidden$estimOk, sys$p0, sys$v, sys$yFitV,
sys$hidden$nonStationaryTerms, rubbish3, sys$hidden$harmonics,
as.vector(sys$criteria), sys$hidden$cycleLimits,
cbind(sys$hidden$beta, sys$hidden$betaV), sys$hidden$typeOutliers)
        if (length(output$yFor) > 0){
sys$yFor = output$yFor
sys$yForV = output$yForV
}
}
sys$hidden$truePar = output$p[, 1]
sys$p0 = output$p0
if (grepl("?", sys$model, fixed = TRUE)){
sys$model = output$model
}
n = length(sys$hidden$truePar)
rubbish2 = matrix(output$rubbish2, n, 3)
sys$grad = rubbish2[, 1]
sys$hidden$constPar = rubbish2[, 2]
sys$hidden$typePar = rubbish2[, 3]
sys$hidden$cycleLimits = matrix(output$cycleLimits,
length(output$cycleLimits) / 2, 2)
sys$hidden$d_t = output$rubbish[1]
sys$hidden$innVariance = output$rubbish[2]
sys$hidden$objFunValue = output$rubbish[3]
sys$iter = output$rubbish[6]
betas = matrix(output$betas, length(output$betas) / 2, 2)
sys$hidden$beta = betas[, 1]
sys$hidden$betaV = betas[, 2]
sys$periods = output$periods
sys$rhos = output$rhos
sys$hidden$estimOk = output$estimOk
sys$hidden$nonStationaryTerms = output$nonStationaryTerms
rubbish3 = matrix(output$rubbish3, 6, 2)
sys$hidden$ns = rubbish3[, 1]
sys$hidden$nPar = rubbish3[, 2]
sys$hidden$harmonics = output$harmonics
    criteria = output$criteria
sys$criteria = matrix(criteria, 1, 4)
colnames(sys$criteria) = c("LLIK", "AIC", "BIC", "AICc")
sys$u = output$u
if (!is.na(sys$outlier)){
k = length(output$u) / nu
nOut = k - kInitial
if (nOut > 0){
sys$u = matrix(output$u, k, nu)
sys$hidden$typeOutliers = output$typeOutliers
}
}
return(sys)
}
|
extract.cluster <- function (x, align) {
if (!inherits(x, "kmean"))
stop("object of class 'kmean' expected")
if (!inherits(align, "align"))
stop("object of class 'align' expected")
nb.clus <- length(x$clusters)
res <- list ()
for(i in 1:nb.clus) {
name <- names(x$clusters[[i]])
res[[i]] <- align[names(align)%in%name]
class (res[[i]]) <- c("align")
}
  names(res) <- paste0("cluster", seq_len(nb.clus))
return(res)
} |
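# Hedged usage sketch (not from the original source): `km` and `aln` stand for
# objects of this package's 'kmean' and 'align' classes, produced elsewhere;
# both names are placeholders.
# clusters <- extract.cluster(km, aln)
# names(clusters)        # "cluster1", "cluster2", ...
# length(clusters[[1]])  # sequences assigned to the first cluster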
.onLoad <-
function (libname, pkgname)
{
    utils::data("cov.expr", "cov.M", "cov.protein", "CpG.gene.map.for.DEG",
                "mean.expr", "mean.expr.with.mapped.protein", "mean.M",
                "mean.protein", "methyl.gene.level.mean",
                "protein.gene.map.for.DEP", "rho.expr.protein",
                "rho.methyl.expr",
                package = pkgname, envir = parent.env(environment()))
utils::globalVariables(c("par", "rbinom"))
} |
data{
n <- length(survived)
}
model{
for (i in 1:n) {
survived[i] ~ dbern(p[i])
p[i] <- ilogit(mu[i])
mu[i] <- b0 + b1 * BI[i]
}
b0 <- 0.15
b1 ~ dnorm(0, 1e-04)
} |
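# Hedged usage sketch (not from the original source): one way to run the JAGS
# model above from R with rjags; `dat` and its columns are placeholders.
# library(rjags)
# m <- jags.model("model.jags",
#                 data = list(survived = dat$survived, BI = dat$BI),
#                 n.chains = 3)
# samples <- coda.samples(m, variable.names = "b1", n.iter = 5000)
# summary(samples)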
context("Checking Outputs")
test_that("checking value",{
expect_identical(round(pKumBin(1,8,1.05,1.04),4),
0.2149)
})
test_that("checking class",{
  expect_true(is.numeric(pBetaBin(1,8,1.05,1.04)))
})
test_that("checking length of output",{
expect_equal(length(pBetaBin(1:2,8,1.05,1.04)),2)
}) |
.emaIndicator <-
function(series, lambda)
{
x <- rep(mean(series[1:10,]), times=nrow(series))
for (i in 2:nrow(series))
x[i] <- (1-lambda)*series[i] + lambda*x[i-1]
x <- as.timeSeries(data=x, charvec=time(series), units=colnames(series))
x
}
.macdIndicator <-
function(index, spar=0.5, lambda=c(0.80, 0.85, 0.90),
trace = TRUE, doplot=TRUE)
{
rets <- returns(index)
Index <- log(index)[-1, ]
tps <- turnsAnalytics(index=index, spar=spar,
main = "MACD Analytics", trace=TRUE, doplot=FALSE)
ablines <- tps$ablines
ema1 <- .emaIndicator(Index, lambda=lambda[1])
ema2 <- .emaIndicator(Index, lambda=lambda[2])
macd <- ema1 - ema2
signal <- .emaIndicator(macd, lambda[3])
histogram <- macd - signal
indicator <- sign(histogram)
rebalancing <- .rebalancingStats(index, indicator, trace=trace)
if(doplot) {
turnsAnalytics(index=index, spar=spar,
main="MACD Index Indicator",
trace=FALSE, doplot=doplot)
tradePositions <- as.vector(indicator)
tradeForecasts <- c(0, tradePositions[-length(tradePositions)])
outSample <- Index[1] + log(cumulated(rets*tradeForecasts))
Ups <- Index[as.vector(indicator) == 1, ]
if(nrow(Ups) > 0) points(Ups, pch=19, cex=0.33, col="green")
Downs <- Index[as.vector(indicator) == 0, ]
if(nrow(Downs) > 0) points(Downs, pch=19, cex=0.33, col="blue")
lines(outSample, col="magenta")
box(col="white")
box(bty="l")
}
if(doplot) {
plot(macd, col="green", ylab=paste("MACD", colnames(index)))
abline(v=ablines, lty=3, lwd=2, col="grey")
lines(histogram, type="h", col="black")
lines(macd, col="red")
mtext(paste("lambda: ", lambda[1], lambda[2], lambda[2], sep=" "),
adj=0, side=4, cex=0.7, col="darkgrey")
box(col="white")
box(bty="l")
}
invisible(list(index=index, macd=macd, histogram=histogram,
rebalancing=rebalancing))
}
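# Hedged usage sketch (not from the original source): `INDEX` stands for any
# 'timeSeries' price index; turnsAnalytics(), returns() and cumulated() are
# assumed to come from the surrounding Rmetrics packages.
# res <- .macdIndicator(INDEX, lambda = c(0.80, 0.85, 0.90), doplot = FALSE)
# res$rebalancing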
.drawdownsIndicator <-
function(index, spar=0.5, lambda=c(0.80, 0.85, 0.10),
trace=TRUE, doplot=TRUE)
{
rets <- returns(index)
Index <- log(index)[-1, ]
tps <- turnsAnalytics(index=index, spar=spar,
main = "MACD Drawdown Analytics", trace=TRUE, doplot=FALSE)
ablines <- tps$ablines
dd <- drawdowns(rets)
mdd1 <- .emaIndicator(dd, lambda[1])
mdd2 <- .emaIndicator(dd, lambda[2])
macd <- mdd1 - mdd2
signal <- .emaIndicator(macd, lambda[3])
histogram <- macd - signal
indicator <- rets
series(indicator) <- 1-sign(as.integer(macd < 0 & histogram < 0 ))
rebalancing <- .rebalancingStats(index, indicator, trace=trace)
if(doplot) {
tps <- turnsAnalytics(index=index, spar=spar,
main="MACD Drawdown Indicator", trace=FALSE, doplot=doplot)
tradePositions <- as.vector(indicator)
tradeForecasts <- c(0, tradePositions[-length(tradePositions)])
outSample <- Index[1] + log(cumulated(rets*tradeForecasts))
Ups <- Index[as.vector(indicator) == 1,]
if(nrow(Ups) > 0) points(Ups, pch=19, cex=0.33, col="green")
Downs <- Index[as.vector(indicator) == 0, ]
if(nrow(Downs) > 0) points(Downs, pch=19, cex=0.33, col="blue")
lines(outSample, col="magenta")
box(col="white")
box(bty="l")
}
if(doplot) {
plot(mdd1, ylim=c(min(dd), max(macd)), ylab=colnames(index))
positions <- tps$positions
ablines <- tps$ablines
abline(v=ablines, lty=1, lwd=2, col="lightgrey")
Time <- time(indicator)
Time <- Time[!as.logical(indicator)]
abline(v=Time, lty=3, lwd=2, col="steelblue")
lines(mdd1, col="black")
lines(mdd2, col="red")
lines(max(abs(macd))*histogram/max(abs(histogram)), type="h", col="orange")
lines(max(abs(macd))*histogram/max(abs(histogram)), type="l", col="orange")
abline(h=0, col="grey")
mtext(paste("lambda: ", lambda[1], lambda[2], lambda[2], sep=" "),
adj=0, side=4, cex=0.7, col="darkgrey")
box(col="white")
box(bty="l")
}
invisible(list(indicator=indicator, index=index, returns=rets,
drawdowns=dd, macd=macd, signal=signal, histogram=histogram,
rebalancing=rebalancing))
}
.rebalancingStats <-
function(index, indicator, trace=TRUE)
{
rets <- returns(index)
tradePositions <- as.vector(indicator)
tradeForecasts <- c(0, tradePositions[-length(tradePositions)])
rebalancing <- c(
max=sum(abs(rets)),
insample=sum(rets*tradePositions),
forecasts=sum(rets*tradeForecasts),
rets=sum(rets))
if (trace) {
cat("Rebalancing:\n")
print(rebalancing)
}
invisible(rebalancing)
} |
disappointmentRate <-
function(d, x, y, verbose = TRUE, ...) {
    if (inherits(d, "mosg.lossdistribution")) {
expectation <- moment(ld = d, k = 1)
return(1 - cdf(ld = d, x = expectation));
}
    else if (inherits(d, "matrix")) {
n <- nrow(d)
m <- ncol(d)
if (missing(x) && !missing(y)) {
if (verbose) { message("looking for equilibrium for given 2nd player strategy") }
x <- rep(0, times = n)
x[which.min(d %*% y)] <- 1
}
if (missing(y) && !missing(x)) {
if (verbose) { message("looking for equilibrium for given 1st player strategy") }
y <- rep(0, times = m)
y[which.min(t(x) %*% d)] <- 1
}
if (missing(x) && missing(y)) {
if (verbose) { message("looking for equilibrium for given 1st player strategy") }
auxG <- mosg(n, m, goals = 1, losses = as.vector(d), byrow = FALSE)
eq <- mgss(auxG, ...)
x <- as.vector(eq$optimalDefense)
y <- as.vector(eq$optimalAttacks[,1])
}
if (verbose) {
message("using the following equilibrium: ")
message("x* = ")
print(x)
message("y* = ")
print(y)
}
v <- (x%*%d%*%y)[1]
    # indicator of outcomes strictly below the value v, built from d directly
    # so entries are not re-thresholded by a second sequential assignment
    A <- (d < v) * 1
return((t(x) %*% A %*% y)[1])
}
stop("disappointment rate only defined for classes 'mosg.lossdistribution' and 'matrix'")
} |
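# Hedged usage sketch (not from the original source): with both strategies
# supplied no equilibrium computation is needed, so this runs standalone.
# d <- matrix(c(2, 4, 3, 1), nrow = 2)  # toy 2x2 loss matrix
# disappointmentRate(d, x = c(0.5, 0.5), y = c(0.5, 0.5))  # = 0.5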