code
stringlengths 1
13.8M
|
---|
# Redirect .libPaths() for an R language-server process so that the
# languageserver package is loaded from a dedicated library (rlsLib).
# Does nothing inside RStudio, or when the current process does not
# match the language-server process pattern.
#
# Args:
#   rlsLib: library path holding the languageserver installation.
#   langServerProcessPatt: pattern used by languageserver_detect() to
#     recognise a language-server process.
#   strictLibrary: if TRUE, new library paths are rlsLib plus only the
#     last (base) entry of the current .libPaths(); otherwise rlsLib is
#     prepended to all current paths.
#   os, pid: forwarded to languageserver_detect(); overridable for tests.
#
# Returns invisibly: NA when no changes were made, TRUE when the package
# was verified loadable; stops (after restoring .libPaths) otherwise.
languageserver_startup <- function(
rlsLib = getOption("langserver_library"),
langServerProcessPatt = getOption("langserver_processPatt"),
strictLibrary = TRUE,
os = tolower(Sys.info()[["sysname"]]),
pid = Sys.getpid()
) {
lg("languageserver_startup Starting")
on.exit(lg("languageserver_startup Exiting"))
if (identical(Sys.getenv("RSTUDIO"), "1")) {
lg(" This looks like RStudio, not doing anything.")
return(invisible(NA))
}
oldLibPaths <- .libPaths()
lg(" Current .libPaths: ", toString(oldLibPaths))
isLangServer <- languageserver_detect(
pid = pid,
os = os,
langServerProcessPatt = langServerProcessPatt
)
if (!isLangServer) {
lg(" Not language server process. No changes made.")
return(invisible(NA))
}
lgsrvr(" This seems to be language server process. Aligning libraries.")
newLibLoc <- if (isTRUE(strictLibrary)) {
c(rlsLib, .libPaths()[length(.libPaths())])
} else {
c(rlsLib, .libPaths())
}
lgsrvr(" Determined new library locations: ", toString(newLibLoc))
# Writes base R's internal .lib.loc variable directly; this changes what
# .libPaths() reports without appending the usual site/user defaults.
assign(".lib.loc", newLibLoc, envir = environment(.libPaths))
lgsrvr(" Now .libpaths() is:\n ", paste(.libPaths(), collapse = "\n "))
lgsrvr(" Trying to requireNamespace of languageserver.")
# do.call keeps R CMD check from flagging a hard dependency on the package.
serverLoadable <- do.call(
"requireNamespace",
list(package = "languageserver", lib.loc = rlsLib, quietly = TRUE)
)
if (!isTRUE(serverLoadable)) {
# Roll back the library-path mutation before erroring out.
lg(" Not loadable, restoring .libPaths to: ", toString(oldLibPaths))
assign(".lib.loc", oldLibPaths, envir = environment(.libPaths))
stop(
"The languageserver package is not loadable. \n",
"You can try running languageserver_install()"
)
} else {
lgsrvr(" Package languageserver is loadable, functionality should work.")
}
invisible(serverLoadable)
} |
# Build a PMML <MiningSchema> node for the model's fields.
#
# Args:
#   field: list describing model fields; $levels is consulted for the
#     "Temp"/"DiscretePlaceHolder" placeholder fields.
#   target: name of the predicted field, or NULL (e.g. unsupervised models).
#   transformed: optional transformation metadata for .origFieldList().
#   missing_value_replacement: if non-NULL, emitted as
#     missingValueReplacement for non-target fields, and
#     invalidValueTreatment is forced to "asMissing".
#   invalidValueTreatment: default invalid-value policy attribute.
#
# Returns: an XML::xmlNode "MiningSchema" with one MiningField per name.
.pmmlMiningSchema <- function(field, target = NULL, transformed = NULL,
                              missing_value_replacement = NULL,
                              invalidValueTreatment = "returnInvalid") {
  namelist <- .origFieldList(field, transformed, target)
  mining.schema <- xmlNode("MiningSchema")
  target <- .removeAsFactor(target)
  unknownVal <- NULL
  # seq_along() is safe for an empty namelist (1:length() would give c(1, 0)).
  for (j in seq_along(namelist)) {
    if (!is.na(namelist[[j]])) {
      # With a NULL target, `namelist[[j]] == target` is logical(0), which
      # made the original ifelse() produce a zero-length usage. "active" is
      # PMML's documented default usageType, so emitting it is equivalent.
      usage <- if (is.null(target)) {
        "active"
      } else {
        ifelse(namelist[[j]] == target, "predicted", "active")
      }
      if ((!is.null(target)) && (namelist[[j]] != target)) {
        if (!is.null(missing_value_replacement)) {
          unknownVal <- missing_value_replacement
          invalidValueTreatment <- "asMissing"
        }
      } else if (is.null(target) && !is.null(missing_value_replacement)) {
        unknownVal <- missing_value_replacement
        invalidValueTreatment <- "asMissing"
      }
      # Reset per iteration so a skipped placeholder field cannot cause the
      # previous iteration's node to be appended twice (stale-`mf` bug).
      mf <- NULL
      if (namelist[[j]] == "Temp" || namelist[[j]] == "DiscretePlaceHolder") {
        if (length(field$levels[[namelist[[j]]]]) == 1) {
          mf <- xmlNode("MiningField", attrs = c(
            name = namelist[[j]],
            usageType = usage, missingValueReplacement = field$levels[[namelist[[j]]]]
          ))
        }
      } else {
        mf <- xmlNode("MiningField", attrs = c(
          name = namelist[[j]], usageType = usage,
          missingValueReplacement = unknownVal, invalidValueTreatment = invalidValueTreatment
        ))
      }
      if (!is.null(mf)) {
        mining.schema <- append.XMLNode(mining.schema, mf)
      }
    }
  }
  return(mining.schema)
}
# Build a PMML <MiningSchema> node for ARIMA models. Identical to
# .pmmlMiningSchema() except the forecast-horizon field "h" is marked
# usageType = "supplementary".
#
# Args: see .pmmlMiningSchema().
# Returns: an XML::xmlNode "MiningSchema".
.pmmlMiningSchemaARIMA <- function(field, target = NULL, transformed = NULL,
                                   missing_value_replacement = NULL,
                                   invalidValueTreatment = "returnInvalid") {
  namelist <- .origFieldList(field, transformed, target)
  mining.schema <- xmlNode("MiningSchema")
  target <- .removeAsFactor(target)
  unknownVal <- NULL
  # seq_along() is safe for an empty namelist (1:length() would give c(1, 0)).
  for (j in seq_along(namelist)) {
    if (!is.na(namelist[[j]])) {
      # With a NULL target the original nested ifelse() yielded logical(0);
      # explicit branching keeps the "h" = supplementary rule and falls back
      # to PMML's default usageType "active".
      usage <- if (!is.null(target) && namelist[[j]] == target) {
        "predicted"
      } else if (namelist[[j]] == "h") {
        "supplementary"
      } else {
        "active"
      }
      if ((!is.null(target)) && (namelist[[j]] != target)) {
        if (!is.null(missing_value_replacement)) {
          unknownVal <- missing_value_replacement
          invalidValueTreatment <- "asMissing"
        }
      } else if (is.null(target) && !is.null(missing_value_replacement)) {
        unknownVal <- missing_value_replacement
        invalidValueTreatment <- "asMissing"
      }
      # Reset per iteration so a skipped placeholder field cannot cause the
      # previous iteration's node to be appended twice (stale-`mf` bug).
      mf <- NULL
      if (namelist[[j]] == "Temp" || namelist[[j]] == "DiscretePlaceHolder") {
        if (length(field$levels[[namelist[[j]]]]) == 1) {
          mf <- xmlNode("MiningField", attrs = c(
            name = namelist[[j]],
            usageType = usage, missingValueReplacement = field$levels[[namelist[[j]]]]
          ))
        }
      } else {
        mf <- xmlNode("MiningField", attrs = c(
          name = namelist[[j]], usageType = usage,
          missingValueReplacement = unknownVal, invalidValueTreatment = invalidValueTreatment
        ))
      }
      if (!is.null(mf)) {
        mining.schema <- append.XMLNode(mining.schema, mf)
      }
    }
  }
  return(mining.schema)
}
# Build a PMML <MiningSchema> node for survival models. In addition to the
# covariate fields it appends the status field (as "active", unless a target
# already exists), the time field, and a synthetic "cumulativeHazard" field
# as the predicted output.
#
# Args:
#   field: list with $name holding the model's field names.
#   timeName, statusName: survival time / censoring status field names.
#   target: optional predicted field name.
#   inactive: field names to mark usageType = "supplementary".
#   transformed: optional transformation metadata (field_data table with
#     "orig_field_name" / "transform" columns) used to trace each derived
#     field back to its original input fields.
#   missing_value_replacement: emitted for non-predicted fields.
#
# Returns: an XML::xmlNode "MiningSchema".
.pmmlMiningSchemaSurv <- function(field, timeName, statusName, target = NULL, inactive = NULL, transformed = NULL, missing_value_replacement = NULL) {
namelist <- NULL
number.of.fields <- length(field$name)
mining.fields <- list()
targetExists <- 0
ii <- 0
unknownVal <- NULL
for (i in 1:number.of.fields)
{
# Skip interaction terms (names containing ":"); the empty branch is
# intentional.
if (length(grep(":", field$name[i])) == 1) {
} else {
ii <- ii + 1
if (is.null(target)) {
usage <- "active"
} else {
usage <- ifelse(field$name[i] == target, "predicted", "active")
}
if (usage != "predicted") {
unknownVal <- missing_value_replacement
}
if (usage == "predicted") {
targetExists <- 1
}
if (field$name[i] %in% inactive) usage <- "supplementary"
if (!is.null(transformed)) {
if (is.na(transformed$field_data[field$name[i], "orig_field_name"])) {
if (is.na(transformed$field_data[field$name[i], "transform"])) {
# Untransformed original field: record its name once.
if (!(field$name[i] %in% namelist)) {
namelist <- c(namelist, field$name[i])
}
}
} else {
# Derived field: walk the orig_field_name chain up to the original
# input field(s).
ofname <- transformed$field_data[field$name[i], "orig_field_name"][[1]]
for (j in 1:length(ofname))
{
fname <- ofname[j]
while (!is.na(ofname[j])) {
fname <- ofname[j]
xvalue <- transformed$field_data[fname, "transform"]
if (!is.na(xvalue) && xvalue == "MapValues") {
# MapValues may have several parents; record them all.
parents <- transformed$field_data[fname, "orig_field_name"][[1]]
# NOTE(review): this inner loop reuses `j`, clobbering the
# outer loop index over `ofname` — confirm this is intended.
for (j in 1:length(parents))
{
if (!(parents[j] %in% namelist)) {
namelist <- c(namelist, parents[j])
}
}
fname <- NA
break
}
ofname[j] <- transformed$field_data[ofname[j], "orig_field_name"][[1]]
}
if (!(fname %in% namelist)) {
namelist <- c(namelist, fname)
}
}
}
# Re-emit MiningField nodes for everything collected so far; later
# iterations overwrite earlier entries of mining.fields by index.
nmbr <- 1
for (ndf2 in 1:length(namelist))
{
if (!is.na(namelist[ndf2])) {
mining.fields[[nmbr]] <- xmlNode("MiningField", attrs = c(
name = namelist[ndf2],
usageType = usage, missingValueReplacement = unknownVal,
invalidValueTreatment = "asMissing"
))
nmbr <- nmbr + 1
}
}
} else {
# No transformations: one MiningField per (de-factored) field name.
fName <- .removeAsFactor(field$name[i])
mining.fields[[i]] <- xmlNode("MiningField",
attrs = c(
name = fName,
usageType = usage, missingValueReplacement = unknownVal,
invalidValueTreatment = "asMissing"
)
)
}
}
}
# Append the survival bookkeeping fields after the covariates.
if (targetExists == 0) {
mining.fields[[ii + 1]] <- xmlNode("MiningField",
attrs = c(
name = statusName,
usageType = "active"
)
)
}
mining.fields[[ii + 2]] <- xmlNode("MiningField",
attrs = c(
name = timeName,
usageType = "active"
)
)
mining.fields[[ii + 3]] <- xmlNode("MiningField",
attrs = c(
name = "cumulativeHazard",
usageType = "predicted"
)
)
mining.schema <- xmlNode("MiningSchema")
mining.schema$children <- mining.fields
return(mining.schema)
}
# Resolve the list of ORIGINAL field names behind a model's (possibly
# transformed) fields. Derived fields are traced back through the
# transformed$field_data "orig_field_name" chain to their ultimate ancestor.
#
# Args:
#   field: list with $name; leading Zementis placeholder fields are skipped.
#   transformed: optional transformation metadata; when NULL, names are just
#     de-factored and de-duplicated.
#   target: the target field (and placeholders) are passed through as-is.
#
# Returns: a list of unique original field names.
.origFieldList <- function(field, transformed = NULL, target = NULL) {
number.of.fields <- length(field$name)
mining.fields <- list()
# Skip the synthetic first field added for clustering/anomaly models.
if (field$name[1] == "ZementisClusterIDPlaceHolder" || field$name[1] == "ZementisHiddenTargetField") {
begin <- 2
} else {
begin <- 1
}
DPL1 <- "DiscretePlaceHolder"
DPL2 <- "Temp"
DPL3 <- "predictedScore"
namelist <- list()
dnamelist <- list()
if (!is.null(transformed)) {
for (i in begin:number.of.fields) {
# Target and placeholder fields are kept verbatim, no ancestry walk.
if (.removeAsFactor(field$name[i]) %in% c(target, .removeAsFactor(target), DPL1, DPL2, DPL3)) {
namelist <- c(namelist, .removeAsFactor(field$name[i]))
next
}
if (transformed$field_data[field$name[i], "type"] == "original") {
if (!(.removeAsFactor(field$name[i]) %in% namelist)) {
namelist <- c(namelist, .removeAsFactor(field$name[i]))
}
} else {
# Derived field: may have several comma-separated parents; follow
# each chain up to the root ancestor.
ofnames <- strsplit(transformed$field_data[field$name[i], "orig_field_name"][[1]], ",")[[1]]
for (j in 1:length(ofnames)) {
# Trim surrounding whitespace from the parent name.
ofname <- gsub("^\\s+|\\s+$", "", ofnames[j])
hname <- transformed$field_data[ofname, "orig_field_name"]
ancestorField <- ofname
while (!is.na(hname)) {
ancestorField <- hname
hname <- transformed$field_data[hname, "orig_field_name"]
}
fname <- .removeAsFactor(ancestorField)
# dnamelist tracks derived names already accounted for, so the same
# ancestor is not added twice via different derived fields.
if ((!(fname %in% namelist)) && (!(fname %in% dnamelist))) {
namelist <- c(namelist, fname)
if (!(.removeAsFactor(fname) %in% dnamelist)) {
dnamelist <- c(dnamelist, .removeAsFactor(field$name[i]))
}
}
}
}
}
} else {
# No transformations: de-factor and de-duplicate the raw names.
for (i in begin:number.of.fields) {
fName <- .removeAsFactor(field$name[i])
if (!(fName %in% namelist) && fName != "ZementisClusterIDPlaceHolder") {
namelist <- c(namelist, fName)
}
}
}
return(namelist)
} |
# Analytical gradient of the Augmented Expected Improvement (AEI) criterion
# at point x for a km (DiceKriging) model with homogeneous noise.
#
# Args:
#   x: numeric vector, the point at which to evaluate the gradient.
#   model: a km object (uses slots @d, @X, @T, @z, @M, @covariance, @F,
#     @trend.formula).
#   new.noise.var: noise variance of a prospective new observation; drives
#     the AEI penalty term.
#   y.min: incumbent value; when NULL it is taken as the kriging mean at the
#     design point minimising mean + qnorm(0.75)*sd (the "effective best").
#   type: kriging type, "UK" (universal) or "SK" (simple).
#   envir: optional environment caching intermediate prediction quantities
#     (as left behind by the corresponding criterion evaluation).
#
# Returns: numeric vector of length d with the AEI gradient (zeros when the
# kriging sd at x is numerically negligible).
AEI.grad <- function(x, model, new.noise.var=0, y.min=NULL, type = "UK", envir=NULL){
d <- length(x)
if (d != model@d){ stop("x does not have the right size") }
newdata.num <- as.numeric(x)
newdata <- data.frame(t(newdata.num))
colnames(newdata) = colnames(model@X)
if (is.null(y.min))
{
# Effective best: minimiser of the 75% quantile of the predictor at the
# design points.
pred <- predict(object=model, newdata=model@X, type=type, checkNames = FALSE)
mk <- pred$mean
sk <- pred$sd
qk <- mk + qnorm(0.75)*sk
y.min <- mk[which.min(qk)]
}
# NOTE(review): `T` here is a Cholesky-type factor slot of the model and
# shadows the built-in TRUE alias within this function.
T <- model@T
X <- model@X
z <- model@z
u <- model@M
covStruct <- model@covariance
if (is.null(envir))
{ predx <- predict.km(object=model, newdata=newdata, type=type, checkNames = FALSE)
mk <- predx$mean
sk <- predx$sd
c <- predx$c
v <- predx$Tinv.c
xcr <- (y.min - mk)/sk
xcr.prob <- pnorm(xcr)
xcr.dens <- dnorm(xcr)
} else
{ # Reuse quantities cached by the criterion evaluation instead of
# re-predicting.
toget <- matrix(c("xcr", "xcr.prob", "xcr.dens", "c", "Tinv.c", "mk","sk"),1,7)
apply(toget, 2, get, envir=envir)
xcr <- envir$xcr
xcr.prob <- envir$xcr.prob
xcr.dens <- envir$xcr.dens
c <- envir$c
v <- envir$Tinv.c
mk <- envir$mk
sk <- envir$sk
}
F.newdata <- model.matrix([email protected], data=newdata)
# Degenerate kriging variance (e.g. x coincides with a design point):
# gradient is numerically zero.
if (sk < sqrt(model@covariance@sd2)/1e6)
{ aei.grad.val <- rep(0,d)
} else
{
ei.val <- (y.min - mk) * xcr.prob + sk * xcr.dens
# Penalty factor accounting for the new observation's noise.
pen <- (1- sqrt(new.noise.var)/sqrt(new.noise.var + sk^2))
aei.val <- ei.val * pen
dc <- covVector.dx(x=newdata.num, X=X, object=covStruct, c=c)
f.deltax <- trend.deltax(x=newdata.num, model=model)
W <- backsolve(t(T), dc, upper.tri=FALSE)
# Gradients of the kriging mean and variance w.r.t. x.
mk.grad <- t(W)%*%z + t([email protected]%*%f.deltax)
if (type=="UK")
{ tuuinv <- solve(t(u)%*%u)
sk2.grad <- t( -2*t(v)%*%W +
2*(F.newdata - t(v)%*%u )%*% tuuinv %*%
( f.deltax - t(t(W)%*%u) ))
} else
{ sk2.grad <- t( -2*t(v)%*%W)
}
sk.grad <- sk2.grad / (2*sk)
# Chain rule on EI(x) = (y.min - mk) * Phi(xcr) + sk * phi(xcr).
ei.grad <- - mk.grad * xcr.prob + sk.grad * xcr.dens
pen.grad <- sqrt(new.noise.var)/2*sk2.grad*(new.noise.var + sk^2)^(-3/2)
aei.grad.val <- ei.grad*pen + ei.val*pen.grad
}
return(aei.grad.val)
} |
# Ensure the rnaturalearthhires data package is installed and recent enough,
# prompting installation (via install_rnaturalearthhires) when it is missing
# or older than the minimum version.
check_rnaturalearthhires <- function() {
  # Minimum acceptable version of the data package.
  required_version <- "0.0.0.9000"
  is_installed <- requireNamespace("rnaturalearthhires", quietly = TRUE)
  if (!is_installed) {
    message("The rnaturalearthhires package needs to be installed.")
    install_rnaturalearthhires()
  } else if (utils::packageVersion("rnaturalearthhires") < required_version) {
    message("The rnaturalearthhires package needs to be updated.")
    install_rnaturalearthhires()
  }
}
# Install rnaturalearthhires from GitHub, asking for confirmation in
# interactive sessions. Stops with manual-installation instructions when the
# user declines or installation fails.
install_rnaturalearthhires <- function() {
  # Fallback guidance shown in every failure path.
  instructions <- paste(
    " Please try installing the package for yourself",
    "using the following command: \n",
    " devtools::install_github(\"ropensci/rnaturalearthhires\")"
  )
  error_func <- function(e) {
    stop(paste("Failed to install the rnaturalearthhires package.\n", instructions))
  }
  # Non-interactive sessions default to "Yes".
  input <- 1
  if (interactive()) {
    input <- utils::menu(
      c("Yes", "No"),
      title = "Install the rnaturalearthhires package?"
    )
  }
  if (input != 1) {
    stop(paste(
      "The rnaturalearthhires package is necessary for that method.\n",
      instructions
    ))
  }
  message("Installing the rnaturalearthhires package.")
  # Treat warnings like errors: a failed GitHub install often only warns.
  tryCatch(
    devtools::install_github("ropensci/rnaturalearthhires"),
    error = error_func,
    warning = error_func
  )
}
# Negative-free log-likelihood for a randomized-response linear regression
# model, evaluated at parameter vector phi.
#
# Args:
#   phi: c(beta (nbeta coefficients), sigma (residual sd), first K-1 class
#     probabilities; the last probability is completed to sum to 1).
#   nbeta: number of regression coefficients.
#   y: response vector; u: additional (non-RR) covariates; U: their count.
#   gidx: group index per observation; G groups.
#   intercept: whether a leading 1 column is added to the design.
#   n.star: J x G table of observed response-pattern counts per group.
#   PWarray: misclassification array; PWpp: per-observation pattern probs.
#   M: number of RR variables (pattern names are ":"-joined when M > 1).
#
# Returns: the log-likelihood (clamped to -9e300 instead of -Inf so
# optimisers do not choke).
RRlin.ll <- function(phi, nbeta, y, u, gidx, U, intercept, n.star, PWarray, PWpp, M){
beta <- phi[1:nbeta]
sigma <- phi[nbeta+1]
# Last class probability is implied by the sum-to-one constraint.
pi.true <- c(phi[(nbeta+2):length(phi)] , 0)
pi.true[length(pi.true)] <- 1-sum(pi.true)
G <- ncol(n.star)
J <- nrow(n.star)
K <- ncol(PWarray[,,1])
N <- length(y)
# Decode the true-value patterns from the PWarray dimnames.
if(M==1){
patterns <- dimnames(PWarray)$true
}else{
patterns <- unlist(strsplit(dimnames(PWarray)$true,":"))
}
patterns <- matrix(as.numeric(patterns),nrow=K,byrow=T)
first <- rep(0,G)
sec <- rep(0,G)
for (g in 1:G){
Ng <- colSums(n.star)[g]
# t1[i, j]: unnormalised Gaussian density of observation i under true
# pattern j.
t1 <- matrix(NA,Ng,K)
for (j in 1:K){
xx <- matrix(patterns[j,], Ng, M, T)
if (U>0){
xx <- cbind(xx, u[gidx == g,,drop=FALSE])
}
if (intercept){
xx <- cbind(1,xx)
}
t1[,j] <- exp(-.5 * ( ( y[gidx == g] - (xx %*% beta) ) /sigma)^2 )
}
# Mixture over true patterns, weighted by pattern posterior probs.
t2 <- PWpp[gidx == g,] * matrix(pi.true, nrow=Ng, ncol=K, byrow=T)
temp <- rowSums(t1 * t2) / (sigma*sqrt(2*pi) * PWpp[gidx == g,]%*%pi.true )
first[g] <- sum(log( temp ) )
# Multinomial part for the observed response-pattern counts.
sec[g] <- t(n.star[,g]) %*% log( PWarray[,,g] %*% pi.true )
}
ll <- sum (first,sec)
# Guard against -Inf breaking numerical optimisation.
if(ll == -Inf)
ll <- -9e300
ll
} |
# Auto-generated regression test: verifies that `3.001e+155 > 0` evaluates
# to TRUE. The eval(parse(text = ...)) round-trip is an artifact of the test
# harness serialising expressions as text, not a pattern to copy.
expected <- eval(parse(text="TRUE"));
test(id=0, code={
argv <- eval(parse(text="list(3.001e+155, 0)"));
do.call(`>`, argv);
}, o=expected); |
# EM algorithm for a multivariate regression with R low-rank latent factors:
# Y_i ~ N(B %*% (x_i kron z_i) terms, A), with A kept diagonal.
#
# Args:
#   formula, data: model specification; response may be a matrix.
#   R: number of latent factor columns.
#   tol: relative log-likelihood change used as the convergence criterion.
#   itmax: maximum number of EM iterations.
#   verbose: print per-check iteration diagnostics.
#
# Returns: list(A = diagonal residual covariance, B = coefficient matrix).
#
# NOTE(review): file-wide style — uses `=` for assignment and `verbose=F`;
# kept as-is to match the original author's conventions.
Greg.em <-
function(formula,data=NULL,R=1,tol=1e-10,itmax=1000,verbose=F){
model=model.frame(formula,data)
Y=model.response(model)
X=model.matrix(formula,model)
# Multivariate normal log-density, used only for monitoring convergence.
ldmvnorm=function(y,mu=rep(0,length(y)),Sig=diag(1,length(y))){
-.5*( length(y)*log(2*pi) + log(det(Sig)) + t(y-mu)%*%solve(Sig)%*%(y-mu) )
}
p=dim(Y)[2] ; q=dim(X)[2] ; n=dim(Y)[1]
# Initialise A from marginal variances and B from a per-response OLS fit.
dvar = apply(Y,2,var)
A = diag(dvar)
iA=diag(1/dvar)
B=matrix(rep(t(coef(lm(Y~X[,2]))),R),ncol=q*R)
iter=0
LL=NULL
rll=10
while( rll > tol & iter<itmax)
{
B0=B ; iter=iter+1
# E-step: posterior variance (Vz) and mean (Mz) of the latent scores.
Vz=array(dim=c(R,R,n)) ; Mz=matrix(nrow=n,ncol=R)
for(i in 1:n)
{
Bx=apply(array(B,dim=c(p,q,R)),3,"%*%",X[i,])
Vz[,,i]=solve( t(Bx)%*%iA%*%Bx + diag(R) )
Mz[i,]=Vz[,,i]%*%t(Bx)%*%iA%*%Y[i,]
}
# M-step: augmented least squares; the Y0/X0 rows carry the posterior
# uncertainty (eigen square-root of each Vz contribution).
Y1=Y ; X1=NULL ; for(r in 1:R) { X1=cbind(X1,diag(Mz[,r])%*%X )}
Y0=matrix(0,nrow=n*R,ncol=p) ; X0=NULL
for(i in 1:n)
{
xi=matrix(outer(X[i,],diag(R)),nrow=R*q,ncol=R)
ZZ=xi%*%Vz[,,i]%*%t(xi) ; ZZ=.5*(ZZ+t(ZZ))
Z=eigen(ZZ);Z=Z$vec[,1:R]%*%diag(sqrt(Z$val[1:R]),nrow=R)
X0=rbind(X0,t(Z))
}
YA=rbind(Y0,Y1) ; XA=rbind(X0,X1)
B=t(YA)%*%XA%*%solve(t(XA)%*%XA)
E=YA-XA%*%t(B)
A= (t(E)%*%E)/n
# Keep A diagonal (model assumption).
dA=diag(A)
iA=diag(1/dA)
A = diag(dA)
# Convergence is only checked every 5 iterations (likelihood is costly).
if(iter%%5==0)
{
ll=0
for(i in 1:dim(Y)[1])
{
xi=matrix(outer(X[i,],diag(R)),nrow=R*q,ncol=R)
ll=ll+ldmvnorm(Y[i,],Sig=A+B%*%xi%*%t(xi)%*%t(B))
}
LL=c(LL,ll)
if(iter>5){rll=abs(LL[length(LL)]-LL[length(LL)-1])/abs(LL[length(LL)])}
if (verbose )cat(iter,log(rll,base=10),ll," ",round(diag(A),2)," ",round(c(B),2),"\n")
}
}
list(A=A,B=B)
} |
# Draw n standard-normal recruitment deviations for a given iteration,
# reproducibly: the master `seed` fixes a shuffled table of candidate seeds,
# and `iteration` indexes into it, so each iteration always gets the same
# independent random stream.
get_recdevs <- function(iteration, n, seed = 21) {
  set.seed(seed)
  iter_seed <- sample(1:1e6)[iteration]
  set.seed(iter_seed)
  rnorm(n, 0, 1)
}
# Evaluate a plotting expression into a PDF and/or PNG file.
#
# Args:
#   expr: plotting expression, evaluated once per requested device.
#   file: output path without extension; ".pdf"/".png" are appended.
#   pdf, png: logical, which device(s) to write.
#   overwrite: passed (inverted) to newFilename's `ignore`.
#   open: open the resulting file(s) with openPDF/openFile.
#   quiet: suppress messages/warnings from tryStack and newFilename.
#   tracewarnmes: warn with call trace (tryStack `warn`).
#   filargs, pdfargs, pngargs: extra argument lists for newFilename /
#     grDevices::pdf / grDevices::png (merged via owa).
#   width, height, units, res: device geometry.
#   seed: RNG seed set before EACH evaluation of expr so PDF and PNG
#     renderings are identical.
#   envlevel: how many frames up to evaluate expr (passed to eval.parent).
#   ...: further device arguments shared by both devices.
#
# Returns invisibly: the (up to two) output file paths.
pdfpng <- function(
expr,
file,
pdf=TRUE,
png=TRUE,
overwrite=FALSE,
open=TRUE,
quiet=FALSE,
tracewarnmes=!quiet,
filargs=NULL,
width=7,
height=5,
units="in",
res=500,
seed=runif(1,-1e9,1e9),
envlevel=1,
pdfargs=NULL,
pngargs=NULL,
...
)
{
if(!is.logical(pdf)) stop("pdf argument must be logical (T/F), not '", class(pdf),"'.")
# BUG FIX: the message previously said "pdf argument" for the png check.
if(!is.logical(png)) stop("png argument must be logical (T/F), not '", class(png),"'.")
# Scalar condition: use short-circuit && rather than elementwise &.
if(!pdf && !png) {warning("pdf and png both FALSE, not saving plot."); return(expr)}
fig <- normalizePath(file, winslash="/", mustWork=FALSE)
fig <- paste0(fig, c(".pdf",".png"))
fig <- do.call(newFilename, owa(
list(filename=fig[c(pdf,png)], ignore=overwrite, quiet=quiet), filargs))
fig <- rep(fig, length.out=2)
dots <- list(...)
# Force the default runif() seed to be drawn ONCE here, so the pdf and png
# branches below reuse the same value.
seed <- seed
if(pdf)
{
do.call(grDevices::pdf, owa(
c(list(file=fig[1], width=width, height=height), dots), pdfargs))
set.seed(seed)
tryStack( eval.parent(substitute(expr), envlevel), silent=quiet, warn=tracewarnmes,
skip=c("tryStack(eval.parent(substitute(expr), envlevel), silent = quiet, ",
"eval(expr, p)"))
dev.off()
}
if(png)
{
do.call(grDevices::png, owa(
c(list(file=fig[2], width=width, height=height, units=units, res=res), dots), pngargs))
set.seed(seed)
tryStack( eval.parent(substitute(expr), envlevel), silent=quiet, warn=tracewarnmes,
skip=c("tryStack(eval.parent(substitute(expr), envlevel), silent = quiet, ",
"eval(expr, p)"))
dev.off()
}
if(open) { if(pdf) openPDF(fig[1]) ; if(png) openFile(fig[2]) }
return(invisible(fig))
}
# Recover the weight matrix W (K x n) from an anchor matrix A and data X:
# each column of X is projected onto the simplex-constrained column space of
# A by quadratic programming (solve.QP from quadprog).
#
# Args:
#   A: matrix with K columns (anchors).
#   X: matrix-like with n columns (observations); coerced to matrix.
# Returns: K x n matrix of non-negative weights whose columns sum to 1
#   (before the final pmax clipping of numerical noise).
W_from_AD <- function(A, X){
  K <- dim(A)[2]
  n <- dim(X)[2]
  W_hat <- matrix(0, K, n)
  # Parameterise a simplex point as w = M %*% q + bM (q of length K-1);
  # the QP constraint t(M) %*% q >= -bM then enforces w >= 0.
  M <- rbind(diag(K-1), rep(-1, K-1))
  bM <- diag(K)[, K]
  Dmat <- 2*t(A%*%M)%*%(A%*%M)
  Amat <- t(M)
  bvec <- -bM
  AM <- A%*%M
  AbM <- A%*%bM
  # BUG FIX: `class(X) != "matrix"` is broken on R >= 4.0, where matrices
  # have class c("matrix", "array") and the comparison yields a length-2
  # logical; is.matrix() is the supported test.
  if (!is.matrix(X)){
    X <- as.matrix(X)
  }
  for (i in seq_len(n)){
    dvec <- 2*t(X[,i]-AbM)%*%AM
    qp_sol <- solve.QP(Dmat, dvec, Amat, bvec)$solution
    W_hat[,i] <- c(qp_sol, 1-sum(qp_sol))
  }
  # Clip tiny negative entries caused by floating-point error.
  W_hat <- pmax(W_hat, 0)
  return(W_hat)
}
# Build a markdown/glue reference to a GitHub issue from a repository and an
# issue number.
#
# Args:
#   repo: repository identifier, expected in "user/repo" form (only warned
#     about, not enforced).
#   num: issue number; must be coercible to numeric.
#
# NOTE(review): the glue() template below appears TRUNCATED in this source
# extract — the closing quote and the rest of the template (presumably
# something like "#{num}") are missing. Recover the original template before
# relying on this function.
md_issue <- function(repo, num) {
if (suppressWarnings(any(is.na(as.numeric(num))))) {
stop("The num must be coercible to numeric.")
}
if (!grepl("/", repo)) {
warning("use the \"user/repo\" format")
}
glue::glue("{repo}
} |
# Weighted cross-scale overlap statistic: for each wavelet scale k, sum the
# products of the discrete autocorrelation wavelet ThePsiJ[[k]] with
# ThePsiJ[[ll]] over the lag window implied by (m, n), weight the sums by
# II[k], accumulate, and return twice the square of the total.
#
# Args:
#   II: numeric weights, one per scale.
#   m, n: integer time offsets.
#   ll: index of the reference scale in ThePsiJ.
#   ThePsiJ: list of symmetric numeric vectors of odd length 2*N+1,
#     indexed from lag -N to +N.
covI <-
function(II, m, n, ll, ThePsiJ) {
  n.scales <- length(II)
  psi.ref <- ThePsiJ[[ll]]
  half.ref <- (length(psi.ref) - 1) / 2
  total <- 0
  for (scale in seq_len(n.scales)) {
    psi.k <- ThePsiJ[[scale]]
    half.k <- (length(psi.k) - 1) / 2
    # Lag window where both vectors have support.
    lo <- max(-half.ref + n - m, -half.k)
    hi <- min(half.ref + n - m, half.k)
    overlap <- if (lo <= hi) {
      taus <- lo:hi
      # +half+1 converts a lag into a 1-based vector index.
      sum(psi.k[taus + half.k + 1] * psi.ref[m - n + taus + half.ref + 1])
    } else {
      0
    }
    total <- total + II[scale] * overlap
  }
  2 * total^2
}
context("Kristof Coefficient")
# Regression values for the Kristof reliability coefficient on population
# covariance matrices: congeneric / parallel / tau-equivalent structures
# with 1, 3, and 5 factors.
test_that("kristof and population covariance matrices", {
  # expect_that(x, equals(y)) is deprecated in testthat; expect_equal()
  # is the supported equivalent with the same tolerance semantics.
  cong.1f <- as.numeric(round(kristof(cong1f)[[1]], 6))
  expect_equal(cong.1f, .854614)
  par.1f <- as.numeric(round(kristof(par1f)[[1]], 6))
  expect_equal(par.1f, .888889)
  tau.1f <- as.numeric(round(kristof(tau1f)[[1]], 6))
  expect_equal(tau.1f, .833575)
  cong.3f <- as.numeric(round(kristof(cong3f)[[1]], 6))
  expect_equal(cong.3f, .801693)
  par.3f <- as.numeric(round(kristof(par3f)[[1]], 6))
  expect_equal(par.3f, .841216)
  tau.3f <- as.numeric(round(kristof(tau3f)[[1]], 6))
  expect_equal(tau.3f, .778395)
  cong.5f <- as.numeric(round(kristof(cong5f)[[1]], 6))
  expect_equal(cong.5f, .848932)
  par.5f <- as.numeric(round(kristof(par5f)[[1]], 6))
  expect_equal(par.5f, .880476)
  tau.5f <- as.numeric(round(kristof(tau5f)[[1]], 6))
  expect_equal(tau.5f, .829901)
})
# Assemble the full MSK (Modul-Stufen-Konzept) river assessment value tree:
# physical (morphology, hydrology, physical appearance), chemical
# (nutrients) and biological (diatoms, invertebrates, fish) branches,
# aggregated into one ecological node with utility.aggregate.addmin.
#
# Args:
#   language, dictionaries: translation setup forwarded to ecoval.dict()
#     and every sub-tree constructor.
#   col: plotting colour for the nodes.
#   modify.nutrients: forwarded to msk.nutrients.2010.create(modify=).
#
# Returns: the aggregated ecological utility node.
msk.create <- function(language = "English",
dictionaries = NA,
col = "black",
modify.nutrients = F)
{
dict <- ecoval.dict(language,dictionaries)
# Physical state branch and its three sub-modules.
morphol <- msk.morphol.1998.create(language=language,dictionaries=dictionaries,col=col)
hydrol <- msk.hydrol.2011.create (language=language,dictionaries=dictionaries,col=col)
physapp <- msk.physapp.2007.create(language=language,dictionaries=dictionaries,col=col)
phys.branch <- utility.aggregation.create(name.node = ecoval.translate("N_phys",dict),
nodes = list(morphol,hydrol,physapp),
name.fun = "utility.aggregate.addmin",
par = c(0.4,0.4,0.2,0.5),
names.par = c("w_morphol","w_hydrol","w_physapp",
"w_add_phys"),
col = col,
required = TRUE)
# Chemical state branch (currently nutrients only).
nutrients <- msk.nutrients.2010.create(language=language,dictionaries=dictionaries,col=col,
modify=modify.nutrients)
chem.branch <- utility.aggregation.create(name.node = ecoval.translate("N_chem",dict),
nodes = list(nutrients),
name.fun = "utility.aggregate.addmin",
par = c(1,0.5),
names.par = c("w_nutrients","w_add_chem"),
col = col,
required = TRUE)
# Biological state branch with equal weights for its three sub-modules.
diatoms <- msk.diatoms.2007.create (language=language,dictionaries=dictionaries,col=col)
invertebrates <- msk.invertebrates.2010.create(language=language,dictionaries=dictionaries,col=col)
fish <- msk.fish.2004.create (language=language,dictionaries=dictionaries,col=col)
biol.branch <- utility.aggregation.create(name.node = ecoval.translate("N_biol",dict),
nodes = list(diatoms,invertebrates,fish),
name.fun = "utility.aggregate.addmin",
par = c(1,1,1,0.5),
names.par = c("w_diatoms","w_invertebrates","w_fish",
"w_add_biol"),
col = col,
required = TRUE)
# Top-level aggregation of the three branches.
ecol <- utility.aggregation.create(name.node = ecoval.translate("N_ecol",dict),
nodes = list(phys.branch,chem.branch,biol.branch),
name.fun = "utility.aggregate.addmin",
par = c(1,1,1,0.5),
names.par = c("w_phys","w_chem","w_biol","w_add_ecol"),
col = col)
return(ecol)
} |
# Build the initial covariance-parameter vector theta for fitting a spatial
# stream-network model, along with parallel "scale", "type" and "terms"
# attributes describing each entry.
#
# Initial partial sills split 90% of the residual variance evenly across the
# covariance components; ranges start at the mean pairwise distance.
#
# Args:
#   z, X: response and design matrix (used for the OLS residual variance).
#   CorModels: character vector of correlation model names; at most one each
#     of "tailup", "taildown" and "Euclid" components.
#   use.nugget: append a nugget term (initialised at 10% of the residual
#     variance, or all of it when theta is otherwise empty).
#   use.anisotropy: for the Euclidean component, add axis-ratio and rotation
#     parameters.
#   dist.hydro.data: hydrologic distances (for tailup/taildown ranges).
#   x.dat, y.dat: coordinates for Euclidean distances.
#   REs: list of random effects; one partial sill each.
#
# Returns: numeric vector theta with attributes "scale" (transform used by
#   the optimiser), "type" (parsill/range/axratio/rotate) and "terms".
theta.ini <-
function(z, X, CorModels, use.nugget,
use.anisotropy, dist.hydro.data, x.dat, y.dat, REs)
{
n.models <- length(CorModels)
# OLS residual variance via the generalised inverse.
var.resid <- mean((z - X %*% mginv(t(X) %*% X) %*% t(X) %*% z)^2)
theta <- NULL
scale <- NULL
type <- NULL
terms <- NULL
if(length(grep("tailup",CorModels)) > 0){
if(length(grep("tailup",CorModels)) > 1)
stop("Cannot have more than 1 tailup model")
theta <- rbind(theta,matrix(c(log(.9/n.models*var.resid),
log(mean(dist.hydro.data))),ncol = 1))
scale <- c(scale,c("log","log"))
type <- c(type,c("parsill","range"))
terms <- c(terms,rep(CorModels[grep("tailup",CorModels)],
times = 2))
}
if(length(grep("taildown",CorModels)) > 0){
if(length(grep("taildown",CorModels)) > 1)
stop("Cannot have more than 1 taildown model")
theta <- rbind(theta,matrix(c(log(.9/n.models*var.resid),
log(mean(dist.hydro.data))),ncol = 1))
scale <- c(scale,c("log","log"))
type <- c(type,c("parsill","range"))
terms <- c(terms,rep(CorModels[grep("taildown",CorModels)],
times = 2))
}
if(length(grep("Euclid",CorModels)) > 0){
if(length(grep("Euclid",CorModels)) > 1)
stop("Cannot have more than 1 Euclidean model")
dist.Euclid.data <- distGeo(x.dat,y.dat,x.dat,y.dat)
if(use.anisotropy == FALSE) {
theta <- rbind(theta,matrix(c(log(.9/n.models*var.resid),
log(mean(dist.Euclid.data))),ncol = 1))
scale <- c(scale,c("log","log"))
type <- c(type,c("parsill","range"))
terms <- c(terms,rep(CorModels[grep("Euclid",CorModels)],
times = 2))
}
else {
# Anisotropic: two extra parameters, axis ratio and rotation angle.
theta <- rbind(theta,matrix(c(log(.9/n.models*var.resid),
log(mean(dist.Euclid.data)),0,0),ncol = 1))
scale <- c(scale,c("log","log","logistic","logistic180"))
type <- c(type,c("parsill","range","axratio","rotate"))
terms <- c(terms,rep(CorModels[grep("Euclid",CorModels)],
times = 4))
}
}
# One partial sill per random effect.
if(length(REs) > 0) {
theta <- rbind(theta,matrix(rep(log(.9/n.models*var.resid),
times = length(REs)), ncol = 1))
scale <- c(scale,rep("log", times = length(REs)))
type <- c(type,rep("parsill", times = length(REs)))
terms <- c(terms, names(REs))
}
if(use.nugget == TRUE) {
if(is.null(theta)) {
# Nugget-only model: the nugget absorbs the whole residual variance.
theta <- log(var.resid)
scale <- "log"
type <- "parsill"
terms <- "Nugget"
} else {
theta <- c(theta,log(.1*var.resid))
scale <- c(scale,"log")
type <- c(type,"parsill")
terms <- c(terms,"Nugget")
}
}
attr(theta,"scale") <- scale
attr(theta,"type") <- type
attr(theta,"terms") <- terms
theta
} |
# Summary indices derived from Rogers-Castro migration model parameters.
#
# Args:
#   pars: named numeric vector of model parameters; the childhood component
#     needs a1/alpha1 and the labor component a2/alpha2/lambda2/mu2 (each
#     group must be complete if any member is present).
#   long: if TRUE, return the tibble pivoted to long (measure/value) form.
#
# Returns: a tibble with peaking, child_dependency, labor_dependency,
#   labor_asymmetry and regularity columns (or the long version thereof).
index_age_rc <- function(pars = NULL, long = TRUE){
  # Placeholder so R CMD check does not flag the NSE column reference below.
  child_dependency <- NULL
  comp1 <- c("a1", "alpha1")
  comp2 <- c("a2", "alpha2", "lambda2", "mu2")
  # Each parameter group must be all-present or all-absent.
  if (any(comp1 %in% names(pars))){
    stopifnot(all(comp1 %in% names(pars)))
  }
  if (any(comp2 %in% names(pars))){
    stopifnot(all(comp2 %in% names(pars)))
  }
  p <- pars
  tibble::tibble(
    peaking = p[stringr::str_detect(string = names(p), pattern = "mu2")],
    # str_starts is needed here: "a1"/"a2" would also match "alpha1"/"alpha2"
    # with str_detect.
    child_dependency =
      p[stringr::str_starts(string = names(p), pattern = "a1")]/
      p[stringr::str_starts(string = names(p), pattern = "a2")],
    labor_dependency = 1/child_dependency,
    labor_asymmetry =
      p[stringr::str_detect(string = names(p), pattern = "lambda2")]/
      p[stringr::str_detect(string = names(p), pattern = "alpha2")],
    regularity =
      p[stringr::str_detect(string = names(p), pattern = "alpha1")]/
      p[stringr::str_detect(string = names(p), pattern = "alpha2")]
  ) %>%
    # Optionally pivot all measure columns into long form.
    {if(long) tidyr::pivot_longer(data = ., cols = 1:ncol(.), names_to = "measure") else .}
} |
# mlr learner test: fits brnn::brnn directly for several parameter sets and
# checks that the "regr.brnn" learner reproduces the same predictions
# (testSimpleParsets compares against old.predicts.list).
test_that("regr_brnn", {
  requirePackagesOrSkip("brnn", default.method = "load")
  # Parameter sets to cross-check; an empty list exercises the defaults.
  parset.list = list(
    list(),
    list(neurons = 3L),
    list(mu = 0.001)
  )
  old.predicts.list = list()
  for (i in seq_along(parset.list)) {
    pars = list(formula = regr.formula, data = regr.train)
    pars = c(pars, parset.list[[i]])
    # Same seed as the learner uses internally, so fits are comparable.
    set.seed(getOption("mlr.debug.seed"))
    # brnn prints training progress; silence it.
    capture.output({
      m = do.call(brnn::brnn, pars)
    })
    p = predict(m, newdata = regr.test)
    old.predicts.list[[i]] = p
  }
  testSimpleParsets("regr.brnn", regr.df, regr.target, regr.train.inds,
    old.predicts.list, parset.list)
}) |
# Fetch StatsBomb player-season aggregates for several competition/season
# pairs and stack them into one tibble.
#
# Args:
#   username, password: API credentials forwarded to player_season().
#   competitionmatrix: matrix-like with competition_id in column 1 and
#     season_id in column 2, one row per request.
#   version, baseurl, parallel, cores: accepted for interface compatibility.
#     NOTE(review): these four arguments are currently unused — requests run
#     sequentially through player_season().
#
# Returns: a tibble with the row-bound results of all requests (empty for a
#   zero-row competitionmatrix).
player_season_multicomp = function (username, password, competitionmatrix, version = "v6",
    baseurl = "https://data.statsbomb.com/api/", parallel = TRUE,
    cores = detectCores())
{
    # Collect one result per row, then bind once: growing a tibble with
    # bind_rows() inside the loop is O(n^2), and seq_len() also handles a
    # zero-row matrix correctly (1:0 would iterate over c(1, 0)).
    per_row <- lapply(seq_len(dim(competitionmatrix)[1]), function(i) {
        competition_id <- as.numeric(competitionmatrix[i, 1])
        season_id <- as.numeric(competitionmatrix[i, 2])
        player_season(username, password, competition_id, season_id)
    })
    events <- bind_rows(per_row)
    return(events)
}
# Plot Feldman (1976) data: percentage of correct answers against boolean
# complexity, coloured by problem size P (2..4).
source("ESEUR_config.r")
pal_col=rainbow(3)
feld=read.csv(paste0(ESEUR_dir, "developers/feldman_cases76.csv.xz"), as.is=TRUE)
# Map P in {2, 3, 4} onto palette entries 1..3.
feld$col=pal_col[feld$P-1]
# NOTE(review): the up/down parity subsets are computed but never used below.
up=subset(feld, Parity == "up")
down=subset(feld, Parity == "down")
plot(feld$BC, feld$PropCorr*100, col=feld$col,
xlab="Boolean complexity", ylab="Percent correct\n")
legend(x="bottomleft", legend=c("P = 2", "P = 3", "P = 4"), bty="n", fill=pal_col, cex=1.2) |
# Ribbon geom constructor (gganimint port of ggplot2's geom_ribbon):
# draws an area bounded by ymin and ymax for each x.
#
# Args mirror ggplot2 layer conventions: mapping/data/stat/position,
# na.rm (drop incomplete x/ymin/ymax rows at draw time), show.legend,
# inherit.aes; ... becomes additional layer params.
geom_ribbon <- function(mapping = NULL, data = NULL,
                        stat = "identity", position = "identity",
                        ...,
                        na.rm = FALSE,
                        show.legend = NA,
                        inherit.aes = TRUE) {
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomRibbon,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = na.rm,
      ...
    )
  )
}
# Geom class backing geom_ribbon(): renders one polygon per group, built
# from the upper edge (x, ymax) followed by the reversed lower edge
# (rev(x), rev(ymin)).
GeomRibbon <- gganimintproto("GeomRibbon", Geom,
  default_aes = aes(colour = NA, fill = "grey20", size = 0.5, linetype = 1,
    alpha = NA),
  required_aes = c("x", "ymin", "ymax"),
  draw_key = draw_key_polygon,
  # NA handling is deferred to draw_group (na.rm there), so pass through.
  handle_na = function(data, params) {
    data
  },
  draw_group = function(data, panel_scales, coord, na.rm = FALSE) {
    if (na.rm) data <- data[stats::complete.cases(data[c("x", "ymin", "ymax")]), ]
    data <- data[order(data$group, data$x), ]
    # A ribbon is a single polygon, so its aesthetics must be constant
    # within the group.
    aes <- unique(data[c("colour", "fill", "size", "linetype", "alpha")])
    if (nrow(aes) > 1) {
      stop("Aesthetics can not vary with a ribbon")
    }
    aes <- as.list(aes)
    # Rows with missing x/ymin/ymax split the ribbon: each run between
    # missing rows gets its own polygon id; the missing rows themselves
    # are dropped via NA ids.
    missing_pos <- !stats::complete.cases(data[c("x", "ymin", "ymax")])
    ids <- cumsum(missing_pos) + 1
    ids[missing_pos] <- NA
    # Upper edge forward, lower edge reversed, to trace a closed outline.
    positions <- plyr::summarise(data,
      x = c(x, rev(x)), y = c(ymax, rev(ymin)), id = c(ids, rev(ids)))
    munched <- coord_munch(coord, positions, panel_scales)
    ggname("geom_ribbon", polygonGrob(
      munched$x, munched$y, id = munched$id,
      default.units = "native",
      gp = gpar(
        fill = alpha(aes$fill, aes$alpha),
        col = aes$colour,
        lwd = aes$size * .pt,
        lty = aes$linetype)
    ))
  },
  # Animint export hook: reuse fill as stroke colour when none is mapped.
  pre_process = function(g, g.data, ...) {
    if("fill"%in%names(g.data) & !"colour"%in%names(g.data)){
      g.data[["colour"]] <- g.data[["fill"]]
    }
    return(list(g = g, g.data = g.data))
  }
)
# Area geom constructor: a ribbon with ymin fixed at 0 and ymax = y
# (see GeomArea$setup_data), stacked by default.
geom_area <- function(mapping = NULL, data = NULL, stat = "identity",
                      position = "stack", na.rm = FALSE, show.legend = NA,
                      inherit.aes = TRUE, ...) {
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomArea,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = na.rm,
      ...
    )
  )
}
# Geom class backing geom_area(): a GeomRibbon specialisation that derives
# ymin = 0 and ymax = y from the required y aesthetic.
GeomArea <- gganimintproto("GeomArea", GeomRibbon,
  default_aes = aes(colour = NA, fill = "grey20", size = 0.5, linetype = 1,
    alpha = NA),
  required_aes = c("x", "y"),
  setup_data = function(data, params) {
    transform(data, ymin = 0, ymax = y)
  },
  # Animint export hook: areas are serialised as ribbons, sorted by x.
  pre_process = function(g, g.data, ...) {
    g$geom <- "ribbon"
    g.data <- g.data[order(g.data$x), ]
    return(list(g = g, g.data = g.data))
  }
) |
# S4 generic: compute (and optionally plot) annual precipitation statistics.
#   output: 'series' (default, per-year panels) or 'mean' (per-station bars).
#   minRecords: minimum non-NA days for a year to count in 'mean' output.
setGeneric('getAnnual', function(data, output = 'series', minRecords = 355,
                                 ...) {
  standardGeneric('getAnnual')
})
# Method for a single data frame (first column date, remaining columns one
# series per station): computes, plots, and returns the annual table.
setMethod('getAnnual', signature('data.frame'),
          function(data, output, minRecords, ...) {
            result <- getAnnual.TS(data)
            getAnnual.plot(result, output, minRecords, ...)
            return(result)
          })
# Method for a list of single-station data frames.
setMethod('getAnnual', signature('list'),
          function(data, output, minRecords, ...) {
            result <- getAnnual.list(data)
            getAnnual.plot(result, output, minRecords, ...)
            return(result)
          })
# Split a multi-station time-series data frame (date in column 1, one
# column per station) into per-station frames, compute annual statistics
# for each, and stack them.
getAnnual.TS <- function(dataframe) {
  Date <- as.POSIXlt(dataframe[, 1])
  stations <- colnames(dataframe)[2:ncol(dataframe)]
  data <- lapply(stations, function(x) {
    # Rebuild a two-column (date, value) frame for this station.
    dataframe_new <- data.frame(Date, dataframe[, x])
    colnames(dataframe_new)[2] <- x
    getAnnual_dataframe(dataframe_new)
  })
  data <- rbindlist(data)
  # Ordered factor keeps years in chronological order in plots.
  data$Year <- factor(data$Year, levels = sort(unique(data$Year)), ordered = TRUE)
  rownames(data) <- NULL
  return(data)
}
# Compute annual statistics for each single-station data frame in the list
# and stack them into one table.
getAnnual.list <- function(datalist) {
  annual <- rbindlist(lapply(datalist, FUN = getAnnual_dataframe))
  # Ordered factor keeps years in chronological order in plots.
  annual$Year <- factor(annual$Year, levels = sort(unique(annual$Year)), ordered = TRUE)
  rownames(annual) <- NULL
  return(annual)
}
# Plot annual statistics: either per-station mean annual precipitation bars
# (output == 'mean', restricted to years with >= minRecords valid days) or a
# faceted per-year series of totals and NA counts.
getAnnual.plot <- function(data, output, minRecords, ...) {
  theme_set(theme_bw())
  if (output == 'mean') {
    # Keep only sufficiently complete years before averaging.
    validData <- data[data$recordNum >= minRecords,]
    data <- aggregate(validData$AnnualPreci, list(validData$Name), mean)
    colnames(data) <- c('Name', 'AnnualPreci')
    mainLayer <- with(data, {
      ggplot(data)+
        geom_bar(aes(x = Name, y = AnnualPreci, fill = Name), stat = 'identity')+
        labs(empty = NULL, ...)
    })
    print(mainLayer)
  } else {
    plotData <- with(data, {
      subset(data, select = c(Year, Name, NANum, AnnualPreci))
    })
    # Long format: one facet row per variable (NANum, AnnualPreci).
    plotData <- melt(plotData, var.id = c('Year', 'Name'))
    mainLayer <- with(plotData, {
      ggplot(plotData) +
        geom_bar(aes(x = Year, y = value , fill = Name),
                 stat = 'identity') +
        facet_grid(variable ~ Name, scale = 'free') +
        xlab('Year') +
        ylab(NULL) +
        # `empty` is a dummy labs() slot so user ... labels can be passed.
        labs(empty = NULL, ...) +
        theme(plot.title = element_text(size = 20, face = 'bold', vjust = 1)) +
        theme(axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1.5)),
              axis.text.y = element_text(size = rel(1.5)))
    })
    print(mainLayer)
  }
}
# Summarise one station's daily series into annual statistics.
#
# Args:
#   dataset: data frame whose first column is a date (character containing
#     '-' or '/', parseable by as.Date) and whose second column holds the
#     daily values; the second column's name is used as the station Name.
#
# Returns: data.frame with one row per year: Year, Name, AnnualPreci
#   (annual sum with NAs removed), recordNum (non-NA days), NANum (NA days).
getAnnual_dataframe <- function(dataset) {
  if (!grepl('-|/', dataset[1, 1])) {
    stop('First column is not date or wrong date format, check the format in ?as.Date{base},
         and use as.Date to convert.')
  }
  Date <- as.Date(dataset[, 1])
  year <- format(Date, '%Y')
  # BUG FIX: tapply() returns results ordered by SORTED year, so the Year
  # column must use sorted unique years as well — with plain unique() the
  # years were misaligned with the totals whenever the input was not
  # already in ascending year order.
  yearUnique <- sort(unique(year))
  annualPreci <- tapply(dataset[, 2], INDEX = year, FUN = sum, na.rm = TRUE)
  recordNum <- tapply(dataset[, 2], INDEX = year, function(x) length(which(!is.na(x))))
  NANum <- tapply(dataset[, 2], INDEX = year, function(x) length(which(is.na(x))))
  name <- rep(colnames(dataset)[2], length(yearUnique))
  output <- data.frame(Year = as.numeric(yearUnique), Name = name, AnnualPreci = annualPreci,
                       recordNum, NANum)
  return(output)
}
# Unit tests for the 'sequences' package: object validity, FASTA parsing,
# and agreement between the C++ and R implementations of gccount.
context("testing 'sequences' package'")
test_that("dnaseq validity", {
  data(dnaseq)
  expect_true(validObject(dnaseq))
})
test_that("readFasta", {
  data(dnaseq)
  # Parse the bundled example FASTA and compare to the shipped object.
  f <- dir(system.file("extdata",package="sequences"),pattern="fasta",full.names=TRUE)
  xx <- readFasta(f[1])
  expect_true(all.equal(xx, dnaseq))
})
test_that("ccpp code", {
  # Pure-R reference: per-nucleotide counts via tabulate over a factor.
  gccountr <-
    function(x) tabulate(factor(strsplit(x, "")[[1]]))
  x <- "AACGACTACAGCATACTAC"
  expect_true(identical(gccount(x), gccountr(x)))
  expect_true(identical(gccount2(x), gccountr(x)))
}) |
# Regression test (HiddenMarkov package): a 2-state Gaussian dthmm and the
# equivalent intercept-only Gaussian mmglm1 must give the same Baum-Welch
# fit — log-likelihoods, Viterbi paths and residuals are cross-checked.
library(HiddenMarkov)
n <- 5000
N <- 1
Pi <- matrix(c(0.8, 0.2,
               0.3, 0.7),
             byrow=TRUE, nrow=2)
delta <- c(1, 0)
# Simulate from the Gaussian hidden Markov model (means 5 and 2, unit sd).
y <- dthmm(NULL, Pi=Pi, distn="norm", delta=delta, pm=list(mean=c(5, 2), sd=c(1, 1)))
y <- simulate(y, nsim=N*n, seed=10)
print(logLik(y))
tmp <- BaumWelch(y, bwcontrol(posdiff=FALSE, tol=1e-05, prt=FALSE))
print(summary(tmp))
print(logLik(tmp))
# Equivalent formulation: intercept-only Gaussian GLM-HMM (mmglm1).
glmformula <- formula(y$x ~ 1)
glmfamily <- gaussian(link="identity")
Xdesign <- model.matrix(glmformula)
beta <- matrix(c(5, 2),
               ncol=ncol(Pi), nrow=ncol(Xdesign), byrow=TRUE)
y1 <- mmglm1(y$x, Pi, delta, glmfamily, beta, Xdesign, sigma=c(1, 1), msg=FALSE)
print(logLik(y1))
tmp1 <- BaumWelch(y1, bwcontrol(posdiff=FALSE, tol=1e-05, prt=FALSE))
print(summary(tmp1))
# Both log-likelihood code paths should agree.
print(logLik(tmp1, fortran=TRUE))
print(logLik(tmp1, fortran=FALSE))
# Cross-checks between the two model formulations.
if (abs(logLik(tmp)-logLik(tmp1)) > 1e-06)
    warning("WARNING: See tests/dthmm-mmglm1-gaussian.R, log-likelihoods are different")
if (any(Viterbi(tmp)!=Viterbi(tmp1)))
    warning("WARNING: See tests/dthmm-mmglm1-gaussian.R, Viterbi paths are different")
if (any(abs(residuals(tmp)-residuals(tmp1)) > 1e-06))
    warning("WARNING: See tests/dthmm-mmglm1-gaussian.R, residuals are different")
print(tmp$pm)
print(tmp1$beta)
print(tmp1$sigma)
print(tmp$Pi)
print(tmp1$Pi) |
# Proportional (pro-rata) claims rule: every claimant receives the share of
# the estate E proportional to its claim in C.
#
# Args:
#   E: estate (amount to divide).
#   C: numeric vector of claims.
#   Names: optional claimant names, stored in the result.
# Returns: a "ClaimsRule" object (list with Results, Claims, Method, Short,
#   E and Names components).
Proportional <-
function(E, C, Names = NULL) {
  # Common award rate applied to every claim.
  award.rate <- E / sum(C)
  awards <- award.rate * C
  rule <- list(
    Results = awards,
    Claims = C,
    Method = "Proportional Rule",
    Short = "P",
    E = E,
    Names = Names
  )
  class(rule) <- "ClaimsRule"
  return(rule)
}
# Replace the FIRST match of `pattern` in each element of `string`.
# `replacement` may be a character vector or a function/formula applied to
# each match (the function case is delegated to str_transform()).
# Dispatches on the pattern's engine type: fixed / coll / regex; empty and
# boundary patterns are rejected.
str_replace <- function(string, pattern, replacement) {
  if (!missing(replacement) && is_replacement_fun(replacement)) {
    replacement <- as_function(replacement)
    return(str_transform(string, pattern, replacement))
  }
  check_lengths(string, pattern, replacement)
  switch(type(pattern),
    empty = stop("Empty `pattern` not supported", call. = FALSE),
    bound = stop("Boundary `pattern` not supported", call. = FALSE),
    fixed = stri_replace_first_fixed(string, pattern, replacement,
      opts_fixed = opts(pattern)),
    coll = stri_replace_first_coll(string, pattern, replacement,
      opts_collator = opts(pattern)),
    # fix_replacement() converts \1-style backreferences to ICU's $1 form.
    regex = stri_replace_first_regex(string, pattern, fix_replacement(replacement),
      opts_regex = opts(pattern))
  )
}
# Replace ALL matches of `pattern` in each element of `string`.
#
# `replacement` may be a character vector or a function/formula applied to
# each match (delegated to str_transform_all()). Alternatively, a NAMED
# `pattern` vector supplies pattern -> replacement pairs that are all
# applied to every string (vectorize_all = FALSE).
str_replace_all <- function(string, pattern, replacement) {
  if (!missing(replacement) && is_replacement_fun(replacement)) {
    replacement <- as_function(replacement)
    return(str_transform_all(string, pattern, replacement))
  }
  if (!is.null(names(pattern))) {
    # Named pattern: names are the patterns, values the replacements; the
    # pattern object keeps its engine attributes via `pattern[] <-`.
    vec <- FALSE
    replacement <- unname(pattern)
    pattern[] <- names(pattern)
  } else {
    check_lengths(string, pattern, replacement)
    vec <- TRUE
  }
  switch(type(pattern),
    # BUG FIX: error message had a stray double backtick after `pattern`.
    empty = stop("Empty `pattern` not supported", call. = FALSE),
    bound = stop("Boundary `pattern` not supported", call. = FALSE),
    fixed = stri_replace_all_fixed(string, pattern, replacement,
      vectorize_all = vec, opts_fixed = opts(pattern)),
    coll = stri_replace_all_coll(string, pattern, replacement,
      vectorize_all = vec, opts_collator = opts(pattern)),
    # fix_replacement() converts \1-style backreferences to ICU's $1 form.
    regex = stri_replace_all_regex(string, pattern, fix_replacement(replacement),
      vectorize_all = vec, opts_regex = opts(pattern))
  )
}
# TRUE when `x` can act as a replacement callback: a function, or a formula
# (rlang lambda) convertible via as_function().
is_replacement_fun <- function(x) {
  is.function(x) || is_formula(x)
}
# Validate a replacement vector and normalise each element's backreference
# syntax (via fix_replacement_one) for ICU regex replacement.
fix_replacement <- function(x) {
  # Fail fast on non-character replacements.
  if (!is.character(x)) {
    stop("`replacement` must be a character vector", call. = FALSE)
  }
  fixed <- vapply(
    X = x,
    FUN = fix_replacement_one,
    FUN.VALUE = character(1),
    USE.NAMES = FALSE
  )
  fixed
}
# Translate one replacement string from stringr's conventions to ICU's:
#   \1..\9  ->  $1..$9   (backreferences)
#   \$      ->  \\$      (escaped literal dollar)
#   $       ->  \$       (bare dollar becomes literal)
# Other backslash escapes are passed through unchanged. NA is returned as-is.
# Implemented as a per-character scan with an "in escape" flag.
fix_replacement_one <- function(x) {
  if (is.na(x)) {
    return(x)
  }
  chars <- str_split(x, "")[[1]]
  out <- character(length(chars))
  escaped <- logical(length(chars))
  in_escape <- FALSE
  for (i in seq_along(chars)) {
    escaped[[i]] <- in_escape
    char <- chars[[i]]
    if (in_escape) {
      # Character following a backslash.
      if (char == "$") {
        out[[i]] <- "\\\\$"
      } else if (char >= "0" && char <= "9") {
        # Backreference: ICU uses $N rather than \N.
        out[[i]] <- paste0("$", char)
      } else {
        # Preserve any other escape verbatim.
        out[[i]] <- paste0("\\", char)
      }
      in_escape <- FALSE
    } else {
      if (char == "$") {
        # Bare $ must be escaped for ICU, where it is a group reference.
        out[[i]] <- "\\$"
      } else if (char == "\\") {
        # Emit nothing now; the next character decides the translation.
        in_escape <- TRUE
      } else {
        out[[i]] <- char
      }
    }
  }
  paste0(out, collapse = "")
}
# Replace NA entries of `string` with the literal text `replacement`
# (defaults to "NA"); thin wrapper over stringi.
str_replace_na <- function(string, replacement = "NA") {
  stri_replace_na(string, replacement = replacement)
}
# Apply `replacement` (a function) to the FIRST match of `pattern` in each
# element of `string`, splicing the result back in place. Elements without
# a match are left untouched (omit_na = TRUE).
str_transform <- function(string, pattern, replacement) {
  loc <- str_locate(string, pattern)
  matched <- str_sub(string, loc)
  str_sub(string, loc, omit_na = TRUE) <- replacement(matched)
  string
}
str_transform_all <- function(string, pattern, replacement) {
  # Apply `replacement` (a function) to EVERY match of `pattern`, per string.
  # Matches are rewritten right-to-left so earlier match positions remain
  # valid when a replacement changes the string's length.
  locs <- str_locate_all(string, pattern)
  for (i in seq_along(string)) {
    # FIX: `loc <- locs[[i]]` was inside the inner loop, re-extracted once
    # per match; it is loop-invariant and is now hoisted.
    loc <- locs[[i]]
    for (j in rev(seq_len(nrow(loc)))) {
      str_sub(string[[i]], loc[j, 1], loc[j, 2]) <- replacement(str_sub(string[[i]], loc[j, 1], loc[j, 2]))
    }
  }
  string
}
# Validate that `arg` matches one of `values` (like base::match.arg() but
# with richer errors and "did you mean" hints). When `values` is NULL the
# candidates are recovered from the default of the matching formal in the
# calling function. With multiple = TRUE each element is matched separately.
arg_match <- function(arg,
                      values = NULL,
                      ...,
                      multiple = FALSE,
                      error_arg = caller_arg(arg),
                      error_call = caller_env()) {
  check_dots_empty0(...)
  arg_expr <- enexpr(arg)
  error_arg <- as_string(error_arg)
  # `arg` must be passed as a bare symbol so the formal default can be
  # looked up by name in the caller.
  check_symbol(arg_expr, arg = "arg", call = caller_env(), .internal = TRUE)
  check_character(arg, arg = error_arg, call = error_call)
  if (is_null(values)) {
    # Recover candidates from the caller's formal default, evaluated in the
    # caller's function environment.
    fn <- caller_fn()
    values <- formals(fn)[[error_arg]]
    values <- eval_bare(values, get_env(fn))
  }
  if (multiple) {
    return(arg_match_multi(arg, values, error_arg, error_call))
  }
  # A multi-valued `arg` is only legal when it is (a permutation of) the
  # full candidate set, i.e. the user left the default untouched.
  if (length(arg) > 1 && !setequal(arg, values)) {
    abort(
      arg_match_invalid_msg(arg, values, error_arg),
      call = error_call
    )
  }
  arg <- arg[[1]]
  arg_match0(
    arg,
    values,
    error_arg,
    error_call = error_call
  )
}
# Validate each element of `arg` against `values`, returning the matched
# values in order (errors on the first element that does not match).
arg_match_multi <- function(arg, values, error_arg, error_call) {
  vapply(
    arg,
    function(a) arg_match0(a, values, error_arg, error_call = error_call),
    character(1)
  )
}
# Fast single-value backend for arg_match(): delegates the actual matching
# (and error signalling) to rlang's C implementation.
arg_match0 <- function(arg,
                       values,
                       arg_nm = caller_arg(arg),
                       error_call = caller_env()) {
  .External(ffi_arg_match0, arg, values, arg_nm, error_call)
}
# Signal the "arg doesn't match values" error, including a spelling
# suggestion when a close candidate exists.
stop_arg_match <- function(arg, values, error_arg, error_call) {
  if (length(arg) > 1) {
    # A multi-valued `arg` is only acceptable as a permutation of `values`.
    if (!identical(sort(unique(arg)), sort(unique(values)))) {
      abort(
        sprintf(
          "%s must be length 1 or a permutation of %s.",
          format_arg("arg"),
          format_arg("values")
        ),
        call = quote(arg_match())
      )
    }
  }
  msg <- arg_match_invalid_msg(arg, values, error_arg)
  # Suggestion heuristic: partial match first, then relative edit distance
  # (case-sensitive, falling back to case-insensitive). The edit-distance
  # hit deliberately overrides the partial match.
  candidate <- NULL
  i_partial <- pmatch(arg, values)
  if (!is_na(i_partial)) {
    candidate <- values[[i_partial]]
  }
  dist <- adist(arg, values) / nchar(values)
  if (any(dist <= 0.5)) {
    candidate <- values[[which.min(dist)]]
  }
  if (is_null(candidate)) {
    dist_nocase <- adist(arg, values, ignore.case = TRUE) / nchar(values)
    if (any(dist_nocase <= 0.5)) {
      candidate <- values[[which.min(dist_nocase)]]
    }
  }
  if (!is_null(candidate)) {
    msg <- c(msg, i = paste0("Did you mean ", chr_quoted(candidate, "\""), "?"))
  }
  abort(msg, call = error_call)
}
# Build the '<arg> must be one of "a", "b", or "c", not "x".' message body;
# when `val` is NULL the ', not "x"' tail is omitted.
arg_match_invalid_msg <- function(val, values, error_arg) {
  listing <- chr_enumerate(chr_quoted(values, "\""))
  tail <- if (is_null(val)) {
    "."
  } else {
    sprintf(', not "%s\".', val[[1]])
  }
  paste0(format_arg(error_arg), " must be one of ", listing, tail)
}
check_required <- function(x,
                           arg = caller_arg(x),
                           call = caller_env()) {
  # Error when `x` (a function argument) was not supplied by the caller;
  # returns TRUE invisibly when it is present.
  if (!missing(x)) {
    # FIX: was `invisible(return(TRUE))` -- return() exits *before*
    # invisible() is applied, so the result was returned visibly.
    return(invisible(TRUE))
  }
  # `x` must be passed as a bare argument name for a meaningful message.
  arg_expr <- substitute(x)
  if (!is_symbol(arg_expr)) {
    abort(sprintf("%s must be an argument name.", format_arg("x")))
  }
  msg <- sprintf("%s is absent but must be supplied.", format_arg(arg))
  abort(msg, call = call)
}
# Wrap each element of `chr` in a pair of quoting characters (backticks by
# default); vectorized over `chr`.
chr_quoted <- function(chr, type = "`") {
  sprintf("%s%s%s", type, chr, type)
}
chr_enumerate <- function(chr, sep = ", ", final = "or") {
  # Collapse a character vector into an English enumeration:
  #   "a"             -> "a"
  #   c("a","b")      -> "a or b"
  #   c("a","b","c")  -> "a, b, or c"   (Oxford comma before `final`)
  # Vectors of length < 2 are returned unchanged.
  n <- length(chr)
  if (n < 2) {
    return(chr)
  }
  # FIX: removed a duplicated `n <- length(chr)` from the original.
  head <- paste(chr[seq_len(n - 1)], collapse = sep)
  last <- chr[[n]]
  if (n > 2) {
    paste0(head, sep, final, " ", last)
  } else {
    paste0(head, " ", final, " ", last)
  }
}
# Check that exactly one (or, with .require = FALSE, at most one) of the
# named function arguments in `...` was supplied by the caller. Returns the
# name of the supplied argument, or "" when none was supplied and
# .require = FALSE. `.frame` is the frame whose missing() status is probed.
check_exclusive <- function(...,
                            .require = TRUE,
                            .frame = caller_env(),
                            .error_call = .frame) {
  args <- enexprs(..., .named = TRUE)
  if (length(args) < 2) {
    abort("Must supply at least two arguments.")
  }
  # Only bare argument names can be checked for missingness.
  if (!every(args, is_symbol)) {
    abort("`...` must be function arguments.")
  }
  # Evaluate base::missing() for each argument inside the caller's frame.
  present <- map_lgl(args, ~ inject(!base::missing(!!.x), .frame))
  n_present <- sum(present)
  if (n_present == 0) {
    if (.require) {
      args <- map(names(args), format_arg)
      enum <- chr_enumerate(args)
      msg <- sprintf("One of %s must be supplied.", enum)
      abort(msg, call = .error_call)
    } else {
      return("")
    }
  }
  if (n_present == 1) {
    return(as_string(args[[which(present)]]))
  }
  # More than one supplied: list the offenders when not all were given.
  args <- map_chr(names(args), format_arg)
  enum <- chr_enumerate(args)
  msg <- sprintf("Exactly one of %s must be supplied.", enum)
  if (n_present != length(args)) {
    enum <- chr_enumerate(args[present], final = "and")
    msg <- c(msg, x = sprintf("%s were supplied together.", enum))
  }
  abort(msg, call = .error_call)
}
# Return the "missing argument" sentinel (R's empty symbol) via the rlang
# C API.
missing_arg <- function() {
  .Call(ffi_missing_arg)
}
# TRUE when `x` is missing in the calling sense, or is itself the
# missing-argument sentinel. missing(x) must run before `x` is touched,
# hence the short-circuit order (do not reorder).
is_missing <- function(x) {
  missing(x) || identical(x, quote(expr = ))
}
# Return `default` when `x` is missing, otherwise `x`. Lets a missing
# argument be forwarded without triggering "argument is missing" errors.
maybe_missing <- function(x, default = missing_arg()) {
  if (is_missing(x)) {
    default
  } else {
    x
  }
}
isDigit <- function(ch) switch(ch,'0'= , '1'= , '2'= , '3'= , '4'= , '5'= , '6'= , '7'= , '8'= , '9'=TRUE,FALSE) |
tumat <- function(sim, family="gaussian"){
  # Expected outcome (utility) for every adaptive treatment strategy (ATS)
  # in a SMART-style simulated summary table `sim`, combining per-path means
  # (MEAN) with transition probabilities (P1/P2/P3).
  # Presence of column O1 marks a baseline covariate (Base = 1).
  # NOTE(review): `family` is currently unused (the original only copied it
  # into an unused local, now removed); kept for interface compatibility.
  # Returns a one-column matrix with one expected value per strategy.
  Smat <- sim
  if (is.null(Smat$O1)) {Base <- 0} else {Base <- 1}
  Nstage <- nstage(data=Smat)      # number of decision stages
  Dmat <- atsscan(data=Smat)       # enumeration of strategies (rows)
  G <- nrow(Dmat)
  Uvec <- NULL
  if (Nstage==1 && Base==0) {
    for (g in 1:G) {ATS <- as.numeric(Dmat[g,2])
      ATS[which(is.na(ATS))] <- 0
      S1 <- Smat[which(Smat$A1==ATS[1]),]
      u <- S1$MEAN
      Uvec <- c(Uvec,u)
    }} else
  if (Nstage==1 && Base==1) {
    for (g in 1:G) {ATS <- as.numeric(Dmat[g,2:3])
      ATS[which(is.na(ATS))] <- 0
      S0 <- Smat[which(Smat$O1==0 & Smat$A1==ATS[1]),]
      S1 <- Smat[which(Smat$O1==1 & Smat$A1==ATS[2]),]
      u0 <- S0$MEAN; if (length(u0)==0L) {u0 <- 0}
      u1 <- S1$MEAN; if (length(u1)==0L) {u1 <- 0}
      p0 <- S0$P1; if (length(p0)==0L) {p0 <- 0}
      p1 <- S1$P1; if (length(p1)==0L) {p1 <- 0}
      u <- sum(p0*u0,p1*u1)
      Uvec <- c(Uvec,u)
    }} else
  if (Nstage==2 && Base==0) {
    for (g in 1:G){
      ATS <- as.numeric(Dmat[g,2:4])
      ATS[which(is.na(ATS))] <- 0
      S0 <- Smat[which(Smat$A1==ATS[1] &
                       Smat$O2==0 & Smat$A2==ATS[2]),]
      S1 <- Smat[which(Smat$A1==ATS[1] &
                       Smat$O2==1 & Smat$A2==ATS[3]),]
      u0 <- S0$MEAN; if (length(u0)==0L) {u0 <- 0}
      # FIX: was `u1 <- S0$MEAN` (copy-paste) -- the O2 == 1 path mean must
      # come from S1.
      u1 <- S1$MEAN; if (length(u1)==0L) {u1 <- 0}
      p0 <- S0$P2; if (length(p0)==0L) {p0 <- 0}
      p1 <- S1$P2; if (length(p1)==0L) {p1 <- 0}
      u <- sum(p0*u0,p1*u1)
      Uvec <- c(Uvec,u)
    }} else
  if (Nstage==2 && Base==1) {
    for (g in 1:G){
      ATS <- as.numeric(Dmat[g,2:8])
      ATS[which(is.na(ATS))] <- 0
      S00 <- Smat[which(Smat$O1==0 & Smat$A1==ATS[1] &
                        Smat$O2==0 & Smat$A2==ATS[3]),]
      S01 <- Smat[which(Smat$O1==0 & Smat$A1==ATS[1] &
                        Smat$O2==1 & Smat$A2==ATS[4]),]
      S10 <- Smat[which(Smat$O1==1 & Smat$A1==ATS[2] &
                        Smat$O2==0 & Smat$A2==ATS[5]),]
      S11 <- Smat[which(Smat$O1==1 & Smat$A1==ATS[2] &
                        Smat$O2==1 & Smat$A2==ATS[6]),]
      u00 <- S00$MEAN; if (length(u00)==0L) {u00 <- 0}
      u01 <- S01$MEAN; if (length(u01)==0L) {u01 <- 0}
      u10 <- S10$MEAN; if (length(u10)==0L) {u10 <- 0}
      u11 <- S11$MEAN; if (length(u11)==0L) {u11 <- 0}
      p0 <- S01$P1; if (length(p0)==0L) {p0 <- 0}
      p1 <- S11$P1; if (length(p1)==0L) {p1 <- 0}
      p00 <- S00$P2; if (length(p00)==0L) {p00 <- 0}
      p01 <- S01$P2; if (length(p01)==0L) {p01 <- 0}
      # FIX: p10/p11 read S00/S01 in the original (copy-paste); the O1 == 1
      # stage-2 probabilities must come from S10/S11.
      p10 <- S10$P2; if (length(p10)==0L) {p10 <- 0}
      p11 <- S11$P2; if (length(p11)==0L) {p11 <- 0}
      u <- sum(p0*p00*u00,p0*p01*u01,p1*p10*u10,p1*p11*u11)
      Uvec <- c(Uvec,u)
    }} else
  if (Nstage==3 && Base==0) {
    for (g in 1:G){
      ATS <- as.numeric(Dmat[g,2:8])
      ATS[which(is.na(ATS))] <- 0
      S00 <- Smat[which(Smat$A1==ATS[1] & Smat$O2==0 &
                        Smat$A2==ATS[2] & Smat$O3==0 & Smat$A3==ATS[4]),]
      S01 <- Smat[which(Smat$A1==ATS[1] & Smat$O2==0 &
                        Smat$A2==ATS[2] & Smat$O3==1 & Smat$A3==ATS[5]),]
      S10 <- Smat[which(Smat$A1==ATS[1] & Smat$O2==1 &
                        Smat$A2==ATS[3] & Smat$O3==0 & Smat$A3==ATS[6]),]
      S11 <- Smat[which(Smat$A1==ATS[1] & Smat$O2==1 &
                        Smat$A2==ATS[3] & Smat$O3==1 & Smat$A3==ATS[7]),]
      u00 <- S00$MEAN; if (length(u00)==0L) {u00 <- 0}
      u01 <- S01$MEAN; if (length(u01)==0L) {u01 <- 0}
      u10 <- S10$MEAN; if (length(u10)==0L) {u10 <- 0}
      u11 <- S11$MEAN; if (length(u11)==0L) {u11 <- 0}
      p0 <- S01$P2; if (length(p0)==0L) {p0 <- 0}
      p1 <- S11$P2; if (length(p1)==0L) {p1 <- 0}
      p00 <- S00$P3; if (length(p00)==0L) {p00 <- 0}
      p01 <- S01$P3; if (length(p01)==0L) {p01 <- 0}
      # FIX: same copy-paste as the 2-stage branch -- stage-3 probabilities
      # for the O2 == 1 paths must come from S10/S11, not S00/S01.
      p10 <- S10$P3; if (length(p10)==0L) {p10 <- 0}
      p11 <- S11$P3; if (length(p11)==0L) {p11 <- 0}
      u <- sum(p0*p00*u00,p0*p01*u01,p1*p10*u10,p1*p11*u11)
      Uvec <- c(Uvec,u)
    }}
  Umat <- matrix(Uvec,ncol=1)
  return(Umat)
}
# R6 class: a variable-to-field mapping table with "single choice" rows --
# each variable (row) is assigned to exactly one field (column). The table
# is a 0/1 integer matrix stored in `mapping_table` (rows = variables,
# cols = fields).
Single_Mptbl<-R6Class("Single_Mptbl",
public =list(
field= NULL,
variable= NULL,
default_field= NULL,
preselection_field= NULL,
mapping_table= NULL,
# Store the configuration; the matrix itself is built by create_mptbl().
initialize = function(field=NULL,
variable=NULL,
default_field=NULL,
preselection_field=NULL,
mapping_table=NULL
) {
self$field=field
self$variable=variable
self$default_field=default_field
self$preselection_field=preselection_field
self$mapping_table=mapping_table
},
# Allocate the (variable x field) 0/1 matrix, all zeros.
create_mptbl= function() {
nr<-length(self$variable)
nc<-length(self$field)
local_mptbl<-matrix(as.integer(rep(0,nc*nr)),nrow=nr,ncol=nc, byrow = TRUE)
colnames(local_mptbl)<-self$field
rownames(local_mptbl)<-self$variable
self$mapping_table <- local_mptbl
local_mptbl<-NULL
},
# Enforce the single-choice invariant: any row whose selection count is
# not exactly 1 is reset and assigned to `default_field`.
mptbl_select_default_field= function(default_field) {
local_mptbl<-self$mapping_table
nr<-nrow(local_mptbl)
nc<-ncol(local_mptbl)
for (i in seq_len(nr)){
if(sum(local_mptbl[i,])!=1){
local_mptbl[i,]<-rep(0,nc)
local_mptbl[i,default_field]<- 1
}
}
self$mapping_table <- local_mptbl
local_mptbl<-NULL
},
# Placeholder -- preselection is not implemented for single-choice tables.
mptbl_select_preselection_field= function() {
},
# Toggle each (row, col, value) triple in `selection_inf` and delegate the
# actual state change to mptbl_event_fill().
# NOTE(review): the edits to `local_mptbl` here are never written back to
# self$mapping_table -- all persistent changes happen inside
# mptbl_event_fill(); looks intentional, but verify.
mptbl_event_select= function(selection_inf) {
aaa<-selection_inf
local_mptbl<-self$mapping_table
for (i in 1:nrow(selection_inf)){
n_row<-as.integer(selection_inf[i,1])
n_col<-as.integer(selection_inf[i,2])
if (selection_inf[i,3]==0 ) {
local_mptbl[n_row,n_col]<-1
selection_inf[i,3]<-1
self$mptbl_event_fill(selection_inf)
}else{
if (selection_inf[i,3]==1 ) {
local_mptbl[n_row,n_col]<-0
selection_inf[i,3]<-0
self$mptbl_event_fill(selection_inf)
}
}
}
local_mptbl<-NULL
},
# Apply a fill over the row range of `fill_inf` in one column:
#  * value 0: clear that column over the range, then re-apply the default
#    field so the single-choice invariant holds;
#  * value 1: zero the whole range and set only that column to 1.
mptbl_event_fill= function(fill_inf) {
local_mptbl<-self$mapping_table
select_row_fist<-fill_inf[1,1]
select_row_end<-fill_inf[nrow(fill_inf),1]
select_col<-as.integer(c(fill_inf[1,2]))
select_mptal_range<-local_mptbl[select_row_fist:select_row_end,,drop=F]
nr<-nrow(select_mptal_range)
nc<-ncol(select_mptal_range)
if (fill_inf[1,3]==0){
select_mptal_range[,select_col]<-rep(0,nrow(select_mptal_range))
local_mptbl[select_row_fist:select_row_end,]<-select_mptal_range
self$mapping_table <- local_mptbl
local_mptbl<-NULL
self$mptbl_select_default_field(self$default_field)
}
else
{
select_mptal_range[]<-matrix(rep(0,nr*nc),nrow=nr,ncol=nc)
select_mptal_range[,select_col]<-rep(1,nr)
local_mptbl[select_row_fist:select_row_end,]<-select_mptal_range
self$mapping_table <- local_mptbl
}
}
)
)
# R6 class: "multiple choice" variant of Single_Mptbl -- a row may have any
# number of selected fields, so toggles are written back directly and fills
# do not zero the other columns of the range.
Multiple_Mptbl<-R6Class("Multiple_Mptbl",
inherit =Single_Mptbl,
public =list(
# Toggle each (row, col, value) triple directly in the stored matrix
# (unlike Single_Mptbl, which routes every toggle through a fill).
mptbl_event_select= function(selection_inf) {
aaa<-selection_inf
local_mptbl<-self$mapping_table
for (i in 1:nrow(selection_inf)){
n_row<-as.integer(selection_inf[i,1])
n_col<-as.integer(selection_inf[i,2])
if (selection_inf[i,3]==0 ) {
local_mptbl[n_row,n_col]<-1
}else{
if (selection_inf[i,3]==1 ) {
local_mptbl[n_row,n_col]<-0
}
}
}
self$mapping_table <- local_mptbl
local_mptbl<-NULL
},
# Fill one column over the row range of `fill_inf`:
#  * value 0: clear the column over the range, then re-apply the default
#    field (inherited invariant repair);
#  * value 1: set the column to 1 over the range, leaving other columns
#    untouched (multiple selections per row are allowed).
mptbl_event_fill= function(fill_inf) {
local_mptbl<-self$mapping_table
select_row_fist<-fill_inf[1,1]
select_row_end<-fill_inf[nrow(fill_inf),1]
select_col<-as.integer(c(fill_inf[1,2]))
select_mptal_range<-local_mptbl[select_row_fist:select_row_end,,drop=F]
nr<-nrow(select_mptal_range)
nc<-ncol(select_mptal_range)
if (fill_inf[1,3]==0){
select_mptal_range[,select_col]<-rep(0,nrow(select_mptal_range))
local_mptbl[select_row_fist:select_row_end,]<-select_mptal_range
self$mapping_table <- local_mptbl
local_mptbl<-NULL
self$mptbl_select_default_field(self$default_field)
}
else
{
select_mptal_range[,select_col]<-rep(1,nr)
local_mptbl[select_row_fist:select_row_end,]<-select_mptal_range
self$mapping_table <- local_mptbl
}
}
)
)
# R6 class: a composite mapping table whose columns are organized into
# groups (row 2 of `field_groups`). Even-numbered groups behave as
# Multiple_Mptbl (multi-select), odd-numbered ones as Single_Mptbl
# (single-select). Also builds a DT (DataTables) HTML header container.
Mapping_Table_class<-R6Class("Mapping_Table",
public =list(
field_groups= NULL,
variable= NULL,
default_field= NULL,
preselection_field= NULL,
field_index_transform= NULL,
mapping_table= NULL,
inf_of_mptbl= NULL,
field_right_bound= NULL,
DT_container= NULL,
initialize = function(field_groups=NULL,
variable=NULL,
default_field=NULL,
preselection_field=NULL,
field_index_transform=NULL,
mapping_table=NULL,
inf_of_mptbl=NULL,
field_right_bound=NULL,
DT_container=NULL
){self$field_groups=field_groups
self$variable=variable
self$default_field=default_field
self$preselection_field=preselection_field
self$field_index_transform=field_index_transform
self$mapping_table=mapping_table
self$inf_of_mptbl=inf_of_mptbl
self$field_right_bound=field_right_bound
self$DT_container=DT_container
},
# Allocate the full (variable x field) 0/1 matrix, select defaults and
# derive the display metadata (group boundaries, DT header HTML).
create_mptbl= function() {
nr<-length(self$variable)
nc<-length(self$field_groups[1,])
local_mptbl<-matrix(as.integer(rep(0,nc*nr)),nrow=nr,ncol=nc, byrow = TRUE)
colnames(local_mptbl)<-self$field_groups[1,]
rownames(local_mptbl)<-self$variable
self$mapping_table <- local_mptbl
local_mptbl<-NULL
self$mptbl_select_default_field(self$default_field)
self$set_field_right_bound(self$field_groups)
self$set_DT_container(self$field_groups)
},
# Build one sub-table object per group id.
# NOTE(review): the assign()ed objects are local to this method and are
# discarded on return -- get_sub_group() rebuilds them on demand.
assign_field_by_field_group=function() {
field_value<-self$field_groups[1,]
field_group<-as.integer(c(self$field_groups[2,]))
field_display_group<-self$field_groups[3,]
# FIX: was unique(self$field_group) -- no such field exists (NULL), so the
# loop below never ran; use the local vector computed above.
group<-unique(field_group)
for (i in group){
if (i%%2 ==0){
group_name<-paste("multiple",i,sep ="")
group_index<-c(field_group==i)
assign(group_name,Multiple_Mptbl$new(mapping_table=self$mapping_table[,group_index,drop=F]))
}else{
group_name<-paste("single",i,sep ="")
group_index<-c(field_group==i)
# FIX: odd groups are single-select; the original constructed a
# Multiple_Mptbl here, inconsistent with get_sub_group().
assign(group_name,Single_Mptbl$new(mapping_table=self$mapping_table[,group_index,drop=F]))
}
}
},
# Mark `default_field` as selected for every variable.
mptbl_select_default_field = function(default_field) {
self$mapping_table[,default_field]<-1L
},
# Apply a preselection expressed as a selection-info matrix.
mptbl_select_preselection_field = function(preselection_field) {
self$mptbl_event_select(preselection_field)
},
# Route each (row, global-col, value) selection to the sub-table owning
# that column, translating the global column index to the group-local one
# via row 4 of field_index_transform, then write the result back.
mptbl_event_select= function(selection_inf) {
for (j in 1:nrow(selection_inf)){
i_num<-selection_inf[j,2]
i<-as.integer(self$field_groups[2,i_num])
if (i%%2 ==0){
group_name<-paste("multiple",i,sep ="")
assign(group_name,self$get_sub_group(group_name))
selection_inf[j,2]<-self$field_index_transform[4,i_num]
local_default_field<-as.integer(self$field_index_transform[4,self$default_field])
local_obj<-get(group_name)
local_obj$default_field<-local_default_field
local_obj$mptbl_event_select(selection_inf[j,,drop=F])
self$set_mptabl(group_name,local_obj$mapping_table)
}else{
group_name<-paste("single",i,sep ="")
assign(group_name,self$get_sub_group(group_name))
selection_inf[j,2]<-self$field_index_transform[4,i_num]
local_default_field<-as.integer(self$field_index_transform[4,self$default_field])
local_obj<-get(group_name)
local_obj$default_field<-local_default_field
local_obj$mptbl_event_select(selection_inf[j,,drop=F])
self$set_mptabl(group_name,local_obj$mapping_table)
}
}
},
# Route a fill event to the owning sub-table (same index translation as
# mptbl_event_select) and write the result back.
mptbl_event_fill= function(fill_inf) {
fill_inf<-as.matrix(fill_inf,ncol=3)
i_num<-fill_inf[1,2]
i<-as.integer(self$field_groups[2,i_num])
if (i%%2 ==0){
group_name<-paste("multiple",i,sep ="")
assign(group_name,self$get_sub_group(group_name))
# FIX: field_index_transform was referenced without self$ in this method
# (four occurrences), raising "object not found" at run time; the parallel
# code in mptbl_event_select() uses self$.
fill_inf[,2]<-self$field_index_transform[4,i_num]
local_default_field<-as.integer(self$field_index_transform[4,self$default_field])
local_obj<-get(group_name)
local_obj$default_field<-local_default_field
local_obj$mptbl_event_fill(fill_inf)
self$set_mptabl(group_name,local_obj$mapping_table)
}else{
group_name<-paste("single",i,sep ="")
assign(group_name,self$get_sub_group(group_name))
fill_inf[,2]<-as.integer(self$field_index_transform[4,i_num])
local_default_field<-as.integer(self$field_index_transform[4,self$default_field])
local_obj<-get(group_name)
local_obj$default_field<-local_default_field
local_obj$mptbl_event_fill(fill_inf)
self$set_mptabl(group_name,local_obj$mapping_table)
}
},
# Construct the sub-table object ("singleN"/"multipleN") for one group and
# refresh row 4 of field_index_transform with the group-local column
# indices. The group id is parsed back out of the object name.
get_sub_group=function(group_name) {
field_group<-as.integer(c(self$field_groups[2,]))
aaab<-rbind(self$field_groups,NA)
self$field_index_transform<-aaab
get_group<-function(group_name){
if (substring(group_name,1,1)=="s") {
substring(group_name,7,8)
}else{
if (substring(group_name,1,1)=="m") substring(group_name,9,10)
}
}
group<-get_group(group_name)
i<-as.integer(group)
if (i%%2 ==0){
group_index<-c(field_group==i)
assign(group_name,Multiple_Mptbl$new(mapping_table=self$mapping_table[,group_index,drop=F]))
self$field_index_transform[4,group_index]<-c(1:sum(group_index))
}else{
group_index<-c(field_group==i)
assign(group_name,Single_Mptbl$new(mapping_table=self$mapping_table[,group_index,drop=F]))
self$field_index_transform[4,group_index]<-c(1:sum(group_index))
}
get(group_name)
},
# Write a sub-table's matrix back into the columns of its group and
# refresh the long-form representation.
set_mptabl = function(group_name,mptbl) {
field_group<-as.integer(c(self$field_groups[2,]))
get_group<-function(group_name){
if (substring(group_name,1,1)=="s") {
substring(group_name,7,8)
}else{
if (substring(group_name,1,1)=="m") substring(group_name,9,10)
}
}
group<-as.integer(get_group(group_name))
group_index<-c(field_group==group)
self$mapping_table[,group_index]<-mptbl
self$mptbl_to_inf(self$mapping_table)
},
# Flatten the matrix into a long (row, col, value) matrix for the UI.
mptbl_to_inf= function(local_mapping_table) {
nr<-nrow(local_mapping_table)
nc<-ncol(local_mapping_table)
m<-matrix(as.numeric(rep(0,nr*nc*3)),nrow=nr*nc,ncol=3, byrow = TRUE)
colnames(m)<-c("row","col","value")
k<-1
for (i in seq_len(nr)) {
for (j in seq_len(nc)) {
m[k,1] = i
m[k,2] = j
m[k,3] = local_mapping_table[i,j]
k<-k+1
}
}
self$inf_of_mptbl<-m
},
# Column indices where the display group (row 3) changes -- used to draw
# vertical separators in the table.
set_field_right_bound = function(field_groups) {
field_display_group<-field_groups[3,]
v<-c()
for (i in 1: (length(field_display_group)-1)){
if (field_display_group[i]!=field_display_group[i+1]){
v<-c(v,i)
}
}
self$field_right_bound<-v
},
# Build the two-row DataTables HTML header: display-group headers with
# colspans on the first row, field names on the second.
set_DT_container = function(field_groups) {
field_value<-field_groups[1,]
field_display_group<-field_groups[3,]
group<-unique(field_display_group)
n_of_group<-length(group)
container_head<-c("<table class = 'display'> <thead>")
container_foot<-c("</thead></table>")
container_body1_element<-NULL
for (i in 1:length(group)){
container_body1_element[i]<-paste(sep="",collapse = "",
"<th ",
"colspan =",sum(c(field_display_group==group[i]))," ",
"class='sorting_disabled dt-center' ",
"style='border-right: solid;'>",
"",group[i],"",
"</th>"
)
}
container_body1_element<-c(c("<th rowspan = 2 ></th>"),container_body1_element)
container_body1_es<-paste(sep="",collapse = "",container_body1_element)
container_body1<-paste(sep="",collapse = "","<tr>",container_body1_es,"</tr>")
container_body2_element<-NULL
for (i in 1:length(field_value)){
container_body2_element[i]<-paste(sep="",collapse = "","<th>","",field_value[i],"","</th>")
}
container_body2_es<-paste(sep="",collapse = "",container_body2_element)
# FIX: closing tag was "</r>" -- invalid HTML; the row must end in "</tr>".
container_body2<-paste(sep="",collapse = "","<tr>",container_body2_es,"</tr>")
container_value<-paste(sep="",collapse = "",container_head,container_body1,"",container_body2,container_foot)
self$DT_container<-container_value
}
)
)
# Build a sound speed profile (SSP) for each row of `x` (a data.frame with
# UTC, Longitude, Latitude), pulling temperature/salinity-by-depth from an
# oceanographic NetCDF source (defaults to PAMmisc's HYCOM list) and
# converting to sound speed with wasp() at frequency `f` (Hz).
# Returns a list (one element per row) of list(speed=, depth=), or NULL
# with a warning when required columns are absent.
createSSP <- function(x, f=30e3, nc=NULL, ncVars=c('salinity', 'water_temp'), dropNA=TRUE) {
  if(!all(c('UTC', 'Longitude', 'Latitude') %in% names(x))) {
    warning('Need UTC, Longitude, and Latitude columns')
    return(NULL)
  }
  if(is.null(nc)) {
    # Default env-data source; restrict download to the variables we need.
    nc <- PAMmisc::hycomList
    nc$varSelect <- nc$vars %in% ncVars
  }
  result <- vector('list', length=nrow(x))
  # NOTE(review): assumes matchEnvData(raw=TRUE) returns a list-like with
  # one element per input row, each holding the matched variables plus
  # matchDepth -- confirm against PAMmisc::matchEnvData.
  x <- matchEnvData(x, nc=nc, raw=TRUE)
  for(i in seq_along(result)) {
    # wasp: t = temperature, s = salinity, d = depth; 'c' is sound speed.
    ssp <- wasp(f=f, t=x[[i]][[ncVars[2]]], s=x[[i]][[ncVars[1]]], d=x[[i]]$matchDepth, medium='sea')
    if(dropNA) {
      # Drop depth bins where sound speed could not be computed.
      keepers <- !is.na(ssp$c)
      result[[i]] <- list(speed=ssp$c[keepers], depth=x[[i]]$matchDepth[keepers])
    } else {
      result[[i]] <- list(speed=ssp$c, depth=x[[i]]$matchDepth)
    }
  }
  result
}
# Generic: add sublink tracks (feature-to-feature links, e.g. protein
# alignments) to a gggenomes plot or layout. Methods dispatch on `x`.
add_sublinks <- function(x, ..., .track_id = "genes", .transform = "aa2nuc"){
  UseMethod("add_sublinks")
}
# gggenomes method: delegate to the layout stored in the plot's data slot
# and return the (modified) plot object.
add_sublinks.gggenomes <- function(x, ..., .track_id = "genes",
                                   .transform = "aa2nuc"){
  x$data <- add_sublinks(x$data, ..., .track_id = {{ .track_id }},
                         .transform = .transform)
  x
}
# Layout method: capture the link tables passed in `...`, optionally
# rescale their coordinates (aa2nuc: amino acid -> nucleotide positions),
# and attach them as link tracks anchored to `.track_id`'s features.
add_sublinks.gggenomes_layout <- function(x, ..., .track_id = "genes",
                                          .transform = c("aa2nuc", "none","nuc2aa")){
  if(!has_dots()) return(x)
  dot_exprs <- enexprs(...)
  # NOTE(review): match_arg (underscore) -- presumably a package-local or
  # rlang-style variant of base match.arg; confirm it exists in this package.
  .transform <- match_arg(.transform)
  if(.transform != "none")
    inform(str_glue('Transforming sublinks with "{.transform}".',
                    ' Disable with `.transform = "none"`'))
  tracks <- as_tracks(list(...), dot_exprs, track_ids(x))
  add_sublink_tracks(x, {{.track_id}}, tracks, .transform)
}
# Convert each captured table into sublinks anchored on the parent feature
# track, lay them out against the current sequences, and append both the
# laid-out and original link tracks to the layout.
add_sublink_tracks <- function(x, parent_track_id, tracks, transform){
  feats <- pull_track(x, {{parent_track_id}})
  links <- map(tracks, as_sublinks, get_seqs(x), feats, transform = transform,
               compute_layout=FALSE)
  x$links <- c(x$links, map(links, layout_links, get_seqs(x)))
  x$orig_links <- c(x$orig_links, map(links, as_orig_links, get_seqs(x)))
  x
}
# Generic: coerce `x` into a sublinks table (links between features rather
# than between sequences). Methods dispatch on the class of `x`.
as_sublinks <- function(x, seqs, feats, ..., everything=TRUE){
  UseMethod("as_sublinks")
}
# Fallback method: coerce `x` to a tibble and re-dispatch to the tbl_df
# method.
# FIX: the original forwarded only `...`, silently dropping `seqs`, `feats`
# and `everything`, so the tbl_df method (which requires seqs/feats) could
# not work; forward them explicitly.
as_sublinks.default <- function(x, seqs, feats, ..., everything=TRUE) {
  as_sublinks(as_tibble(x), seqs, feats, ..., everything = everything)
}
# tbl_df method: normalize a table of feature-to-feature links.
# Requires feat_id/feat_id2; start/end pairs are either all present (local
# coordinates within each feature) or all absent (whole-feature links).
# Coordinates are optionally rescaled (aa2nuc / nuc2aa) and then projected
# from feature-local to sequence coordinates using the parent features'
# strand and extent. Returns the tibble, laid out unless
# compute_layout = FALSE.
as_sublinks.tbl_df <- function(x, seqs, feats, ..., everything=TRUE,
    transform = c("none", "aa2nuc", "nuc2aa"), compute_layout=TRUE){
  transform <- match.arg(transform)
  vars <- c("feat_id","feat_id2")
  require_vars(x, vars)
  require_vars(feats, "feat_id")
  x <- mutate_at(x, vars(feat_id, feat_id2), as.character)
  if(!has_vars(x, c("start", "end", "start2", "end2"))){
    if(has_vars(x, c("start", "end", "start2", "end2"),any=TRUE)){
      # FIX: message said "start,fend1,start2,end2" -- "fend1" was a typo
      # for "end".
      abort("Need either all of start,end,start2,end2 or none!")
    }
    # No coordinates given: link the full extent of each feature, taking
    # start/end/strand from the parent feature tables.
    x <- x %>%
      left_join(select(feats, feat_id=feat_id, seq_id=seq_id, .feat_start=start,
        .feat_end = end, .feat_strand = strand), by = shared_names(x, "seq_id", "feat_id")) %>%
      mutate(
        start = .feat_start, end = .feat_end,
        .feat_start=NULL, .feat_end=NULL) %>%
      left_join(select(feats, feat_id2=feat_id, seq_id2=seq_id, .feat_start=start,
        .feat_end = end, .feat_strand2 = strand), by = shared_names(x, "seq_id2", "feat_id2")) %>%
      mutate(
        start2 = .feat_start, end2 = .feat_end,
        strand = strand_chr(.feat_strand == .feat_strand2),
        .feat_start=NULL, .feat_end=NULL, .feat_strand=NULL, .feat_strand2=NULL)
    vars <- c("feat_id", "start", "end", "feat_id2", "start2", "end2")
    other_vars <- if(everything) tidyselect::everything else function() NULL;
    x <- as_tibble(select(x, vars, other_vars()))
  }else{
    vars <- c("feat_id", "start", "end", "feat_id2", "start2", "end2")
    other_vars <- if(everything) tidyselect::everything else function() NULL;
    x <- as_tibble(select(x, vars, other_vars()))
    x %<>% mutate_if(is.factor, as.character)
    # Infer relative strand from coordinate orientation unless given.
    if(!has_name(x, "strand")){
      x$strand <- strand_chr((x$start < x$end) == (x$start2 < x$end2))
    }else{
      x$strand <- strand_chr(x$strand)
    }
    x <- x %>% swap_if(start > end, start, end)
    x <- x %>% swap_if(start2 > end2, start2, end2)
    # Rescale between amino-acid and nucleotide coordinates if requested.
    if(transform != "none"){
      transform <- switch(transform,
        aa2nuc = ~3*.x-2,
        nuc2aa = ~(.x+2)/3)
      x <- mutate(x, across(c(start, end, start2, end2), transform))
    }
    # Project feature-local coordinates onto the parent sequence, honoring
    # the feature's strand, for both link ends.
    feats <- select(feats, feat_id, seq_id, bin_id,
      .feat_start=start, .feat_end=end, .feat_strand=strand)
    x <- x %>%
      inner_join(feats, by = shared_names(x, "seq_id", "bin_id", "feat_id")) %>%
      mutate(
        start = if_reverse(.feat_strand, .feat_end-start, .feat_start+start),
        end = if_reverse(.feat_strand, .feat_end-end, .feat_start+end),
        .feat_start=NULL, .feat_end=NULL, .feat_strand=NULL)
    feats <- rename_with(feats, ~paste0(.x,"2"))
    x <- x %>%
      inner_join(feats, by = shared_names(x, "seq_id2", "bin_id2", "feat_id2")) %>%
      mutate(
        start2 = if_reverse(.feat_strand2, .feat_end2-start2, .feat_start2+start2),
        end2 = if_reverse(.feat_strand2, .feat_end2-end2, .feat_start2+end2),
        .feat_start2=NULL, .feat_end2=NULL, .feat_strand2=NULL)
    x$strand <- strand_chr((x$start < x$end) == (x$start2 < x$end2))
    x <- x %>% swap_if(start > end, start, end)
    x <- x %>% swap_if(start2 > end2, start2, end2)
  }
  if(compute_layout)
    layout_links(x, seqs, ...)
  else
    x
}
hard.hmac <- function(hmacobj, level=NULL, n.cluster=NULL, plot=TRUE, colors=1:6, ...){
  # Extract (and optionally plot) hard cluster memberships from an 'hmac'
  # fit, either at a given `level` of the hierarchy or at the level having
  # `n.cluster` clusters. With plot = FALSE the membership vector is
  # returned instead of plotted.
  #
  # FIX: class(hmacobj)!="hmac" replaced by inherits() -- the original form
  # breaks (length > 1 condition) for objects with multiple classes.
  if (!inherits(hmacobj, "hmac")) stop(" Your object is not of class hmac")
  # Nothing specified and no plot requested: default to level 1 so a
  # membership vector can still be returned below.
  if ((is.null(level) + is.null(n.cluster) + !isTRUE(plot)) == 3) level <- 1
  if (is.null(level) && is.null(n.cluster)) {
    # No selection at all: plot every level, prompting between plots.
    # FIX: restore the user's par("ask") setting (previously forced to
    # FALSE), and restore it even if an error interrupts the loop.
    oask <- par(ask = TRUE)
    on.exit(par(oask), add = TRUE)
    cat("As no specific level is specified plots are provided for all levels", unique(hmacobj$level), "\n\n")
    for (i in 1:max(hmacobj$level)) {
      cat("Level", i, "..")
      hard.hmac(hmacobj, level = i)
    }
  }
  else {
    if (is.null(level) && !is.null(n.cluster)) {
      # Find the highest level having at least n.cluster clusters; report
      # when no level has exactly that count.
      levels <- hmacobj$level[which(hmacobj$n.cluster >= n.cluster)]
      if (length(levels) == 0) {
        stop("Cannot find a level with ", n.cluster, " clusters \n Choose the number of clusters between ",
             min(hmacobj$n.cluster), " and ", max(hmacobj$n.cluster))
      }
      else {
        level <- max(levels)
        if (hmacobj$n.cluster[min(which(hmacobj$level == level))] == n.cluster)
          cat("The level at which there are", n.cluster, "clusters is", level, "\n")
        else {
          cat("There are no levels with exactly", n.cluster, "clusters\n")
          cat("The level at which there are", hmacobj$n.cluster[min(which(hmacobj$level == level))], "clusters is", level, "\n")
        }
      }
    }
    if (level > max(hmacobj$level)) stop("Provide a level not greater than ", max(hmacobj$level))
    member <- hmacobj$membership[[level]]
    hmacobj$dat <- as.matrix(hmacobj$dat)
    if (isTRUE(plot)) {
      if (dim(hmacobj$dat)[2] == 1) {
        # Univariate data: kernel density with cluster members marked on
        # the x-axis, one color per cluster.
        plot(density(hmacobj$dat, bw = apply(hmacobj$dat, 2, sd) * min(hmacobj$sigmas[which(level == hmacobj$level)])), main = "", xlab = "Data")
        for (k in 1:max(unique(member))) {
          points(hmacobj$dat[member == k], rep(0, sum(member == k)), pch = "|", col = k, main = NULL)
        }
      }
      else plot(data.frame(hmacobj$dat), col = colors[member], ...)
      title(main = "Hard clustering produced by HMAC \n Colors represent different clusters", cex.main = 1, sub = paste("Level", level, "with number of clusters", length(unique(member))))
    }
    else return(member)
  }
}
# Variance expression for a simulated proportion-type response with mean
# `mu` and dispersion-like parameter `Sigma`: the binomial-style term
# mu*(1-mu) minus a correction built from the upper gamma tail.
# NOTE(review): formula transcribed as-is from the original; the underlying
# model is not visible here -- confirm against the package's theory docs.
var.sim <- function(mu, Sigma) {
  base_var <- mu * (1 - mu)
  denom <- Sigma * mu^2 * (1 - mu)^2
  upper_tail <- 1 - pgamma(1/(2 * Sigma * mu^2 * (1 - mu)^2), 1/2)
  correction <- sqrt(exp(1/denom)) * gamma(1/2) * upper_tail
  base_var - correction/sqrt(2 * Sigma)
}
# Monte Carlo unit test for the Zelig 'normalsurvey' model: z$mcunit()
# returns TRUE when simulated estimates match theoretical expectations.
test_that('REQUIRE TEST normalsurvey Monte Carlo', {
  z <- znormalsurvey$new()
  test.normalsurvey <- z$mcunit(plot = FALSE)
  expect_true(test.normalsurvey)
})
# Round-trip check: a survey-weighted glm (svyglm on the api data) should
# pass through setx/sim/plot without error (expect_error(..., NA) asserts
# that NO error is raised).
test_that('REQUIRE TEST to_zelig for normalsurvey', {
  data(api)
  dstrat <- svydesign(id = ~1, strata = ~stype, weights = ~pw, data = apistrat,
                      fpc = ~fpc)
  m1 <- svyglm(api00 ~ ell + meals + mobility, design = dstrat)
  expect_error(plot(sim(setx(m1))), NA)
})
HDcvlars <- function(X, y, nbFolds = 10, index = seq(0, 1, by = 0.01), mode = c("fraction", "lambda"), maxSteps = 3 * min(dim(X)), partition = NULL, intercept = TRUE, eps = .Machine$double.eps^0.5)
{
  # k-fold cross-validation along the LARS path (C backend "cvlars").
  # Returns an object of class "HDcvlars" with the CV curve (cv), its
  # standard error (cvError), the minimizing index, and the settings used.
  mode <- match.arg(mode)
  if (missing(X))
    stop("X is missing.")
  if (missing(y))
    stop("y is missing.")
  index <- unique(index)
  .checkcvlars(X, y, maxSteps, eps, nbFolds, index, intercept, mode)
  if (is.null(partition)) {
    # Sentinel value: the C code draws its own folds.
    partition <- -1
  } else {
    if (!is.numeric(partition) || !is.vector(partition))
      stop("partition must be a vector of integer.")
    if (length(partition) != length(y))
      stop("partition and y must have the same size.")
    fold_sizes <- table(partition)
    nbFolds <- length(fold_sizes)
    # Folds must be balanced to within one observation.
    if (max(fold_sizes) - min(fold_sizes) > 1)
      stop("Size of different folds are not good.")
    fold_names <- as.numeric(names(fold_sizes))
    # Fold labels must be exactly 1..nbFolds.
    for (k in seq_along(fold_names)) {
      if (!(fold_names[k] == k))
        stop("check the number in the partition vector.")
    }
    # Relabel so that larger folds get the smaller ids, then 0-base for C.
    ord <- order(fold_sizes, decreasing = TRUE)
    old_partition <- partition
    for (k in seq_len(nbFolds))
      partition[old_partition == ord[k]] <- k
    partition <- partition - 1
  }
  lambdaMode <- (mode == "lambda")
  val <- .Call("cvlars", X, y, nrow(X), ncol(X), maxSteps, intercept, eps, nbFolds, partition, index, lambdaMode, PACKAGE = "HDPenReg")
  cv <- list(cv = val$cv, cvError = val$cvError, minCv = min(val$cv), minIndex = index[which.min(val$cv)], index = index, maxSteps = maxSteps, mode = mode)
  class(cv) <- "HDcvlars"
  return(cv)
}
# Validate all HDcvlars() inputs, stopping with an informative message on
# the first violated constraint. Called purely for its side effect.
.checkcvlars <- function(X, y, maxSteps, eps, nbFolds, index, intercept, mode)
{
  if (!is.numeric(X) || !is.matrix(X))
    stop("X must be a matrix of real")
  if (!is.numeric(y) || !is.vector(y))
    stop("y must be a vector of real")
  if (length(y) != nrow(X))
    stop("The number of rows of X doesn't match with the length of y")
  # maxSteps: strictly positive whole number.
  if (!.is.wholenumber(maxSteps) || maxSteps <= 0)
    stop("maxSteps must be a positive integer")
  # nbFolds: strictly positive whole number, at most one fold per sample.
  if (!.is.wholenumber(nbFolds) || nbFolds <= 0)
    stop("nbFolds must be a positive integer")
  if (nbFolds > length(y))
    stop("nbFolds must be lower than the number of samples")
  if (!is.double(eps) || eps <= 0)
    stop("eps must be a positive real")
  if (!is.numeric(index) || !is.vector(index))
    stop("index must be a vector")
  # Range of `index` depends on the regularization mode.
  if ((mode == "fraction") && (max(index) > 1 || min(index) < 0))
    stop("index must be a vector of real between 0 and 1")
  if ((mode == "lambda") && (min(index) < 0))
    stop("index must be a vector of positive real")
  if (!is.logical(intercept))
    stop("intercept must be a boolean")
}
plot.HDcvlars <- function(x, ...)
{
  # Plot the cross-validated MSE curve of an HDcvlars fit, with +/- 1 SE
  # bands (dashed) and a vertical line at the minimizing index. For
  # mode == "lambda" the x-axis is log(lambda). Returns NULL invisibly.
  if (missing(x))
    stop("x is missing.")
  # FIX: was `class(x) != "HDcvlars"` -- comparing class() with != fails
  # (length > 1 condition) for objects carrying additional classes; use
  # inherits() instead.
  if (!inherits(x, "HDcvlars"))
    stop("x must be an output of the HDcvlars function.")
  index <- x$index
  minIndex <- x$minIndex
  lab <- "Fraction L1 Norm"
  if (x$mode == "lambda")
  {
    lab <- "log(lambda)"
    index <- log(index)
    minIndex <- log(minIndex)
  }
  plot(index, x$cv, type = "b", ylim = range(x$cv, x$cv + x$cvError, x$cv - x$cvError), xlab = lab, ylab = "Cross-Validated MSE", ...)
  lines(index, x$cv + x$cvError, lty = 2)
  lines(index, x$cv - x$cvError, lty = 2)
  abline(v = minIndex, lty = "dotted", col = "blue")
  invisible()
}
# One ANOCVA statistic evaluation (deltaS and per-item deltaSq).
# idx: replicate index -- idx == 1 uses the original group memberships,
#      any other value bootstrap-resamples subjects within each group.
# dataDist: (subjects x N x N) array of per-subject association matrices.
# id: group label (1..k) per subject; k: number of groups; N: items;
# r: number of clusters requested from clusteringFunction.
# NOTE(review): silhouette() here is presumably cluster::silhouette with a
# dissimilarity matrix (dmatrix) -- confirm which package is attached.
anocvaStats =function(idx, dataDist, id, k, N, r, clusteringFunction){
  Ab = array(0, c(k, N, N))
  Abb = array(0, c(N, N))
  Sj = array(0, c(k, N))
  # S is re-assigned below from the pooled silhouette; this is just an init.
  S = array(0, N)
  for (j in seq(k)){
    if (idx != 1){
      # Bootstrap: resample subject indices with replacement, keeping the
      # per-group sample sizes.
      resample = sample(seq(dim(dataDist)[1]), sum(id == j), replace = TRUE)
    }else{
      resample = which(id == j)
    }
    dataJ = dataDist[resample, , ]
    Ab[j, , ] = colMeans(dataJ)       # group-mean matrix
    Abb = Abb + colSums(dataJ)        # running pooled sum
  }
  Abb = (Abb / dim(dataDist)[1])      # pooled-mean matrix
  # Cluster on the pooled matrix; 1 - Abb converts association to distance.
  labels = clusteringFunction(1 - Abb, r)
  S = silhouette(labels, dmatrix = Abb)[, 3]
  deltaS = 0
  deltaSq = array(0, N)
  for (j in seq(k)){
    # Silhouettes under each group's mean matrix, with the pooled labels.
    Sj[j, ] = silhouette(labels, dmatrix = Ab[j, , ])[, 3]
    deltaS = deltaS + ((S - Sj[j, ]) %*% (S - Sj[j, ]))
    deltaSq = deltaSq + ((S - Sj[j, ]) ^ 2)
  }
  return(list("deltaS" = deltaS, "deltaSq" = deltaSq))
}
reg_const <- function(regulator, r_const, CVswitch, CVcap, pe_constr)
{
  # Build a 'regSet' object holding regulatory settings for reference-scaled
  # average bioequivalence: the CV switching point, the regulatory constant,
  # the cap on widening, and whether the point-estimate constraint applies.
  # Known regulators: EMA, FDA, HC (Health Canada), GCC; "USER" requires
  # r_const, CVswitch and CVcap to be given explicitly.
  regulator <- toupper(regulator)
  if (regulator=="USER"){
    # FIX: scalar conditions now use `||` instead of element-wise `|`.
    if (missing(CVswitch) || missing(CVcap) || missing(r_const)){
      stop("r_const, CVswitch and CVcap must be given.")
    }
    if (missing(pe_constr)) pe_constr <- TRUE
    r <- list(name="USER", CVswitch=CVswitch, r_const=r_const, CVcap=CVcap,
              pe_constr=pe_constr)
  }
  else if (regulator=="FDA"){
    r <- list(name="FDA", CVswitch=0.3, r_const=log(1.25)/0.25, CVcap=Inf,
              est_method="ISC")
  }
  else if (regulator=="EMA"){
    r <- list(name="EMA", CVswitch=0.3, r_const=0.76, CVcap=0.5)
  }
  else if (regulator=="HC"){
    r <- list(name="HC", CVswitch=0.3, r_const=0.76,
              CVcap=0.57382,
              est_method="ISC")
  }
  else if (regulator=="GCC") {
    r <- list(name="GCC", CVswitch=0.3, r_const=log(1/0.75)/CV2se(0.3), CVcap=0.3)
  }
  else {
    stop("Unknown regulator.")
  }
  class(r) <- "regSet"
  # Fill in defaults not set by the branch above.
  if (is.null(r$pe_constr)) r$pe_constr <- TRUE
  if (is.null(r$est_method)) r$est_method <- "ANOVA"
  r
}
reg_check <- function(regulator, choices=c("EMA", "HC", "FDA", "GCC"))
{
  # Normalize `regulator` into a 'regSet' object: a character name is
  # (case-insensitively) matched against `choices` and expanded via
  # reg_const(); a 'regSet' object passes through unchanged.
  # FIX: `class(x) == "..."` comparisons replaced by inherits() -- the
  # original form breaks (length > 1 condition) for multi-class objects.
  if (inherits(regulator, "character")){
    reg <- toupper(regulator)
    reg <- match.arg(reg, choices)
    reg <- reg_const(reg)
  } else if (inherits(regulator, "regSet")) {
    reg <- regulator
  } else {
    # FIX: removed an unreachable `reg <- NULL` that followed this stop().
    stop("Arg. regulator has to be character or an object of class 'regSet'.")
  }
  reg
}
# Pretty-print a 'regSet' object: name, CV switching point, cap, regulatory
# constant and whether the point-estimate constraint applies.
print.regSet <- function(x, ...)
{
  if (x$name == "USER") {
    cat(x$name, "defined regulatory settings\n")
  } else {
    cat(x$name, "regulatory settings\n")
  }
  cat("- CVswitch =", x$CVswitch, "\n")
  if (!is.null(x$CVcap)) {
    if (is.finite(x$CVcap)) {
      cat("- cap on scABEL if CVw(R) > ", x$CVcap, "\n", sep = "")
    } else {
      cat("- no cap on scABEL\n", sep = "")
    }
  }
  cat("- regulatory constant =", x$r_const, "\n")
  # Old objects may lack pe_constr; treat it as TRUE.
  if (is.null(x$pe_constr)) x$pe_constr <- TRUE
  pe_line <- if (x$pe_constr) "- pe constraint applied" else "- no pe constraint"
  cat(pe_line, "\n", sep = "")
}
# Scaled (widened) average bioequivalence limits for a within-subject CV.
# Below CVswitch the conventional 0.80-1.25 limits apply; above it the
# upper limit widens as exp(r_const * se(CV)); above CVcap it stays fixed
# at the cap's value. Returns a named length-2 vector for scalar CV, or a
# two-column matrix (lower/upper) for vector CV.
scABEL <- function(CV, regulator)
{
  if (missing(regulator)) regulator <- "EMA"
  rc <- reg_check(regulator)
  # Small tolerance so CV == CVswitch stays in the unscaled regime.
  upper <- ifelse(CV <= (rc$CVswitch + 1e-10), 1.25, exp(rc$r_const * CV2se(CV)))
  upper <- ifelse(CV > rc$CVcap, exp(rc$r_const * CV2se(rc$CVcap)), upper)
  if (length(CV) > 1) {
    ret <- cbind(1/upper, upper)
    colnames(ret) <- c("lower", "upper")
  } else {
    ret <- c(1/upper, upper)
    names(ret) <- c("lower", "upper")
  }
  ret
}
scABEL_LO <- function(CV)
{
  # Widened acceptance limits via a continuous sigmoid ("leveling-off")
  # model instead of the piecewise scABEL() rule.
  #
  # Fix: gamma and sw0 were each assigned twice; the first,
  # lower-precision assignments were dead code and have been removed.
  gamma <- 0.03361
  sw0   <- 0.38535
  # Upper asymptote of the sigmoid: the capped upper limit at CV = 0.5.
  beta <- scABEL(CV=0.5)["upper"]
  uppr <- 1.25 + (beta - 1.25)/(1 + exp(-(CV2se(CV)-sw0)/gamma))
  if (length(CV) > 1) {
    ret <- cbind(1/uppr, uppr)
    colnames(ret) <- c("lower", "upper")
  } else {
    ret <- c(1/uppr, uppr)
    names(ret) <- c("lower", "upper")
  }
  ret
}
context("Consistency of getcdfupdates with updates")
set.seed(129301)
# Compare the empirical CDF of simulated chart updates against the CDF
# reported by getcdfupdates() for the model fitted to data X; the
# maximum discrepancy over the probe points must stay below 0.03.
checkupdates <- function(chart, X){
  P <- chart@model$Pofdata(X)
  xi <- chart@model$xiofP(P)
  # 500 resampled batches of updates form the reference sample.
  sample <- unlist(replicate(500,updates(chart,data=chart@model$resample(P),xi)))
  f1 <- ecdf(sample)
  f2 <- getcdfupdates(chart,P,xi)
  # Evaluate both CDFs on the sample plus heavy-tailed Cauchy probes.
  x <- c(sample,rcauchy(100))
  res <- max(abs(f1(x)-f2(x)))
  id <- paste(class(chart)[1],"Maximum error:",format(res))
  expect_true(res<0.03,id)
}
# Exercise checkupdates() for CUSUM and Shewhart charts with normal and
# nonparametric (centre/scale) data models over several distributions
# and sample sizes; all are simulation-heavy and skipped on CRAN.
test_that("Updates/Resample CUSUM Normal",{
    skip_on_cran()
    chart <- new("SPCCUSUM",model=SPCModelNormal(Delta=1))
    checkupdates(chart,rexp(50))
    checkupdates(chart,rnorm(100))
    checkupdates(chart,rnorm(1000))
    checkupdates(chart,runif(1000))
})
test_that("Updates/Resample CUSUM Nonnpar",{
    skip_on_cran()
    chart <- new("SPCCUSUM",model=SPCModelNonparCenterScale(Delta=1))
    checkupdates(chart,rexp(50))
    checkupdates(chart,rnorm(100))
    checkupdates(chart,rnorm(1000))
    checkupdates(chart,runif(1000))
})
test_that("Updates/Resample Shewhart Normal",{
    skip_on_cran()
    # Both one-sided and two-sided Shewhart charts are checked.
    chart <- new("SPCShew",model=SPCModelNormal(),twosided=TRUE)
    checkupdates(chart,rexp(50))
    checkupdates(chart,rnorm(100))
    checkupdates(chart,rnorm(1000))
    chart <- new("SPCShew",model=SPCModelNormal(),twosided=FALSE)
    checkupdates(chart,rexp(50))
    checkupdates(chart,rnorm(100))
    checkupdates(chart,rnorm(1000))
})
test_that("Updates/Resample Shewhart Nonnpar",{
    skip_on_cran()
    chart <- new("SPCShew",model=SPCModelNonparCenterScale(),twosided=TRUE)
    checkupdates(chart,rexp(50))
    checkupdates(chart,rnorm(100))
    checkupdates(chart,rnorm(1000))
    chart <- new("SPCShew",model=SPCModelNonparCenterScale(),twosided=FALSE)
    checkupdates(chart,rexp(50))
    checkupdates(chart,rnorm(100))
    checkupdates(chart,rnorm(1000))
})
computeMemberIndices <- function(h, clusterIdx) {
  # Collect the leaf (observation) indices belonging to cluster
  # `clusterIdx` of an hclust result, by an iterative queue traversal of
  # h$merge: positive entries are sub-clusters, negative entries are
  # leaves (negated observation numbers).
  nMerges <- nrow(h$merge)
  leaves <- rep(NA, nMerges + 1)                    # preallocated output
  nLeaves <- 0
  queue <- c(clusterIdx, rep(NA, nMerges - 1))      # preallocated work queue
  qHead <- 1
  qTail <- 2
  repeat {
    cur <- queue[qHead]
    if (is.na(cur)) break                           # queue exhausted
    queue[qHead] <- NA
    qHead <- qHead + 1
    # Visit the left (column 1) then right (column 2) child, matching
    # the original traversal order exactly.
    for (side in 1:2) {
      child <- h$merge[cur, side]
      if (child > 0) {
        queue[qTail] <- child                       # sub-cluster: enqueue
        qTail <- qTail + 1
      } else {
        nLeaves <- nLeaves + 1
        leaves[nLeaves] <- -child                   # leaf: record index
      }
    }
  }
  return(leaves[1:nLeaves])
}
context("Compute genotype counts")
# Two-point count caching: build a cache from a small marker sequence of
# the tetra.solcap dataset and fetch the precomputed tetraploid cache
# from the web service; both must be 'cache.info' objects.
test_that("compute genotype counts correctly", {
  s<-make_seq_mappoly(tetra.solcap, 3:7)
  o<-cache_counts_twopt(s)
  expect_is(o, "cache.info")
  expect_null(print(o))
  o2<-get_cache_two_pts_from_web(ploidy = 4)
  expect_is(o2, "cache.info")
})
series_info_url = function(x) {
  # Build the SGS (Banco Central do Brasil) chart-page URL for the
  # series whose numeric code is x$code.
  base_url = "https://www3.bcb.gov.br/sgspub/consultarvalores/consultarValoresSeries.do?method=consultarGraficoPorId"
  httr::modify_url(base_url, query = list(hdOidSeriesSelecionadas = x$code))
}
series_info = function(x) {
  # Scrape series metadata (description, unit, frequency, date range)
  # from the SGS web page for series x$code. Returns a list with the
  # page URL and parsed start/last dates.
  url = series_info_url(x)
  res = httr::GET(url)
  cnt = httr::content(res, as = "text")
  doc = xml2::read_html(cnt)
  # First row of the results table holds the metadata cells.
  info = xml2::xml_find_first(doc, '//tr[@class="fundoPadraoAClaro3"]')
  if (length(info) == 0)
    stop("Given code returned no info from SGS: ", x$code)
  info = xml2::xml_find_all(info, ".//td")
  info = xml2::xml_text(info)
  # Drop the trailing cell (not part of the metadata columns).
  info = as.list(info[-length(info)])
  info = setNames(info, c("code", "description", "unit", "frequency", "start_date", "last_date"))
  # Sanity check: the page must describe the requested series.
  if (as.numeric(info$code) != x$code)
    stop("Downloaded info is different from series info")
  info$code = NULL
  info$url = url
  # SGS reports dates in Brazilian day/month/year format.
  info$start_date = as.Date(info$start_date, "%d/%m/%Y")
  info$last_date = as.Date(info$last_date, "%d/%m/%Y")
  info
}
tv_programmes <- function(legislature = NULL, start_date = "1900-01-01",
                          end_date = Sys.Date(), extra_args = NULL,
                          tidy = TRUE, tidy_style = "snake",
                          verbose = TRUE) {
  # Retrieve TV programme broadcast data from the Parliament API,
  # optionally restricted to one house ("commons"/"lords") and a date
  # window. Returns a data frame, or NULL (with a message) when the
  # request matches nothing.
  dates <- paste0(
    "&max-endDate=", as.Date(end_date),
    "T23:59:59Z&min-startDate=", as.Date(start_date),
    "T00:00:00Z"
  )
  # Bug fix: with the NULL default, tolower(NULL) is character(0) and a
  # zero-length `if` condition is an error, so tv_programmes() crashed
  # unless a legislature was supplied. NULL now means "both houses".
  if (is.null(legislature)) {
    leg_query <- ""
  } else {
    legislature <- tolower(legislature)
    if (legislature == "commons") {
      leg_query <- "&legislature.prefLabel=House%20of%20Commons"
    } else if (legislature == "lords") {
      leg_query <- "&legislature.prefLabel=House%20of%20Lords"
    } else {
      leg_query <- ""
    }
  }
  baseurl <- paste0(url_util, "tvprogrammes.json?")
  # First request only fetches the total result count (_pageSize=1).
  tv <- jsonlite::fromJSON(paste0(
    baseurl, leg_query, dates,
    extra_args, "&_pageSize=1"
  ),
  flatten = TRUE
  )
  if (verbose == TRUE) {
    message("Connecting to API")
  }
  # Number of additional 100-row pages to download.
  jpage <- floor(tv$result$totalResults / 100)
  query <- paste0(baseurl, leg_query, dates, extra_args)
  df <- loop_query(query, jpage, verbose)
  if (nrow(df) == 0) {
    message("The request did not return any data.
                Please check your parameters.")
  } else {
    if (tidy == TRUE) {
      df <- tv_tidy(df, tidy_style)
    }
    df
  }
}
hansard_tv_programmes <- tv_programmes
tv_clips <- function(mp_id = NULL, start_date = "1900-01-01",
                     end_date = Sys.Date(), extra_args = NULL, tidy = TRUE,
                     tidy_style = "snake", verbose = TRUE) {
  # Retrieve TV clip data from the Parliament API, optionally restricted
  # to a single member (mp_id) and a date window. Returns a tibble, or
  # NULL (with a message) when the request matches nothing.
  dates <- paste0(
    "&max-startDate=", as.Date(end_date), "T00:00:00Z",
    "&min-startDate=", as.Date(start_date), "T00:00:00Z"
  )
  # Plain if/else instead of the original scalar ifelse(): ifelse() is
  # a vectorised function and is slower and less clear on a single
  # condition.
  member_query <- if (is.null(mp_id)) {
    ""
  } else {
    paste0("&member=http://data.parliament.uk/members/", mp_id)
  }
  baseurl <- paste0(url_util, "tvclips.json?")
  tv <- jsonlite::fromJSON(paste0(
    baseurl, member_query,
    dates, extra_args
  ),
  flatten = TRUE
  )
  # Number of additional 100-row pages to download.
  jpage <- floor(tv$result$totalResults / 100)
  query <- paste0(baseurl, member_query, dates, extra_args)
  df <- loop_query(query, jpage, verbose)
  if (nrow(df) == 0) {
    message("The request did not return any data.
                Please check your parameters.")
  } else {
    if (tidy == TRUE) {
      df <- tv_tidy2(df, mp_id, tidy_style)
    }
    df <- tibble::as_tibble(df)
    df
  }
}
hansard_tv_clips <- tv_clips
tv_channels <- function(tidy = TRUE, tidy_style = "snake",
                        verbose = TRUE) {
  # List the TV channels known to the Parliament API (single request,
  # up to 100 rows). `verbose` is accepted for interface consistency
  # with the other tv_* functions.
  channel_url <- paste0(url_util, "tvchannels.json?_pageSize=100")
  channels <- jsonlite::fromJSON(channel_url, flatten = TRUE)
  df <- tibble::as_tibble(channels$result$items)
  if (tidy == TRUE) {
    df <- hansard_tidy(df, tidy_style)
  }
  df
}
hansard_tv_channels <- tv_channels
AEplot = function(RSinput = 30, makeTitle=FALSE){
  # Draw a stacked-box diagram of adverse-event outcomes for the number
  # needed to treat at recurrence score RSinput.
  # NOTE(review): reads a global lookup vector `nnt` defined elsewhere
  # in the package — confirm it is in scope before calling.
  boxcolors = colorRampPalette(c("lightgrey", "red"))(6)
  if(is.null(RSinput))
    RSinput = 48  # fallback when called with RSinput = NULL
  par.save = par(mai=c(0,0,0.5,0.5))
  # Assumed percentages for AE severities: none, mild, moderate,
  # severe, life-threatening, fatal (order matches the labels below).
  aeProb = c(2.9,15,57,20,5,0.1)
  # First box: the single patient who benefits; remaining boxes split
  # the other nnt-1 patients by AE severity.
  boxwidths = c(1, (nnt[RSinput] - 1) * aeProb / 100)
  opts = options(warn=-1)  # silence warnings from symbols()
  symbols(x=rep(0, 7), y=1:7, inches=F,
          xlim=c(-ceiling(max(boxwidths)), ceiling(max(boxwidths))) * 0.75,
          rectangles = cbind(boxwidths, 1), bg = c("green", boxcolors),
          axes=F,
          xlab="", ylab="")
  par(par.save)
  options(opts)
  # NOTE(review): %except% is defined here but never used in this body.
  "%except%" <- function (vector, condition) vector[match(vector, condition, 0) == 0]
  # Vertical tick positions subdividing each AE box into unit cells.
  verticalsX = lapply(boxwidths[-1], function(bw)
    if(bw <= 1) numeric(0) else -floor(bw/2):floor(bw/2)
  )
  verticalsY = rep(1:6, times=sapply(verticalsX, length))
  segments(x0= unlist(verticalsX),
           y0 = verticalsY + 1/2, y1 = verticalsY + 3/2
  )
  # Right-hand labels and left-hand rounded patient counts per box.
  graphics::text(x = boxwidths/2, y=1:7,
                 c("benefitted", "no adverse event", "mild AE", "moderate", "severe AE", "life-threatening AE",
                   "fatal toxicity"),
                 pos=4 , xpd=NA)
  graphics::text(x = - boxwidths/2, y=1:7, round(boxwidths, 1),
                 pos=2 , xpd=NA)
  if(makeTitle)
    title(paste0("Outcomes for ",
                 round(nnt[RSinput]), " patients, \n",
                 "if all are treated, and 1 benefits\n",
                 "RS = ", RSinput, "   NNT = ", round(nnt[RSinput])))
  print(paste0("RS = ", RSinput, "   NNT = ", round(nnt[RSinput])))
  print(boxwidths)
}
searchExtremePaired <-
  function(TX, N, alternative, method, int, delta, alpha, lookupArray) {
    # Recursive bisection over candidate test statistics to delimit the
    # rejection region. Column 3 of TX is the ordering statistic and
    # column 4 a tri-state flag (NA = undecided, TRUE/FALSE = in/out of
    # the rejection region).
    TXunique <- unique(TX[is.na(TX[ , 4]), 3])
    # All rows decided: return the tables flagged as extreme.
    if (length(TXunique) == 0) {return(TX[as.logical(TX[ , 4]), 1:2, drop=FALSE])}
    m <- floor(length(TXunique)/2) + 1
    s <- TXunique[m]
    if (method %in% c("am", "amcc")) {
      # Asymptotic (normal) p-value for the midpoint statistic;
      # doubled for a two-sided alternative.
      pvalue <- ifelse(alternative=="two.sided",2,1)*pnorm(TX[TX[,3] == s, 3][[1]])
    } else {
      # Exact maximised p-value over all tables at least as extreme.
      Tbls <- TX[TX[,3] <= s, 1:2, drop=FALSE]
      pvalue <- maxPvaluePairedLookup(Tbls, int=int, lookupArray=lookupArray, doublePvalue=FALSE)$pvalue
    }
    # Monotonicity: if the midpoint rejects, everything at least as
    # extreme rejects too; otherwise everything no more extreme is out.
    if (pvalue <= alpha){ TX[TX[,3] <= s, 4] <- TRUE
    } else { TX[TX[,3] >= s, 4] <- FALSE }
    return(searchExtremePaired(TX = TX, N = N, alternative = alternative, method = method, int = int, delta = delta, alpha = alpha,
                               lookupArray = lookupArray))
  }
parMIKD <-
function (idx) {
  # Kernel-density mutual information (in bits) between two rows of the
  # global `counts` matrix; `idx` is a linear index into the lower
  # triangle of the all-pairs matrix.
  # NOTE(review): depends on a global `counts` matrix and on the
  # KernSmooth/GenKern functions dpik/KernSec/KernSur.
  getIndex <- function(idx){
    # Map a linear lower-triangle index to (row, col) coordinates.
    rrow <- ceiling((-1 + sqrt(8*idx + 1))/2)
    ccol <- idx - rrow*(rrow - 1)/2
    c(rrow, ccol)
  }
  pair <- getIndex(idx)
  x <- counts[pair[1], ]
  y <- counts[pair[2], ]
  xgs <- 50  # grid sizes for the density estimates
  ygs <- 50
  # Plug-in bandwidths; fall back to 0.2 when dpik fails
  # (e.g. near-constant data).
  bandx <- tryCatch(dpik(x), error=function(err) 0.2)
  bandy <- tryCatch(dpik(y), error=function(err) 0.2)
  Px  <- KernSec(x, xgridsize = xgs, xbandwidth = bandx)$yden
  Py  <- KernSec(y, xgridsize = ygs, xbandwidth = bandy)$yden
  Pxy <- KernSur(x, y, xgridsize = xgs, ygridsize = ygs, xbandwidth = bandx, ybandwidth = bandy)$zden
  # Normalise the marginals and the joint to probability tables.
  Px  <- Px/sum(Px)
  Py  <- Py/sum(Py)
  Pxy <- Pxy/sum(Pxy)
  ans <- 0
  for(i in 1:xgs)
    for(j in 1:ygs){
      tmp <- Pxy[i,j]*log2(Pxy[i,j]/(Px[i]*Py[j]))
      # 0 * log(0) yields NaN; skip those cells. (The original compared
      # the numeric against the string "NaN", relying on coercion.)
      if(!is.nan(tmp)) ans <- ans + tmp
    }
  return(ans)
}
predict.psf <- function (object, n.ahead = 1, ...) {
  # Predict `n.ahead` future values with a fitted PSF model. The
  # horizon is internally rounded up to a whole number of cycles (with
  # a warning) and the result trimmed back to the requested length.
  requested <- n.ahead
  remainder <- n.ahead %% object$cycle
  if (remainder > 0) {
    n.ahead <- object$cycle * ceiling(n.ahead / object$cycle)
    warning(paste("Prediction horizon is not multiple of", object$cycle, ". Using", n.ahead, "as prediction horizon!"))
  }
  preds <- psf_predict(object$train_data, object$k, object$w, n.ahead, object$cycle)
  preds <- as.vector(t(preds))[1:requested]
  # Undo the min-max normalisation applied during training.
  preds <- preds * (object$dmax - object$dmin) + object$dmin
  return(preds)
}
context("Test main api functions")
library(tibble)
# Every API-touching function must refuse to run until an API key has
# been configured via set_apikey(); HTTP traffic is mocked by httptest.
httptest::with_mock_api({
  test_that("Ask for api key before making any request", {
    expect_error(get_installation_by_id(8077), "You have to set apikey first! See set_apikey function.")
    expect_error(get_nearest_installations(50.11670, 19.91429), "You have to set apikey first! See set_apikey function.")
    expect_error(get_nearest_measurements(50.11670, 19.91429), "You have to set apikey first! See set_apikey function.")
    expect_error(get_point_measurements(50.11670, 19.91429), "You have to set apikey first! See set_apikey function.")
    expect_error(get_installation_measurements(8077), "You have to set apikey first! See set_apikey function.")
    expect_error(get_indexes(), "You have to set apikey first! See set_apikey function.")
    expect_error(get_measurements_info(), "You have to set apikey first! See set_apikey function.")
    expect_error(remaining_requests(), "You have to set apikey first! See set_apikey function.")
  })
})
# Key storage round-trip and input validation.
test_that("Apikey is set correctly", {
  set_apikey("testkey")
  key <- .get_apikey()
  expect_equal(key, "testkey")
})
test_that("Api key takes only string as argument", {
  expect_error(set_apikey(123456789), "apikey must be a string")
})
# Installation lookups (mocked responses): by id, and by nearest
# coordinates; both must reproduce the expected nested tibble exactly.
httptest::with_mock_api({
  test_that("Get installation by id is working", {
    set_apikey("testkey")
    station <- get_installation_by_id(8077)
    expected <- tibble(id = 8077,
                       elevation = 2137,
                       is_airly = TRUE,
                       location = tibble(latitude = 50,
                                         longitude = 19),
                       address = tibble(country = "Poland",
                                        city = "Kraków",
                                        street = "Mikołajska",
                                        number = "4",
                                        displayAddress1 = "Kraków",
                                        displayAddress2 = "Mikołajska"),
                       sponsor = tibble(id = 489,
                                        name ="Chatham Financial",
                                        description = "Airly Sensor's sponsor",
                                        logo = "https://cdn.airly.eu/some.jpg",
                                        link = "https://crossweb.pl")
    )
    expect_equal(station, expected)
  })
})
httptest::with_mock_api({
  test_that("Get nearest installation is working", {
    set_apikey("testkey")
    # The mocked response's second row is the known station above.
    station <- get_nearest_installations(lat = 50, lng = 19)[2,]
    expected <- tibble(id = 8077,
                       elevation = 2137,
                       is_airly = TRUE,
                       location = tibble(latitude = 50,
                                         longitude = 19),
                       address = tibble(country = "Poland",
                                        city = "Kraków",
                                        street = "Mikołajska",
                                        number = "4",
                                        displayAddress1 = "Kraków",
                                        displayAddress2 = "Mikołajska"),
                       sponsor = tibble(id = 489,
                                        name ="Chatham Financial",
                                        description = "Airly Sensor's sponsor",
                                        logo = "https://cdn.airly.eu/some.jpg",
                                        link = "https://crossweb.pl")
    )
    expect_equal(station, expected)
  })
})
# Nearest-point measurements (mocked): current reading plus the first
# seven rows of history and forecast must match the fixtures exactly.
httptest::with_mock_api({
  test_that("Get nearest measurements is working", {
    set_apikey("testkey")
    current <- get_nearest_measurements(lat = 21, lng = 37)$current
    history <- get_nearest_measurements(lat = 21, lng = 37)$history[1:7,]
    forecast <- get_nearest_measurements(lat = 21, lng = 37)$forecast[1:7,]
    exp_curr <- tibble(time = tibble(from = as.POSIXct(strptime("2020-03-11T10:43:29.983Z", format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime("2020-03-11T11:43:29.983Z", format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM1 = 4.58,
                                        PM25 = 6.83,
                                        PM10 = 12.54,
                                        PRESSURE = 1010.3,
                                        HUMIDITY = 77.69,
                                        TEMPERATURE = 10.82),
                       index = tibble(AIRLY_CAQI = 12.54)
    )
    exp_hist <- tibble(time = tibble(from = as.POSIXct(strptime(c("2020-03-10T11:00:00.000Z",
                                                                  "2020-03-10T12:00:00.000Z",
                                                                  "2020-03-10T13:00:00.000Z",
                                                                  "2020-03-10T14:00:00.000Z",
                                                                  "2020-03-10T15:00:00.000Z",
                                                                  "2020-03-10T16:00:00.000Z",
                                                                  "2020-03-10T17:00:00.000Z"),
                                                                format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime(c("2020-03-10T12:00:00.000Z",
                                                                "2020-03-10T13:00:00.000Z",
                                                                "2020-03-10T14:00:00.000Z",
                                                                "2020-03-10T15:00:00.000Z",
                                                                "2020-03-10T16:00:00.000Z",
                                                                "2020-03-10T17:00:00.000Z",
                                                                "2020-03-10T18:00:00.000Z"),
                                                              format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM1 = c(9.87, 8.40, 7.45, 7.70, 7.99, 8.52, 13.64),
                                        PM25 = c(14.16, 11.86, 10.53, 10.99, 11.60, 11.89, 19.93),
                                        PM10 = c(25.74, 21.66, 19.16, 20.31, 21.60, 21.42, 37.58),
                                        PRESSURE = c(1013.37, 1012.38, 1011.55, 1010.89, 1010.23, 1010.26, 1010.37),
                                        HUMIDITY = c(66.01, 58.89, 54.92, 53.60, 55.20, 59.38, 64.65),
                                        TEMPERATURE = c(10.75, 11.50, 11.91, 11.96, 11.39, 10.24, 8.92)),
                       index = tibble(AIRLY_CAQI = c(21.66, 21.66, 19.16, 20.31, 21.60, 21.42,37.58))
    )
    exp_fore <- tibble(time = tibble(from = as.POSIXct(strptime(c("2020-03-11T11:00:00.000Z",
                                                                  "2020-03-11T12:00:00.000Z",
                                                                  "2020-03-11T13:00:00.000Z",
                                                                  "2020-03-11T14:00:00.000Z",
                                                                  "2020-03-11T15:00:00.000Z",
                                                                  "2020-03-11T16:00:00.000Z",
                                                                  "2020-03-11T17:00:00.000Z"),
                                                                format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime(c("2020-03-11T12:00:00.000Z",
                                                                "2020-03-11T13:00:00.000Z",
                                                                "2020-03-11T14:00:00.000Z",
                                                                "2020-03-11T15:00:00.000Z",
                                                                "2020-03-11T16:00:00.000Z",
                                                                "2020-03-11T17:00:00.000Z",
                                                                "2020-03-11T18:00:00.000Z"),
                                                              format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM25 = c(2.07, 0.38, 0.10, 0.10, 0.45, 2.70, 5.33),
                                        PM10 = c(7.82, 5.75, 4.28, 5.09, 7.35, 10.37, 13.86)),
                       index = tibble(AIRLY_CAQI = c(7.82, 5.75, 4.28, 5.09, 7.35, 10.37, 13.86))
    )
    expect_equal(history, exp_hist)
    expect_equal(current, exp_curr)
    expect_equal(forecast, exp_fore)
  })
})
# Per-installation measurements (mocked): same current/history/forecast
# structure as the nearest-point test, keyed by installation id.
httptest::with_mock_api({
  test_that("Get id measurements is working", {
    set_apikey("testkey")
    current <- get_installation_measurements(id = 5)$current
    history <- get_installation_measurements(id = 5)$history[1:7,]
    forecast <- get_installation_measurements(id = 5)$forecast[1:7,]
    exp_curr <- tibble(time = tibble(from = as.POSIXct(strptime("2020-03-11T18:58:36.380Z", format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime("2020-03-11T19:58:36.380Z", format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM1 = 8.73,
                                        PM25 = 12.37,
                                        PM10 = 23.29,
                                        PRESSURE = 1014.23,
                                        HUMIDITY = 81.05,
                                        TEMPERATURE = 8.27),
                       index = tibble(AIRLY_CAQI = 23.29)
    )
    exp_hist <- tibble(time = tibble(from = as.POSIXct(strptime(c("2020-03-10T19:00:00.000Z",
                                                                  "2020-03-10T20:00:00.000Z",
                                                                  "2020-03-10T21:00:00.000Z",
                                                                  "2020-03-10T22:00:00.000Z",
                                                                  "2020-03-10T23:00:00.000Z",
                                                                  "2020-03-11T00:00:00.000Z",
                                                                  "2020-03-11T01:00:00.000Z"),
                                                                format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime(c("2020-03-10T20:00:00.000Z",
                                                                "2020-03-10T21:00:00.000Z",
                                                                "2020-03-10T22:00:00.000Z",
                                                                "2020-03-10T23:00:00.000Z",
                                                                "2020-03-11T00:00:00.000Z",
                                                                "2020-03-11T01:00:00.000Z",
                                                                "2020-03-11T02:00:00.000Z"),
                                                              format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM1 = c(16.30, 10.72, 9.97, 7.42, 2.95, 4.12, 3.17),
                                        PM25 = c(24.18, 15.42, 14.13, 10.53, 4.55, 6.23, 4.87),
                                        PM10 = c(46.03, 28.37, 25.84, 19.07, 8.57, 11.55, 9.33),
                                        PRESSURE = c(1009.94, 1009.13, 1008.49, 1007.75, 1007.17, 1006.81, 1006.01),
                                        HUMIDITY = c(66.36, 66.55, 67.21, 69.24, 71.58, 73.79, 77.09),
                                        TEMPERATURE = c(7.84, 7.42, 7.03, 7.04, 7.19, 7.42, 7.40)),
                       index = tibble(AIRLY_CAQI = c(46.03, 28.37, 25.84, 19.07, 8.57, 11.55, 9.33))
    )
    exp_fore <- tibble(time = tibble(from = as.POSIXct(strptime(c("2020-03-11T19:00:00.000Z",
                                                                  "2020-03-11T20:00:00.000Z",
                                                                  "2020-03-11T21:00:00.000Z",
                                                                  "2020-03-11T22:00:00.000Z",
                                                                  "2020-03-11T23:00:00.000Z",
                                                                  "2020-03-12T00:00:00.000Z",
                                                                  "2020-03-12T01:00:00.000Z"),
                                                                format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime(c("2020-03-11T20:00:00.000Z",
                                                                "2020-03-11T21:00:00.000Z",
                                                                "2020-03-11T22:00:00.000Z",
                                                                "2020-03-11T23:00:00.000Z",
                                                                "2020-03-12T00:00:00.000Z",
                                                                "2020-03-12T01:00:00.000Z",
                                                                "2020-03-12T02:00:00.000Z"),
                                                              format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM25 = c(10.22, 9.22, 7.53, 5.57, 3.82, 2.47, 1.57),
                                        PM10 = c(18.57, 16.76, 13.75, 10.00, 6.64, 4.16, 2.44)),
                       index = tibble(AIRLY_CAQI = c(18.57, 16.76, 13.75, 10.00, 6.64, 4.16, 2.62))
    )
    expect_equal(history, exp_hist)
    expect_equal(current, exp_curr)
    expect_equal(forecast, exp_fore)
  })
})
# Interpolated point measurements (mocked): fixtures are identical to
# the installation-measurement fixtures above.
httptest::with_mock_api({
  test_that("Get point measurements is working", {
    set_apikey("testkey")
    current <- get_point_measurements(50.11670, 19.91429)$current
    history <- get_point_measurements(50.11670, 19.91429)$history[1:7,]
    forecast <- get_point_measurements(50.11670, 19.91429)$forecast[1:7,]
    exp_curr <- tibble(time = tibble(from = as.POSIXct(strptime("2020-03-11T18:58:36.380Z", format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime("2020-03-11T19:58:36.380Z", format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM1 = 8.73,
                                        PM25 = 12.37,
                                        PM10 = 23.29,
                                        PRESSURE = 1014.23,
                                        HUMIDITY = 81.05,
                                        TEMPERATURE = 8.27),
                       index = tibble(AIRLY_CAQI = 23.29)
    )
    exp_hist <- tibble(time = tibble(from = as.POSIXct(strptime(c("2020-03-10T19:00:00.000Z",
                                                                  "2020-03-10T20:00:00.000Z",
                                                                  "2020-03-10T21:00:00.000Z",
                                                                  "2020-03-10T22:00:00.000Z",
                                                                  "2020-03-10T23:00:00.000Z",
                                                                  "2020-03-11T00:00:00.000Z",
                                                                  "2020-03-11T01:00:00.000Z"),
                                                                format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime(c("2020-03-10T20:00:00.000Z",
                                                                "2020-03-10T21:00:00.000Z",
                                                                "2020-03-10T22:00:00.000Z",
                                                                "2020-03-10T23:00:00.000Z",
                                                                "2020-03-11T00:00:00.000Z",
                                                                "2020-03-11T01:00:00.000Z",
                                                                "2020-03-11T02:00:00.000Z"),
                                                              format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM1 = c(16.30, 10.72, 9.97, 7.42, 2.95, 4.12, 3.17),
                                        PM25 = c(24.18, 15.42, 14.13, 10.53, 4.55, 6.23, 4.87),
                                        PM10 = c(46.03, 28.37, 25.84, 19.07, 8.57, 11.55, 9.33),
                                        PRESSURE = c(1009.94, 1009.13, 1008.49, 1007.75, 1007.17, 1006.81, 1006.01),
                                        HUMIDITY = c(66.36, 66.55, 67.21, 69.24, 71.58, 73.79, 77.09),
                                        TEMPERATURE = c(7.84, 7.42, 7.03, 7.04, 7.19, 7.42, 7.40)),
                       index = tibble(AIRLY_CAQI = c(46.03, 28.37, 25.84, 19.07, 8.57, 11.55, 9.33))
    )
    exp_fore <- tibble(time = tibble(from = as.POSIXct(strptime(c("2020-03-11T19:00:00.000Z",
                                                                  "2020-03-11T20:00:00.000Z",
                                                                  "2020-03-11T21:00:00.000Z",
                                                                  "2020-03-11T22:00:00.000Z",
                                                                  "2020-03-11T23:00:00.000Z",
                                                                  "2020-03-12T00:00:00.000Z",
                                                                  "2020-03-12T01:00:00.000Z"),
                                                                format = "%Y-%m-%dT%H:%M:%OSZ")),
                                     to = as.POSIXct(strptime(c("2020-03-11T20:00:00.000Z",
                                                                "2020-03-11T21:00:00.000Z",
                                                                "2020-03-11T22:00:00.000Z",
                                                                "2020-03-11T23:00:00.000Z",
                                                                "2020-03-12T00:00:00.000Z",
                                                                "2020-03-12T01:00:00.000Z",
                                                                "2020-03-12T02:00:00.000Z"),
                                                              format = "%Y-%m-%dT%H:%M:%OSZ"))),
                       measure = tibble(PM25 = c(10.22, 9.22, 7.53, 5.57, 3.82, 2.47, 1.57),
                                        PM10 = c(18.57, 16.76, 13.75, 10.00, 6.64, 4.16, 2.44)),
                       index = tibble(AIRLY_CAQI = c(18.57, 16.76, 13.75, 10.00, 6.64, 4.16, 2.62))
    )
    expect_equal(history, exp_hist)
    expect_equal(current, exp_curr)
    expect_equal(forecast, exp_fore)
  })
})
# Index metadata (mocked).
# NOTE(review): the `color` vector below is truncated — everything
# following a "#" on the original lines (the hex colour codes) appears
# to have been stripped by a comment-removing extraction step, leaving
# the expression syntactically broken. Restore the original hex colour
# strings from upstream before running this test.
httptest::with_mock_api({
  test_that("get_indexes return valid index information", {
    indexes <- get_indexes()
    expected <- data.frame(
      minValue = c(0.0, 25.0, 50.0, 75.0, 87.5, 100.0, 125.0, 0.0, 25.0, 50.0, 75.0, 100.0, 0.0, 1.0, 3.0 , 5.0, 7.0, 10.0),
      maxValue = c(25.0, 50.0, 75.0, 87.5, 100.0, 125.0, NA, 25.0, 50.0, 75.0, 100.0, NA, 1.0, 3.0, 5.0, 7.0, 10.0, NA),
      description = c("Very Low","Low","Medium","High","Very High","Extreme","Airmageddon!", "Very Low","Low",
                      "Medium","High","Very High","Very Good","Good","Moderate","Satisfactory", "Bad","Very Bad"),
      color = c("
                "
      name = c("AIRLY_CAQI", "AIRLY_CAQI", "AIRLY_CAQI", "AIRLY_CAQI", "AIRLY_CAQI", "AIRLY_CAQI", "AIRLY_CAQI",
               "CAQI","CAQI","CAQI", "CAQI","CAQI","PIJP","PIJP","PIJP","PIJP","PIJP","PIJP" ),
      stringsAsFactors = FALSE
    )
    expect_equal(indexes, expected)
  })
})
# Measurement-type metadata (mocked) and the no-request-yet warning.
httptest::with_mock_api({
  test_that("get_measurements_info return valid index information", {
    measures <- get_measurements_info()
    expected <- data.frame(
      name = c("PM1", "PM25", "PM10", "TEMPERATURE", "HUMIDITY", "PRESSURE", "WIND_SPEED", "WIND_BEARING", "NO2", "O3", "SO2", "CO", "H2S", "NO"),
      label = c("PM1", "PM2.5", "PM10", "Temperature", "Humidity", "Pressure", "Wind speed", "Wind bearing", "NO₂", "O₃", "SO₂", "CO", "H₂S", "NO"),
      unit = c("µg/m³", "µg/m³", "µg/m³", "°C", "%", "hPa", "km/h", "°", "µg/m³", "µg/m³", "µg/m³", "µg/m³", "µg/m³", "µg/m³"),
      stringsAsFactors = FALSE)
    expect_equal(measures, expected)
  })
})
test_that("remaining request are not set", {
  expect_warning(remaining_requests(), "You should make at least one request. Check me after making first call.")
})
# testthat entry point for the 'see' package. A 4-component version
# number marks a development build, which enables the full test suite
# via the RunAllseeTests environment variable; tests are skipped
# entirely on macOS.
if (require("testthat")) {
  library(see)
  if (length(strsplit(packageDescription("see")$Version, "\\.")[[1]]) > 3) {
    Sys.setenv("RunAllseeTests" = "yes")
  } else {
    Sys.setenv("RunAllseeTests" = "no")
  }
  # Detect macOS defensively; any failure counts as "not macOS".
  osx <- tryCatch(
    {
      si <- Sys.info()
      if (!is.null(si["sysname"])) {
        si["sysname"] == "Darwin" || grepl("^darwin", R.version$os)
      } else {
        FALSE
      }
    },
    error = function(e) {
      FALSE
    }
  )
  if (!osx) {
    test_check("see")
  }
}
contextCluster <- function(datasets, clusterCounts,
                           dataDistributions="diagNormal", prior=NULL,
                           maxIter=1000, burnin=NULL, lag=3,
                           verbose = FALSE){
  # Multi-context clustering via Gibbs sampling.
  #
  # datasets          : list of data matrices, one per context, with the
  #                     same number of rows (data points) in each
  # clusterCounts     : list with $global and $context cluster counts
  # dataDistributions : per-context distribution name(s); a single value
  #                     is recycled across contexts
  # prior             : prior parameters; estimated empirically if NULL
  # maxIter/burnin/lag: MCMC length, burn-in (default maxIter/2), thinning
  #
  # Returns list(samples, logliks, DIC) with thinned cluster
  # assignments, the full log-likelihood trace, and the DIC.
  nDataPoints <- dim(datasets[[1]])[1]
  nContexts <- length(datasets)
  if (nContexts != length(clusterCounts)) stop("Number of datasets is different from number of contexts.")
  if (sum(laply(datasets, function(dt) nrow(dt) == nDataPoints)) != nContexts) {
    stop("Number of data points is different in individual contexts.")
  }
  if(verbose) message('Running initialisation')
  if (length(dataDistributions) == 1) {
    fullDataDistributions <- rep(dataDistributions, nContexts)
  } else {
    fullDataDistributions <- dataDistributions
  }
  if (is.null(burnin)) {
    burnin <- maxIter/2
  }
  if (is.null(prior)) {
    prior <- empiricalBayesPrior(datasets, fullDataDistributions)
  }
  state <- createGibbsState(datasets, clusterCounts, fullDataDistributions)
  dataStats <- createDataStats(datasets, prior, fullDataDistributions)
  logliks <- rep(0, maxIter)
  assignSamples <- vector("list", length=maxIter)
  for (iter in 1:maxIter) {
    if (((iter%%10) == 0 ) && verbose) { message(paste("iter ",iter)) }
    # One sweep: resample data-point assignments, then the mapping of
    # global clusters to context clusters.
    state <- state %>%
      gibbsSampleZ(dataStats, prior, clusterCounts) %>%
      gibbsSampleContextK(prior, clusterCounts)
    logliks[iter] <- logJoint(state, prior, clusterCounts)
    assignSamples[[iter]]$dataAssignments <- state$Z
    assignSamples[[iter]]$contextAssignments <- state$contextK
  }
  # Burn-in removal and thinning before computing the DIC.
  thinned_logliks <- getMCMCSamples(logliks, burnin, lag)
  thinned_samples <- getMCMCSamples(assignSamples, burnin, lag)
  DIC <- computeDIC(thinned_logliks, thinned_samples, nDataPoints,
                    state$distributions, clusterCounts, prior, datasets)
  assignments <-
    thinned_samples %>%
    llply(getClustersAssignments)
  return(list(samples=assignments, logliks=logliks, DIC=DIC))
}
logsumexp <- function(xs){
  # Numerically stable log(sum(exp(xs))): shift by the maximum so the
  # exponentials cannot overflow.
  m <- max(xs)
  log(sum(exp(xs - m))) + m
}
normalizeProbs <- function(logliks) {
  # Convert (unnormalised) log-likelihoods into probabilities summing to
  # one, normalising in log space for numerical stability.
  exp(logliks - logsumexp(logliks))
}
sampleCategorical <- function(logliks) {
  # Draw a single index from the categorical distribution whose
  # unnormalised log-probabilities are `logliks`, by inverse-CDF
  # sampling of one uniform draw.
  # Reuses normalizeProbs() instead of duplicating its normalisation
  # logic inline, keeping the two functions consistent.
  probs <- normalizeProbs(logliks)
  cdf <- cumsum(probs)
  min(which(runif(1) < cdf))
}
rep.row <- function(x, n) {
  # Build an n-row matrix whose every row is a copy of x.
  matrix(rep(x, times = n), nrow = n, byrow = TRUE)
}
rep.col <- function(x, n) {
  # Build an n-column matrix whose every column is a copy of x.
  matrix(rep(x, times = n), ncol = n)
}
generateMapping <- function(clusterCounts) {
  # Enumerate every combination of context-local cluster labels. Row i
  # of the result gives the local cluster of global cluster i in each
  # context (columns 1..nContexts) plus the global id (last column).
  # (The original used `c` as a loop variable, shadowing base::c.)
  nContexts <- length(clusterCounts)
  nGlobal <- prod(clusterCounts)
  combos <- expand.grid(lapply(clusterCounts, function(k) 1:k))
  mapping <- matrix(nrow = nGlobal, ncol = nContexts + 1)
  for (ctx in 1:nContexts) {
    mapping[, ctx] <- combos[[ctx]]
  }
  mapping[, nContexts + 1] <- 1:nGlobal
  mapping
}
getGlobalClusters <- function(state, mapping) {
  # Concatenate each data point's per-context cluster labels into one
  # string identifying its combination of context clusters.
  # (`mapping` is accepted for interface compatibility but unused.)
  nContexts <- length(state$distributions)
  nPoints <- length(state$Z)
  clusterAssgn <- matrix(nrow = nPoints, ncol = nContexts)
  for (ctx in 1:nContexts) {
    # Local cluster of each point = its global cluster's context label.
    clusterAssgn[, ctx] <- state$contextK[state$Z, ctx]
  }
  aaply(clusterAssgn, 1, function(row) paste(as.character(row), collapse = ''))
}
saveSample <- function(state, mapping, iter, filename) {
  # Append one MCMC sample to a CSV file: per-context cluster labels,
  # global (data-point) assignments, and combined global cluster
  # strings. A header row is written on first use of the file.
  nDataPoints <- length(state$Z)
  nContexts <- length(state$distributions)
  if (!file.exists(filename)) {
    # Build the one-off CSV header.
    contextHeaders <-
      laply(1:nContexts, function(context) {
        laply(1:nDataPoints, function(n) paste("Context", context, " x", n, sep='')) %>% paste(collapse=',')
      }) %>% paste(collapse=',')
    globalHeaders1 <-
      laply(1:nDataPoints, function(n) paste("Global_assgn x", n, sep='')) %>% paste(collapse=',')
    globalHeaders2 <-
      laply(1:nDataPoints, function(n) paste("Global x", n, sep='')) %>% paste(collapse=',')
    header <- paste('Sample', contextHeaders, globalHeaders1, globalHeaders2, sep=',')
    fileConn<-file(filename,open = "a")
    writeLines(header, fileConn)
  } else {
    fileConn<-file(filename, open = "a")
  }
  # Per-context labels: each point inherits its global cluster's label.
  clusterAssgn = matrix(nrow=nDataPoints,ncol=nContexts)
  for (context in 1:nContexts) {
    clusterAssgn[,context] <- state$contextK[state$Z, context]
  }
  contextClusters <- clusterAssgn %>% as.vector %>% as.matrix
  assignments <- state$Z %>% as.matrix
  globalClusters <- getGlobalClusters(state, mapping)
  writeLines(c(iter, t(contextClusters), t(assignments), t(globalClusters)) %>% paste(collapse=','), fileConn)
  close(fileConn)
}
computeDIC <- function(thinned_logliks, thinned_samples, nDataPoints, distributions, clusterCounts, prior, datasets) {
  # Deviance Information Criterion: DIC = p_D + D_bar, where
  # D_bar is the posterior mean deviance and D_hat the deviance at a
  # point estimate ("mode state") built from per-element modal
  # assignments across the thinned samples.
  D_bar <- -2 * mean(thinned_logliks)
  theta_bar <- c()
  # Modal global-cluster assignment of each data point.
  modeDataAssignments <-
    1:nDataPoints %>%
    laply(function(id) {thinned_samples %>% laply(function(s) s$dataAssignments[id])}) %>%
    aaply(1, function(column) which.max(tabulate(column)))
  # All (row, col) positions of the context-assignment matrix.
  idx2d <-
    dim(thinned_samples[[1]]$contextAssignments) %>%
    lapply(seq) %>%
    expand.grid() %>%
    as.matrix
  # Modal context assignment for each matrix position, computed over
  # the flattened index grid and then reshaped back into a matrix.
  modeContextAssignmentsTmp <-
    idx2d %>%
    aaply(1, function(x) {
      i <- x[1]; j <- x[2]
      thinned_samples %>%
        laply(function(s) s$contextAssignments[i,j]) %>%
        tabulate %>%
        which.max
    })
  modeContextAssignments <-
    matrix(nrow=nrow(thinned_samples[[1]]$contextAssignments), ncol=ncol(thinned_samples[[1]]$contextAssignments))
  for (i in 1:nrow(modeContextAssignments)) {
    for (j in 1:ncol(modeContextAssignments)) {
      modeContextAssignments[i,j] <- modeContextAssignmentsTmp[idx2d[,1] == i & idx2d[,2] == j]
    }
  }
  # Assemble the mode state and evaluate the joint there.
  modeState <- c()
  modeState$Z <- modeDataAssignments
  modeState$contextK <- modeContextAssignments
  modeState$distributions <- distributions
  modeState$clusterStats <-
    precomputeClusterStatistics(datasets, clusterCounts, modeState$Z, modeState$contextK, modeState$distributions)
  D_hat <- -2 * logJoint(modeState, prior, clusterCounts)
  p_D <- D_bar - D_hat   # effective number of parameters
  DIC <- p_D + D_bar
  DIC
}
precomputeClusterStatistics <- function(datasets, clusterCounts, Z, contextAssgn, fullDataDistributions) {
  # Initialise the sufficient statistics of all global and context
  # clusters for the given assignments. Returns
  # list(globalStats, contextStats); globalStats$N holds the global
  # cluster occupancy counts.
  N <- laply(1:clusterCounts$global, function(s) sum(Z == s))
  globalStats <- llply(1:length(datasets), function(context) {
    initGlobalClusterStatistics(Z, clusterCounts$global, datasets[[context]], fullDataDistributions[[context]])
  })
  globalStats$N <- N
  contextStats <- llply(1:length(datasets), function(context) {
    initContextClusterStatistics(Z, contextAssgn[,context], clusterCounts$context[context],
                                 datasets[[context]], fullDataDistributions[[context]])
  })
  list(globalStats = globalStats, contextStats = contextStats)
}
createDataStats <- function(datasets, prior, fullDataDistributions) {
  # Precompute per-data-point sufficient statistics and their prior
  # marginal likelihoods; both are reused throughout the sampler.
  stats <- precomputeDataStatistics(datasets, fullDataDistributions)
  marginals <- precomputeDataMarginals(stats, prior, fullDataDistributions)
  list(Stats = stats, Marginals = marginals)
}
precomputeDataStatistics <- function(datasets, fullDataDistributions) {
  # For every data point, compute its sufficient statistics in every
  # context; result is a list (per point) of lists (per context).
  nContexts <- length(datasets)
  nPoints <- nrow(datasets[[1]])
  llply(1:nPoints, function(pt) {
    llply(1:nContexts, function(ctx) {
      getDataStatistics(datasets[[ctx]][pt, , drop = FALSE], fullDataDistributions[[ctx]])
    })
  })
}
precomputeDataMarginals <- function(dataStatistics, prior, fullDataDistributions) {
  # Prior marginal likelihood of each single data point in each
  # context, mirroring the nesting of precomputeDataStatistics().
  nPoints <- length(dataStatistics)
  nContexts <- length(fullDataDistributions)
  llply(1:nPoints, function(pt) {
    llply(1:nContexts, function(ctx) {
      getMarginal(dataStatistics[[pt]][[ctx]], 1, prior[[ctx]], fullDataDistributions[[ctx]])
    })
  })
}
createGibbsState <- function(datasets, clusterCounts, fullDataDistributions) {
  # Initial Gibbs-sampler state: every data point starts in global
  # cluster 1; each global cluster gets a random context cluster per
  # context. (replace=T replaced with the non-reassignable TRUE.)
  nContexts <- length(datasets)
  N <- nrow(datasets[[1]])
  Z <- rep(1, N)
  contextK <- matrix(nrow = clusterCounts$global, ncol = nContexts)
  for (ic in seq_len(nContexts)) {
    contextK[, ic] <- sample(1:clusterCounts$context[ic], clusterCounts$global, replace = TRUE)
  }
  clusterStats <- precomputeClusterStatistics(datasets, clusterCounts, Z, contextK, fullDataDistributions)
  state <- list(Z = Z, contextK = contextK, distributions = fullDataDistributions,
                clusterStats = clusterStats)
  state
}
getLogliksForGlobalCluster <- function(n, N, state, dataStats, prior) {
  # Unnormalised log-probabilities for reassigning data point n to each
  # of the S global clusters: Dirichlet/CRP-style prior term plus the
  # per-context predictive likelihood of the point.
  S <- nrow(state$contextK)
  nContexts <- length(state$distributions)
  # Occupancy counts with point n removed from its current cluster.
  N_s <- state$clusterStats$globalStats$N
  N_s[state$Z[n]] <- N_s[state$Z[n]] - 1
  globalCurrentCluster <- state$Z[n]
  contextLogliks <- matrix(nrow=nContexts, ncol=S)
  for (context in 1:nContexts) {
    currentCluster <- state$contextK[globalCurrentCluster, context]
    clusterStats <- state$clusterStats$contextStats[[context]]
    # Predictive log-likelihood of the point under each context
    # cluster, then mapped to global clusters via contextK.
    cl <-
      predictiveLoglik(dataStats$Stats[[n]][[context]],
                       dataStats$Marginals[[n]][[context]], 1,
                       currentCluster, clusterStats,
                       prior[[context]], state$distributions[[context]])
    contextLogliks[context, ] <- cl[state$contextK[,context]]
  }
  logliks <- log(prior$gamma/S + N_s) - log(prior$gamma + N - 1) + colSums(contextLogliks)
  logliks
}
gibbsSampleZ <- function(state, dataStats, prior, clusterCounts) {
  # One Gibbs sweep over data-point assignments Z, visiting points in
  # random order and keeping all cluster sufficient statistics updated
  # incrementally. Returns the updated state.
  N <- length(state$Z)
  nContexts <- length(state$distributions)
  for (n in (sample(1:N,N,replace = F))) {
    logliks <- getLogliksForGlobalCluster(n, N, state, dataStats, prior)
    probs <- normalizeProbs(logliks)
    newZ <- sample(length(probs), 1, prob=probs)
    oldZ <- state$Z[n]
    if (newZ != oldZ) {
      # Move the point: update occupancy counts and the global- and
      # (where the context label changes) context-cluster statistics.
      state$clusterStats$globalStats$N[newZ] <- state$clusterStats$globalStats$N[newZ] + 1
      state$clusterStats$globalStats$N[oldZ] <- state$clusterStats$globalStats$N[oldZ] - 1
      for (context in 1:nContexts) {
        state$clusterStats$globalStats[[context]] <-
          updateGlobalClusterStats(oldZ, newZ, state$clusterStats$globalStats[[context]], state$clusterStats$globalStats$N,
                                   dataStats$Stats[[n]][[context]], 1, state$distributions[[context]])
        oldK <- state$contextK[oldZ, context]
        newK <- state$contextK[newZ, context]
        if(oldK != newK) {
          state$clusterStats$contextStats[[context]] <-
            updateContextClusterStats(oldK, newK, state$clusterStats$contextStats[[context]],
                                      dataStats$Stats[[n]][[context]], 1, state$distributions[[context]])
        }
      }
      state$Z[n] <- newZ
    }
  }
  state
}
getLogliksForContextCluster <- function(s, S, context, K, state, prior) {
  # Unnormalised log-probabilities for assigning global cluster s (of S)
  # to each of the K context clusters of `context`: CRP-style prior term
  # plus the predictive likelihood of the cluster's data.
  xStats <- getGlobalClusterStats(s, state$clusterStats$globalStats[[context]],
                                  state$distributions[[context]])
  # Fix: was `state$distribution[[context]]`, which only resolved via
  # R's partial matching of `$` against the `distributions` element.
  clusterMarginal <- getMarginal(xStats, state$clusterStats$globalStats$N[s],
                                 prior[[context]], state$distributions[[context]])
  # m_l: number of other global clusters currently mapped to each
  # context cluster l (excluding s itself).
  m_l <- rep(0,K)
  for (l in 1:K) {
    kIdxs <- which(state$contextK[,context] == l)
    m_l[l] <- if (s %in% kIdxs) {length(kIdxs) - 1} else {length(kIdxs)}
  }
  clusterLoglik <-
    predictiveLoglik(xStats, clusterMarginal, state$clusterStats$globalStats$N[s],
                     state$contextK[s,context], state$clusterStats$contextStats[[context]],
                     prior[[context]], state$distributions[[context]])
  log(prior[[context]]$alpha/K + m_l) -
    log(prior[[context]]$alpha + S - 1) +
    clusterLoglik
}
# One Gibbs sweep over the global-to-context cluster mapping `contextK`:
# for each context, every global cluster s (visited in random order) is
# reassigned among the K context clusters, and the context sufficient
# statistics are moved when the assignment changes.  Returns updated state.
gibbsSampleContextK <- function(state, prior, clusterCounts) {
  nContexts <- length(clusterCounts$context)
  S <- clusterCounts$global
  for (context in 1:nContexts) {
    K <- clusterCounts$context[context]
    for (s in (sample(1:S, S, replace = FALSE))) {
      logliks <- getLogliksForContextCluster(s, S, context, K, state, prior)
      probs <- normalizeProbs(logliks)
      oldK <- state$contextK[s, context]
      newK <- sample(length(probs), 1, prob = probs)
      if (oldK != newK) {
        # Move the whole global cluster's pooled statistics between the two
        # context clusters.  (Removed a redundant self-assignment of
        # `contextStats[[context]]$xN` that copied a value onto itself.)
        globalClusterStats <- getGlobalClusterStats(s, state$clusterStats$globalStats[[context]], state$distributions[[context]])
        xN <- state$clusterStats$globalStats$N[s]
        state$clusterStats$contextStats[[context]] <-
          updateContextClusterStats(oldK, newK, state$clusterStats$contextStats[[context]],
                                    globalClusterStats, xN, state$distributions[[context]])
      }
      # No-op when oldK == newK; kept outside the branch as in the original.
      state$contextK[s, context] <- newK
    }
  }
  state
}
# Dispatch the predictive log-likelihood to the implementation for the given
# distribution family; "diagNormal" is the only family currently supported.
predictiveLoglik <- function(x, xMarginals, xN, currentCluster, clusterStats,
                             priorParams, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(predictiveLogLik_diagNormal(x, xMarginals, xN, currentCluster,
                                       clusterStats, priorParams))
  }
  invisible(NULL)  # mirror switch()'s invisible NULL for unknown families
}
# Dispatch marginal-likelihood computation by distribution family
# ("diagNormal" only, for now).
getMarginal <- function(dataStats, dataN, priorParams, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(marginal_diagNormal(dataStats, dataN, priorParams))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Dispatch sufficient-statistic extraction by distribution family.
getDataStatistics <- function(xs, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(getDataStatistics_diagNormal(xs))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Dispatch context-cluster statistic initialization by distribution family.
initContextClusterStatistics <- function(Z, contextK, K, contextData, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(initContextClusterStatistics_diagNormal(Z, contextK, K, contextData))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Dispatch global-cluster statistic initialization by distribution family.
initGlobalClusterStatistics <- function(Z, S, contextData, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(initGlobalClusterStatistics_diagNormal(Z, S, contextData))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Dispatch context-cluster statistic updates by distribution family.
updateContextClusterStats <- function(oldK, newK, clusterStats, dataStats, xN, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(updateContextClusterStats_diagNormal(oldK, newK, clusterStats, dataStats, xN))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Dispatch global-cluster statistic updates by distribution family.
updateGlobalClusterStats <- function(oldZ, newZ, clusterStats, clusterN, dataStats, xN, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(updateGlobalClusterStats_diagNormal(oldZ, newZ, clusterStats, clusterN, dataStats, xN))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Dispatch global-cluster statistic extraction by distribution family.
getGlobalClusterStats <- function(globalClusterIdxs, clusterStats, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(getGlobalClusterStats_diagNormal(globalClusterIdxs, clusterStats))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Dispatch per-context-cluster marginal likelihoods by distribution family.
getContextClusterMarginals <- function(clusterStats, prior, distribution) {
  if (identical(distribution, "diagNormal")) {
    return(getContextClusterMarginals_diagNormal(clusterStats, prior))
  }
  invisible(NULL)  # same as switch() with no matching branch
}
# Posterior Normal-Gamma parameters (a, b, beta) of the diagonal-Normal
# conjugate model, per cluster (rows of sumX/sumX2) and per dimension.
# `N` may be a vector of per-cluster counts; empty clusters (N == 0)
# contribute nothing to the data term.
getParams_diagNormal <- function(sumX, sumX2, N, priorParams) {
  D <- length(priorParams$theta$m)
  S <- nrow(sumX)
  sq_sums <- sumX * sumX
  beta_post <- priorParams$theta$beta + N
  a_post <- priorParams$theta$a + N / 2
  inv_n <- 1 / N
  inv_n[is.infinite(inv_n)] <- 0  # guard the N == 0 (empty cluster) case
  if (all(priorParams$theta$m == 0)) {
    # Zero prior mean: the cross terms vanish and no row/column
    # replication is needed.
    data_contrib <- 0.5 * sumX2 - sq_sums * (1 / 2 * inv_n) +
      (priorParams$theta$beta * inv_n * sq_sums) * 1 / (2 * (priorParams$theta$beta + N))
  } else {
    # General prior mean: expand scalars across dimensions/clusters with
    # the rep.col/rep.row helpers.
    data_contrib <- 0.5 * sumX2 - rep.col(1 / (2 * N), D) * sq_sums +
      (priorParams$theta$beta * (
        rep.col(1 / N, D) * sq_sums - 2 * rep.row(priorParams$theta$m, S) * sumX +
          rep.col(N, D) * rep.row(priorParams$theta$m^2, S))) / rep.col(2 * (priorParams$theta$beta + N), D)
  }
  list(a = a_post,
       b = sweep(data_contrib, 2, priorParams$theta$b, "+"),
       beta = beta_post)
}
# Predictive log-likelihood of the block (xStats, xN) under every cluster,
# with the block's own contribution first removed from `currentCluster`
# (local copy only; the caller's object is untouched).  Clusters that end
# up empty fall back to the supplied prior marginal `xMarginals`.
predictiveLogLik_diagNormal <- function(xStats, xMarginals, xN, currentCluster,
                                        clusterStats, priorParams) {
  # Remove the block from its current cluster.
  clusterStats$xN[currentCluster] <- clusterStats$xN[currentCluster] - xN
  clusterStats$sumX[currentCluster, ] <- clusterStats$sumX[currentCluster, ] - xStats$sumX
  clusterStats$sumX2[currentCluster, ] <- clusterStats$sumX2[currentCluster, ] - xStats$sumX2
  # Posterior parameters excluding and including the block, per cluster.
  post_excl <- getParams_diagNormal(clusterStats$sumX, clusterStats$sumX2,
                                    clusterStats$xN, priorParams)
  post_incl <- getParams_diagNormal(sweep(clusterStats$sumX, 2, xStats$sumX, "+"),
                                    sweep(clusterStats$sumX2, 2, xStats$sumX2, "+"),
                                    clusterStats$xN + xN, priorParams)
  n_dims <- length(priorParams$theta$m)
  out <-
    n_dims * (lgamma(post_incl$a) - lgamma(post_excl$a)) +
    post_excl$a * rowSums(log(post_excl$b)) -
    post_incl$a * rowSums(log(post_incl$b)) +
    n_dims / 2 * (log(post_excl$beta) - log(post_incl$beta)) -
    (xN * n_dims) / 2 * log(2 * pi)
  out[clusterStats$xN == 0] <- xMarginals
  out
}
# Log marginal likelihood of a single data block under the diagonal-Normal /
# Normal-Gamma conjugate model; an empty block (xN == 0) contributes zero.
marginal_diagNormal <- function(xStats, xN, priorParams) {
  params <- getParams_diagNormal(as.matrix(xStats$sumX), as.matrix(xStats$sumX2),
                                 xN, priorParams)
  theta <- priorParams$theta
  D <- length(theta$m)
  loglik <-
    D * (lgamma(params$a) - lgamma(theta$a)) +
    D / 2 * (log(theta$beta) - log(params$beta)) -
    (xN * D) / 2 * log(2 * pi) +
    sum(theta$a * log(theta$b) - params$a * log(params$b))
  loglik[xN == 0] <- 0
  loglik
}
# Sufficient statistics of a data matrix for the diagonal-Normal model:
# per-column sums and sums of squares, each returned as a 1 x D matrix.
getDataStatistics_diagNormal <- function(xs) {
  col_totals <- colSums(xs)
  col_sq_totals <- colSums(xs * xs)
  list(sumX = t(as.matrix(col_totals)),
       sumX2 = t(as.matrix(col_sq_totals)))
}
# Per-global-cluster sums and sums of squares (S x D matrices), built by
# slicing `contextData` on the assignment vector Z.
initGlobalClusterStatistics_diagNormal <- function(Z, S, contextData) {
  sumX <- laply(1:S, function(s) colSums(contextData[Z == s, , drop = FALSE]),
                .drop = FALSE)
  sumX2 <- laply(1:S, function(s) {
    members <- contextData[Z == s, , drop = FALSE]
    colSums(members * members)
  }, .drop = FALSE)
  list(sumX = sumX, sumX2 = sumX2)
}
# Sufficient statistics per context cluster: for each k, pool the data rows
# of every global cluster that `contextK` maps onto context cluster k.
initContextClusterStatistics_diagNormal <- function(Z, contextK, K, contextData) {
  rows_of <- function(k) contextData[Z %in% which(contextK == k), , drop = FALSE]
  sumX <- laply(1:K, function(k) colSums(rows_of(k)), .drop = FALSE)
  sumX2 <- laply(1:K, function(k) {
    xs <- rows_of(k)
    colSums(xs * xs)
  }, .drop = FALSE)
  # Number of observations pooled into each context cluster.
  xN <- laply(1:K, function(k) sum(Z %in% which(contextK == k)))
  list(sumX = sumX, sumX2 = sumX2, xN = xN)
}
# Move xN observations' statistics from context cluster oldK to newK.
# A cluster that becomes empty is zeroed exactly, avoiding floating-point
# residue from repeated subtraction.
updateContextClusterStats_diagNormal <- function(oldK, newK, clusterStats, dataStats, xN) {
  # Add to the destination first...
  clusterStats$xN[newK] <- clusterStats$xN[newK] + xN
  clusterStats$sumX[newK, ] <- clusterStats$sumX[newK, ] + dataStats$sumX
  clusterStats$sumX2[newK, ] <- clusterStats$sumX2[newK, ] + dataStats$sumX2
  # ...then remove from the source.
  remaining <- clusterStats$xN[oldK] - xN
  clusterStats$xN[oldK] <- remaining
  if (remaining == 0) {
    clusterStats$sumX[oldK, ] <- 0
    clusterStats$sumX2[oldK, ] <- 0
  } else {
    clusterStats$sumX[oldK, ] <- clusterStats$sumX[oldK, ] - dataStats$sumX
    clusterStats$sumX2[oldK, ] <- clusterStats$sumX2[oldK, ] - dataStats$sumX2
  }
  clusterStats
}
# Move one observation's sufficient statistics from global cluster oldZ to
# newZ.  `clusterN` holds the already-updated cluster sizes, so a zero count
# means oldZ is now empty and its statistics are reset exactly.
updateGlobalClusterStats_diagNormal <- function(oldZ, newZ, clusterStats, clusterN, dataStats, xN) {
  clusterStats$sumX[newZ, ] <- clusterStats$sumX[newZ, ] + dataStats$sumX
  clusterStats$sumX2[newZ, ] <- clusterStats$sumX2[newZ, ] + dataStats$sumX2
  emptied <- clusterN[oldZ] == 0
  if (emptied) {
    clusterStats$sumX[oldZ, ] <- 0
    clusterStats$sumX2[oldZ, ] <- 0
  } else {
    clusterStats$sumX[oldZ, ] <- clusterStats$sumX[oldZ, ] - dataStats$sumX
    clusterStats$sumX2[oldZ, ] <- clusterStats$sumX2[oldZ, ] - dataStats$sumX2
  }
  clusterStats
}
# Extract the sufficient statistics of global cluster(s) `s`, keeping matrix
# shape (drop = FALSE) even when a single row is selected.
getGlobalClusterStats_diagNormal <- function(s, clusterStats) {
  list(sumX = clusterStats$sumX[s, , drop = FALSE],
       sumX2 = clusterStats$sumX2[s, , drop = FALSE])
}
# Log-prior of the global partition Z under a finite symmetric Dirichlet
# with concentration gamma/S, given the current cluster sizes n_s.
logZ <- function(state, S, N, prior) {
  sizes <- state$clusterStats$globalStats$N
  gam <- prior$gamma
  lgamma(gam) - (S * lgamma(gam / S)) +
    sum(lgamma(gam / S + sizes)) - lgamma(gam + N)
}
# Log-prior of one context's cluster partition under a finite symmetric
# Dirichlet with concentration alpha/K, given current cluster sizes m_l.
logK <- function(state, context, K, prior) {
  assignments <- state$contextK[, context]
  m_l <- rep(0, K)
  for (l in 1:K) {
    m_l[l] <- sum(assignments == l)
  }
  M <- nrow(state$contextK)
  alpha <- prior[[context]]$alpha
  lgamma(alpha) - K * lgamma(alpha / K) +
    sum(lgamma(alpha / K + m_l)) - lgamma(alpha + M)
}
# Full joint log-probability of the sampler state: partition prior over the
# global clusters (logZ), partition priors over each context's clusters
# (logK), plus the data log-marginals of every context cluster.
logJoint <- function(state, prior, clusterCounts) {
  S <- clusterCounts$global
  N <- length(state$Z)
  nContexts <- length(clusterCounts$context)
  dataLoglik <- 0
  loglikK <- 0
  for (context in 1:nContexts) {
    marginals <- getContextClusterMarginals(state$clusterStats$contextStats[[context]],
                                            prior[[context]],
                                            state$distributions[[context]])
    dataLoglik <- dataLoglik + sum(marginals)
    loglikK <- loglikK + logK(state, context, clusterCounts$context[context], prior)
  }
  logZ(state, S, N, prior) + loglikK + dataLoglik
}
# Vector of log marginal likelihoods, one per context cluster, under the
# diagonal-Normal conjugate model; empty clusters (xN == 0) are set to zero.
getContextClusterMarginals_diagNormal <- function(clusterStats, priorParams) {
  params <- getParams_diagNormal(clusterStats$sumX, clusterStats$sumX2,
                                 clusterStats$xN, priorParams)
  theta <- priorParams$theta
  D <- length(theta$m)
  loglik <-
    D * (lgamma(params$a) - lgamma(theta$a)) +
    D / 2 * (log(theta$beta) - log(params$beta)) -
    (clusterStats$xN * D) / 2 * log(2 * pi) +
    rowSums(theta$a * log(theta$b) - params$a * log(params$b))
  loglik[clusterStats$xN == 0] <- 0
  loglik
}
# Build the matrix of exogenous values for a shocked scenario.
#
# Starts from the last (steady-state) row of the baseline solution, replays
# the baseline's stored external-variable expressions for the new horizon,
# then overwrites each shocked variable over its shock's [start, end] window.
.sfcr_make_scenario_matrix <- function(baseline, scenario, periods) {
  # `sfcr_random()` must be visible when the external expressions are
  # eval()'d below; this version draws `periods` values.
  sfcr_random <- function(.f, ...) {
    match.arg(.f, c("rnorm", "rbinom", "runif"))
    args <- list(...)
    args$n <- NULL
    n <- list(n=periods)
    args <- c(n, args)
    do.call(eval(parse(text=.f)), args)
  }
  # Repeat the steady-state row once per simulated period.
  steady <- utils::tail(attributes(baseline)$matrix, n = 1)
  m <- steady[rep(seq_len(nrow(steady)), periods), ]
  # Re-evaluate the baseline's external-variable expressions so random
  # series are redrawn for the new horizon.
  external <- attr(baseline, "external")
  exgs_names <- external$lhs
  exg_exprs <- purrr::map(external$rhs, function(x) parse(text=x))
  for (var in seq_along(exgs_names)) {
    m[, exgs_names[[var]]] <- eval(exg_exprs[[var]])
  }
  # Decompose each shock into variable names, parsed expressions, and the
  # start/end of its window.
  scenario_eqs <- purrr::map(scenario, function(x) .eq_as_tb(x[[1]]))
  scenario_names <- purrr::map(scenario_eqs, function(x) x$lhs)
  scenario_exprs <- purrr::map(scenario_eqs, function(x) purrr::map(x$rhs, function(y) parse(text = y)))
  scenario_start <- purrr::map(scenario, function(x) x[[2]])
  scenario_end <- purrr::map(scenario, function(x) x[[3]])
  # Redefined on purpose: shock expressions need draws sized by the shock
  # window (`shock_length`, assigned inside the loop below), not `periods`.
  sfcr_random <- function(.f, ...) {
    match.arg(.f, c("rnorm", "rbinom", "runif"))
    args <- list(...)
    args$n <- NULL
    n <- list(n=shock_length)
    args <- c(n, args)
    do.call(eval(parse(text=.f)), args)
  }
  # NOTE(review): the loop index shadows the `scenario` argument; this works
  # because everything needed was extracted above, but renaming the index
  # would be safer.
  for (scenario in seq_len(vctrs::vec_size(scenario_eqs))) {
    shock_length <- length(seq(scenario_start[[scenario]], scenario_end[[scenario]]))
    scenario_nms <- scenario_names[[scenario]]
    scenario_xprs <- scenario_exprs[[scenario]]
    # Overwrite the shocked variables only inside the shock window.
    for (var in seq_along(scenario_nms)) {
      m[scenario_start[[scenario]]:scenario_end[[scenario]], scenario_nms[[var]]] <- eval(scenario_xprs[[var]])
    }
  }
  return(m)
}
# Extend a converged baseline to a new horizon without any shocks.
#
# Repeats the last (steady-state) row of the baseline solution `periods`
# times and re-evaluates the baseline's stored external-variable expressions
# so random series are redrawn for the new horizon.
.extend_baseline_matrix <- function(baseline, periods) {
  steady <- utils::tail(attributes(baseline)$matrix, n = 1)
  m <- steady[rep(seq_len(nrow(steady)), periods), ]
  # `sfcr_random()` must be in scope when the expressions are eval()'d
  # below; draws are sized by the scenario horizon.
  sfcr_random <- function(.f, ...) {
    match.arg(.f, c("rnorm", "rbinom", "runif"))
    args <- list(...)
    args$n <- NULL
    n <- list(n=periods)
    args <- c(n, args)
    do.call(eval(parse(text=.f)), args)
  }
  external <- attr(baseline, "external")
  exgs_names <- external$lhs
  exg_exprs <- purrr::map(external$rhs, function(x) parse(text=x))
  # Evaluated in this function's environment so the expressions can see
  # `sfcr_random` (and `m`, if they reference other columns).
  for (var in seq_along(exgs_names)) {
    m[, exgs_names[[var]]] <- eval(exg_exprs[[var]])
  }
  return(m)
}
# Abort with a plural-aware message listing the shock variable(s) that are
# not exogenous in the model.
.abort_wrong_shock_var <- function(wrong_var) {
  msg <- if (length(wrong_var) == 1) {
    paste0("Shocked variable `", wrong_var, "` is not included in the external variables of the model. Please check your shocks and try again.")
  } else {
    paste0("Shocked variables `", paste0(wrong_var, collapse = ", "), "` are not present in the external variables of the model. Please check your shocks and try again.")
  }
  rlang::abort(message = msg)
}
# Validate one sfcr_shock against the scenario horizon: the window must lie
# in [0, periods], and any literal exogenous series supplied with the shock
# must have length 1 or exactly the shock length.
#
# NOTE(review): the default `periods = periods` is self-referential and
# would fail ("promise already under evaluation") if a caller ever omitted
# it; internal callers always pass it, so the signature is kept unchanged.
.check_shock_consistency <- function(shock, periods = periods) {
  start <- shock$start
  end <- shock$end
  if (start < 0) {
    rlang::abort("Please supply a non-negative start period for the shock.")
  }
  if (end > periods) {
    rlang::abort("The end of the shock must be smaller or equal to the periods in the scenario.")
  }
  length_shock <- length(seq(start, end))
  vars <- .eq_as_tb(shock$variables)
  # Expressions built on `sfcr_random()` are evaluated later with the shock
  # length in scope, so they cannot be length-checked here.
  vars <- dplyr::filter(vars, stringr::str_detect(.data$rhs, "sfcr_random", negate = TRUE))
  # Inverted from the original's empty `if (...) {} else {...}` branch.
  if (!vctrs::vec_is_empty(vars)) {
    parse_vars <- purrr::map(vars$rhs, ~eval(parse(text = .x)))
    vars_length <- purrr::map_dbl(parse_vars, length)
    if (mean(vars_length) > 1) {
      abortifnot(all(vars_length %in% c(1, length_shock)), "All exogenous variables supplied as a shock must have either length 1 or exactly the same length as the shock.")
      rlang::warn("Passing exogenous series with a shock can lead to unexpected behavior if the length of the series is smaller than the periods to the end of the scenario. Be cautious when using this functionality.", .frequency_id = "scenario_warn", .frequency = "once")
    }
  }
}
# Simulate a scenario on top of a converged `baseline` model: validate the
# shocks, build the exogenous matrix (with or without shocks), solve it with
# the requested algorithm, and wrap the result as an sfcr_tbl.
sfcr_scenario <- function(baseline, scenario, periods, max_iter = 350, tol = 1e-10, method = "Broyden", ...) {
  # Fix: assign the validated value.  The original discarded match.arg()'s
  # result, so a partial spelling such as method = "Ga" passed validation
  # but silently fell through to the Broyden branch below.
  method <- match.arg(method, c("Gauss", "Newton", "Broyden"))
  # A single shock may be passed bare; normalize to a list of shocks.
  if (inherits(scenario, "sfcr_shock")) {
    scenario <- list(scenario)
  }
  eqs <- attr(baseline, "calls")
  # All shock validation happens under one NULL guard (the original split
  # this across two consecutive `if (!is.null(scenario))` blocks).
  if (!is.null(scenario)) {
    check_all_shocks <- purrr::map_lgl(scenario, ~inherits(.x, "sfcr_shock"))
    if (mean(check_all_shocks) < 1) rlang::abort("Please use `sfcr_shock()` to create shocks.")
    # Shocked variables must be exogenous, i.e. not defined by an equation.
    ends_names <- eqs$lhs
    all_names <- colnames(baseline)
    exgs_names <- all_names[which(!(all_names %in% ends_names))]
    all_shock_vars <- unlist(purrr::map(scenario, ~.eq_as_tb(.x$variables)$lhs))
    check_valid_vars <- purrr::map_lgl(all_shock_vars, ~{.x %in% exgs_names})
    if (mean(check_valid_vars) < 1) {
      wrong_var <- all_shock_vars[!check_valid_vars]
      .abort_wrong_shock_var(wrong_var)
    }
    purrr::map(scenario, ~.check_shock_consistency(.x, periods))
  }
  # Exogenous paths for the new horizon, with or without shocks applied.
  if (is.null(scenario)) {
    m <- .extend_baseline_matrix(baseline, periods)
  } else {
    m <- .sfcr_make_scenario_matrix(baseline, scenario, periods)
  }
  # Solve the model with the chosen algorithm.
  if (method == "Gauss") {
    s1 <- .sfcr_gauss_seidel(m, eqs, periods, max_iter, tol)
  } else if (method == "Newton") {
    s1 <- .sfcr_newton(m, eqs, periods, max_iter, tol, ...)
  } else {
    s1 <- .sfcr_broyden(m, eqs, periods, max_iter, tol)
  }
  # Tidy output: add a period column and drop solver bookkeeping columns.
  s2 <- tibble::tibble(data.frame(s1)) %>%
    dplyr::mutate(period = dplyr::row_number()) %>%
    dplyr::select(-tidyselect::contains('block')) %>%
    dplyr::select(.data$period, tidyselect::everything())
  ext <- attr(baseline, "external")
  x <- new_sfcr_tbl(s2, s1, eqs, ext)
  return(x)
}
# Draw a 2x3 summary dashboard for a MAF object using base graphics:
# variant-classification counts, variant-type counts, SNV (Ti/Tv) classes,
# per-sample stacked variant counts, a per-classification boxplot, and the
# top-n mutated genes.
#
# NOTE(review): the defaults `sfs = statFontSize`, `fontSize = fs` and
# `donut = pie` reference objects defined elsewhere in the package — TODO
# confirm they exist wherever this is sourced.  `rmOutlier`, `sfs` and
# `donut` are not referenced in this body.
dashboard = function(maf, color = NULL, rmOutlier = TRUE, log_conv = FALSE, titv.color = NULL, sfs = statFontSize, fontSize = fs, n = 10, donut = pie, rawcount = TRUE, stat = NULL, titleSize = NULL, barcodes = NULL, barcodeSize = NULL){
  # Variant-classification colors: package palette unless overridden.
  if(is.null(color)){
    col = get_vcColors()
  }else{
    col = color
  }
  # Per-sample variant-classification counts, CNV columns removed, columns
  # ordered by total count (descending).
  vcs = getSampleSummary(maf)
  vcs = vcs[,colnames(vcs)[!colnames(x = vcs) %in% c('total', 'Amp', 'Del', 'CNV_total')], with = FALSE]
  vcs = vcs[,c(1,order(colSums(x = vcs[,2:(ncol(vcs)), with =FALSE]), decreasing = TRUE)+1), with =FALSE]
  # Long format for the boxplot panel.
  vcs.m = data.table::melt(data = vcs, id = 'Tumor_Sample_Barcode')
  colnames(vcs.m) = c('Tumor_Sample_Barcode', 'Variant_Classification', 'N')
  # Wide matrix (classifications x samples) for the stacked barplot.
  data.table::setDF(vcs)
  rownames(x = vcs) = vcs$Tumor_Sample_Barcode
  vcs = vcs[,-1, drop = FALSE]
  vcs = t(vcs)
  # 2x3 panel layout shared by all six plots below.
  lo = matrix(data = 1:6, nrow = 2, byrow = TRUE)
  graphics::layout(mat = lo, heights = c(3.5, 3), widths = c(3, 2, 2))
  par(cex.axis = fontSize, font = 3, cex.main = titleSize[1], lwd = 1.2)
  # --- Panel 1: horizontal barplot of variant-classification totals -------
  vc.plot.dat = rev(rowSums(vcs))
  if(log_conv){
    vc.plot.dat = log10(vc.plot.dat)
  }
  xt = pretty(c(0, vc.plot.dat))
  par(mar = c(3, 9, 3, 1))
  b = barplot(vc.plot.dat, axes = FALSE, horiz = TRUE, col = col[names(vc.plot.dat)], border = NA,
              xlim = c(0, max(xt)), names.arg = rep("", length(vc.plot.dat)))
  abline(v = xt, h = 1:length(b)-0.25, lty = 2, lwd = 0.3, col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.6))
  axis(side = 2, at = b, labels = names(vc.plot.dat), lwd = 1.2, cex.axis = fontSize,
       las = 2, line = 0.2, hadj = 0.9, font = 3, tick = FALSE)
  axis(side = 1, at = xt, lwd = 1.2, font = 3, las = 2, cex.axis = fontSize*0.9)
  title(main = "Variant Classification", adj = 0, cex.main = titleSize[1], font = 3)
  if(log_conv){
    axis(side = 2, at = 0, labels = "(log10)", lwd = 1.2, font = 3,
         las = 1, cex.axis = fontSize*0.9, hadj = 0.5, padj = 2, line = 0.75, tick = FALSE, outer = FALSE)
  }
  # --- Panel 2: horizontal barplot of variant-type totals -----------------
  vt.plot.dat = maf@variant.type.summary
  vt.plot.dat = vt.plot.dat[,colnames(vt.plot.dat)[!colnames(x = vt.plot.dat) %in% c('total', 'CNV')], with = FALSE]
  vt.plot.dat = suppressWarnings(data.table::melt(vt.plot.dat[,c(2:(ncol(vt.plot.dat))), with = FALSE], id = NULL)[,sum(value), variable])
  colnames(vt.plot.dat)[2] = "sum"
  vt.cols = RColorBrewer::brewer.pal(n = 10, name = "Set3")
  if(log_conv){
    vt.plot.dat$sum = log10(vt.plot.dat$sum)
  }
  xt = pretty(c(0, vt.plot.dat$sum))
  par(mar = c(3, 3, 3, 1))
  b = barplot(vt.plot.dat$sum, axes = FALSE, horiz = TRUE, col = vt.cols[1:length(vt.plot.dat$variable)],
              border = NA, xlim = c(0, max(xt)))
  abline(v = xt, h = 1:length(b)-0.25, lty = 2, lwd = 0.3, col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.6))
  axis(side = 2, at = b, labels = vt.plot.dat$variable, lwd = 1.2, cex.axis = fontSize,
       las = 2, line = 0.2, hadj = 0.8, font = 3, tick = FALSE)
  axis(side = 1, at = xt, lwd = 1.2, font = 3, las = 2, cex.axis = fontSize*0.9)
  title(main = "Variant Type", adj = 0, cex.main = titleSize[1], font = 3)
  if(log_conv){
    axis(side = 2, at = 0, labels = "(log10)", lwd = 1.2, font = 3,
         las = 1, cex.axis = fontSize*0.9, hadj = 0.5, padj = 2, line = 0.75, tick = FALSE, outer = FALSE)
  }
  # --- Panel 3: SNV class (transition/transversion) counts or fractions ---
  titv = titv(maf = maf, useSyn = TRUE, plot = FALSE)
  titv.counts = titv$raw.counts
  titv.sums = data.frame(value = colSums(titv.counts[,2:7]), stringsAsFactors = FALSE)
  titv.sums$class = rownames(titv.sums)
  if(!rawcount){
    # Show fractions on the bars but keep raw counts for labels.
    titv.sums$raw_value = titv.sums$value
    titv.sums$value = titv.sums$value/sum(titv.sums$value)
    xt = seq(0, 1, 0.25)
  }else{
    xt = as.integer(seq(0, max(titv.sums$value, na.rm = TRUE), length.out = 4))
  }
  if(is.null(titv.color)){
    titv.color = get_titvCol()
  }
  par(mar = c(3, 3, 3, 1))
  b = barplot(titv.sums$value, axes = FALSE, horiz = TRUE, col = titv.color[rownames(titv.sums)],
              border = NA, xlim = c(0, xt[length(xt)]))
  axis(side = 2, at = b, labels = rownames(titv.sums), lwd = 1.2, cex.axis = fontSize,
       las = 2, line = 0.2, hadj = 0.8, font = 3, tick = FALSE)
  axis(side = 1, at = xt, lwd = 1.2, font = 3, las = 2, cex.axis = fontSize*0.9)
  title(main = "SNV Class", adj = 0, cex.main = titleSize[1], font = 3)
  if(!rawcount){
    text(x = titv.sums$value+0.03, y = b, labels = titv.sums$raw_value,
         font = 4, col = "black", cex = fontSize, adj = 0)
  }
  abline(v = xt, h = 1:length(b)-0.25, lty = 2, lwd = 0.3, col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.6))
  # --- Panel 4: stacked per-sample variant counts -------------------------
  # Extra bottom margin when sample barcodes are printed below the bars.
  if(barcodes){
    par(mar = c(6, 2, 3, 1))
  } else{
    par(mar = c(3, 2, 3, 1))
  }
  if(log_conv){
    # Rescale each sample's stack so its height is log10(total) while
    # segment proportions are preserved.
    vcs = apply(vcs, 2, function(x){
      x_fract = x / sum(x)
      x_log_total = log10(sum(x))
      x_fract * x_log_total
    })
    vcs[is.nan(x = vcs)] = 0
    # NOTE(review): `-which(...)` drops ALL columns when no column sums to
    # zero (which(...) is empty) — TODO confirm this path is never hit with
    # zero-free data.
    vcs = vcs[,-which(colSums(x = vcs) == 0)]
  }
  b = barplot(vcs, col = col[rownames(vcs)], border = NA, axes = FALSE, names.arg = rep("", ncol(vcs)))
  axis(side = 2, at = as.integer(seq(0, max(colSums(vcs)), length.out = 4)), lwd = 1.2, font = 3, las = 2,
       line = -0.3, hadj = 0.6, cex.axis = fontSize)
  title(main = "Variants per sample", adj = 0, cex.main = titleSize[1], font = 3, line = 2)
  if(barcodes){
    mtext(text = colnames(vcs), side = 1, line = 0.2, outer = FALSE, las = 2, at = b, cex = barcodeSize)
  }
  # Optional mean/median reference line across the per-sample bars.
  if(!is.null(stat)){
    if(stat == 'mean'){
      med.line = round(maf@summary[ID %in% "total", Mean], 2)
      if(log_conv){
        med.line = round(log10(med.line), 2)
      }
      df = data.frame(y = c(med.line), x = as.integer(0.8*nrow(getSampleSummary(maf))), label = c(paste('Mean: ', med.line, sep='')))
      title(main = paste0("Mean: ", med.line), adj = 0, cex.main = titleSize[1]*0.8, font = 3, line = 1)
      lines(x = c(1, b[length(b)]), y = c(med.line, med.line), col = "maroon", lwd = 1.2, lty = 2)
    }else if(stat == 'median'){
      med.line = round(maf@summary[ID %in% "total", Median], 2)
      if(log_conv){
        # Title shows the back-transformed median on the log-scaled plot.
        med.line = log10(med.line)
        title(main = paste0("Median: ", round(10^med.line)), adj = 0, cex.main = titleSize[1]*0.8, font = 3, line = 1)
      }else{
        title(main = paste0("Median: ", med.line), adj = 0, cex.main = titleSize[1]*0.8, font = 3, line = 1)
      }
      df = data.frame(y = c(med.line), x = as.integer(0.8*nrow(getSampleSummary(maf))), label = c(paste('Median: ', med.line, sep='')))
      lines(x = c(1, b[length(b)]), y = c(med.line, med.line), col = "maroon", lwd = 1.2, lty = 2)
    }
  }
  if(log_conv){
    mtext(text = "(log10)", side = 2, lwd = 1.2, font = 3, cex = fontSize*0.7, line = 1)
  }
  # --- Panel 5: boxplot of per-sample counts by classification ------------
  par(mar = c(3, 2, 3, 1))
  # Upper whisker per classification, used to scale the y axis.
  boxH = vcs.m[,boxplot.stats(N)$stat[5], by = .(Variant_Classification)]
  colnames(boxH)[ncol(boxH)] = 'boxStat'
  bcol = col[levels(vcs.m$Variant_Classification)]
  b = boxplot(N ~ Variant_Classification, data = vcs.m, xaxt="n", outline=FALSE, lty=1, lwd = 1.4, outwex=0,
              staplewex=0, axes = FALSE, border = bcol)
  axis(side = 2, at = as.integer(seq(0, max(boxH[,boxStat], na.rm = TRUE), length.out = 4)),
       lwd = 1.2, font = 3, cex.axis = fontSize, las = 2)
  title(main = "Variant Classification \nsummary", adj = 0, cex.main = titleSize[1], font = 3, line = 1)
  abline(v = 1:length(bcol), h = as.integer(seq(0, max(boxH[,boxStat], na.rm = TRUE), length.out = 4)),
         lty = 2,
         lwd = 0.3, col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.6))
  # --- Panel 6: top-n mutated genes, labeled with % of altered samples ----
  gs = getGeneSummary(maf)
  nsamps = as.numeric(maf@summary[ID %in% "Samples", summary])
  gs.load = gs[,.(Hugo_Symbol, AlteredSamples)]
  gs.load[,AlteredSamples := round(AlteredSamples/nsamps, digits = 2) * 100]
  data.table::setDF(x = gs.load, rownames = gs.load$Hugo_Symbol)
  gs = gs[,colnames(gs)[!colnames(x = gs) %in% c('total', 'Amp', 'Del', 'CNV_total', 'MutatedSamples', 'AlteredSamples')], with = FALSE]
  if(nrow(gs) < n){
    gs.dat = gs
  }else{
    gs.dat = gs[1:n]
  }
  # Wide matrix (classifications x genes), genes ordered by total count.
  data.table::setDF(gs.dat)
  rownames(gs.dat) = gs.dat$Hugo_Symbol
  gs.dat = gs.dat[,-1, drop = FALSE]
  gs.dat = t(gs.dat)
  gs.dat = gs.dat[names(sort(rowSums(gs.dat), decreasing = TRUE)),, drop = FALSE]
  gs.dat = gs.dat[,names(sort(colSums(gs.dat))), drop = FALSE]
  xt = as.integer(seq(0, max(colSums(gs.dat))+2, length.out = 4))
  par(mar = c(3, 4, 3, 1))
  gs.load = gs.load[rev(colnames(gs.dat)),,]
  b = barplot(gs.dat, axes = FALSE, horiz = TRUE, col = col[rownames(gs.dat)], border = NA,
              xlim = c(0, max(xt)+(max(xt)*0.15)), names.arg = rep("", ncol(gs.dat)))
  axis(side = 2, at = b, labels = colnames(gs.dat), lwd = 1.2, cex.axis = fontSize,
       las = 2, line = 0.2, hadj = 0.8, font = 3, tick = FALSE)
  axis(side = 1, at = xt, lwd = 1.2, font = 3, las = 2, cex.axis = fontSize*0.9)
  title(main = paste0('Top ', n, '\nmutated genes'), adj = 0, cex.main = titleSize[1], font = 3)
  text(x = colSums(gs.dat)+1, y = b, labels = rev(paste0(gs.load$AlteredSamples, "%")),
       font = 4, col = "black", cex = fontSize*0.9, adj = 0, xpd = TRUE)
  abline(h = b, v = xt,lty = 2, lwd = 0.3,
         col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.6))
}
# Translate German umlauts and sharp s in `x` into their LaTeX escape
# sequences (e.g. "ä" -> \"a{}, "ß" -> \ss{}), after normalizing the input
# to UTF-8.  Vectorized over `x`.
convert_umlauts_to_tex <- function(x) {
  # Pattern -> replacement table; replacements contain regex escapes, so
  # gsub() is called without fixed = TRUE (as in the original).
  tex_map <- c(
    "\u00e4" = "\\\\\u0022a{}",  # a umlaut
    "\u00c4" = "\\\\\u0022A{}",  # A umlaut
    "\u00f6" = "\\\\\u0022o{}",  # o umlaut
    "\u00d6" = "\\\\\u0022O{}",  # O umlaut
    "\u00fc" = "\\\\\u0022u{}",  # u umlaut
    "\u00dc" = "\\\\\u0022U{}",  # U umlaut
    "\u00df" = "\\\\ss{}"        # sharp s
  )
  out <- iconv(enc2native(x), to = "UTF-8", sub = "unicode")
  for (i in seq_along(tex_map)) {
    out <- gsub(names(tex_map)[i], tex_map[[i]], out)
  }
  return(out)
}
library(shiny)
library(DT)
library(ggplot2)
library(fontawesome)
# HTML dependency serving Font Awesome's unminified CSS (plus the v4 shim
# stylesheet) from the installed fontawesome package.
# NOTE(review): `fontawesome:::fa_version` reaches into an unexported
# object — this will break if the package renames or removes it.
unminified_css_dep <-
  htmltools::htmlDependency(
    name = "font-awesome",
    version = fontawesome:::fa_version,
    src = "fontawesome",
    package = "fontawesome",
    stylesheet = c("css/all.css", "css/v4-shims.css")
  )
# Page layout: three "All"-defaulting filter dropdowns (manufacturer,
# transmission, cylinders) above a data table of ggplot2's mpg dataset.
# Each label carries a Font Awesome car icon rendered via the unminified
# CSS dependency defined above.
ui <- fluidPage(
  titlePanel("Basic DataTable"),
  fluidRow(
    column(
      width = 4,
      selectInput(
        inputId = "man",
        label = tags$p(fa_i("car", html_dependency = unminified_css_dep), "Manufacturer:"),
        choices = c(
          "All",
          unique(as.character(mpg$manufacturer))))
    ),
    column(
      width = 4,
      selectInput(
        inputId = "trans",
        label = tags$p(fa_i("car", html_dependency = unminified_css_dep), "Transmission:"),
        choices = c(
          "All",
          unique(as.character(mpg$trans))))
    ),
    column(
      width = 4,
      selectInput(
        inputId = "cyl",
        label = tags$p(fa_i("car", html_dependency = unminified_css_dep), "Cylinders:"),
        choices = c(
          "All",
          unique(as.character(mpg$cyl))))
    )
  ),
  fluidRow(
    dataTableOutput("table")
  )
)
# Server: render mpg filtered by the three dropdowns; selecting "All"
# disables the corresponding filter.
server <- function(input, output) {
  output$table <- renderDataTable({
    filtered <- mpg
    # Column name -> current selection, in the same order the original
    # applied the filters (manufacturer, cyl, trans).
    selections <- c(manufacturer = input$man, cyl = input$cyl, trans = input$trans)
    for (column in names(selections)) {
      if (selections[[column]] != "All") {
        filtered <- filtered[filtered[[column]] == selections[[column]], ]
      }
    }
    filtered
  })
}
# Launch the application.
shinyApp(ui = ui, server = server)
# Within-study variance-covariance matrices of mean differences across K
# studies with (possibly) multiple outcomes.
#
# r       : list of K correlation matrices between outcomes.
# nt, nc  : K x p matrices of treatment/control sample sizes per outcome.
# n_rt, n_rc: optional lists of K pairwise-overlap count matrices; any NA
#           entry (or NA for the whole argument, the default) falls back to
#           min(n[k, i], n[k, j]).
# sdt, sdc: K x p matrices of treatment/control standard deviations.
#
# Returns a list with `list.vcov` (K covariance matrices) and `matrix.vcov`
# (K x p(p+1)/2 matrix of their vectorized triangles via smTovec()).
md.vcov <- function(r, nt, nc, n_rt = NA, n_rc = NA, sdt, sdc) {
  K <- length(r)
  # One outcome when nt collapses to a single column.
  if (length(as.vector(nt)) == length(as.matrix(nt)[, 1])) {
    colum.number <- 1
  } else {
    colum.number <- ncol(nt)
  }
  col.vac.number <- (colum.number + 1) * colum.number / 2
  # Fill unspecified pairwise overlap counts with min(n_i, n_j).
  # Fix: the original tested `is.na(x) & (length(x) == 1)`, which produces a
  # vector condition inside `if` (an error in R >= 4.2) whenever a list of
  # matrices is supplied; the scalar `&&` form short-circuits safely.
  fill_overlap <- function(n_pair, n_obs) {
    if (length(n_pair) == 1 && all(is.na(n_pair))) {
      n_pair <- rep(list(matrix(NA, colum.number, colum.number)), K)
    }
    for (k in seq_len(K)) {
      for (i in seq_len(colum.number)) {
        for (j in seq_len(colum.number)) {
          if (is.na(n_pair[[k]][i, j])) {
            n_pair[[k]][i, j] <- min(n_obs[k, i], n_obs[k, j])
          }
        }
      }
    }
    n_pair
  }
  n_rt <- fill_overlap(n_rt, nt)
  n_rc <- fill_overlap(n_rc, nc)
  # Covariance of outcomes (i, j) within study k: control + treatment terms.
  list.corr.st.varcovar <- vector("list", K)
  for (k in seq_len(K)) {
    list.corr.st.varcovar[[k]] <- matrix(NA, colum.number, colum.number)
    for (i in seq_len(colum.number)) {
      for (j in seq_len(colum.number)) {
        list.corr.st.varcovar[[k]][i, j] <-
          unlist(r[[k]][i, j] * n_rc[[k]][i, j] * sdc[k, i] * sdc[k, j] / (nc[k, i] * nc[k, j]) +
                   r[[k]][i, j] * n_rt[[k]][i, j] * sdt[k, i] * sdt[k, j] / (nt[k, i] * nt[k, j]))
      }
    }
  }
  # Stack the vectorized matrices, one study per row.
  corr.st.varcovar <- matrix(unlist(lapply(seq_len(K), function(k) {
    smTovec(list.corr.st.varcovar[[k]])
  })), K, col.vac.number, byrow = TRUE)
  list(list.vcov = list.corr.st.varcovar,
       matrix.vcov = corr.st.varcovar)
}
# Plot a time-scale spectrum as an image with a color legend (fields
# package: tim.colors / image.plot).  Optionally annotates the right axis
# with frequencies derived from the scales via freq = SFratio * 2^scale / dt.
#
# spec     : matrix of spectral values (scales x times); NAs are zeroed.
# timevec  : time locations (need not be sorted; rows are reordered).
# scalevec : scale locations for the y axis.
# zrange   : color range; defaults to range(spec).
# parsw    : margin preset 1-3 (see switch below).
# NOTE(review): presets 1 and 2 set identical margins — presumably kept
# separate for historical callers; confirm before merging them.
cnltspec.plot <-function(spec, timevec, scalevec, zrange = NULL, xtitle = "Time", ytitle= "Scale", col.scale = tim.colors(64)[1:45], SFratio = 2, dt = 1, parsw = 3, axis4 = FALSE, frequencies = NULL){
  # Tick positions (integer scales) and their corresponding frequencies.
  scaleP <- unique(round(scalevec))
  freqP <- (SFratio*2^scaleP)/dt
  # Margin presets; any other parsw value leaves par() untouched.
  switch(parsw,
         {
           par( mar=c( 3.5,4,2,6.5))
         },
         {
           par( mar=c( 3.5,4,2,6.5))
         },
         {
           par( mar=c( 5,4,4,7.5))
         })
  spec[which(is.na(spec))]<-0
  if(is.null(zrange)){
    zrange<-range(spec, na.rm = TRUE)
  }
  # Reorder columns by time so image() receives increasing x coordinates.
  image(sort(timevec),scalevec,t(spec)[order(timevec),], xlab=xtitle,ylab=ytitle, add=FALSE,col=col.scale,zlim=zrange, cex.lab=1, axes=FALSE)
  axis(1)
  axis(2)
  box()
  # Optional right-hand axis: either derived frequencies at integer scales,
  # or user-supplied frequencies mapped back to scale positions.
  if(axis4){
    if(is.null(frequencies)){
      axis(4, at= scaleP, tick=TRUE, labels= freqP);
    }
    else{
      axis(4, at= logb((frequencies*dt)/2,2), tick=TRUE, labels= frequencies)
    }
  }
  # Color legend only (the image itself was drawn above).
  image.plot(sort(timevec),scalevec,t(spec)[order(timevec),],legend.only=TRUE, zlim=zrange, graphics.reset=FALSE,col=col.scale)
}
tsmcplm <- function(Y, X, method = c("lasso", "adapt", "mcp", "scad"), c) {
n <- length(Y)
m <- ceiling(c * sqrt(n))
q <- floor(n/m)
K_temp <- matrix(0, nrow = q, ncol = q, byrow = TRUE)
p <- dim(X)[2]
if (length(p) == 0) {
p <- 0
}
X_temp <- cbind(rep(1, n), X)
Y_temp <- c(Y)
for (i in 1:q) K_temp[i, 1:i] <- rep(1, i)
x <- NULL
y <- NULL
x[[1]] <- as.matrix(X_temp[1:((n - (q - 1) * m)), ])
y[[1]] <- Y_temp[1:((n - (q - 1) * m))]
for (i in 2:q) {
x[[i]] <- as.matrix(X_temp[(n - (q - i + 1) * m + 1):((n - (q -
i) * m)), ])
y[[i]] <- Y_temp[(n - (q - i + 1) * m + 1):((n - (q - i) * m))]
}
X_temp1 <- lapply(1:length(x), function(j, mat, list) kronecker(K_temp[j,
, drop = FALSE], x[[j]]), mat = K_temp, list = x)
Xn <- do.call("rbind", X_temp1)
w1 <- stats::lm(Y_temp ~ Xn - 1)$coefficients
w <- w1
for (i in 1:q) {
w[c(((i - 1) * (p + 1) + 1):(i * (p + 1)))] <- sum(w1[c(((i - 1) *
(p + 1) + 1):(i * (p + 1)))]^2)
}
if (method == "adapt") {
X_temp <- scale(Xn, center = FALSE, scale = 1/w)
object <- lars(X_temp, Y_temp, type = "lasso", intercept = FALSE,
normalize = FALSE)
bic <- log(n) * object$df + n * log(as.vector(object$RSS)/n)
step.bic2 <- which.min(bic)
coeff <- coef.lars(object, s = step.bic2, mode = "step")
adcp.coef.s <- sum(abs(coeff))
adcp.coef.v.m <- abs(matrix(c(coeff), q, (p + 1), byrow = T))
adcp.coef.m <- c(apply(adcp.coef.v.m, 1, max))
adcp.cp <- which(adcp.coef.m != 0)
if (length(adcp.cp) > 1) {
for (i in 2:length(adcp.cp)) {
if (adcp.cp[i] - adcp.cp[i - 1] == 1)
adcp.cp[i] <- 0
}
}
adcp.cp1 <- adcp.cp[adcp.cp > 1 & adcp.cp < q]
d1 <- length(adcp.cp1)
if (d1 == 0) {
adcpcss.cp <- 0
}
if (d1 >= 1) {
adcpcss.cp <- NULL
adcp.cp1 <- c(0, adcp.cp1, q + 1)
for (i in 1:d1) {
y1 <- NULL
x1 <- NULL
for (k in (adcp.cp1[i + 1] - 1):(adcp.cp1[i + 1] + 1))
{
y1 <- c(y1, y[[k]])
x1 <- rbind(x1, as.matrix(x[[k]]))
}
cp <- css(y1, x1)
if (cp == 0)
next
if (cp != 0) {
if (adcp.cp1[i + 1] == 0)
adcpcss.cp <- c(adcpcss.cp, cp)
if (adcp.cp1[i + 1] > 0)
adcpcss.cp <- c(adcpcss.cp, cp + n - (q - adcp.cp1[i +
1] + 2) * m)
}
}
if (length(adcpcss.cp) == 0)
adcpcss.cp <- 0 else adcpcss.cp <- adcpcss.cp
}
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
if (length(adcpcss.cp) >= 1 & min(adcpcss.cp) > 0) {
cp1 <- c(0, adcpcss.cp, n)
q <- length(cp1) - 1
K_temp <- matrix(0, nrow = q, ncol = q, byrow = TRUE)
for (i in 1:q) K_temp[i, 1:i] <- rep(1, i)
X_temp <- cbind(rep(1, n), X)
Y_temp <- c(Y)
x <- NULL
y <- NULL
for (t in 2:length(cp1)) {
x[[t - 1]] <- as.matrix(X_temp[(cp1[t - 1] + 1):cp1[t],
])
y[[t - 1]] <- Y_temp[(cp1[t - 1] + 1):cp1[t]]
}
cp0 <- NULL
for (t in 2:(length(cp1) - 1)) {
x1 <- rbind(x[[t - 1]], x[[t]])
y1 <- c(y[[t - 1]], y[[t]])
tt <- css(y1, x1)
if (tt != 0)
cp0 <- c(cp0, tt + cp1[t - 1])
}
adcpcss.cp <- cp0
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
}
}
if (method == "lasso") {
X_temp <- Xn
object <- lars(X_temp, Y_temp, type = "lasso", intercept = FALSE,
normalize = FALSE)
bic <- log(n) * object$df + n * log(as.vector(object$RSS)/n)
step.bic2 <- which.min(bic)
coeff <- coef.lars(object, s = step.bic2, mode = "step")
adcp.coef.s <- sum(abs(coeff))
adcp.coef.v.m <- abs(matrix(c(coeff), q, (p + 1), byrow = T))
adcp.coef.m <- c(apply(adcp.coef.v.m, 1, max))
adcp.cp <- which(adcp.coef.m != 0)
if (length(adcp.cp) > 1) {
for (i in 2:length(adcp.cp)) {
if (adcp.cp[i] - adcp.cp[i - 1] == 1)
adcp.cp[i] <- 0
}
}
adcp.cp1 <- adcp.cp[adcp.cp > 1 & adcp.cp < q]
d1 <- length(adcp.cp1)
if (d1 == 0) {
adcpcss.cp <- 0
}
if (d1 >= 1) {
adcpcss.cp <- NULL
adcp.cp1 <- c(0, adcp.cp1, q + 1)
for (i in 1:d1) {
y1 <- NULL
x1 <- NULL
for (k in (adcp.cp1[i + 1] - 1):(adcp.cp1[i + 1] + 1))
{
y1 <- c(y1, y[[k]])
x1 <- rbind(x1, as.matrix(x[[k]]))
}
cp <- css(y1, x1)
if (cp == 0)
next
if (cp != 0) {
if (adcp.cp1[i + 1] == 0)
adcpcss.cp <- c(adcpcss.cp, cp)
if (adcp.cp1[i + 1] > 0)
adcpcss.cp <- c(adcpcss.cp, cp + n - (q - adcp.cp1[i +
1] + 2) * m)
}
}
if (length(adcpcss.cp) == 0)
adcpcss.cp <- 0 else adcpcss.cp <- adcpcss.cp
}
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
if (length(adcpcss.cp) >= 1 & min(adcpcss.cp) > 0) {
cp1 <- c(0, adcpcss.cp, n)
q <- length(cp1) - 1
K_temp <- matrix(0, nrow = q, ncol = q, byrow = TRUE)
for (i in 1:q) K_temp[i, 1:i] <- rep(1, i)
X_temp <- cbind(rep(1, n), X)
Y_temp <- c(Y)
x <- NULL
y <- NULL
for (t in 2:length(cp1)) {
x[[t - 1]] <- as.matrix(X_temp[(cp1[t - 1] + 1):cp1[t],
])
y[[t - 1]] <- Y_temp[(cp1[t - 1] + 1):cp1[t]]
}
cp0 <- NULL
for (t in 2:(length(cp1) - 1)) {
x1 <- rbind(x[[t - 1]], x[[t]])
y1 <- c(y[[t - 1]], y[[t]])
tt <- css(y1, x1)
if (tt != 0)
cp0 <- c(cp0, tt + cp1[t - 1])
}
adcpcss.cp <- cp0
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
}
}
if (method == "mcp") {
object <- plus(Xn, Y_temp, method = "mc+", gamma = 2.4, intercept = F,
normalize = F, eps = 1e-30)
bic <- log(dim(Xn)[1]) * object$dim + dim(Xn)[1] * log(as.vector((1 -
object$r.square) * sum(Y_temp^2))/length(Y_temp))
step.bic <- which.min(bic)
mcp.coef <- coef.plus(object, lam = object$lam[step.bic])
mcp.coef.s <- sum(abs(mcp.coef))
mcp.coef.v.m <- abs(matrix(c(mcp.coef), q, (p + 1), byrow = T))
mcp.coef.m <- c(apply(mcp.coef.v.m, 1, max))
mcp.cp <- which(mcp.coef.m != 0)
if (length(mcp.cp) > 1) {
for (i in 2:length(mcp.cp)) {
if (mcp.cp[i] - mcp.cp[i - 1] == 1)
mcp.cp[i] <- 0
}
}
mcp.cp1 <- mcp.cp[mcp.cp > 1 & mcp.cp < q]
d1 <- length(mcp.cp1)
if (d1 == 0) {
mcpcss.cp <- 0
adcpcss.cp <- mcpcss.cp
}
if (d1 >= 1) {
mcpcss.cp <- NULL
mcp.cp1 <- c(0, mcp.cp1, q + 1)
for (i in 1:d1) {
y1 <- NULL
x1 <- NULL
for (k in (mcp.cp1[i + 1] - 1):(mcp.cp1[i + 1] + 1))
{
y1 <- c(y1, y[[k]])
x1 <- rbind(x1, as.matrix(x[[k]]))
}
cp <- css(y1, x1)
if (cp == 0)
next
if (cp != 0) {
if (mcp.cp1[i + 1] == 0)
mcpcss.cp <- c(mcpcss.cp, cp)
if (mcp.cp1[i + 1] > 0)
mcpcss.cp <- c(mcpcss.cp, cp + n - (q - mcp.cp1[i +
1] + 2) * m)
}
}
if (length(mcpcss.cp) == 0)
adcpcss.cp <- 0 else adcpcss.cp <- mcpcss.cp
}
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
if (length(adcpcss.cp) >= 1 & min(adcpcss.cp) > 0) {
n <- length(Y)
cp1 <- c(0, adcpcss.cp, n)
q <- length(cp1) - 1
K_temp <- matrix(0, nrow = q, ncol = q, byrow = TRUE)
for (i in 1:q) K_temp[i, 1:i] <- rep(1, i)
X_temp <- cbind(rep(1, n), X)
Y_temp <- c(Y)
x <- NULL
y <- NULL
for (t in 2:length(cp1)) {
x[[t - 1]] <- as.matrix(X_temp[(cp1[t - 1] + 1):cp1[t],
])
y[[t - 1]] <- Y_temp[(cp1[t - 1] + 1):cp1[t]]
}
cp0 <- NULL
for (t in 2:(length(cp1) - 1)) {
x1 <- rbind(x[[t - 1]], x[[t]])
y1 <- c(y[[t - 1]], y[[t]])
tt <- css(y1, x1)
if (tt != 0)
cp0 <- c(cp0, tt + cp1[t - 1])
}
adcpcss.cp <- cp0
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
}
}
if (method == "scad") {
object <- plus(Xn, Y_temp, method = "scad", gamma = 2.4, intercept = F,
normalize = F, eps = 1e-30)
bic <- log(dim(Xn)[1]) * object$dim + dim(Xn)[1] * log(as.vector((1 -
object$r.square) * sum(Y_temp^2))/length(Y_temp))
step.bic <- which.min(bic)
mcp.coef <- coef.plus(object, lam = object$lam[step.bic])
mcp.coef.s <- sum(abs(mcp.coef))
mcp.coef.v.m <- abs(matrix(c(mcp.coef), q, (p + 1), byrow = T))
mcp.coef.m <- c(apply(mcp.coef.v.m, 1, max))
mcp.cp <- which(mcp.coef.m != 0)
if (length(mcp.cp) > 1) {
for (i in 2:length(mcp.cp)) {
if (mcp.cp[i] - mcp.cp[i - 1] == 1)
mcp.cp[i] <- 0
}
}
mcp.cp1 <- mcp.cp[mcp.cp > 1 & mcp.cp < q]
d1 <- length(mcp.cp1)
if (d1 == 0) {
mcpcss.cp <- 0
adcpcss.cp <- mcpcss.cp
}
if (d1 >= 1) {
mcpcss.cp <- NULL
mcp.cp1 <- c(0, mcp.cp1, q + 1)
for (i in 1:d1) {
y1 <- NULL
x1 <- NULL
for (k in (mcp.cp1[i + 1] - 1):(mcp.cp1[i + 1] + 1))
{
y1 <- c(y1, y[[k]])
x1 <- rbind(x1, as.matrix(x[[k]]))
}
cp <- css(y1, x1)
if (cp == 0)
next
if (cp != 0) {
if (mcp.cp1[i + 1] == 0)
mcpcss.cp <- c(mcpcss.cp, cp)
if (mcp.cp1[i + 1] > 0)
mcpcss.cp <- c(mcpcss.cp, cp + n - (q - mcp.cp1[i +
1] + 2) * m)
}
}
if (length(mcpcss.cp) == 0)
adcpcss.cp <- 0 else adcpcss.cp <- mcpcss.cp
}
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
if (length(adcpcss.cp) >= 1 & min(adcpcss.cp) > 0) {
n <- length(Y)
cp1 <- c(0, adcpcss.cp, n)
q <- length(cp1) - 1
K_temp <- matrix(0, nrow = q, ncol = q, byrow = TRUE)
for (i in 1:q) K_temp[i, 1:i] <- rep(1, i)
X_temp <- cbind(rep(1, n), X)
Y_temp <- c(Y)
x <- NULL
y <- NULL
for (t in 2:length(cp1)) {
x[[t - 1]] <- as.matrix(X_temp[(cp1[t - 1] + 1):cp1[t],
])
y[[t - 1]] <- Y_temp[(cp1[t - 1] + 1):cp1[t]]
}
cp0 <- NULL
for (t in 2:(length(cp1) - 1)) {
x1 <- rbind(x[[t - 1]], x[[t]])
y1 <- c(y[[t - 1]], y[[t]])
tt <- css(y1, x1)
if (tt != 0)
cp0 <- c(cp0, tt + cp1[t - 1])
}
adcpcss.cp <- cp0
tt <- which(abs(diff(adcpcss.cp)) < 10)
if (length(tt) > 0)
adcpcss.cp <- adcpcss.cp[-tt]
}
}
if (length(adcpcss.cp) == 0)
return(0) else return(change.points = adcpcss.cp)
} |
## Fit a proportional-odds (cumulative link) regression model.
##
## Modified version of MASS::polr() that additionally returns the cumulative
## probabilities (`cumpr`), their densities (`dcumpr`), fitted category
## probabilities and density differences, and (optionally) a Hessian
## reordered so the zeta (threshold) block comes first with adjusted signs.
##
## Arguments mirror MASS::polr():
##   formula, data, weights, subset, na.action - usual model-frame arguments
##   start     - optional starting values of length pc + q (slopes, then zetas)
##   contrasts - contrasts for the model matrix
##   Hess      - if TRUE, attach the (reordered) Hessian to the result
##   model     - if TRUE, keep the model frame in the result
##   method    - link function for the cumulative probabilities
## Returns an object of class "polr".
newpolr <- function(formula, data, weights, start, ..., subset,
                    na.action, contrasts = NULL, Hess = FALSE, model = TRUE,
                    method = c("logit", "probit", "cloglog","loglog", "cauchit"))
{
    m <- match.call(expand.dots = FALSE)
    method <- match.arg(method)
    if(is.matrix(eval.parent(m$data))) m$data <- as.data.frame(data)
    ## Strip arguments that model.frame() does not understand, then
    ## re-evaluate the call as model.frame() in the parent frame.
    m$start <- m$Hess <- m$method <- m$model <- m$... <- NULL
    m[[1L]] <- as.name("model.frame")
    m <- eval.parent(m)
    Terms <- attr(m, "terms")
    x <- model.matrix(Terms, m, contrasts)
    xint <- match("(Intercept)", colnames(x), nomatch = 0L)
    n <- nrow(x)
    pc <- ncol(x)
    cons <- attr(x, "contrasts")
    ## The intercept is absorbed into the thresholds (zeta), so drop the
    ## intercept column from the design matrix.
    if(xint > 0L) {
        x <- x[, -xint, drop = FALSE]
        pc <- pc - 1L
    } else warning("an intercept is needed and assumed")
    wt <- model.weights(m)
    if(!length(wt)) wt <- rep(1, n)
    offset <- model.offset(m)
    if(length(offset) <= 1L) offset <- rep(0, n)
    y <- model.response(m)
    if(!is.factor(y)) stop("response must be a factor")
    lev <- levels(y); llev <- length(lev)
    if(llev <= 2L) stop("response must have 3 or more levels")
    ## unclass() keeps the "levels" attribute, so newpolr.fit can still
    ## recover the level labels from y.
    y <- unclass(y)
    q <- llev - 1L
    Y <- matrix(0, n, q)
    if(missing(start)) {
        ## Derive starting values from a binary GLM on a median split of
        ## the response.  As in MASS::polr, the probit family is used for
        ## cloglog/loglog as well -- only rough starting values are needed.
        q1 <- llev %/% 2L
        y1 <- (y > q1)
        X <- cbind(Intercept = rep(1, n), x)
        fit <-
            switch(method,
                   "logit"= glm.fit(X, y1, wt, family = binomial(), offset = offset),
                   "probit" = glm.fit(X, y1, wt, family = binomial("probit"), offset = offset),
                   "cloglog" = glm.fit(X, y1, wt, family = binomial("probit"), offset = offset),
                   "loglog" = glm.fit(X, y1, wt, family = binomial("probit"), offset = offset),
                   "cauchit" = glm.fit(X, y1, wt, family = binomial("cauchit"), offset = offset))
        if(!fit$converged)
            stop("attempt to find suitable starting values failed")
        coefs <- fit$coefficients
        if(any(is.na(coefs))) {
            warning("design appears to be rank-deficient, so dropping some coefs")
            keep <- names(coefs)[!is.na(coefs)]
            coefs <- coefs[keep]
            x <- x[, keep[-1L], drop = FALSE]
            pc <- ncol(x)
        }
        ## Space the starting thresholds on the logit scale (rescaled by
        ## 1.7 for non-logit links) around the GLM intercept.
        logit <- function(p) log(p/(1 - p))
        spacing <- logit((1L:q)/(q+1L))
        if(method != "logit") spacing <- spacing/1.7
        gammas <- -coefs[1L] + spacing - spacing[q1]
        start <- c(coefs[-1L], gammas)
    } else if(length(start) != pc + q)
        stop("'start' is not of the correct length")
    ans <- newpolr.fit(x, y, wt, start, offset, method, hessian = Hess, ...)
    beta <- ans$coefficients
    zeta <- ans$zeta
    deviance <- ans$deviance
    grad <- ans$grad
    res <- ans$res
    niter <- c(f.evals = res$counts[1L], g.evals = res$counts[2L])
    eta <- if(pc) offset + drop(x %*% beta) else offset + rep(0, n)
    ## Link-specific cdf and density used to build fitted probabilities.
    ## pgumbel/dgumbel/pGumbel/dGumbel are expected elsewhere in this file
    ## -- TODO confirm they are defined for the cloglog/loglog links.
    pfun <- switch(method, logit = plogis, probit = pnorm,
                   cloglog = pgumbel,loglog=pGumbel, cauchit = pcauchy)
    dfun <- switch(method, logit = dlogis, probit = dnorm,
                   cloglog = dgumbel,loglog=dGumbel, cauchit = dcauchy)
    ## Cumulative probabilities P(Y <= k | x) and their densities.
    cumpr <- matrix(pfun(matrix(zeta, n, q, byrow=TRUE) - eta), , q)
    dcumpr <- matrix(dfun(matrix(zeta, n, q, byrow=TRUE) - eta), , q)
    dimnames(cumpr) <- dimnames(dcumpr) <- list(row.names(m), names(zeta))
    ## Category probabilities as first differences of the cumulative ones.
    fitted <- t(apply(cumpr, 1L, function(x) diff(c(0, x, 1))))
    dfitted <- t(apply(dcumpr, 1L, function(x) diff(c(0, x, 0))))
    dimnames(fitted) <- dimnames(dfitted) <- list(row.names(m), lev)
    fit <- list(coefficients = beta, zeta = zeta, deviance = deviance,
                grad = grad, cumpr=cumpr, dcumpr=dcumpr,
                fitted.values = fitted, dfitted.values=dfitted,
                lev = lev, terms = Terms,
                df.residual = sum(wt) - pc - q, edf = pc + q, n = sum(wt),
                nobs = sum(wt), call = match.call(), method = method,
                convergence = res$convergence, niter = niter, lp = eta)
    if(Hess) {
        ## Reorder the optim() Hessian so the zeta block comes first and
        ## flip the signs of the diagonal blocks.
        dn <- c(names(beta), names(zeta))
        H <- res$hessian
        dimnames(H) <- list(dn, dn)
        Hessian <- matrix(nrow=nrow(H),ncol=ncol(H))
        dimnames(Hessian) <- list(c(names(zeta),names(beta)),c(names(zeta),names(beta)))
        Hessian[names(zeta),names(zeta)] <- -H[names(zeta),names(zeta)]
        Hessian[names(beta),names(beta)] <- -H[names(beta),names(beta)]
        Hessian[names(beta),names(zeta)] <- H[names(beta),names(zeta)]
        Hessian[names(zeta),names(beta)] <- H[names(zeta),names(beta)]
        fit$Hessian <- Hessian
    }
    if(model) fit$model <- m
    fit$na.action <- attr(m, "na.action")
    fit$contrasts <- cons
    fit$xlevels <- .getXlevels(Terms, m)
    class(fit) <- "polr"
    fit
}
## Work-horse optimiser for newpolr(): maximises the cumulative-link
## log-likelihood with BFGS and returns coefficients, thresholds (zeta),
## the per-observation gradient matrix and the deviance.
##
## x      - model matrix without intercept (n x pc)
## y      - factor-coded response (integer codes 1..q+1, levels attribute kept)
## wt     - case weights
## start  - starting values c(beta, zeta) of length pc + q
## offset - linear-predictor offset
## method - link name; selects the cdf (pfun) and density (dfun)
## ...    - forwarded to optim() (e.g. hessian = TRUE)
newpolr.fit <- function(x, y, wt, start, offset, method, ...)
{
    ## Negative log-likelihood at parameter vector beta = c(slopes, thetas).
    ## Cell probability is F(gamma[y+1] - eta) - F(gamma[y] - eta); the
    ## arguments are clipped to +/-100 for numerical stability.
    fmin <- function(beta) {
        theta <- beta[pc + ind_q]
        gamm <- c(-Inf , theta, Inf)
        eta <- offset
        if (pc) eta <- eta + drop(x %*% beta[ind_pc])
        pr <- pfun(pmin(100, gamm[y + 1L] - eta)) -
            pfun(pmax(-100, gamm[y] - eta))
        if (all(pr > 0)) -sum(wt * log(pr)) else Inf
    }
    ## Analytic gradient of fmin; returns NA if any cell probability is
    ## non-positive so optim() can back off.
    gmin <- function(beta)
    {
        theta <- beta[pc + ind_q]
        gamm <- c(-Inf, theta, Inf)
        eta <- offset
        if(pc) eta <- eta + drop(x %*% beta[ind_pc])
        z1 <- pmin(100, gamm[y+1L] - eta)
        z2 <- pmax(-100, gamm[y] - eta)
        pr <- pfun(z1) - pfun(z2)
        p1 <- dfun(z1); p2 <- dfun(z2)
        g1 <- if(pc) crossprod(x, wt*(p1 - p2)/pr) else numeric()
        xx <- .polrY1*p1 - .polrY2*p2
        g2 <- - crossprod(xx, wt/pr)
        if (all(pr > 0)) c(g1, g2) else rep(NA_real_, pc+q)
    }
    ## Per-observation gradient matrix (n rows).  Columns are cbind(-g2, g1),
    ## i.e. threshold contributions first, then slope contributions.
    gfun <- function(beta) {
        theta <- beta[pc + ind_q]
        gamm <- c(-Inf, theta, Inf)
        eta <- offset
        if(pc) eta <- eta + drop(x %*% beta[ind_pc])
        z1 <- gamm[y + 1L] - eta
        z2 <- gamm[y] - eta
        pr <- pfun(z1) - pfun(z2)
        p1 <- dfun(z1)
        p2 <- dfun(z2)
        g1 <- if(pc) x * (wt*(p1 - p2)/pr) else numeric()
        xx <- .polrY1*p1 - .polrY2*p2
        g2 <- - xx * (wt/pr)
        if(all(pr > 0)) cbind(-g2, g1) else matrix(NA_real_, n, pc+q)
    }
    ## Per-observation derivative using only the upper threshold density.
    dgamma <- function(beta) {
        theta <- beta[pc + ind_q]
        gamm <- c(-Inf, theta, Inf)
        eta <- offset
        if(pc) eta <- eta + drop(x %*% beta[ind_pc])
        p1 <- dfun(gamm[y + 1L] - eta)
        g1 <- if(pc) x * (wt*(p1))
        g2 <- .polrY1*wt*p1
        cbind(g1,g2)
    }
    ## Per-observation derivative using the density difference of both
    ## thresholds (unnormalised counterpart of gfun).
    dp0 <- function(beta) {
        theta <- beta[pc + ind_q]
        gamm <- c(-Inf, theta, Inf)
        eta <- offset
        if(pc) eta <- eta + drop(x %*% beta[ind_pc])
        p1 <- dfun(gamm[y + 1L] - eta)
        p2 <- dfun(gamm[y] - eta)
        g1 <- if(pc) x * (wt*(p1 - p2))
        g2 <- wt*(.polrY1*p1 - .polrY2*p2)
        cbind(g1,g2)
    }
    ## Link-specific cdf/density; pgumbel/pGumbel etc. are expected to be
    ## defined elsewhere in this file -- TODO confirm.
    pfun <- switch(method, logit = plogis, probit = pnorm,
                   cloglog = pgumbel,loglog=pGumbel, cauchit = pcauchy)
    dfun <- switch(method, logit = dlogis, probit = dnorm,
                   cloglog = dgumbel,loglog=dGumbel, cauchit = dcauchy)
    n <- nrow(x)
    pc <- ncol(x)
    ind_pc <- seq_len(pc)
    lev <- levels(y)
    if(length(lev) <= 2L) stop("response must have 3 or more levels")
    y <- unclass(y)
    q <- length(lev) - 1L
    ind_q <- seq_len(q)
    ## Indicator matrices: for observation i, .polrY1 marks column y[i] and
    ## .polrY2 marks column y[i]-1 (used to pick the relevant thresholds).
    Y <- matrix(0L, n, q)
    .polrY1 <- col(Y) == y; .polrY2 <- col(Y) == (y - 1L)
    res <- optim(start, fmin, gmin, method="BFGS", ...)
    beta <- res$par[ind_pc]
    theta <- res$par[pc + ind_q]
    zeta <- theta
    grad <- gfun(res$par)
    deviance <- 2 * res$value
    ## Threshold names of the form "level_k|level_k+1".
    names(zeta) <- paste(lev[-length(lev)], lev[-1L], sep="|")
    if(pc) names(beta) <- colnames(x)
    ## NOTE(review): gfun() returns columns in (threshold, slope) order but
    ## dn labels them (beta, zeta) -- verify the intended column order.
    dn <- c(names(beta), names(zeta))
    colnames(grad) <- dn
    list(coefficients = beta, zeta = zeta, grad=grad, deviance = deviance,
         res = res)
}
setId = function(learner, id) {
  # Deprecated: use setLearnerId() instead.  Kept as a thin validating
  # wrapper that sets the id slot on a (checked) learner and returns it.
  .Deprecated("setLearnerId")
  lrn = checkLearner(learner)
  assertString(id)
  lrn$id = id
  return(lrn)
}
## GUI (gWidgets2) front-end for calculateStatistics() in strvalidator.
## Opens a window where the user selects a data.frame from `env`, chooses
## the target column, optional grouping column(s), an optional count
## column and a quantile, then computes summary statistics and saves the
## result (with an audit trail) back into `env`.
##
## data, target, quant, group, count - optional presets for the widgets.
## env     - environment to list datasets from / save results to.
## savegui - if not NULL, fixes the "save GUI settings" checkbox.
## debug   - print verbose progress information.
## parent  - parent window to refocus when this window is closed.
calculateStatistics_gui <- function(data = NULL, target = NULL, quant = 0.95,
                                    group = NULL, count = NULL,
                                    env = parent.frame(), savegui = NULL,
                                    debug = FALSE, parent = NULL) {
  ## Currently selected dataset and its name (shared with the handlers).
  .gData <- NULL
  .gDataName <- NULL
  ## Function name, used for help lookup, audit trail and translations.
  fnc <- as.character(match.call()[[1]])
  if (debug) {
    print(paste("IN:", fnc))
  }

  ## Default (English) UI strings.
  strWinTitle <- "Calculate summary statistics"
  strChkGui <- "Save GUI settings"
  strBtnHelp <- "Help"
  strFrmDataset <- "Dataset"
  strLblDataset <- "Dataset:"
  strDrpDataset <- "<Select dataset>"
  strLblRows <- "rows"
  strFrmOptions <- "Options"
  strLblTarget <- "Select target column:"
  strLblGroupBy <- "Group by column(s):"
  strLblCount <- "Count unique values in column:"
  strDrpColumn <- "<Select Columns>"
  strLblQuantile <- "Calculate quantile"
  strFrmSave <- "Save as"
  strLblSave <- "Name for result:"
  strBtnCalculate <- "Calculate"
  strBtnProcessing <- "Processing..."
  strMsgDataset <- "A dataset must be selected."
  strMsgTitleDataset <- "Dataset not selected"
  strMsgCheck <- "Data frame is NULL!\n\nMake sure to select a sample dataset."
  strMsgTitleError <- "Error"

  ## Override the defaults with translated strings from getStrings(),
  ## keeping the default whenever a key is missing (NA).
  dtStrings <- getStrings(gui = fnc)
  if (!is.null(dtStrings)) {
    strtmp <- dtStrings["strWinTitle"]$value
    strWinTitle <- ifelse(is.na(strtmp), strWinTitle, strtmp)
    strtmp <- dtStrings["strChkGui"]$value
    strChkGui <- ifelse(is.na(strtmp), strChkGui, strtmp)
    strtmp <- dtStrings["strBtnHelp"]$value
    strBtnHelp <- ifelse(is.na(strtmp), strBtnHelp, strtmp)
    strtmp <- dtStrings["strFrmDataset"]$value
    strFrmDataset <- ifelse(is.na(strtmp), strFrmDataset, strtmp)
    strtmp <- dtStrings["strLblDataset"]$value
    strLblDataset <- ifelse(is.na(strtmp), strLblDataset, strtmp)
    strtmp <- dtStrings["strDrpDataset"]$value
    strDrpDataset <- ifelse(is.na(strtmp), strDrpDataset, strtmp)
    strtmp <- dtStrings["strLblRows"]$value
    strLblRows <- ifelse(is.na(strtmp), strLblRows, strtmp)
    strtmp <- dtStrings["strFrmOptions"]$value
    strFrmOptions <- ifelse(is.na(strtmp), strFrmOptions, strtmp)
    strtmp <- dtStrings["strLblTarget"]$value
    strLblTarget <- ifelse(is.na(strtmp), strLblTarget, strtmp)
    strtmp <- dtStrings["strLblGroupBy"]$value
    strLblGroupBy <- ifelse(is.na(strtmp), strLblGroupBy, strtmp)
    strtmp <- dtStrings["strLblCount"]$value
    strLblCount <- ifelse(is.na(strtmp), strLblCount, strtmp)
    strtmp <- dtStrings["strDrpColumn"]$value
    strDrpColumn <- ifelse(is.na(strtmp), strDrpColumn, strtmp)
    strtmp <- dtStrings["strLblQuantile"]$value
    strLblQuantile <- ifelse(is.na(strtmp), strLblQuantile, strtmp)
    strtmp <- dtStrings["strFrmSave"]$value
    strFrmSave <- ifelse(is.na(strtmp), strFrmSave, strtmp)
    strtmp <- dtStrings["strLblSave"]$value
    strLblSave <- ifelse(is.na(strtmp), strLblSave, strtmp)
    strtmp <- dtStrings["strBtnCalculate"]$value
    strBtnCalculate <- ifelse(is.na(strtmp), strBtnCalculate, strtmp)
    strtmp <- dtStrings["strBtnProcessing"]$value
    strBtnProcessing <- ifelse(is.na(strtmp), strBtnProcessing, strtmp)
    strtmp <- dtStrings["strMsgDataset"]$value
    strMsgDataset <- ifelse(is.na(strtmp), strMsgDataset, strtmp)
    strtmp <- dtStrings["strMsgTitleDataset"]$value
    strMsgTitleDataset <- ifelse(is.na(strtmp), strMsgTitleDataset, strtmp)
    strtmp <- dtStrings["strMsgCheck"]$value
    strMsgCheck <- ifelse(is.na(strtmp), strMsgCheck, strtmp)
    strtmp <- dtStrings["strMsgTitleError"]$value
    strMsgTitleError <- ifelse(is.na(strtmp), strMsgTitleError, strtmp)
  }

  ## Main window; settings are persisted when the window is destroyed.
  w <- gwindow(title = strWinTitle, visible = FALSE)
  addHandlerUnrealize(w, handler = function(h, ...) {
    .saveSettings()
    if (!is.null(parent)) {
      focus(parent)
    }
    ## Work around differing destroy-event semantics between toolkits:
    ## old gWidgets2tcltk expects TRUE to block, newer versions FALSE.
    if (gtoolkit() == "tcltk") {
      if (as.numeric(gsub("[^0-9]", "", packageVersion("gWidgets2tcltk"))) <= 106) {
        message("tcltk version <= 1.0.6, returned TRUE!")
        return(TRUE)
      } else {
        message("tcltk version >1.0.6, returned FALSE!")
        return(FALSE)
      }
    } else {
      message("RGtk2, returned FALSE!")
      return(FALSE)
    }
  })

  ## Top-level vertical layout.
  gv <- ggroup(
    horizontal = FALSE,
    spacing = 8,
    use.scrollwindow = FALSE,
    container = w,
    expand = TRUE
  )
  ## Header row: "save settings" checkbox on the left, help on the right.
  gh <- ggroup(container = gv, expand = FALSE, fill = "both")
  savegui_chk <- gcheckbox(text = strChkGui, checked = FALSE, container = gh)
  addSpring(gh)
  help_btn <- gbutton(text = strBtnHelp, container = gh)
  addHandlerChanged(help_btn, handler = function(h, ...) {
    print(help(fnc, help_type = "html"))
  })

  ## Dataset selection frame.
  data_frm <- gframe(
    text = strFrmDataset,
    horizontal = TRUE,
    spacing = 5,
    container = gv
  )
  glabel(text = strLblDataset, container = data_frm)
  dfs <- c(strDrpDataset, listObjects(env = env, obj.class = "data.frame"))
  data_drp <- gcombobox(
    items = dfs,
    selected = 1,
    editable = FALSE,
    container = data_frm,
    ellipsize = "none"
  )
  data_rows_lbl <- glabel(
    text = paste(" 0", strLblRows),
    container = data_frm
  )
  addHandlerChanged(data_drp, handler = function(h, ...) {
    .updateWidgets()
  })

  ## Options frame: target / group / count columns and the quantile.
  option_frm <- gframe(
    text = strFrmOptions,
    horizontal = FALSE,
    spacing = 10,
    container = gv
  )
  glabel(text = strLblTarget, container = option_frm)
  target_drp <- gcombobox(
    items = strDrpColumn,
    container = option_frm, ellipsize = "none"
  )
  glabel(text = strLblGroupBy, container = option_frm)
  group_drp <- gcombobox(
    items = strDrpColumn,
    container = option_frm, ellipsize = "none"
  )
  ## The group edit box accumulates a comma-separated column list.
  group_edt <- gedit(
    text = ifelse(is.null(group), "", paste(group, collapse = ",")),
    container = option_frm
  )
  addHandlerChanged(group_drp, handler = function(h, ...) {
    val_column <- svalue(group_drp)
    val_value <- svalue(group_edt)
    if (!is.null(val_column)) {
      if (val_column != strDrpColumn) {
        ## Append the selected column to the comma-separated list.
        if (nchar(val_value) == 0) {
          svalue(group_edt) <- val_column
        } else {
          svalue(group_edt) <- paste(val_value, val_column, sep = ",")
        }
      }
    }
  })
  glabel(text = strLblCount, container = option_frm)
  count_drp <- gcombobox(
    items = strDrpColumn,
    container = option_frm, ellipsize = "none"
  )
  glabel(text = strLblQuantile, container = option_frm)
  quant_spb <- gspinbutton(
    from = 0, to = 1,
    by = 0.01, value = quant,
    container = option_frm
  )

  ## Save-as frame.
  save_frame <- gframe(text = strFrmSave, container = gv)
  glabel(text = strLblSave, container = save_frame)
  save_edt <- gedit(expand = TRUE, fill = TRUE, container = save_frame)

  ## Calculate button: validates input, runs calculateStatistics(),
  ## attaches the audit trail and saves the result into `env`.
  calculate_btn <- gbutton(text = strBtnCalculate, container = gv)
  addHandlerClicked(calculate_btn, handler = function(h, ...) {
    val_data <- .gData
    val_obj <- .gDataName
    val_name <- svalue(save_edt)
    val_target <- svalue(target_drp)
    val_group <- svalue(group_edt)
    val_count <- svalue(count_drp)
    val_quant <- svalue(quant_spb)
    if (val_group == strDrpColumn) {
      val_group <- NULL
    }
    if (debug) {
      print("Read Values:")
      print("val_data")
      print(head(val_data))
      print("val_name")
      print(val_name)
      print("val_target")
      print(val_target)
      print("val_group")
      print(val_group)
      print("val_count")
      print(val_count)
      print("val_quant")
      print(val_quant)
    }
    if (!is.null(val_data)) {
      ## Normalise placeholder selections to NULL; split the group list.
      if (!nchar(val_target) > 0 || val_target == strDrpColumn) {
        val_target <- NULL
      } else {
        val_target
      }
      ## NOTE(review): if val_group was set to NULL above, nchar(NULL)
      ## yields a length-0 result here -- confirm this branch cannot be
      ## reached with val_group == NULL under the targeted R version.
      if (!nchar(val_group) > 0 || val_group == strDrpColumn) {
        val_group <- NULL
      } else {
        val_group <- unlist(strsplit(val_group, split = ","))
      }
      if (!nchar(val_count) > 0 || val_count == strDrpColumn) {
        val_count <- NULL
      } else {
        val_count
      }
      ## Ensure the selected dataset still has all required columns.
      requiredCol <- c(val_target, val_group, val_count)
      requiredCol <- requiredCol[requiredCol != strDrpColumn]
      ok <- checkDataset(
        name = val_obj, reqcol = requiredCol,
        env = env, parent = w, debug = debug
      )
      if (ok) {
        if (debug) {
          print("Sent Values:")
          print("val_target")
          print(val_target)
          print("val_group")
          print(val_group)
          print("val_count")
          print(val_count)
          print("val_quant")
          print(val_quant)
        }
        ## Indicate that calculation is in progress and disable the button.
        blockHandlers(calculate_btn)
        svalue(calculate_btn) <- strBtnProcessing
        unblockHandlers(calculate_btn)
        enabled(calculate_btn) <- FALSE
        datanew <- calculateStatistics(
          data = val_data,
          target = val_target,
          group = val_group,
          count = val_count,
          quant = val_quant,
          debug = debug
        )
        ## Record the call parameters in the result's audit trail.
        keys <- list("data", "target", "group", "count", "quant")
        values <- list(val_obj, val_target, val_group, val_count, val_quant)
        datanew <- auditTrail(
          obj = datanew, key = keys, value = values,
          label = fnc, arguments = FALSE,
          package = "strvalidator"
        )
        saveObject(name = val_name, object = datanew, parent = w, env = env)
        if (debug) {
          print(str(datanew))
          print(head(datanew))
          print(paste("EXIT:", fnc))
        }
        ## Only persist GUI settings when no presets were supplied.
        if (all(is.null(data), is.null(target), is.null(group))) {
          .saveSettings()
        }
        dispose(w)
      }
    } else {
      gmessage(
        msg = strMsgDataset,
        title = strMsgTitleDataset,
        icon = "error",
        parent = w
      )
    }
  })

  ## Refresh all widgets after the dataset selection changed.
  .updateWidgets <- function() {
    val_obj <- svalue(data_drp)
    if (val_obj != strDrpDataset) {
      ## Load the selected dataset and repopulate the column dropdowns.
      .gData <<- get(val_obj, envir = env)
      .gDataName <<- val_obj
      svalue(data_rows_lbl) <- paste(nrow(.gData), strLblRows)
      target_columns <- unique(c(strDrpColumn, names(.gData)))
      target_drp[] <- target_columns
      group_drp[] <- target_columns
      count_drp[] <- target_columns
      svalue(target_drp, index = TRUE) <- ifelse(is.null(target), 1,
        which(target_columns %in% target)
      )
      svalue(group_drp, index = TRUE) <- 1
      svalue(count_drp, index = TRUE) <- ifelse(is.null(count), 1,
        which(target_columns %in% count)
      )
      ## Suggest a default name for the result object.
      svalue(save_edt) <- paste(val_obj, "_stats", sep = "")
    } else {
      ## No dataset selected: reset everything to placeholders.
      .gData <<- NULL
      .gDataName <<- NULL
      svalue(data_drp, index = TRUE) <- 1
      svalue(data_rows_lbl) <- paste(" 0", strLblRows)
      svalue(save_edt) <- ""
      target_drp[] <- strDrpColumn
      group_drp[] <- strDrpColumn
      count_drp[] <- strDrpColumn
      svalue(target_drp, index = TRUE) <- 1
      svalue(group_drp, index = TRUE) <- 1
      svalue(count_drp, index = TRUE) <- 1
    }
    ## Re-arm the calculate button.
    blockHandlers(calculate_btn)
    svalue(calculate_btn) <- strBtnCalculate
    unblockHandlers(calculate_btn)
    enabled(calculate_btn) <- TRUE
  }

  ## Restore previously saved GUI settings from `env` (if enabled).
  .loadSavedSettings <- function() {
    if (!is.null(savegui)) {
      svalue(savegui_chk) <- savegui
      enabled(savegui_chk) <- FALSE
      if (debug) {
        print("Save GUI status set!")
      }
    } else {
      if (exists(".strvalidator_calculateStatistics_gui_savegui", envir = env, inherits = FALSE)) {
        svalue(savegui_chk) <- get(".strvalidator_calculateStatistics_gui_savegui", envir = env)
      }
      if (debug) {
        print("Save GUI status loaded!")
      }
    }
    if (debug) {
      print(svalue(savegui_chk))
    }
    if (svalue(savegui_chk)) {
      if (exists(".strvalidator_calculateStatistics_gui_group", envir = env, inherits = FALSE)) {
        svalue(group_edt) <- get(".strvalidator_calculateStatistics_gui_group", envir = env)
      }
      if (exists(".strvalidator_calculateStatistics_gui_quant", envir = env, inherits = FALSE)) {
        svalue(quant_spb) <- get(".strvalidator_calculateStatistics_gui_quant", envir = env)
      }
      if (debug) {
        print("Saved settings loaded!")
      }
    }
  }

  ## Persist (or clear) GUI settings in `env` depending on the checkbox.
  .saveSettings <- function() {
    if (svalue(savegui_chk)) {
      assign(x = ".strvalidator_calculateStatistics_gui_savegui", value = svalue(savegui_chk), envir = env)
      assign(x = ".strvalidator_calculateStatistics_gui_group", value = svalue(group_edt), envir = env)
      assign(x = ".strvalidator_calculateStatistics_gui_quant", value = svalue(quant_spb), envir = env)
    } else {
      if (exists(".strvalidator_calculateStatistics_gui_savegui", envir = env, inherits = FALSE)) {
        remove(".strvalidator_calculateStatistics_gui_savegui", envir = env)
      }
      if (exists(".strvalidator_calculateStatistics_gui_group", envir = env, inherits = FALSE)) {
        remove(".strvalidator_calculateStatistics_gui_group", envir = env)
      }
      if (exists(".strvalidator_calculateStatistics_gui_quant", envir = env, inherits = FALSE)) {
        remove(".strvalidator_calculateStatistics_gui_quant", envir = env)
      }
      if (debug) {
        print("Settings cleared!")
      }
    }
    if (debug) {
      print("Settings saved!")
    }
  }

  ## Initialise: preselect the `data` preset (if present), populate
  ## widgets, load settings, then show the window.
  i_drp <- which(dfs %in% data)
  svalue(data_drp, index = TRUE) <- ifelse(length(i_drp) == 0, 1, i_drp)
  .updateWidgets()
  if (all(is.null(data), is.null(target), is.null(group))) {
    .loadSavedSettings()
  } else {
    enabled(savegui_chk) <- FALSE
  }
  visible(w) <- TRUE
  focus(w)
}
## Collapse ("mash") a clustered capthist object so that captures from
## all detector clusters are placed on a single template grid (see the
## secr package's mash() documentation).  Cluster identities are retained
## only through the 'n.mash' and 'centres' attributes of the result.
##
## object       - capthist object (possibly multi-session)
## origin       - coordinates for the lower-left corner of the mashed grid
## clustergroup - optional cluster IDs (or list of ID vectors) to subset;
##                a list of length > 1 produces one mashed capthist per group
## ...          - passed through to make.capthist()
mash <- function(object, origin = c(0,0), clustergroup = NULL, ...) {
    ## Case 1: several cluster groups -- recurse once per group and
    ## return a capthist list.
    if (is.list(clustergroup) & (length(clustergroup) > 1)) {
        if (ms(object))
            stop ("cannot regroup multisession capthist")
        out <- vector('list')
        for (i in 1:length(clustergroup)) {
            out[[i]] <- mash (object, origin, clustergroup[[i]], ...)
        }
        names(out) <- names(clustergroup)
        class(out) <- c('capthist', 'list')
        if (length(out) == 1) out <- out[[1]]
        return(out)
    }
    ## Case 2: multi-session input -- mash each session separately.
    else if (ms(object)) {
        out <- lapply(object, mash, origin, clustergroup, ...)
        names(out) <- names(object)
        class(out) <- c('capthist', 'list')
        if (length(out) == 1) out <- out[[1]]
        return(out)
    }
    ## Case 3: single-session capthist -- do the actual mashing.
    else {
        ## Optionally restrict to the requested cluster(s).
        if (!is.null(clustergroup)) {
            trapsi <- clusterID(traps(object)) %in% clustergroup
            object <- subset(object, traps = trapsi)
        }
        trps <- traps(object)
        ## Detector covariates and usage cannot be carried over.
        if (!is.null(covariates(trps)))
            warning ("detector covariates are discarded by mash()")
        if (!is.null(usage(trps)))
            warning ("usage discarded by mash()")
        cluster <- clusterID(trps)
        centres <- cluster.centres(trps)
        ## Count distinct animals per cluster (based on first detection).
        cl <- cluster[trap(object, names = FALSE, sortorder = 'ksn')]
        ID <- animalID(object, names = FALSE, sortorder = 'ksn')
        n.mash <- table (cl[match(unique(ID),ID)])
        class(n.mash) <- 'integer'
        if (is.null(cluster))
            stop ("requires cluster covariate")
        tmp <- split(trps, cluster)
        if (length(unique(sapply(tmp, nrow))) != 1)
            warning ("unequal number of detectors per cluster")
        ## Within-cluster trap numbering; construct it if absent.
        trapnum <- clustertrap(trps)
        if (is.null(trapnum)) {
            tmp <- lapply(tmp, function(x) {x$trapnum <- 1:nrow(x); x})
            trapnum <- unlist(sapply(tmp, function(x) x$trapnum))
            tmp[[1]]$trapnum <- NULL
        }
        ## Use the first cluster as the template grid, shifted to `origin`,
        ## and drop cluster-related attributes.
        newtraps <- tmp[[1]]
        rownames(newtraps) <- 1:nrow(newtraps)
        mxy <- apply(newtraps, 2, min)
        newtraps <- shift(newtraps, origin-mxy[1:2])
        attr(newtraps, 'cluster') <- NULL
        attr(newtraps, 'clustertrap') <- NULL
        attr(newtraps, 'covariates') <- NULL
        sigcov <- NULL
        ## Build the capture data frame; use a dummy 'NONE' row when there
        ## are no captures so make.capthist() still succeeds.
        if ( length(animalID(object)) == 0) {
            tempdf <- data.frame(
                session = session(object),
                ID = 'NONE',
                occ = ncol(object),
                trap = 1)
        }
        else {
            tempdf <- data.frame(
                session = rep(session(object), length(animalID(object))),
                ID = animalID(object, sortorder = 'ksn'),
                occ = occasion(object, sortorder = 'ksn'),
                trap = trapnum[trap(object, names=FALSE, sortorder = 'ksn')]
            )
            ## Carry any signal frame (acoustic data) across, noting which
            ## of its columns are signal covariates.
            if (!is.null(attr(object, 'signalframe'))) {
                tempdf <- cbind(tempdf, attr(object, 'signalframe'))
                sigcov <- names(tempdf)[!(names(tempdf) %in% c('signal','noise'))]
            }
        }
        tempcapt <- make.capthist(tempdf, newtraps, cutval = attr(object, "cutval"),
                                  signalcovariates = sigcov, ...)
        ## Record per-cluster animal counts and the cluster centres so the
        ## mashing can be accounted for later (e.g. in model fitting).
        attr(tempcapt, 'n.mash') <- n.mash
        attr(tempcapt, 'centres') <- centres
        tempcapt
    }
}
## Regression test (Renjin test suite): a double matrix passed through the
## native C routine 'call_copyMatrix' must come back unchanged.
library(hamcrest)
## NOTE(review): "org.renjin.test:native" is a Renjin-style artifact id,
## not a CRAN package name; require() attaches the bundled native library.
require("org.renjin.test:native")
m <- matrix(as.double(1:12), nrow = 3)
copy <- .Call("call_copyMatrix", m)
assertThat(copy, identicalTo(matrix(as.double(1:12), nrow = 3)))
## knitr chunk defaults for this vignette.  The source contained an
## unterminated string here (`comment = "`): the conventional knitr
## output prefix "#>" was evidently lost (it starts with '#', so a
## comment-stripping step removed it).  Restore it.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## pcLasso vignette walk-through: simulate data, fit principal components
## lasso models with and without predictor groups, then cross-validate.
library(pcLasso)
set.seed(944)
## Simulate n = 100 observations on p = 20 predictors; only the first
## five predictors have non-zero (= 2) coefficients.
n <- 100
p <- 20
x <- matrix(rnorm(n*p), n, p)
beta <- matrix(c(rep(2, 5), rep(0, 15)), ncol = 1)
y <- x %*% beta + rnorm(n)
## Basic fit; `ratio` controls shrinkage (see pcLasso documentation).
fit <- pcLasso(x, y, ratio = 0.8)
fit$a0[20]        # element 20 of the a0 path (intercepts, per pcLasso docs)
fit$beta[, 20]    # coefficients at the 20th lambda value
fit$nzero         # non-zero coefficient counts along the path
predict(fit, x[1:5, ])[, 20]
## Four non-overlapping groups of five predictors each.
groups <- vector("list", 4)
for (k in 1:4) {
  groups[[k]] <- 5 * (k-1) + 1:5
}
groups
fit <- pcLasso(x, y, ratio = 0.8, groups = groups)
## Make group 1 overlap group 2 (predictors 1-7) and refit.
groups[[1]] <- 1:7
groups
fit <- pcLasso(x, y, ratio = 0.8, groups = groups)
fit$a0[20]
fit$origbeta[, 20]   # coefficients in the original (overlapping) space
fit$orignzero
## Cross-validation: default folds, 5 folds, and user-supplied fold ids.
cvfit <- cv.pcLasso(x, y, groups = groups, ratio = 0.8)
cvfit <- cv.pcLasso(x, y, groups = groups, ratio = 0.8, nfolds = 5)
foldid <- sample(rep(seq(10), length = n))
cvfit <- cv.pcLasso(x, y, groups = groups, ratio = 0.8, foldid = foldid)
plot(cvfit)
plot(cvfit, orignz = FALSE)
cvfit$lambda.min
cvfit$lambda.1se
## Predictions at the default and at the CV-optimal lambda.
predict(cvfit, x[1:5, ])
predict(cvfit, x[1:5, ], s = "lambda.min")
gdina_standardize_weights <- function( weights )
{
    ## Normalize sampling weights so they sum to the number of cases,
    ## i.e. the mean weight becomes exactly 1.
    n_cases <- length(weights)
    out <- n_cases * weights / sum(weights)
    return(out)
}
`focleg` <-
function(i)
{
    ## Map a focal-mechanism class index (1..7) to its descriptive label.
    ## Vectorised: `i` may be a vector of indices.
    kinds <- c(
        "STRIKESLIP",
        "REV-OBL STRK-SLP",
        "OBLIQUE REVERSE",
        "REVERSE",
        "NORM-OBLQ STRKSLP",
        "OBLQ NORM",
        "NORMAL"
    )
    kinds[i]
}
## Covariance step run after parameter estimation (NONMEM-style "$COV").
## Relies on shared state and helpers defined elsewhere: the environment
## `e` (e$STEP, e$FinalPara), the objective function Obj(), hessian()
## (numerical Hessian -- presumably numDeriv; TODO confirm) and CalcSmat().
## Returns run time, standard errors, (inverse) covariance and correlation
## matrices, eigenvalues of the correlation matrix, and the R and S matrices.
CovStep = function()
{
  e$STEP = "COV"
  StartTime = Sys.time()
  ## R matrix: half the Hessian of the objective at the final estimates.
  Rmat = hessian(Obj, e$FinalPara)/2
  Smat = CalcSmat()
  if (is.nan(Smat[1,1])) stop("Error - NaN produced")
  invR = solve(Rmat)
  ## Sandwich covariance estimator: R^-1 S R^-1.
  Cov = invR %*% Smat %*% invR
  SE = sqrt(diag(Cov))
  Correl = cov2cor(Cov)
  InvCov = Rmat %*% solve(Smat) %*% Rmat
  ## Eigenvalues of the correlation matrix (conditioning diagnostic).
  EigenVal = sort(eigen(Correl)$values)
  RunTime = difftime(Sys.time(), StartTime)
  Result = list(RunTime, SE, Cov, Correl, InvCov, EigenVal, Rmat, Smat)
  names(Result) = list("Time", "Standard Error", "Covariance Matrix of Estimates", "Correlation Matrix of Estimates", "Inverse Covariance Matrix of Estimates", "Eigen Values", "R Matrix", "S Matrix")
  return(Result)
}
## Example: ordinary shelf-life estimation with the expirest package.
str(exp1)
## Fit using three batches only; sl/sl_sf give the specification limit
## and its significant figures, srch_range the root-search interval
## (see expirest_osle() documentation).
res1 <-
  expirest_osle(data = exp1[exp1$Batch %in% c("b2", "b5", "b7"), ],
                response_vbl = "Potency", time_vbl = "Month",
                batch_vbl = "Batch", sl = 95, sl_sf = 3, srch_range = c(0, 500))
## Plot without batch grouping, full annotation, CI drawn as lines.
gg1 <- plot_expirest_osle(
  model = res1, show_grouping = "no", response_vbl_unit = "%",
  y_range = c(93, 107), x_range = NULL, plot_option = "full",
  ci_app = "line")
gg2 <- plot(gg1)
class(gg1)
class(gg2)
## Collect information about the current R session: R version, platform
## (with pointer size), running OS, locale, and attached / loaded packages.
## `package` - optional character vector of package names to report on;
##             defaults to all attached packages with an on-disk path.
## Returns an object of class "sessionInfo".
sessionInfo <- function(package = NULL)
{
    z <- list()
    z$R.version <- R.Version()
    z$platform <- z$R.version$platform
    if(nzchar(.Platform$r_arch))
        z$platform <- paste(z$platform, .Platform$r_arch, sep = "/")
    ## Append the word size, e.g. "(64-bit)".
    z$platform <- paste0(z$platform, " (", 8*.Machine$sizeof.pointer, "-bit)")
    z$locale <- Sys.getlocale()
    ## Identify the running OS / distribution in human-readable form.
    if (.Platform$OS.type == "windows") {
        z$running <- win.version()
    } else if (nzchar(Sys.which('uname'))) {
        uname <- system("uname -a", intern = TRUE)
        os <- sub(" .*", "", uname)
        z$running <-
            switch(os,
                   "Linux" = if(file.exists("/etc/os-release")) {
                       tmp <- readLines("/etc/os-release")
                       ## Prefer PRETTY_NAME=, fall back to NAME=.
                       t2 <- if (any(startsWith(tmp, "PRETTY_NAME=")))
                                 sub("^PRETTY_NAME=", "",
                                     grep("^PRETTY_NAME=", tmp, value = TRUE)[1L])
                             else if (any(startsWith(tmp, "NAME")))
                                 sub("^NAME=", "",
                                     grep("^NAME=", tmp, value = TRUE)[1L])
                             else "Linux (unknown distro)"
                       ## Strip surrounding double quotes, if any.
                       sub('"(.*)"', "\\1", t2)
                   } else if(file.exists("/etc/system-release")) {
                       readLines("/etc/system-release")
                   },
                   "Darwin" = {
                       ## Parse the macOS version from SystemVersion.plist
                       ## and translate it to the marketing name.
                       ver <- readLines("/System/Library/CoreServices/SystemVersion.plist")
                       ind <- grep("ProductUserVisibleVersion", ver)
                       ver <- ver[ind + 1L]
                       ver <- sub(".*<string>", "", ver)
                       ver <- sub("</string>$", "", ver)
                       ver1 <- strsplit(ver, ".", fixed = TRUE)[[1L]][2L]
                       sprintf("OS X %s (%s)", ver,
                               switch(ver1,
                                      "4" = "Tiger",
                                      "5" = "Leopard",
                                      "6" = "Snow Leopard",
                                      "7" = "Lion",
                                      "8" = "Mountain Lion",
                                      "9" = "Mavericks",
                                      "10" = "Yosemite",
                                      "11" = "El Capitan",
                                      "unknown"))
                   },
                   "SunOS" = {
                       ver <- system('uname -r', intern = TRUE)
                       paste("Solaris",
                             strsplit(ver, ".", fixed = TRUE)[[1L]][2L])
                   },
                   uname)  # any other OS: report the raw uname string
    }
    ## Default: all attached packages that have an on-disk path
    ## ("package:base" is kept even though it has no path attribute).
    if(is.null(package)){
        package <- grep("^package:", search(), value=TRUE)
        keep <- vapply(package, function(x) x == "package:base"
                       || !is.null(attr(as.environment(x), "path")), NA)
        package <- .rmpkg(package[keep])
    }
    pkgDesc <- lapply(package, packageDescription, encoding = NA)
    if(length(package) == 0) stop("no valid packages were specified")
    ## Split into base-priority packages and the rest.
    basePkgs <- sapply(pkgDesc,
                       function(x) !is.null(x$Priority) && x$Priority=="base")
    z$basePkgs <- package[basePkgs]
    if(any(!basePkgs)){
        z$otherPkgs <- pkgDesc[!basePkgs]
        names(z$otherPkgs) <- package[!basePkgs]
    }
    ## Namespaces that are loaded but not attached.
    loadedOnly <- loadedNamespaces()
    loadedOnly <- loadedOnly[!(loadedOnly %in% package)]
    if (length(loadedOnly)) {
        names(loadedOnly) <- loadedOnly
        pkgDesc <- c(pkgDesc, lapply(loadedOnly, packageDescription))
        z$loadedOnly <- pkgDesc[loadedOnly]
    }
    class(z) <- "sessionInfo"
    z
}
print.sessionInfo <- function(x, locale = TRUE, ...)
{
    ## Print method for "sessionInfo" objects: R version, platform, OS,
    ## optionally the locale string, then attached base packages, other
    ## attached packages and loaded-but-not-attached namespaces.
    ## Returns `x` invisibly, as print methods should.
    pkg.labels <- function(info, field) {
        ## Build "name_version" labels from the package descriptions
        ## stored under info[[field]].
        descs <- info[[field]]
        pkgs <- vapply(descs, function(d) d[["Package"]], "")
        versions <- vapply(descs, function(d) d[["Version"]], "")
        paste(pkgs, versions, sep = "_")
    }
    cat(x$R.version$version.string, "\n", sep = "")
    cat("Platform: ", x$platform, "\n", sep = "")
    if (!is.null(x$running))
        cat("Running under: ", x$running, "\n", sep = "")
    cat("\n")
    if (locale) {
        cat("locale:\n")
        print(strsplit(x$locale, ";", fixed = TRUE)[[1]], quote = FALSE, ...)
        cat("\n")
    }
    cat("attached base packages:\n")
    print(x$basePkgs, quote = FALSE, ...)
    if (!is.null(x$otherPkgs)) {
        cat("\nother attached packages:\n")
        print(pkg.labels(x, "otherPkgs"), quote = FALSE, ...)
    }
    if (!is.null(x$loadedOnly)) {
        cat("\nloaded via a namespace (and not attached):\n")
        print(pkg.labels(x, "loadedOnly"), quote = FALSE, ...)
    }
    invisible(x)
}
toLatex.sessionInfo <- function(object, locale=TRUE, ...)
{
    ## Render a "sessionInfo" object as a LaTeX itemize environment.
    ## Items: R version + platform, optionally the locale, the sorted base
    ## packages, then other attached packages and loaded namespaces, each
    ## formatted as "pkg~version".  Returns a character vector of class
    ## "Latex".
    items <- c("\\begin{itemize}\\raggedright",
               paste0(" \\item ", object$R.version$version.string,
                      ", \\verb|", object$R.version$platform, "|"))
    if(locale) {
        ## Each locale component is wrapped in its own \verb|...|.
        items <- c(items,
                   paste0(" \\item Locale: \\verb|",
                          gsub(";","|, \\\\verb|", object$locale) , "|"))
    }
    items <- c(items, strwrap(paste("\\item Base packages: ",
                                    paste(sort(object$basePkgs), collapse = ", ")),
                              indent = 2, exdent = 4))
    other.ver <- sapply(object$otherPkgs, function(x) x$Version)
    if(length(other.ver)) {
        other.ver <- other.ver[sort(names(other.ver))]
        items <- c(items,
                   strwrap(paste(" \\item Other packages: ",
                                 paste(names(other.ver), other.ver, sep = "~",
                                       collapse = ", ")),
                           indent = 2, exdent = 4))
    }
    ns.ver <- sapply(object$loadedOnly, function(x) x$Version)
    if(length(ns.ver)) {
        ns.ver <- ns.ver[sort(names(ns.ver))]
        items <- c(items,
                   strwrap(paste(" \\item Loaded via a namespace (and not attached): ",
                                 paste(names(ns.ver), ns.ver, sep = "~",
                                       collapse = ", ")),
                           indent = 2, exdent = 4))
    }
    items <- c(items, "\\end{itemize}")
    class(items) <- "Latex"
    items
}
plot.evodivparam <-
function(x, legend = TRUE, legendposi = "topright", axisLABEL = "Tree-based diversity", type="b", col = if(is.numeric(x)) NULL else sample(colors(distinct = TRUE), nrow(x$div)), lty = if(is.numeric(x)) NULL else rep(1, nrow(x$div)), pch = if(is.numeric(x)) NULL else rep(19, nrow(x$div)), ...)
{
    ## Plot method for "evodivparam" objects.
    ##
    ## If `x` is a plain numeric vector (diversity computed for a single
    ## order q), a dot chart of the values is drawn.  If `x` is a list with
    ## components `q` (vector of orders) and `div` (communities x orders
    ## matrix), one diversity profile per community is drawn, with an
    ## optional legend keyed by rownames(x$div).
    ##
    ## Fixes over the previous revision:
    ##  * removed a stray empty argument in the plot() call;
    ##  * a scalar `lty` is now recycled across communities, matching the
    ##    existing recycling of `col` and `pch`;
    ##  * `lty` is passed to lines(), so the drawn line types match the
    ##    legend;
    ##  * the first profile is no longer drawn twice (the frame is set up
    ##    with type = "n" and every row is drawn once by lines()).
    if(is.numeric(x)){
        y <- as.vector(x)
        names(y) <- names(x)
        dotchart(y, xlab = axisLABEL, ...)
    }
    if(is.list(x)){
        ## Recycle scalar appearance parameters, one entry per community.
        if(length(col)==1) col <- rep(col, nrow(x$div))
        if(length(lty)==1) lty <- rep(lty, nrow(x$div))
        if(length(pch)==1) pch <- rep(pch, nrow(x$div))
        ## Empty frame spanning the full range of all profiles.
        plot(x$q, x$div[1, ], type = "n",
             ylim = c(min(x$div), max(x$div)),
             ylab = axisLABEL, xlab = "q", ...)
        for(i in 1:nrow(x$div)){
            lines(x$q, x$div[i, ], type = type, col = col[i],
                  lty = lty[i], pch = pch[i], ...)
        }
        if(legend[1]){
            legend(legendposi, legend = rownames(x$div), col = col,
                   lty = lty, pch = pch, ...)
        }
    }
}
# Time-varying coefficients SURE (seemingly unrelated regressions) estimator.
#
# formula : list of one formula per equation (at least two equations).
# z, ez   : smoothing variable and evaluation points passed through to tvGLS.
# bw      : bandwidth(s); estimated by cross-validation when NULL.
# cv.block: block size for leave-k-out cross-validation.
# data    : matrix or data.frame holding all variables.
# method  : "tvOLS" (identity error covariance), "tvFGLS" (iterated feasible
#           GLS with a kernel-estimated Sigma), or "tvGLS" (Sigma supplied).
# Sigma   : error covariance (matrix or neq x neq x obs array) for "tvGLS".
# est, tkernel, bw.cov, singular.ok, R, r, control: estimation options;
#           R/r define linear restrictions R beta = r.
#
# Returns a list of class "tvsure" with coefficients, residuals, fitted
# values, bandwidths and the full call configuration.
tvSURE <- function (formula, z = NULL, ez = NULL, bw = NULL, cv.block = 0, data,
                    method = c("tvOLS", "tvFGLS", "tvGLS"), Sigma = NULL,
                    est = c("lc", "ll"), tkernel = c("Triweight", "Epa", "Gaussian"),
                    bw.cov = NULL, singular.ok = TRUE, R = NULL, r = NULL,
                    control = tvreg.control(...), ...)
{
  # --- argument validation ---------------------------------------------
  is.data <- inherits(data, c("data.frame", "matrix"))
  if(!is.data)
    stop("\nArgument 'data' should be entered and it should be a 'matrix' or a 'data.frame'.\n")
  if(!inherits(data, c("data.frame")))
    data <- as.data.frame(data)
  if(!inherits(formula, "list"))
    stop("\nArgument 'formula' must be a list of formulas. \n")
  if(!all(lapply(formula, class) == "formula"))
    stop("\nArgument 'formula' must contain only objects of class 'formula'")
  neq <- length(formula)
  if(neq < 2)
    stop("\nThe list 'formula' should contain at least two equations for multivariate analysis.\n")
  if(!is.null(Sigma))
    if(any(is.na(Sigma)))
      stop("\nNAs in Sigma.\n")
  method <- match.arg(method)
  tkernel <- match.arg(tkernel)
  est <- match.arg(est)
  nvar <- numeric(neq)
  # Equation labels: either user-supplied names(formula) or eq1, eq2, ...
  # Blanks/underscores are forbidden because labels are used as suffixes.
  if(is.null(names(formula)))
  {
    eq.names <- paste0("eq", c(1:neq))
  }
  else
  {
    eq.names <- names(formula)
    if(sum(regexpr(" |_", eq.names) != -1) > 0)
      stop("\nEquation labels may not contain blanks (' ') or underscores ('_')")
  }
  results <- list()
  # --- build per-equation model frames ---------------------------------
  # Re-use the captured call with each equation's formula so that model
  # frames are evaluated in the caller's environment; NAs pass through.
  callNoDots <- match.call(expand.dots = FALSE)
  mf <- callNoDots[c(1, match(c("data"), names(callNoDots), 0L))]
  mf$na.action <- as.name("na.pass")
  mf[[1]] <- as.name("model.frame")
  y <- NULL
  x <- list()
  y.names <- NULL
  for(i in 1:neq)
  {
    mf.eq <- mf
    mf.eq$formula <- formula[[i]]
    eval.mf <- eval(mf.eq, parent.frame())
    terms <- attr(eval.mf, "terms")
    y <- cbind(y, stats::model.extract(eval.mf, "response"))
    y.names <- c(y.names, formula[[i]][[2]])
    x[[i]] <- stats::model.matrix(terms, eval.mf)
    nvar[i] <- NCOL(x[[i]])
    if(is.null(colnames(x[[i]])))
      colnames(x[[i]]) <- paste0("X", i, 1:nvar[i])
  }
  names(x) <- eq.names
  colnames(y) <- y.names
  obs <- NROW(y)
  # --- linear restrictions R beta = r ----------------------------------
  if(!is.null(R))
  {
    R <- as.matrix(R)
    if(NCOL(R) != sum(nvar))
      stop("\nWrong dimension of R, it should have as many columns as variables
           in the whole system. \n")
    if (is.null(r))
      r <- rep(0, NROW(R))
    else if (length(r) == 1)
      r <- rep(r, NROW(R))
    else if (length(r) != NROW(R) & length(r) != 1)
      stop("\nWrong dimension of r, it should be as long as the number of
           rows in R. \n")
  }
  # --- estimation ------------------------------------------------------
  # NOTE(review): `bw(...)` below calls the package's bandwidth-selection
  # function while also reusing the name as a local variable.
  if (method == "identity" | method == "tvOLS")
  {
    # Equation-by-equation tvOLS; Sigma is then the (constant) sample
    # covariance of the residuals replicated over all time points.
    if (is.null(bw))
    {
      cat("Calculating regression bandwidth...\n")
      bw <- bw(x = x, y = y, z = z, cv.block = cv.block, est = est, tkernel = tkernel,
               singular.ok = singular.ok)
      cat("bw = ", bw, "\n")
    }
    else
    {
      if (any(bw < 5/obs))
        stop("\nAt least one of your bw bandwidths is smaller than 5/obs,
             please increase! \n")
      else if (any(is.na(bw)))
        stop("\nThe bandwidth cannot be a no number.\n")
    }
    result <- tvGLS(x = x, y = y, z = z, ez = ez, bw = bw, R = R, r = r,
                    est = est, tkernel = tkernel)
    Sigma <- array(rep(crossprod(result$residuals)/ (obs - neq), obs), dim = c(neq, neq, obs))
  }
  else if(method == "tvFGLS")
  {
    # Feasible GLS: first-stage fit, kernel estimate of Sigma(t), then
    # iterate GLS until the relative coefficient change is below tol.
    if (is.null(bw))
    {
      cat("Calculating regression bandwidth...\n")
      bw <- bw(x = x, y = y, z = z, cv.block = cv.block, est = est, tkernel = tkernel,
               singular.ok = singular.ok)
      cat("bw = ", bw, "\n")
    }
    else
    {
      if (any(bw<5/obs))
        stop("\nAt least one of your bw bandwidths is smaller than 5/obs, please increase! \n")
      else if (any (is.na(bw)))
        stop("\nThe bandwidth cannot be a no number.\n")
    }
    result <- tvGLS(x = x, y = y, z = z, ez = ez, bw = bw, R = R, r = r, est = est, tkernel = tkernel)
    if(is.null(bw.cov))
    {
      cat("Calculating variance-covariance estimation bandwidth...\n")
      bw.cov <- bwCov(x = result$residuals, cv.block = cv.block, tkernel = tkernel)
      cat("bw = ", bw.cov, "\n")
    }
    Sigma <- tvCov(x = result$residuals, bw = bw.cov, tkernel = tkernel)
    result <- tvGLS(x = x, y = y, z = z, ez = ez, bw = bw, Sigma = Sigma, R = R, r = r,
                    est = est, tkernel = tkernel)
    itertemp <- 1
    tol <- control$tol
    maxiter <- control$maxiter
    tolold <- sum(result$coefficients^2)
    tolnew <- 0
    while((abs(tolold-tolnew)>tol) && (itertemp < maxiter))
    {
      tolold <- tolnew
      Sigma <- tvCov(bw = bw.cov, x = result$residuals, tkernel = tkernel)
      temp <- tvGLS(x = x, y = y, z = z, ez = ez, bw = bw, Sigma = Sigma, R = R, r = r,
                    est = est, tkernel = tkernel)
      # Relative change in coefficients between successive iterations.
      tolnew <- sqrt(sum((result$coefficients - temp$coefficients)^2)/sum(result$coefficients^2))
      result <- temp
      itertemp <- itertemp + 1
    }
  }
  else if(method == "tvGLS")
  {
    # User-supplied Sigma: accept a constant neq x neq matrix (replicated
    # over time) or a full neq x neq x obs array.
    if(is.matrix(Sigma))
    {
      if(NCOL(Sigma) != neq | NROW(Sigma) != neq)
        stop("\nWrong dimensions of Sigma. \n.")
      Sigma2 <- array(0, dim = c(neq, neq, obs))
      for (t in 1:obs)
        Sigma2[, , t] <- Sigma
      Sigma <- Sigma2
    }
    else if (is.array(Sigma))
    {
      dimensions <- dim(Sigma)
      if(dimensions[3] != obs | dimensions[2] != neq | dimensions[1] != neq)
        stop("\nWrong dimensions of Sigma. \n.")
    }
    else
      stop("\nSigma must be a matrix of dimensions neq x neq or an array of dimensions
           neq x neq x obs. \n")
    if (is.null(bw))
    {
      cat("Calculating regression bandwidth...\n")
      bw <- bw(x = x, y = y, z = z, Sigma = Sigma, est = est, tkernel = tkernel)
      cat("bw = ", bw, "\n")
    }
    else
    {
      if (any(bw < 5/obs))
        stop("\nAt least one of your bw bandwidths is smaller than 5/obs,
             please increase! \n")
      else if (any (is.na(bw)))
        stop("\nThe bandwidth cannot be a no number.\n")
    }
    result <- tvGLS(x = x, y = y, z = z, ez = ez, bw = mean(bw), Sigma = Sigma, R = R, r = r,
                    est = est, tkernel = tkernel)
  }
  # --- package the results ---------------------------------------------
  coefficients <- result$coefficients
  resid <- result$residuals
  fitted <- result$fitted
  if(length(bw) == 1)
    names(bw) <- "bw.mean"
  else
    names(bw) <- paste("bw.", eq.names, sep = "")
  colnames(resid) <- eq.names
  colnames(fitted) <- eq.names
  # Coefficient columns are labelled "<variable>.<equation>".
  var.names <- NULL
  for(i in 1:neq)
    var.names <- c(var.names, paste(colnames(x[[i]]), ".", eq.names[i], sep = ""))
  colnames(coefficients) <- var.names
  result <- list(coefficients = coefficients, Lower = NULL, Upper = NULL, Sigma = Sigma,
                 fitted = fitted, residuals = resid, x = x, y = y, z = z, ez = ez,
                 bw = bw, cv.block = cv.block, obs = obs, neq = neq, nvar = nvar,
                 method = method, est = est, tkernel = tkernel, bw.cov = bw.cov,
                 level = 0, runs = 0, tboot = NULL, BOOT = NULL,
                 R = R, r = r, control = control, formula = formula, call = match.call())
  class(result) <- "tvsure"
  return(result)
}
cv.mrelnet <- function(predmat, y, type.measure, weights, foldid, grouped) {
    ## Raw cross-validation loss for a multi-response Gaussian fit.
    ##
    ## predmat: nobs x nresp x nlambda array of CV predictions.
    ## y:       nobs x nresp response matrix.
    ## Returns a list with the per-observation, per-lambda loss matrix
    ## (cvraw), the weights/fold info passed through, and N, the number of
    ## non-missing observations per lambda (missingness is taken from the
    ## first response column of predmat).
    n.obs <- dim(y)[1]
    n.ok <- n.obs - apply(is.na(predmat[, 1, , drop = FALSE]), 2, sum)
    ## Replicate y across the lambda dimension so the error is a single
    ## vectorised subtraction.
    y.big <- array(y, dim(predmat))
    err <- y.big - predmat
    ## Loss is summed over responses: squared error for mse/deviance,
    ## absolute error for mae.
    loss <- switch(type.measure,
                   mse = apply(err^2, c(1, 3), sum),
                   deviance = apply(err^2, c(1, 3), sum),
                   mae = apply(abs(err), c(1, 3), sum))
    list(cvraw = loss, weights = weights, N = n.ok,
         type.measure = type.measure, grouped = grouped)
}
bgcolor <- function(color) {
    ## Convenience wrapper: a ggplot2 theme that fills the plot panel
    ## background with the given colour.
    panel.fill <- element_rect(fill = color)
    theme(panel.background = panel.fill)
}
make.lapply.loop.resample <- function(one_subset, replicates, pop_size) {
    ## One bootstrap task: the observed element indices plus `replicates`
    ## draws (with replacement, same size as the subset) taken from the
    ## whole population 1..pop_size.
    n.draw <- length(one_subset)
    draws <- replicate(replicates, sample(1:pop_size, n.draw, replace = TRUE))
    list("elements" = one_subset, draws)
}
make.lapply.loop.nosample <- function(one_subset, replicates, pop_size) {
    ## One null-model task: the observed element indices plus `replicates`
    ## draws (with replacement, same size as the subset) taken only from
    ## the population elements NOT in `one_subset`.
    ##
    ## Bug fix: `sample(x)` behaves like `sample.int(x)` when `x` is a
    ## single number (see ?sample), so when the complement
    ## (1:pop_size)[-one_subset] had length one the old code sampled from
    ## 1:x and could return excluded indices.  Index into the candidate
    ## vector explicitly to avoid that footgun.
    candidates <- (1:pop_size)[-one_subset]
    n.draw <- length(one_subset)
    draws <- replicate(replicates,
                       candidates[sample.int(length(candidates), n.draw,
                                             replace = TRUE)])
    return(list("elements" = one_subset, draws))
}
one.randtest <- function(results, replicates, resample, alternative, get.p.value, match_call) {
    ## Package one set of permutation results as an ade4-style "randtest"
    ## object: observed statistics, null (simulated) draws, p-value and
    ## pre-computed histogram/xlim for plotting.
    ## NOTE(review): `resample` is accepted but not used here — presumably
    ## kept for a uniform calling interface; confirm with callers.
    obs <- c(results$elements)
    sims <- c(results[[2]])
    ## Standardised observed values plus moments of the null distribution.
    summary.stats <- c(
        "Mean Normal residuals" = mean((obs - mean(sims)) / stats::sd(sims)),
        "Random mean" = mean(sims),
        "Random variance" = stats::var(sims))
    p.val <- get.p.value(sims, obs, replicates)
    out <- list(rep = replicates,
                observed = obs,
                random = sims,
                call = match_call,
                sim = sims,
                obs = obs)
    ## Histogram of the null draws, with x-limits padded by one "bin
    ## width" (range / (log2(n) + 1)) on each side.
    span <- max(sims, obs) - min(sims, obs)
    pad <- span / (log(length(sims), base = 2) + 1)
    out$plot <- list(hist = graphics::hist(sims, plot = FALSE, nclass = 10),
                     xlim = range(c(sims, obs)) + c(-pad, pad))
    out$alter <- alternative
    out$pvalue <- p.val
    out$expvar <- summary.stats
    class(out) <- "randtest"
    return(out)
}
# testthat suite for FeatureImpCluster: permutation feature importance on
# flexclust / clustMixType cluster objects.
context("test-featureimpcluster")
# Fixed seed so the random data and clustering are reproducible.
set.seed(123)
nr_other_vars = 2
# create_random_data() presumably returns nr_other_vars noise variables
# plus two informative ones (hence the "+2" expectations below) — confirm
# against the package's test helpers.
dat <- create_random_data(n=100,nr_other_vars = nr_other_vars)$data
res <- flexclust::kcca(dat,k=4)
base_pred <- flexclust::predict(res,dat)
# Two permutation iterations keep the test fast.
biter = 2
f <- FeatureImpCluster(res,dat,biter=biter)
test_that("Test dimension of outputs", {
  # misClassRate: one row per permutation iteration, one column per feature.
  expect_equal(dim(f$misClassRate)[1],biter)
  expect_equal(dim(f$misClassRate)[2],nr_other_vars+2)
  expect_equal(length(f$featureImp),nr_other_vars+2)
})
test_that("Result is not NA", {
  expect_equal(sum(is.na(f$featureImp)),0)
})
# Mixed-type data: add two factor columns (data.table := syntax) to test
# the kproto (clustMixType) path and the plotting options.
set.seed(123)
dat_cat <- copy(dat)
dat_cat[,cat1:=factor(rbinom(100,size=1,prob=0.3),labels = c("yes","no"))]
dat_cat[,cat2:=factor(c(rep("yes",50),rep("no",50)))]
test_that("FeatureImp plots", {
  p <- plot(f)
  ps <- plot(f,showPoints = TRUE)
  expect_equal(p$labels$y,"Misclassification rate")
  expect_equal(ps$layers[[2]]$position$width,.1)
  # color="type" needs per-feature type info, absent for pure kcca fits.
  expect_error(plot(f,color="type"))
  res_kproto <- clustMixType::kproto(x=dat_cat,k=4)
  f <- FeatureImpCluster(res_kproto,dat_cat,biter=biter)
  p_cat <- plot(f,dat_cat,color="type")
})
meltt_duplicates <- function(object, columns = NULL) {
    ## S3 generic: report duplicate (matched) entries across the input
    ## datasets of a meltt object.  Dispatches on class(object); see
    ## meltt_duplicates.meltt for the implementation.
    UseMethod("meltt_duplicates")
}
# meltt method: assemble a side-by-side table of all entries that were
# flagged as duplicates (event-to-event or episode-to-episode matches)
# across the input datasets, restricted to the requested columns.
# Relies on plyr::rbind.fill for ragged row binding.
meltt_duplicates.meltt = function(object,columns=NULL){
  # Default to the union of all input-data columns; always lead with the
  # bookkeeping columns 'dataset' and 'obs.count'.
  if(length(columns)==0){
    for(m in seq_along(object$inputData)){columns = c(columns,colnames(object$inputData[[m]]))}
    columns = unique(columns)
    columns = c('dataset','obs.count',columns[!columns %in% c('dataset','obs.count')])
  }else{
    columns = c('dataset','obs.count',columns)
  }
  # Stack the two match tables and label each row with its match type.
  event_to_event = object$processed$event_matched
  episode_to_episode = object$processed$episode_matched
  key = rbind.fill(event_to_event,episode_to_episode)
  key$match_type = c(rep("event_to_event",nrow(event_to_event)),
                     rep("episode_to_episode",nrow(episode_to_episode)))
  # Keep only rows that actually record a match (not all-zero).
  determine = !apply(key,1,function(x){all(as.numeric(x[-1*c(1,2,length(x))])==0)})
  key = key[determine,]
  # Columns alternate dataset-id / event-id; split them apart.
  data_key = key[,seq_along(key) %% 2 != 0];data_key = data_key[,colnames(data_key)!="match_type"]
  data_key = data_key[,!colnames(data_key) %in% c("data0","dataNA")]
  obs_key = key[,seq_along(key) %% 2 == 0]
  input_data = object$inputData
  # Rebuild a key whose column names are data<k>/event<k> pairs derived
  # from the dataset ids actually present in each row.
  key2 = key[,colnames(key)!="match_type"]; key3 = c()
  for(row in 1:nrow(key2)){
    even = function(x){x1 = 1:ncol(x);x1 %% 2 == 0}
    datanames = paste0('data',key2[row,!even(key2)])
    eventnames = paste0('event',key2[row,!even(key2)])
    col_names=c();for(i in 1:length(datanames)){col_names=c(col_names,datanames[i],eventnames[i])}
    s = key2[row,]
    colnames(s) = col_names
    key3 = rbind.fill(key3,s)
  }
  # Drop placeholder columns and order the remaining data/event pairs.
  recon_key = key3[,!(colnames(key3) %in% c("data0","event0","dataNA","eventNA"))]
  datanames = colnames(recon_key)[grepl("data",colnames(recon_key))];datanames = datanames[order(datanames)]
  eventnames = colnames(recon_key)[grepl("event",colnames(recon_key))];eventnames = eventnames[order(eventnames)]
  col_names=c();for(i in 1:length(datanames)){col_names=c(col_names,datanames[i],eventnames[i])}
  recon_key = recon_key[,col_names]
  # A genuine duplicate involves at least two datasets (> 2 non-NA cells).
  drop = !apply(recon_key,1,function(x){sum(!is.na(x)) <= 2})
  recon_key=recon_key[drop,]
  recon_key[is.na(recon_key)] = 0
  recon_key = recon_key[!apply(recon_key,1,function(x) all(x == 0)),]
  # Merge the requested columns of every input dataset onto the key,
  # prefixing column names with the source dataset's name.
  for(d in ncol(data_key):1){
    consider = input_data[[d]]
    consider = consider[consider$obs.count %in% obs_key[data_key==d],]
    consider2 = consider[,colnames(consider) %in% columns]
    colnames(consider2)[!colnames(consider2) %in% c("dataset","obs.count")] = paste(object$inputDataNames[d],colnames(consider2)[!colnames(consider2) %in% c("dataset","obs.count")],sep = "_")
    if(d==ncol(data_key)){
      out = merge(recon_key,consider2,
                  by.x=c(paste0("data",d),paste0("event",d)),
                  by.y=c("dataset","obs.count"),all.x=T)
    }else{
      out = merge(out,consider2,
                  by.x=c(paste0("data",d),paste0("event",d)),
                  by.y=c("dataset","obs.count"),all.x=T)
    }
  }
  # Rename the id columns using the human-readable input dataset names.
  .ind = c();for( c in data_key){.ind = c(.ind,c)}
  viable_options = unique(.ind[.ind>0])
  colnames(out)[colnames(out) %in% colnames(data_key)] = paste0(object$inputDataNames[viable_options],"_dataset")
  colnames(out)[colnames(out) %in% colnames(obs_key)] = paste0(object$inputDataNames[viable_options],"_event")
  return(out)
}
# Regression test: building an rquery/rqdatatable pipeline whose extend()
# uses a multi-argument aggregating expression (paste with collapse)
# should fail; expect_error() asserts the error is raised at pipeline
# construction time.  Uses the wrapr dot-arrow pipe %.>%.
test_multi_arg_fn <- function() {
  expect_error(
    ops <- mk_td('d', 'str') %.>%
      extend(., strs = paste(str, collapse = ', ')))
  invisible(NULL)
}
# Run the check at source time.
test_multi_arg_fn()
# Print method for permutation tests of rma.uni models (metafor).
# Prints the omnibus moderator test (F or QM, depending on x$test), then
# the coefficient table with permutation-based p-values (marked "pval*")
# and, when x$permci, permutation-based CI bounds ("ci.lb*"/"ci.ub*").
# Internal helpers (.get.mstyle, .chkclass, .fcf, .pval, .print.table,
# .format.btt, .print.vector) are metafor package internals.
print.permutest.rma.uni <- function(x, digits=x$digits, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) {
   # Styling (crayon colours when available) and class/digits validation.
   mstyle <- .get.mstyle("crayon" %in% .packages())
   .chkclass(class(x), must="permutest.rma.uni")
   digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE)
   # .rmspace is a flag used to suppress surrounding blank lines.
   if (!exists(".rmspace"))
      cat("\n")
   # Omnibus test of moderators (skipped for intercept-only models).
   if (!x$int.only) {
      cat(mstyle$section(paste0("Test of Moderators (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):")))
      cat("\n")
      if (is.element(x$test, c("knha","adhoc","t"))) {
         cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val* ", .pval(x$QMp, digits=digits[["pval"]], showeq=TRUE, sep=" "))))
      } else {
         cat(mstyle$result(paste0("QM(df = ", x$QMdf[1], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val* ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" "))))
      }
      cat("\n\n")
   }
   # Coefficient table: t-statistics (with df) for knha/adhoc/t tests,
   # otherwise z-statistics.  The "*" marks permutation-derived values.
   if (is.element(x$test, c("knha","adhoc","t"))) {
      res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), "pval*"=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE)
      colnames(res.table)[5] <- "pval*"
      if (x$permci)
         colnames(res.table)[6:7] <- c("ci.lb*", "ci.ub*")
   } else {
      res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), "pval*"=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE)
      colnames(res.table)[4] <- "pval*"
      if (x$permci)
         colnames(res.table)[5:6] <- c("ci.lb*", "ci.ub*")
   }
   rownames(res.table) <- rownames(x$beta)
   # Significance stars column (standard cutpoints).
   signif <- symnum(x$pval, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " "))
   if (signif.stars) {
      res.table <- cbind(res.table, signif)
      colnames(res.table)[ncol(res.table)] <- ""
   }
   # Intercept-only models print a single-row (vector-style) table.
   if (x$int.only)
      res.table <- res.table[1,]
   cat(mstyle$section("Model Results:"))
   cat("\n\n")
   if (x$int.only) {
      tmp <- capture.output(.print.vector(res.table))
   } else {
      tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2))
   }
   .print.table(tmp, mstyle)
   if (signif.legend) {
      cat("\n")
      cat(mstyle$legend("---\nSignif. codes: "), mstyle$legend(attr(signif, "legend")))
      cat("\n")
   }
   if (!exists(".rmspace"))
      cat("\n")
   invisible()
}
# Values accepted for the `style` argument of rpart.rules() and
# print.rpart.rules().
allowed.styles <- c("wide", "tall", "tallw")
# Convert an rpart model into a table of human-readable decision rules
# (one row per leaf).  Most of the work is delegated to get.raw.rules()
# and process.rules(); this function validates arguments and strings the
# pipeline together.  Also used internally by rpart.predict (the
# rpart.predict/where arguments come in via check.if.dot.arg...).
rpart.rules <- function(x=stop("no 'x' argument"),
                        style="wide", cover=FALSE, nn=FALSE,
                        roundint=TRUE, clip.facs=FALSE,
                        varorder=NULL, ...)
{
    if(!inherits(x, "rpart"))
        stop("Not an rpart object")
    # Unpack the dot arguments supported by rpart.rules (extra, digits,
    # varlen, trace, the connective words, etc.).
    ret <- check.if.dot.arg.supported.by.rpart.rules(...)
    extra <- ret$extra
    digits <- ret$digits
    varlen <- ret$varlen
    faclen <- ret$faclen
    trace <- ret$trace
    facsep <- ret$facsep
    eq <- ret$eq
    lt <- ret$lt
    ge <- ret$ge
    and <- ret$and
    when <- ret$when
    because <- ret$because
    null.model <- ret$null.model
    response.name <- ret$response.name
    rpart.predict <- ret$rpart.predict
    where <- ret$where
    obj <- x
    # Validate/normalise the simple arguments.
    style <- match.choices(style, allowed.styles)
    cover <- check.boolean(cover)
    nn <- check.boolean(nn)
    roundint <- check.boolean(roundint)
    clip.facs <- check.boolean(clip.facs)
    rpart.predict <- check.boolean(rpart.predict)
    digits <- process.digits.arg(digits)
    if(digits < 0)
        digits <- -digits
    varlen <- check.integer.scalar(varlen, logical.ok=FALSE)
    faclen <- check.integer.scalar(faclen, logical.ok=FALSE)
    # Model-frame info is needed for roundint and the response name.
    obj$varinfo <- get.modelframe.info(obj, roundint, trace,
                                       parent.frame(), "rpart.rules")
    if(is.null(response.name))
        response.name <- obj$varinfo$response.name
    stopifnot.string(response.name)
    stopifnot.string(facsep)
    # The connective strings may be empty; surrounding spaces are trimmed.
    stopifnot.string(eq, allow.empty=TRUE);
    eq <- trim.surrounding.space(eq)
    stopifnot.string(lt, allow.empty=TRUE);
    lt <- trim.surrounding.space(lt)
    stopifnot.string(ge, allow.empty=TRUE);
    ge <- trim.surrounding.space(ge)
    stopifnot.string(and, allow.empty=TRUE);
    and <- trim.surrounding.space(and)
    stopifnot.string(when, allow.empty=TRUE);
    when <- trim.surrounding.space(when)
    # ":EMPTY:" is a placeholder consumed later by process.rules.
    # NOTE(review): when `when` is non-empty the else-if substitutes
    # null.model for root-only trees; when `when` is "" and the tree is
    # root-only, neither branch fires — confirm that is intended.
    if(when == "" && (style %in% c("tall", "tallw") || rpart.predict))
        when <- ":EMPTY:"
    else if(nrow(obj$frame) == 1)
        when <- null.model
    stopifnot.string(because, allow.empty=TRUE);
    because <- trim.surrounding.space(because)
    stopifnot.string(null.model)
    trace <- as.numeric(check.numeric.scalar(trace, logical.ok=TRUE))
    # Split-variable names; a root-only tree gets a sentinel name.
    varnames <-
        if(nrow(obj$frame) == 1)
            varnames <- ":NULL.MODEL:"
        else
            unique(rownames(obj$splits))
    # Build the raw per-leaf rule table, then format it for display.
    ret <- get.raw.rules(obj, extra, varlen, faclen, roundint, trace,
                         facsep, varnames)
    rules <- ret$rules
    nrules.per.var <- ret$nrules.per.var
    if(trace >= 1)
        trace.print.rules(rules, "raw rules")
    rules <- process.rules(obj, rules, style, cover, nn, clip.facs,
                           rpart.predict, where,
                           eq, lt, ge, and, when, because, null.model,
                           digits, trace, varorder, varlen,
                           nrules.per.var, varnames,
                           response.name,
                           obj$method == "class" || is.class.response(obj),
                           attr(obj, "ylevels"))
    node.numbers <- rownames(rules)
    if(trace >= 1)
        trace.print.rules(rules, "processed rules")
    class(rules) <- c("rpart.rules", "data.frame")
    attr(rules, "style") <- style
    attr(rules, "eq") <- eq
    attr(rules, "and") <- and
    attr(rules, "when") <- when
    # For rpart.predict, return the printed text (minus header) instead
    # of the data frame.
    if(rpart.predict) {
        rules <- capture.output(print.rpart.rules(rules))
        rules <- rules[-1]
        rules <- gsub("^ ", "", rules)
    }
    rules
}
# Print method for rpart.rules tables.  The console width is temporarily
# widened so that "wide"-style rules are not wrapped; "tall"/"tallw"
# styles are delegated to print.style.tall().
print.rpart.rules <- function(x=stop("no 'x' argument"),
                              style=attr(x, "style"), ...)
{
    # Temporarily silence partial-match warnings while we inspect the
    # dots for misspelled 'digits' arguments.
    old.warnPartialMatchDollar <- getOption("warnPartialMatchDollar")
    if(is.boolean(old.warnPartialMatchDollar))
        on.exit(options(warnPartialMatchDollar=old.warnPartialMatchDollar))
    options(warnPartialMatchDollar=FALSE)
    dots <- match.call(expand.dots=FALSE)$...
    # 'digits' must be given to rpart.rules(), not here; catch all
    # partial spellings explicitly.
    if(!is.null(dots$di) || !is.null(dots$dig) || !is.null(dots$digi) ||
       !is.null(dots$digit) || !is.null(dots$digits))
        stop0("specify 'digits' in rpart.rules (not in print.rpart.rules)")
    # NOTE(review): the option is restored both here and via on.exit —
    # redundant but harmless.
    if(is.boolean(old.warnPartialMatchDollar))
        options(warnPartialMatchDollar=old.warnPartialMatchDollar)
    stop.if.dot.arg.used(...)
    style <- match.choices(style, allowed.styles)
    # Widen the console so rows print on one line; restored on exit.
    old.width <- options(width=1e4)$width
    on.exit(options(width=old.width))
    if(style == "wide") {
        class(x) <- "data.frame"
        print(x, row.names=FALSE)
    } else if(style == "tall" || style == "tallw")
        print.style.tall(x, style, eq=attr(x, "eq"),
                         and=attr(x, "and"), when=attr(x, "when"))
    else
        stop0("illegal style ", style)
}
# Build the raw rules table: one row per leaf, with columns
# lab / fit / iclass / cover plus three columns per split variable
# ("var=", "var<", "var>=").  Also counts how many rules mention each
# variable (used later for column ordering).
get.raw.rules <- function(obj, extra, varlen, faclen, roundint, trace,
                          facsep, varnames)
{
    # Node labels (fitted values) and split labels ("var|op|cut" form).
    ret <- get.node.and.split.labs(obj, extra, faclen, roundint, trace,
                                   facsep, under.percent=2)
    node.labs <- ret$node.labs
    split.labs <- ret$split.labs
    frame <- obj$frame
    is.leaf <- is.leaf(frame)
    node.numbers <- as.numeric(row.names(frame))
    # iframe.to.isplit.mat[i, j] is TRUE when frame row i is an ancestor
    # of (or equal to) frame row j — gives each leaf its path of splits.
    iframe.to.isplit.mat <- descendants(node.numbers)
    # Preallocate generously; each leaf contributes one rule.
    maxrules <- 1e3
    nrules <- 0
    rules <- matrix("", nrow=maxrules, ncol=4 + 3 * length(varnames))
    colnames(rules) <- c("lab", "fit", "iclass", "cover",
                         paste0(rep(varnames, each=3), c("=", "<", ">=")))
    rules <- as.data.frame(rules, stringsAsFactors=FALSE)
    nrules.per.var <- repl(0, length(varnames))
    names(nrules.per.var) <- varnames
    trace1(trace, "\n")
    for(iframe in 1:nrow(frame)) if(is.leaf[iframe]) {
        nrules <- nrules + 1
        if(nrules > maxrules)
            stopf("too many rules (maximum number of rules is %d)", maxrules)
        # Fill in one row by walking the leaf's path from the root.
        ret <- get.rule(obj, rules[nrules,], nrules.per.var,
                        iframe, node.numbers, node.labs, split.labs,
                        iframe.to.isplit.mat, trace)
        rules[nrules,] <- ret$rule
        nrules.per.var <- ret$nrules.per.var
    }
    trace1(trace, "\n")
    # Trim to the rules actually produced; row names are leaf node numbers.
    rules <- rules[1:nrules, , drop=FALSE]
    rownames(rules) <- rownames(obj$frame)[is.leaf]
    list(rules=rules, nrules.per.var=nrules.per.var)
}
# Produce machine-parseable node and split labels for rule extraction.
# Split labels use "|" as a field separator ("var|op|cut", parsed later
# by parse.split.lab); node labels are parsed by parse.node.lab.
# digits=-10 requests full precision so values can be re-formatted later.
get.node.and.split.labs <- function(obj, extra, faclen, roundint, trace,
                                    facsep, under.percent)
{
    # Class models need per-class stats for the fitted-value labels.
    class.stats <- NULL
    if(obj$method == "class" || is.class.response(obj))
        class.stats <- get.class.stats(obj)
    extra <- handle.extra.for.rules(extra, obj, class.stats)
    list(node.labs =
         internal.node.labs(obj,
            node.fun=NULL, node.fun.name="NULL",
            type=TYPE0.default, extra=extra,
            under=FALSE, xsep=NULL,
            digits=-10,
            varlen=0,
            prefix="", suffix="", class.stats, under.percent),
        split.labs =
         internal.split.labs(obj, type=TYPE4.fancy.all,
            digits=-10,
            varlen=0,
            faclen=faclen, roundint=roundint,
            clip.facs=FALSE,
            clip.left.labs=FALSE, clip.right.labs=FALSE, xflip=FALSE,
            trace=trace, facsep=facsep,
            eq="|=|", logical.eq="|=|", lt="|<|", ge="|>=|",
            split.prefix="", right.split.suffix="",
            split.suffix="", right.split.prefix=""))
}
# Normalise the `extra` argument (which node statistics to show) for use
# by rpart.rules.  The EX* constants are package-internal codes; values
# >= 100 mean "with percentage".  The returned value is always in the
# "+100" form (percentages on), except that poisson/exp models force 0.
handle.extra.for.rules <- function(extra, obj, class.stats)
{
    if(is.numeric(extra)) {
        stopifnot(length(extra) == 1)
        # Work with the base code; the +100 flag is re-applied at the end.
        if(extra >= 100)
            extra <- extra - 100
        if(extra == EX0)
            extra <- get.default.extra(obj, class.stats) - 100
        else if(extra == EX1.NOBS ||
                extra == EX2.CLASS.RATE ||
                extra == EX3.MISCLASS.RATE) {
            # These displays exist for plots but not for rules tables.
            warning0(
"extra=", extra, " is not supported by rpart.rules (although useable for plots)")
            extra <- get.default.extra(obj, class.stats) - 100
        } else if(extra == EX5.PROB.PER.CLASS.DONT ||
                  extra == EX7.PROB.2ND.CLASS.DONT ||
                  extra == EX11.PROB.ACROSS.ALL.2ND.CLASS.DONT)
            # Map the "DONT" variants onto their plain counterparts.
            extra <- extra - 1
    } else if(is.auto(extra, n=1)) {
        extra <- get.default.extra(obj, class.stats)
    } else
        stop0("rpart.rules: illegal extra")
    if(obj$method == "poisson" || obj$method == "exp")
        extra <- 0
    if(extra < 100)
        extra <- extra + 100
    extra
}
descendants <- function(nodes, include = TRUE)
{
    ## Descendant indicator matrix for rpart node numbers: entry [i, j]
    ## is TRUE when nodes[j] lies in the subtree rooted at nodes[i]
    ## (in rpart numbering, node k has parent k %/% 2).  `include`
    ## controls whether each node counts as its own descendant (diagonal).
    n.node <- length(nodes)
    if (n.node == 1L) return(matrix(TRUE, 1L, 1L))
    idx <- seq_len(n.node)
    is.desc <- matrix(FALSE, n.node, n.node)
    if (include)
        diag(is.desc) <- TRUE
    ## Position of each node's parent within `nodes` (NA for the root).
    parent.pos <- match(nodes %/% 2L, nodes)
    depth <- floor(log(nodes, base = 2))
    ## Every node descends from the root (first entry).
    is.desc[1L, 2L:n.node] <- TRUE
    ## Walk the levels bottom-up; each pass links nodes to one ancestor
    ## higher.  (Kept as max(depth):2L — same sequence as the original
    ## even when max(depth) < 2, where it counts upward harmlessly.)
    for (lvl in max(depth):2L) {
        at.lvl <- depth == lvl
        is.desc[cbind(idx[parent.pos[at.lvl]], idx[at.lvl])] <- TRUE
        parent.pos[at.lvl] <- parent.pos[parent.pos[at.lvl]]
        depth[at.lvl] <- lvl - 1L
    }
    is.desc
}
# Fill in one row of the rules table for the leaf at frame index iframe:
# parse its node label into lab/fit/cover, record its class, then walk
# its root-to-leaf path writing each split's cut value into the matching
# "var<op>" column.  Also updates the per-variable usage counts.
get.rule <- function(obj, rule, nrules.per.var,
                     iframe, node.numbers, node.labs, split.labs,
                     iframe.to.isplit.mat, trace)
{
    ret <- parse.node.lab(node.labs[iframe])
    rule$lab <- ret$lab
    rule$fit <- ret$fit
    rule$cover <- ret$cover
    # For class models, yval is the (1-based) predicted class index.
    rule$iclass <- floor(obj$frame[iframe, "yval"])
    # Splits on the path from the root down to this leaf.
    path <- split.labs[iframe.to.isplit.mat[, iframe]]
    trace1(trace, "iframe %3d node %3d path %s\n",
           iframe, node.numbers[iframe], bar.to.space(path))
    stopifnot(path[1] == "root")
    path <- path[-1]
    for(split.lab in path) {
        ret <- parse.split.lab(split.lab, trace)
        # Count each variable once per rule, however often it splits.
        if(!var.is.in.rule(ret$varname, rule))
            nrules.per.var[ret$varname] <- nrules.per.var[ret$varname] + 1
        rule[paste0(ret$varname, ret$op)] <- ret$cut
    }
    list(rule=rule, nrules.per.var=nrules.per.var)
}
trace.print.rules <- function(rules, msg)
{
    ## Debug helper: print the rules table under a heading.  The console
    ## is temporarily widened so rows are not wrapped; the previous width
    ## is restored on exit.
    saved.width <- options(width=1e4)$width
    on.exit(options(width=saved.width))
    cat0(msg, ":\n")
    ## Drop the "rpart.rules" class so the plain data.frame method prints.
    class(rules) <- "data.frame"
    print(rules)
    cat0("\n")
}
# Turn the raw rules table into its display form: format the fitted
# values, order rows (by class, then fit) and columns (most-used
# variables first), render the per-variable condition columns, and add
# the optional cover / node-number columns.  When rpart.predict is TRUE
# the table is instead reindexed by observation (via `where`) and reduced
# to the "because ..." explanation columns.
process.rules <- function(obj, rules, style, cover, nn, clip.facs,
                          rpart.predict, where,
                          eq, lt, ge, and, when, because, null.model,
                          digits, trace, varorder, varlen,
                          nrules.per.var, varnames, response.name,
                          is.class.response, ylevels)
{
    # Format the fitted values; rowmaxs is used for row ordering.
    ret <- format.fit(rules$fit, digits, is.class.response)
    rules$fit <- ret$fit
    rowmaxs <- ret$rowmaxs
    ncol.fit <- ret$ncol.fit
    fit <- rules$fit
    rules <- order.rows(rules, rowmaxs)
    ret <- order.cols(rules, varorder, varnames, nrules.per.var)
    rules <- ret$rules
    varnames <- ret$varnames
    # Optionally abbreviate variable names in the column headers.
    if(varlen != 0) {
        ret <- apply.varlen.to.colnames(rules, varnames, varlen)
        colnames(rules) <- ret$colnames
        varnames <- ret$shortnames
    }
    rules.cover <- rules$cover
    # Expand the "var=", "var<", "var>=" columns into readable conditions.
    rules <- format.rules(rules, style, cover, clip.facs, eq, lt, ge, and, when,
                          digits, trace,
                          response.name, varnames, ncol.fit)
    # Drop columns that ended up entirely empty.
    rules <- rules[, apply(rules, 2, function(col) any(col != "")), drop=FALSE]
    if(!rpart.predict) {
        colnames(rules) <- c(response.name, repl("", ncol(rules)-1))
    } else {
        # rpart.predict wants only the explanation ("because ...") part.
        if(nrow(obj$frame) == 1)
            rules <- as.data.frame(matrix(paste0(because, paste0(" ", null.model)),
                                   nrow=nrow(rules)), stringsAsFactors=FALSE)
        else {
            iwhen <- match("when", colnames(rules))[1]
            if(iwhen < ncol(rules)) {
                rules <- rules[, iwhen:ncol(rules), drop=FALSE]
                rules[1] <- because
            }
            colnames(rules) <- NULL
        }
    }
    if(cover) {
        rules$cover <- sprint("%3.0f%%", as.numeric(rules.cover))
        colnames(rules)[ncol(rules)] <- " cover"
    }
    if(nn) {
        # Leaf node numbers as the first column.
        colnames <- colnames(rules)
        rules <- cbind(rownames(rules), rules, stringsAsFactors=FALSE)
        colnames(rules) <- c("nn", colnames)
    }
    if(rpart.predict) {
        if(style != "wide")
            stop0("style = \"", style, "\" is not supported by rpart.predict")
        check.vec(where, "where", na.ok=TRUE)
        # Map each observation's frame index to its leaf node number, then
        # reindex the rules by node number so rows line up with the data.
        nn <- as.numeric(rownames(obj$frame)[where])
        stopifnot(!any(is.na(nn)))
        rules.nn <- rules
        rules.nn[1:max(nn), ] <- rules[1, , drop=FALSE]
        for(name in rownames(rules))
            rules.nn[as.numeric(name),] <-
                rules[which(rownames(rules) == name), , drop=FALSE]
        rules <- rules.nn[nn, , drop=FALSE]
        rownames(rules) <- NULL
        rules <- rules[, apply(rules, 2, function(col) any(col != "")), drop=FALSE]
    } else if(ncol.fit > 1) {
        # Multi-class fits: bracket the probability vector and label the
        # column with the class names.
        colnames(rules)[2+nn] <- fit.colname(ylevels, fit, ncol.fit)
        rules[,2+nn] <- paste0("[", rules[,2+nn], "]")
        rules[,3+nn] <- paste0(" ", when)
    }
    trim.leading.space.in.columns(rules)
}
# Format the space-separated fitted values parsed from the node labels.
# Returns the formatted strings, the per-row maximum (used for row
# ordering), and the number of fitted columns (1 for regression /
# single-value class labels, >1 for per-class probability vectors).
format.fit <- function(fit, digits, is.class.response)
{
    # Each element of `fit` is "v1 v2 ..."; build an nrow x ncol matrix.
    fit <- strsplit(fit, " ", fixed=TRUE)
    nrow <- length(fit)
    fit <- matrix(as.numeric.na.ok(unlist(fit)), nrow=nrow, byrow=TRUE)
    ncol.fit <- ncol(fit)
    # Two columns = binary class probabilities: order by second-class
    # probability; otherwise by the row maximum.
    rowmaxs <-
        if(ncol.fit == 2) {
            rowmaxs <- fit[,2]
        } else
            rowmaxs <- apply(fit, 1, max)
    if(ncol.fit == 1) {
        fit <-
            if(is.class.response)
                format(sprint("%.2f", fit), justify="right")
            else
                format(fit, digits=digits, justify="right")
    } else {
        # Probability vectors: fixed 2-decimal format, width depending on
        # whether any value reaches 1.
        digits <- 2
        max.rowmaxs <- max(rowmaxs, na.rm=TRUE)
        format <- if(max.rowmaxs >= 1)
                      sprint("%%%d.%df", digits+2, digits)
                  else
                      sprint("%%%d.%df", digits+1, digits)
        fit <- matrix(paste(sprint(format, fit)), nrow=nrow(fit))
        fit <- apply(fit, 1, paste, collapse=" ")
        # Show ".37" instead of "0.37".  NOTE(review): gsub replaces every
        # "0." occurrence in the row string, not only leading zeros —
        # presumably fine because probabilities are < 10.
        fit <- if(max.rowmaxs >= 1)
                   gsub("0.", " .", fit, fixed=TRUE)
               else
                   gsub("0.", ".", fit, fixed=TRUE)
    }
    list(fit=fit, rowmaxs=rowmaxs, ncol.fit=ncol.fit)
}
trim.surrounding.space <- function(s)
{
    ## Strip at most ONE leading and ONE trailing space from each element
    ## (anchored single-space patterns, so inner/extra spaces survive).
    sub(" $", "", sub("^ ", "", s))
}
as.numeric.na.ok <- function(x)
{
    ## as.numeric() without the "NAs introduced by coercion" warning:
    ## entries that cannot be parsed silently become NA.
    suppressWarnings(as.numeric(x))
}
apply.varlen.to.colnames <- function(rules, varnames, varlen)
{
    ## Abbreviate the variable names to at most `varlen` characters and
    ## substitute the short forms into the rules column names.  Variable
    ## columns come in triples ("var=", "var<", "var>=") starting after
    ## the four fixed columns, so variable i owns columns (3*i+2):(3*i+4).
    abbrev <- my.abbreviate(varnames, varlen)
    new.colnames <- colnames(rules)
    for(ivarname in seq_along(varnames)) {
        first.col <- 3 * ivarname + 2
        cols <- first.col:(first.col + 2)
        new.colnames[cols] <-
            sub(varnames[ivarname], abbrev[ivarname], new.colnames[cols],
                fixed=TRUE)
    }
    list(colnames=new.colnames, shortnames=abbrev)
}
# Render the per-variable condition columns of the rules table into
# display columns.  For every variable, the triple of raw columns
# ("var=", "var<", "var>=") is expanded into several generated columns
# (named c1, c2, ...): an "and" connective, the variable name, an
# operator, and the value(s) — including "lo to hi" ranges when both a
# < and a >= bound are present.
format.rules <- function(rules, style, cover, clip.facs, eq, lt, ge, and, when,
                         digits, trace,
                         response.name, varnames, ncol.fit)
{
    # Generate successive column names c1, c2, ... (note <<-).
    n <- function()
    {
        icol <<- icol + 1
        sprint("c%d", icol)
    }
    # Start from the fitted value (plus class label for multi-class fits).
    new <- if(ncol.fit > 1)
               data.frame(class=rules$lab, fit=rules$fit, stringsAsFactors=FALSE)
           else
               data.frame(fit=rules$fit, stringsAsFactors=FALSE)
    rownames(new) <- rownames(rules)
    new$when <- when
    icol <- 0
    # TRUE for rows that already have at least one condition printed,
    # i.e. rows that need the "and" connective before the next condition.
    subsequent <- repl(FALSE, nrow(rules))
    for(i in seq_along(varnames)) {
        varname <- varnames[i]
        ivar <- 3 * i + 2
        rules.eq <- rules[, ivar]
        rules.lt <- rules[, ivar+1]
        rules.ge <- rules[, ivar+2]
        is.eq <- rules.eq != ""
        is.lt <- rules.lt != ""
        is.ge <- rules.ge != ""
        lt.or.ge <- is.lt | is.ge
        if(any(is.eq)) {
            # Factor/equality conditions.  With clip.facs, binary "0"/"1"
            # levels print as "not var" / "var" with no operator.
            new[,n()] <- ifelse(subsequent & is.eq, and, "")
            subsequent <- (subsequent | is.eq)
            new[,n()] <- ifelse(clip.facs & is.eq & rules.eq == "0",
                                sprint("not %s", varname),
                         ifelse(clip.facs & is.eq & rules.eq == "1",
                                sprint("%s ", varname),
                         ifelse(!clip.facs & is.eq,
                                sprint("%s", varname), "")))
            new[,n()] <- ifelse(is.eq & !clip.facs, eq, "")
            new[,n()] <- ifelse(is.eq & (!clip.facs | (rules.eq != "1" & rules.eq != "0")),
                                rules.eq, "")
        } else if(any(lt.or.ge)) {
            # Numeric conditions.
            new[,n()] <- ifelse(subsequent & lt.or.ge, and, "")
            subsequent <- (subsequent | lt.or.ge)
            # Nudge values before formatting so ties round predictably.
            verysmall <- exp10(-abs(digits) - 8)
            if(any(is.lt))
                rules.lt <- format(as.numeric(rules.lt) + verysmall,
                                   digits=digits, justify="right")
            if(any(is.ge))
                rules.ge <- format(as.numeric(rules.ge) + verysmall,
                                   digits=digits, justify="right")
            new[,n()] <- ifelse(lt.or.ge, varname, "")
            if(any(is.lt & is.ge)) {
                # Some rows are two-sided: print "var ge.value to lt.value";
                # one-sided rows in the same columns get their single bound.
                new[,n()] <- ifelse(is.lt & !is.ge, lt,
                             ifelse(is.ge & !is.lt, ge,
                             ifelse(is.ge | is.lt, eq, "")))
                new[,n()] <- ifelse(is.lt & is.ge, rules.ge,
                             ifelse(is.lt, rules.lt, ""))
                new[,n()] <- ifelse(is.lt & is.ge, "to", "")
                new[,n()] <- ifelse(is.lt & is.ge, rules.lt,
                             ifelse(is.ge, rules.ge, ""))
            } else {
                new[,n()] <- ifelse(is.lt, lt,
                             ifelse(is.ge, ge, ""))
                new[,n()] <- ifelse(is.lt, rules.lt,
                             ifelse(is.ge, rules.ge, ""))
            }
        }
    }
    new
}
# Column header for a multi-class probability column: the class names,
# each right-padded/truncated to the width of one formatted probability
# (measured from the position of the first space in a fitted string).
fit.colname <- function(ylevels, fit, ncol.fit)
{
    ylevels <- ylevels[1:ncol.fit]
    # Width of one probability field, from the first fitted-value string.
    width <- unlist(gregexpr(" ", substring(fit, 2)))[1]
    if(width < 1)
        width <- 1
    # e.g. "%5.5s": fixed-width, truncated class names.
    format <- sprint("%%%d.%ds", width, width)
    colname <- paste.collapse(sprint(format, ylevels))
    colname <- paste0(colname, " ")
    colname
}
trim.leading.space.in.columns <- function(x)
{
    # For each column of x: if every non-empty entry starts with a space,
    # strip the common leading character(s) up to the earliest match start.
    # gregexpr() yields match START positions (1 when "^ +" matches,
    # -1 when it does not), so a column is trimmed only when all of its
    # non-empty cells begin with at least one space.
    stopifnot(NROW(x) > 0)
    for(col in 1:NCOL(x)) {
        cells <- x[,col]
        cells <- cells[cells != ""]            # empty cells don't veto trimming
        starts <- unlist(gregexpr("^ +", cells))
        if(!is.null(starts)) {                 # NULL when no non-empty cells
            first <- min(starts)
            if(first > 0)                      # -1 present => some cell had no leading space
                x[,col] <- substring(x[,col], first+1)
        }
    }
    x
}
var.is.in.rule <- function(varname, rule)
{
    # Does this rule reference varname through any of its three
    # per-variable columns (equality, less-than, greater-or-equal)?
    used.eq <- rule[paste0(varname, "=") ] != ""
    used.lt <- rule[paste0(varname, "<") ] != ""
    used.ge <- rule[paste0(varname, ">=")] != ""
    used.eq || used.lt || used.ge
}
parse.split.lab <- function(split.lab, trace)
{
    # Split a bar-delimited "varname|op|cut" label into its three parts.
    bars <- gregexpr("|", split.lab, fixed=TRUE)[[1]]
    # Exactly two bars are required, with at least one char between them.
    if(length(bars) != 2 || bars[2] < bars[1] + 2)
        stopf("Cannot parse split.lab %s", bar.to.space(split.lab))
    varname <- substring(split.lab, 1,         bars[1]-1)
    op      <- substring(split.lab, bars[1]+1, bars[2]-1)
    cut     <- substring(split.lab, bars[2]+1)
    trace2(trace,
        " split.lab %-20.20s varname %s op %s cut %s\n",
        bar.to.space(split.lab), varname, op, cut)
    list(varname=varname, op=op, cut=cut)
}
# Split an rpart node label into its (up to) three newline-separated parts:
# the class label, the fitted value(s), and the cover string.
# One newline  => fit\ncover (no class label).
# Two newlines => lab\nfit\ncover.
# More than two newlines: everything between first and last is the fit.
parse.node.lab <- function(node.lab)
{
    err <- function(node.lab)
        stop0("Cannot parse node.lab \"",
              gsub("\n", "\\\\n", node.lab[1]), "\"")
    # Defaults: empty string for each element of node.lab.
    lab <- fit <- cover <- repl("", length(node.lab))
    i <- gregexpr("\n", node.lab, fixed=TRUE)[[1]]
    if(length(i) > 2) {
        # Collapse to first/last newline: middle newlines belong to fit.
        i <- c(i[1], i[length(i)])
    }
    if(length(i) == 1) {
        if(i <= 0) # gregexpr returns -1 when there is no newline at all
            err(node.lab)
        fit <- substr(node.lab, 1, i-1)
        # cover drops the label's final character (nchar - 1).
        cover <- substr(node.lab, i+1, nchar(node.lab)-1)
    } else if(length(i) == 2) {
        lab <- substr(node.lab, 1, i[1]-1)
        fit <- substr(node.lab, i[1]+1, i[2]-1)
        # NOTE(review): the next gsub replaces a space with a space -- a
        # no-op as written; presumably it was meant to squeeze doubled
        # spaces. Confirm against upstream before changing.
        fit <- gsub(" ", " ", fit)
        fit <- gsub("\n", " ", fit)
        cover <- substr(node.lab, i[2]+1, nchar(node.lab)-1)
    } else
        err(node.lab)
    list(lab=lab, fit=fit, cover=cover)
}
bar.to.space <- function(s)
{
    # For messages: turn the internal bar-delimited form into readable
    # text (bars become spaces) and quote it.
    spaced <- gsub("|", " ", s, fixed=TRUE)
    quote.with.c(spaced)
}
order.rows <- function(rules, rowmaxs)
{
    # Sort rules by class index, then by row maximum, with original row
    # position as the final tie-breaker; NAs sort to the end.
    idx <- order(as.numeric(rules$iclass), rowmaxs, seq_along(rowmaxs),
                 na.last=TRUE)
    rules[idx, , drop=FALSE]
}
order.cols <- function(rules, varorder, varnames, nrules.per.var)
{
    # Decide the left-to-right variable order for the rule table:
    # by default, variables with the most rules come first.
    ord <- order(nrules.per.var, decreasing=TRUE)
    # A user-supplied varorder overrides the default.  Entries are
    # processed in reverse so that, after repeated move-to-front, the
    # user's first entry ends up leftmost.
    if(!is.null(varorder)) {
        stopifnot(is.character(varorder))
        varorder <- rev(varorder)
        matched <- pmatch(varorder, varnames, duplicates.ok=TRUE)
        for(k in seq_along(matched)) {
            if(is.na(matched[k]))
                warnf(
                    "varorder=\"%s\" does not uniquely match one of: %s",
                    varorder[k], paste.trunc(quotify(varnames)))
            else {
                ord <- ord[ord != matched[k]]   # move matched var to front
                ord <- c(matched[k], ord)
            }
        }
    }
    # Each variable owns three adjacent columns (eq, lt, ge) at 3i + 2:4.
    sel <- 3 * rep(ord, each=3) + c(2, 3, 4)
    list(rules    = rules[, c(1:4, sel), drop=FALSE],
         varnames = varnames[ord])
}
# Print a rule table in "tall" style: one rule per paragraph, with each
# "and"-joined condition on its own line.  Style "tallw" additionally pads
# continuation lines by the width of the class-probabilities column.
# Side effects only (prints via printf); no return value of interest.
print.style.tall <- function(rules, style, eq, and, when)
{
    # Emit a newline plus padding so continuation lines align under the
    # first condition of the rule.
    newline.with.spaces <- function()
    {
        printf("\n")
        if(style == "tall")
            printf("  ")
        else
            printf(format, "", " ", "")
        printf(format2, "")
        if(have.nn)
            printf(format.nn, "")
    }
    colnames <- colnames(rules)
    ncol <- ncol(rules)
    # Optional leading node-number column and trailing cover column are
    # detected from the column names.
    have.nn <- colnames[1] == "nn"
    have.cover <- colnames[ncol] == " cover"
    response.name <- colnames[1 + have.nn]
    class.probs <- colnames[2 + have.nn]
    have.class.probs <- class.probs != ""
    if(is.null(and))
        and <- " & "
    # format: "<response> <eq> <prediction>", padded to fixed widths.
    format <- sprint("%%-%ds %%s %%-%ds",
                     nchar(response.name),
                     max(nchar(rules[, 1 + have.nn])))
    nn.width <- if(have.nn) max(nchar(rules[, 1]))+3 else 0
    format.nn <- sprint("%%-%ds", nn.width)
    format2 <- "%0.0s"  # by default consumes its argument, prints nothing
    if(have.class.probs) {
        # Header line showing the class-probabilities column name.
        printf(format.nn, "")
        printf(format, "", " ", "")
        printf(" %s\n", colnames[2 + have.nn])
        if(style == "tallw")
            format2 <- sprint("%%-%ds ", nchar(colnames[2 + have.nn]))
    }
    for(i in 1:nrow(rules)) {
        if(have.nn)
            printf(format.nn, sprint("[%s] ", rules[i, 1]))
        printf(format, response.name, eq, rules[i, 1 + have.nn])
        # Walk the remaining cells; "when" and "and" markers trigger
        # line breaks, everything else is printed inline.
        for(j in (2 + have.nn):(ncol(rules) - have.cover)) {
            e <- trim.surrounding.space(rules[i, j])
            if(nchar(e)) {
                if(e == when) {
                    if(have.cover)
                        printf(" with cover %-s", gsub("^ *", "", rules[i, ncol]))
                    printf(" %s", if(when == ":EMPTY:") "" else when)
                    newline.with.spaces()
                } else if(e == and)
                    newline.with.spaces()
                else
                    printf(" %s", e)
            }
        }
        printf("\n")
        if(i != nrow(rules)) # blank line between rules
            printf("\n")
    }
}
# Currently a no-op: takes no arguments and returns NULL.
stop.if.dot.arg.used <- function()
{
    NULL
}
# Validate dot arguments forwarded from rpart.rules(): every rpart.plot
# plotting-only argument is accepted here but, if explicitly supplied,
# triggers an "ignoring argument" warning.  The handful of arguments that
# rpart.rules() actually honors (extra, digits, varlen, faclen, trace,
# facsep, eq, lt, ge, and, when, because, null.model, response.name,
# RPART.PREDICT, WHERE) are collected into a list and returned.
# NOTE(review): the default border.col=col references `col`, which is not
# an argument here; it is never forced because border.col is only passed
# to warn1 (which uses substitute without evaluating) -- confirm intended.
check.if.dot.arg.supported.by.rpart.rules <- function(x=stop("no 'x' arg"),
    type=0,
    extra="auto",
    under=FALSE, fallen.leaves=FALSE,
    nn=FALSE, ni=FALSE, yesno=TRUE,
    branch=if(fallen.leaves) 1 else .2,
    uniform=TRUE, left=TRUE, xflip=FALSE, yflip=FALSE,
    digits=2,
    varlen=0, faclen=0,
    cex=NULL, tweak=1,
    clip.right.labs=TRUE,
    compress=TRUE, ycompress=uniform,
    Margin=0, space=1, gap=NULL,
    snip=FALSE, snip.fun=NULL, trace=FALSE,
    box.col=0, box.palette=0,
    pal.thresh=NULL, pal.node.fun=FALSE,
    border.col=col,
    round=NULL, leaf.round=NULL,
    shadow.col=0, prefix="", suffix="", xsep=NULL,
    under.font=1, under.col=1, under.cex=.8,
    split.cex=1, split.font=2, split.family=1, split.col=1,
    split.box.col=0, split.border.col=0,
    split.lty=1, split.lwd=NULL, split.round=0,
    split.shadow.col=0,
    split.prefix="", right.split.prefix=NULL,
    split.suffix="", right.split.suffix=NULL,
    facsep=" or ", eq=" is ",
    lt=" < ", ge=" >= ",
    branch.col=if(is.zero(branch.type)) 1 else "gray",
    branch.lty=1, branch.lwd=NULL,
    branch.type=0, branch.tweak=1,
    min.branch.width=.002, branch.fill=branch.col,
    nn.cex=NULL, nn.font=3, nn.family="", nn.col=1,
    nn.box.col=0, nn.border.col=nn.col,
    nn.lty=1, nn.lwd=NULL, nn.round=.3,
    yes.text="yes", no.text="no",
    node.fun=NULL,
    split.fun=NULL,
    FUN="text",
    nspace=branch, minbranch=.3, do.par=TRUE,
    add.labs=TRUE,
    clip.left.labs=(type == 5),
    fam.main="",
    yshift=0, yspace=space, shadow.offset=.4,
    split.adj=NULL, split.yshift=0, split.space=space,
    split.yspace=yspace, split.shadow.offset=shadow.offset,
    nn.adj=.5, nn.yshift=0, nn.space=.8, nn.yspace=.5,
    ygap=gap/2, under.ygap=.5, yesno.yshift=0,
    xcompact=TRUE, ycompact=uniform, xcompact.ratio=.8, min.inter.height=4,
    max.auto.cex=1, min.auto.cex=.15, ycompress.cex=.7, accept.cex=1.1,
    shift.amounts=c(1.5, 2),
    Fallen.yspace=.1,
    boxes.include.gap=FALSE,
    legend.x=NULL, legend.y=NULL, legend.cex=1,
    and=" & ", when=" when ", because=" because ", null.model="null model",
    response.name=NULL,
    RPART.PREDICT=FALSE, WHERE=NULL)
{
    # Warn (without evaluating the argument) that an explicitly supplied
    # plotting argument is ignored by rpart.rules().
    warn1 <- function(arg)
    {
        warnf("rpart.rules: ignoring argument '%s'", deparse(substitute(arg)))
    }
    # One missing() check per plotting-only argument.
    if(!missing(type)) warn1(type)
    if(!missing(under)) warn1(under)
    if(!missing(fallen.leaves)) warn1(fallen.leaves)
    if(!missing(nn)) warn1(nn)
    if(!missing(ni)) warn1(ni)
    if(!missing(yesno)) warn1(yesno)
    if(!missing(branch)) warn1(branch)
    if(!missing(uniform)) warn1(uniform)
    if(!missing(left)) warn1(left)
    if(!missing(xflip)) warn1(xflip)
    if(!missing(yflip)) warn1(yflip)
    if(!missing(cex)) warn1(cex)
    if(!missing(tweak)) warn1(tweak)
    if(!missing(clip.right.labs)) warn1(clip.right.labs)
    if(!missing(compress)) warn1(compress)
    if(!missing(ycompress)) warn1(ycompress)
    if(!missing(Margin)) warn1(Margin)
    if(!missing(space)) warn1(space)
    if(!missing(gap)) warn1(gap)
    if(!missing(snip)) warn1(snip)
    if(!missing(snip.fun)) warn1(snip.fun)
    if(!missing(box.col)) warn1(box.col)
    if(!missing(box.palette)) warn1(box.palette)
    if(!missing(pal.thresh)) warn1(pal.thresh)
    if(!missing(pal.node.fun)) warn1(pal.node.fun)
    if(!missing(border.col)) warn1(border.col)
    if(!missing(round)) warn1(round)
    if(!missing(leaf.round)) warn1(leaf.round)
    if(!missing(shadow.col)) warn1(shadow.col)
    if(!missing(prefix)) warn1(prefix)
    if(!missing(suffix)) warn1(suffix)
    if(!missing(xsep)) warn1(xsep)
    if(!missing(under.font)) warn1(under.font)
    if(!missing(under.col)) warn1(under.col)
    if(!missing(under.cex)) warn1(under.cex)
    if(!missing(split.cex)) warn1(split.cex)
    if(!missing(split.font)) warn1(split.font)
    if(!missing(split.family)) warn1(split.family)
    if(!missing(split.col)) warn1(split.col)
    if(!missing(split.box.col)) warn1(split.box.col)
    if(!missing(split.border.col)) warn1(split.border.col)
    if(!missing(split.lty)) warn1(split.lty)
    if(!missing(split.lwd)) warn1(split.lwd)
    if(!missing(split.round)) warn1(split.round)
    if(!missing(split.shadow.col)) warn1(split.shadow.col)
    if(!missing(split.prefix)) warn1(split.prefix)
    if(!missing(right.split.prefix)) warn1(right.split.prefix)
    if(!missing(split.suffix)) warn1(split.suffix)
    if(!missing(right.split.suffix)) warn1(right.split.suffix)
    if(!missing(branch.col)) warn1(branch.col)
    if(!missing(branch.lty)) warn1(branch.lty)
    if(!missing(branch.lwd)) warn1(branch.lwd)
    if(!missing(branch.type)) warn1(branch.type)
    if(!missing(branch.tweak)) warn1(branch.tweak)
    if(!missing(min.branch.width)) warn1(min.branch.width)
    if(!missing(branch.fill)) warn1(branch.fill)
    if(!missing(nn.cex)) warn1(nn.cex)
    if(!missing(nn.font)) warn1(nn.font)
    if(!missing(nn.family)) warn1(nn.family)
    if(!missing(nn.col)) warn1(nn.col)
    if(!missing(nn.box.col)) warn1(nn.box.col)
    if(!missing(nn.border.col)) warn1(nn.border.col)
    if(!missing(nn.lty)) warn1(nn.lty)
    if(!missing(nn.lwd)) warn1(nn.lwd)
    if(!missing(nn.round)) warn1(nn.round)
    if(!missing(yes.text)) warn1(yes.text)
    if(!missing(no.text)) warn1(no.text)
    if(!missing(node.fun)) warn1(node.fun)
    if(!missing(split.fun)) warn1(split.fun)
    if(!missing(FUN)) warn1(FUN)
    if(!missing(nspace)) warn1(nspace)
    if(!missing(minbranch)) warn1(minbranch)
    if(!missing(do.par)) warn1(do.par)
    if(!missing(add.labs)) warn1(add.labs)
    if(!missing(clip.left.labs)) warn1(clip.left.labs)
    if(!missing(fam.main)) warn1(fam.main)
    if(!missing(yshift)) warn1(yshift)
    if(!missing(yspace)) warn1(yspace)
    if(!missing(shadow.offset)) warn1(shadow.offset)
    if(!missing(split.adj)) warn1(split.adj)
    if(!missing(split.yshift)) warn1(split.yshift)
    if(!missing(split.space)) warn1(split.space)
    if(!missing(split.yspace)) warn1(split.yspace)
    if(!missing(split.shadow.offset)) warn1(split.shadow.offset)
    if(!missing(nn.adj)) warn1(nn.adj)
    if(!missing(nn.yshift)) warn1(nn.yshift)
    if(!missing(nn.space)) warn1(nn.space)
    if(!missing(nn.yspace)) warn1(nn.yspace)
    if(!missing(ygap)) warn1(ygap)
    if(!missing(under.ygap)) warn1(under.ygap)
    if(!missing(yesno.yshift)) warn1(yesno.yshift)
    if(!missing(xcompact)) warn1(xcompact)
    if(!missing(ycompact)) warn1(ycompact)
    if(!missing(xcompact.ratio)) warn1(xcompact.ratio)
    if(!missing(min.inter.height)) warn1(min.inter.height)
    if(!missing(max.auto.cex)) warn1(max.auto.cex)
    if(!missing(min.auto.cex)) warn1(min.auto.cex)
    if(!missing(ycompress.cex)) warn1(ycompress.cex)
    if(!missing(accept.cex)) warn1(accept.cex)
    if(!missing(shift.amounts)) warn1(shift.amounts)
    if(!missing(Fallen.yspace)) warn1(Fallen.yspace)
    if(!missing(boxes.include.gap)) warn1(boxes.include.gap)
    if(!missing(legend.x)) warn1(legend.x)
    if(!missing(legend.y)) warn1(legend.y)
    if(!missing(legend.cex)) warn1(legend.cex)
    # Arguments actually used by rpart.rules(), returned as a list.
    list(extra=extra, digits=digits, varlen=varlen, faclen=faclen, trace=trace,
         facsep=facsep, eq=eq, lt=lt, ge=ge, and=and,
         when=when, because=because, null.model=null.model,
         response.name=response.name,
         rpart.predict=RPART.PREDICT, where=WHERE)
}
event.size.plot <- function(netfacs.data) {
  # Plot observed vs expected probability for each event (combination)
  # size in a netfacs result object; returns the ggplot object.
  size.info <- netfacs.data$event.size.information
  # Keep only sizes that occur under at least one of the two models.
  size.info <- size.info[size.info$observed.prob > 0 |
    size.info$expected.prob > 0, ]
  # Fall back to generic labels when the conditions were not recorded.
  if (is.null(netfacs.data$used.parameters$test.condition)) {
    netfacs.data$used.parameters$test.condition <- "all cases"
  }
  if (is.null(netfacs.data$used.parameters$null.condition)) {
    netfacs.data$used.parameters$null.condition <- "random"
  }
  # Long format: the expected-probability block first, then the observed
  # block; the recycled two-level type vector is sorted afterwards so it
  # lines up with that block order.
  plot.data <- data.frame(
    combination.size = rep(size.info$combination.size, times = 2),
    prob = c(size.info$expected.prob, size.info$observed.prob),
    type = c("expected probability", "observed probability")
  )
  plot.data$type <- sort(plot.data$type)
  plot.data$combination.size <- as.factor(plot.data$combination.size)
  plot.title <- paste(
    c(
      "Comparison of event sizes between ",
      netfacs.data$used.parameters$test.condition,
      " and ",
      netfacs.data$used.parameters$null.condition
    ),
    collapse = ""
  )
  p <- ggplot(
    plot.data,
    aes(
      x = .data$combination.size,
      y = .data$prob,
      color = .data$type
    )
  ) +
    xlab("element size") +
    ylab("event size probability") +
    ggtitle(plot.title) +
    geom_point(size = 3, alpha = 0.7) +
    ylim(0, 1) +
    theme_bw()
  return(p)
}
realloc <- function(h, x, nh, Nh, nume, my_env){
    # Reallocate stratum sample sizes: any stratum whose allocation nh
    # exceeds its population size Nh is capped at Nh, and the total excess
    # is redistributed among the non-full strata in proportion to nume.
    # The adjusted nh vector is written into the caller-supplied
    # environment (my_env$nh); x supplies the stratum count via length(x)-1.
    excess <- double(h)
    total.excess <- 0
    denom <- 0
    strata <- 1:(length(x)-1)
    for(s in strata)
    {
        if(nh[s] > Nh[s]){
            # Over-allocated: cap at Nh and bank the surplus.
            excess[s] <- nh[s] - Nh[s]
            total.excess <- total.excess + excess[s]
            nh[s] <- Nh[s]
        }
        else if(nh[s] < Nh[s]){
            # Candidate to receive surplus; accumulate its weight.
            denom <- denom + nume[s]
        }
        # nh[s] == Nh[s]: exactly full, left untouched.
    }
    for(s in strata)
    {
        if(nh[s] < Nh[s]){
            nh[s] <- nh[s] + total.excess*(nume[s]/denom)
        }
    }
    my_env$nh <- nh
}
type_ensure <- function(df, ensure_nms, type = "numeric") {
  # Coerce the named columns of df to `type` (default numeric), warning
  # first about any column whose current type differs.
  target_cols <- df[ensure_nms]
  type_check <- rlang::as_function(paste0("is.", type))
  needs_fix <- !purrr::map_lgl(target_cols, type_check)
  warn_if_changing_type(ensure = target_cols, is_to_fix = needs_fix, type = type)
  coerce <- rlang::as_function(paste0("as.", type))
  purrr::modify_at(df, names(target_cols[needs_fix]), coerce)
}
warn_if_changing_type <- function(ensure, is_to_fix, type) {
  # Emit a single warning listing the columns about to be coerced and the
  # types they currently have; silent when nothing needs fixing.
  fix_nms <- names(ensure[is_to_fix])
  if (any(is_to_fix)) {
    found_types <- commas(unique(purrr::map_chr(ensure[fix_nms], typeof)))
    warn(paste0(
      commas(fix_nms), " should be ", type, ". Type found: ", found_types, "\n",
      "* Changing type (of ", commas(fix_nms), ") accordingly."
    ))
  }
}
# Attach an inline CSS style to selected second-header cells of a
# tableHTML object.
#
# tableHTML      : a tableHTML object (validated via inherits()).
# css            : list of two equal-length vectors -- CSS property names
#                  and their values.
# second_headers : indices of the second-header cells to style.
#
# Returns the modified tableHTML object with its attributes preserved.
add_css_second_header <- function(tableHTML,
                                  css,
                                  second_headers) {
 if (!inherits(tableHTML, 'tableHTML')) stop('tableHTML needs to be a tableHTML object')
 if (length(css[[1]]) != length(css[[2]])) stop('css needs to be a list of two elements of the
                                                same length')
 # Preserve attributes: gsub() below strips them from the character object.
 attributes <- attributes(tableHTML)
 # Build 'style="prop1:val1;prop2:val2;..."'.
 css_comp <- paste0(css[[1]], ':', css[[2]], ';')
 css_comp <- paste(css_comp, collapse = '')
 style <- paste0('style="', css_comp, '"')
 for (i in second_headers) {
   # First strip any existing style= marker after the id, then re-insert
   # the id followed by the new style attribute.
   tableHTML <- gsub(paste0('id="tableHTML_second_header_', i, '" style='),
                     paste0('id="tableHTML_second_header_', i, '"'),
                     tableHTML)
   tableHTML <- gsub(paste0('id="tableHTML_second_header_', i, '"'),
                     paste0('id="tableHTML_second_header_', i, '" ', style),
                     tableHTML)
   # Clean up a dangling quote pair left by the style= substitution.
   tableHTML <- gsub(';""', ';', tableHTML)
 }
 attributes(tableHTML) <- attributes
 tableHTML
}
context('Testing \'bitbucket\'')
# These tests contact the Bitbucket API for real repositories, so they are
# skipped when offline and on CRAN.
skip_if_offline()
skip_on_cran()
test_that('bitbucket_repo_search() works', {
  # An existing repo yields a data.frame; a nonexistent repo warns.
  res <- bitbucket_repo_search(repo = 'dominicjbennett/om..hello.world')
  expect_true(inherits(res, 'data.frame'))
  expect_warning(bitbucket_repo_search(repo = 'dominicjbennett/notarepo'))
})
test_that('bitbucket_tags() works', {
  # An existing repo yields a tibble of tags; a nonexistent repo warns.
  res <- bitbucket_tags(repos = 'dominicjbennett/om..hello.world')
  expect_true(inherits(res, 'tbl_df'))
  expect_warning(bitbucket_tags(repos = 'dominicjbennett/notarepo'))
})
# Fit a BEAST model (via the BEASTV4 C backend) to one or many time series.
#
# Args:
#   Y        : numeric vector, matrix, or 3D array of time-series values
#              (a list or a length-1 input is rejected with a warning).
#   metadata : list describing the time dimension, or a numeric shorthand:
#              when season == "none", metadata[1]=startTime and (optionally)
#              metadata[2]=deltaTime; otherwise metadata[1]=period.
#   prior, mcmc, extra : lists passed through to the C routine unchanged.
#   season   : one of "harmonic", "dummy", "none".
#
# Returns (invisibly) the result of the BEASTV4 C routine, or NULL
# (invisibly, after a warning) when Y is unusable.
beast123 <- function(
     Y,
     metadata=list(), prior=list(), mcmc=list(), extra=list(),
     season=c('harmonic','dummy','none'), ... )
{
  if (!hasArg("Y") || is.list(Y) || length(Y)==1) {
      warning("Something is wrong with the input 'Y'. Make sure the Y par is a vector, a matrix, or an 3D array.")
      # Was invisible(return(NULL)): return() fired while evaluating
      # invisible()'s argument, so the NULL was returned visibly.
      return(invisible(NULL))
  }
  season = match.arg(season)
  if (season=='none') {
      if (is.numeric(metadata)) {
          # Numeric shorthand: c(startTime) or c(startTime, deltaTime).
          tmp      = metadata
          metadata = list()
          if (length(tmp)==1) {
              metadata$startTime = tmp[1]   # was tmp(1): called tmp as a function
          } else if (length(tmp)==2) {
              metadata$startTime = tmp[1]
              metadata$deltaTime = tmp[2]
          }
      } else if (!is.list(metadata)) {
          # Was 'metatata=list()': the typo created a stray variable and
          # left a non-list metadata unreset.
          metadata = list()
      }
  } else {
      if (is.numeric(metadata)) {
          # Numeric shorthand: c(period).
          tmp      = metadata
          metadata = list()
          if (length(tmp)==1) {
              metadata$period = tmp[1]      # was tmp(1)
          }
      } else if (!is.list(metadata)) {
          metadata = list()
      }
  }
  if (is.null(metadata$season))
      metadata$season = season
  funstr = 'beastv4'
  ANS = .Call(
      BEASTV4_rexFunction,
      list(funstr, Y, metadata, prior, mcmc, extra),
      212345)
  return(invisible(ANS))
}
# Centered moving-average filter of window width n.
#
# Uses stats::filter (explicitly namespaced so dplyr::filter cannot mask
# it) with equal weights 1/n, so each output point is the MEAN of the
# surrounding window; positions where the window runs off either end are
# NA.  The previous rep(1, n) weights computed a moving SUM, contradicting
# the function's name.
meanfilter <- function(x, n = 5) {
  stats::filter(x, rep(1 / n, n), sides = 2)
}
|
# Run one simulation replicate comparing five binomial GAM fitters:
# mgcv (default basis), mgcv (P-splines), gamm4, and vagam with
# unstructured / block-diagonal A matrices.
#
# Args:
#   t                  : replicate index; seeds the RNG (123 + t) and vagam.
#   band_const         : knot multiplier; choose.k = band_const * ceiling(n^0.2).
#   choose.n           : sample size for the simulated data.
#   num.holdout.points : points held out to score smooth-term intervals.
#   true.beta          : true parametric coefficients (intercept, treatment).
#   CI                 : nominal coverage level for all intervals.
#
# Returns a 9 x 5 matrix: performance measures (rows) per method (columns).
# Fix applied: removed leftover Rprof()/Rprof(NULL) profiler instrumentation
# around the first vagam fit, which wrote an Rprof.out file as a side effect.
simrun_binomial <- function(t, band_const, choose.n = 100,
     num.holdout.points = 10, true.beta = c(-1, 0.5),
     CI = 0.95)
{
     set.seed(123 + t)
     choose.k <- band_const * ceiling(choose.n^0.2)
     all.results <- matrix(NA, nrow = 9, ncol = 5)
     rownames(all.results) = c("comptime", "mse", "bias-para", "mse-para", "confint-para", "width-para", "width-smooth", "intervalscore-smooth","mse-meanresp")
     colnames(all.results) = c("mgcv-default", "mgcv-psplines", "gamm4", "vagams-unstructuredA", "vagams-bdiagA")
     sel.holdout.points <- sample(1:choose.n, num.holdout.points)
     # Simulated binomial data with a treatment effect and four smooths.
     binomial_dat <- gamsim(n = choose.n, dist = "binomial", extra.X = data.frame(intercept = rep(1,choose.n), treatment = rep(c(0,1), each = choose.n/2)), beta = true.beta)
     # --- Method 1: mgcv with default thin-plate basis ---
     tic <- proc.time()
     fit.mgcv1 <- gam(y~treatment + s(x0) + s(x1) + s(x2) + s(x3), data = binomial_dat, family = binomial(link = "logit"))
     all.results[1,1] <- (proc.time() - tic)[3]
     all.results[2,1] <- mean((fit.mgcv1$linear.predictors - binomial_dat$linear.predictor)^2)
     all.results[3,1] <- fit.mgcv1$coefficients[2] - true.beta[2]
     all.results[4,1] <- (fit.mgcv1$coefficients[2] - true.beta[2])^2
     make.ci <- c(summary(fit.mgcv1)$p.coeff[2] - qnorm(0.5 + CI/2) *summary(fit.mgcv1)$se[2],
          summary(fit.mgcv1)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv1)$se[2])
     all.results[5,1] <- findInterval(true.beta[2], make.ci) == 1
     all.results[6,1] <- diff(make.ci)
     # Refit without the holdout points and score interval width/coverage
     # (interval score) for the smooth part at those points.
     holdout.fit <- function(data, holdout.points)
     {
          new.dat <- data[-holdout.points,]
          fit1 <- gam(y ~ treatment + s(x0) + s(x1) + s(x2) + s(x3), family = binomial(link = "logit"), data = new.dat)
          get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
          get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit))
          make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
          all.widths <- apply(make.ci,1,diff)
          all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
          all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
          return(cbind(all.widths, all.coverage, all.interval.score))
     }
     do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
     all.results[7,1] <- colMeans(do.holdfits)[1]
     all.results[8,1] <- colMeans(do.holdfits)[3]
     all.results[9,1] <- mean((binomial()$linkinv(fit.mgcv1$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
     rm(holdout.fit)
     # --- Method 2: mgcv with P-spline basis ---
     tic <- proc.time()
     fit.mgcv2 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = binomial_dat, family = binomial(link = "logit"))
     all.results[1,2] <- (proc.time() - tic)[3]
     all.results[2,2] <- mean((fit.mgcv2$linear.predictors - binomial_dat$linear.predictor)^2)
     all.results[3,2] <- fit.mgcv2$coefficients[2] - true.beta[2]
     all.results[4,2] <- (fit.mgcv2$coefficients[2] - true.beta[2])^2
     make.ci <- c(summary(fit.mgcv2)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2],
          summary(fit.mgcv2)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.mgcv2)$se[2])
     all.results[5,2] <- findInterval(true.beta[2], make.ci) == 1
     all.results[6,2] <- diff(make.ci)
     holdout.fit <- function(data, holdout.points)
     {
          new.dat <- data[-holdout.points,]
          fit1 <- gam(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), family = binomial(link = "logit"), data = new.dat)
          get.pred <- predict.gam(fit1, newdata = data[holdout.points,], se.fit = TRUE)
          get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$coefficients[1:2], se.fit = c(get.pred$se.fit))
          make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
          all.widths <- apply(make.ci,1,diff)
          all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
          all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
          return(cbind(all.widths, all.coverage, all.interval.score))
     }
     do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
     all.results[7,2] <- colMeans(do.holdfits)[1]
     all.results[8,2] <- colMeans(do.holdfits)[3]
     all.results[9,2] <- mean((binomial()$linkinv(fit.mgcv2$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
     rm(holdout.fit)
     # --- Method 3: gamm4 ---
     tic <- proc.time()
     fit.gamm4 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k+2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), data = binomial_dat, family = binomial(link = "logit"))
     all.results[1,3] <- (proc.time() - tic)[3]
     all.results[2,3] <- mean((fit.gamm4$gam$linear.predictors - binomial_dat$linear.predictor)^2)
     all.results[3,3] <- fit.gamm4$gam$coefficients[2] - true.beta[2]
     all.results[4,3] <- (fit.gamm4$gam$coefficients[2] - true.beta[2])^2
     make.ci <- c(summary(fit.gamm4$gam)$p.coeff[2] - qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2],
          summary(fit.gamm4$gam)$p.coeff[2] + qnorm(0.5 + CI/2) * summary(fit.gamm4$gam)$se[2])
     all.results[5,3] <- findInterval(true.beta[2], make.ci) == 1
     all.results[6,3] <- diff(make.ci)
     holdout.fit <- function(data, holdout.points)
     {
          new.dat <- data[-holdout.points,]
          fit1 <- gamm4(y ~ treatment + s(x0, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x1, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x2, bs = "ps", k = choose.k + 2, m = c(2,1)) + s(x3, bs = "ps", k = choose.k + 2, m = c(2,1)), family = binomial(link = "logit"), data = new.dat)
          get.pred <- predict.gam(fit1$gam, newdata = data[holdout.points,], se.fit = TRUE)
          get.pred <- list(fit = c(get.pred$fit) - cbind(1,data[holdout.points,"treatment"])%*%fit1$gam$coefficients[1:2], se.fit = c(get.pred$se.fit))
          make.ci <- cbind(get.pred$fit - qnorm(0.5 + CI/2) * get.pred$se.fit, get.pred$fit + qnorm(0.5 + CI/2) * get.pred$se.fit)
          all.widths <- apply(make.ci,1,diff)
          all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
          all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
          return(cbind(all.widths, all.coverage, all.interval.score))
     }
     # gamm4 can fail on the reduced data; leave NA scores in that case.
     do.holdfits <- try(holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points), silent = TRUE)
     if(!inherits(do.holdfits, "try-error"))
     {
          all.results[7,3] <- colMeans(do.holdfits)[1]
          all.results[8,3] <- colMeans(do.holdfits)[3]
          rm(holdout.fit)
     }
     all.results[9,3] <- mean((binomial()$linkinv(fit.gamm4$gam$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
     # --- Method 4: vagam, unstructured A ---
     tic <- proc.time()
     fit.va1 <- vagam(y = binomial_dat$y, smooth.X = binomial_dat[,c(2:5)], para.X = data.frame(treatment = binomial_dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, family = binomial(), para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
     all.results[1,4] <- (proc.time() - tic)[3]
     all.results[2,4] <- mean((fit.va1$linear.predictors - binomial_dat$linear.predictor)^2)
     all.results[3,4] <- fit.va1$kappa[2] - true.beta[2]
     all.results[4,4] <- (fit.va1$kappa[2] - true.beta[2])^2
     make.ci <- c(fit.va1$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2],
          fit.va1$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va1$para.stat[2,2])
     all.results[5,4] <- findInterval(true.beta[2], make.ci) == 1
     all.results[6,4] <- diff(make.ci)
     holdout.fit <- function(data, holdout.points)
     {
          new.dat <- data[-holdout.points,]
          fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k, A.struct = "unstructured", save.data = TRUE, para.se = FALSE, family = binomial(), control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
          get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
          make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
          all.widths <- apply(make.ci,1,diff)
          all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
          all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
          return(cbind(all.widths, all.coverage, all.interval.score))
     }
     do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
     all.results[7,4] <- colMeans(do.holdfits)[1]
     all.results[8,4] <- colMeans(do.holdfits)[3]
     all.results[9,4] <- mean((binomial()$linkinv(fit.va1$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
     rm(holdout.fit)
     # --- Method 5: vagam, block-diagonal A ---
     tic <- proc.time()
     fit.va2 <- vagam(y = binomial_dat$y, smooth.X = binomial_dat[,2:5], para.X = data.frame(treatment = binomial_dat$treatment), int.knots = choose.k, A.struct = "block", save.data = TRUE, family = binomial(), para.se = TRUE, control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
     all.results[1,5] <- (proc.time() - tic)[3]
     all.results[2,5] <- mean((fit.va2$linear.predictors - binomial_dat$linear.predictor)^2)
     all.results[3,5] <- fit.va2$kappa[2] - true.beta[2]
     all.results[4,5] <- (fit.va2$kappa[2] - true.beta[2])^2
     make.ci <- c(fit.va2$para.stat[1,2] - qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2],
          fit.va2$para.stat[1,2] + qnorm(0.5 + CI/2) * fit.va2$para.stat[2,2])
     all.results[5,5] <- findInterval(true.beta[2], make.ci) == 1
     all.results[6,5] <- diff(make.ci)
     holdout.fit <- function(data, holdout.points)
     {
          new.dat <- data[-holdout.points,]
          fit1 <- vagam(y = new.dat$y, smooth.X = new.dat[,2:5], para.X = data.frame(treatment = new.dat$treatment), int.knots = choose.k,
               A.struct = "block", save.data = TRUE, para.se = FALSE, family = binomial(), control = list(eps = 1e-3, maxit = 1000, trace = FALSE, seed_number = t, mc.samps = 4000, pois.step.size = 0.01))
          get.pred <- predict.vagam(fit1, new.smoothX = data[holdout.points,2:5])
          make.ci <- cbind(get.pred$lower.bound, get.pred$upper.bound)
          all.widths <- apply(make.ci,1,diff)
          all.coverage <- sapply(1:length(holdout.points), function(x) findInterval(data$f[x], make.ci[x,]) != 1)
          all.interval.score <- all.widths + (2/(1 - CI))*as.numeric(all.coverage)
          return(cbind(all.widths, all.coverage, all.interval.score))
     }
     do.holdfits <- holdout.fit(data = binomial_dat, holdout.points = sel.holdout.points)
     all.results[7,5] <- colMeans(do.holdfits)[1]
     all.results[8,5] <- colMeans(do.holdfits)[3]
     all.results[9,5] <- mean((binomial()$linkinv(fit.va2$linear.predictors) - binomial()$linkinv(binomial_dat$linear.predictor))^2)
     return(all.results)
}
context("canvasXpress Web Charts - Oncoprint")
# UI tests for the four oncoprint chart examples; each delegates to
# check_ui_test() on the corresponding example function.
test_that("cXoncoprint1", {
    check_ui_test(cXoncoprint1())
})
test_that("cXoncoprint2", {
    check_ui_test(cXoncoprint2())
})
test_that("cXoncoprint3", {
    check_ui_test(cXoncoprint3())
})
test_that("cXoncoprint4", {
    # Runs only in interactive sessions; the warning documents a known
    # tooltip discrepancy (NaN vs 0) caused by underlying data differences.
    if (interactive()) {
        check_ui_test(cXoncoprint4())
        warning("The values on Tooltips are different from web.  In the R version, there are NaN values while the web has zeroes. ",
                "This is due to the differences in the underlying data")
    } else {
        message("Non-interactive oncoprint-4 test skipped")
    }
})
# S3 plot method for retention objects: draws up to three base-graphics
# plots of cohort retention, selected via `type` ("cohortsByDate",
# "cohortsByAge", "byDuration", or "all").
#
# x           : retention object with $retainPerc, $retainCount, $dates.
# percentages : plot $retainPerc (TRUE) or $retainCount (FALSE).
# omitLast    : drop the last (presumably incomplete) period.
# colors      : line colors, one per series; defaults to 1:nPeriods.
# durations   : period offsets for the "byDuration" plot.
# y, ...      : unused; present for plot() generic compatibility.
plot.retention <- function(
  x,
  type="all",
  percentages=TRUE,
  omitLast=TRUE,
  colors,
  durations,
  y,
  ...
) {
  if(percentages)
    d = x$retainPerc
  else
    d = x$retainCount
  nCohorts = nrow(d)
  nPeriods = ncol(d) - ifelse(omitLast, 1, 0)
  if(missing(colors))
    colors = 1:nPeriods
  # One line per cohort, positioned by calendar date.
  if (any(c("cohortsByDate", "all") %in% type)) {
    plot(c(1, nPeriods), range(d, na.rm=T), type="n",
         xlab="Calendar Date", ylab="", axes=F)
    axis(1, 1:nPeriods, x$dates[1:nPeriods]); axis(2); axis(4)
    title("Retention Rates of Multiple Cohorts")
    for (i in 1:nCohorts) {
      # Later cohorts have fewer observed periods.
      len = nPeriods - (i - 1)
      lines(i:nPeriods, d[i, 1:len], col=colors[i])
    }
  }
  # One line per cohort, aligned by cohort age (periods since start).
  if (any(c("cohortsByAge", "all") %in% type)) {
    plot(c(1, nPeriods), range(d, na.rm=T), type="n",
         xlab="Periods since Cohort Start", ylab="", axes=F)
    axis(1, 1:nPeriods, colnames(d)[1:nPeriods]); axis(2); axis(4)
    title("Retention Rates of Multiple Cohorts")
    for (i in 1:nCohorts) {
      len = nPeriods - (i - 1)
      lines(1:len, d[i, 1:len], col=colors[i])
    }
  }
  # One line per selected duration across cohorts.
  if (any(c("byDuration", "all") %in% type)) {
    if(missing(durations))
      durations = c(1, 2, 5, 10)
    nDurations = length(durations)
    plot(c(1, nPeriods), range(d, na.rm=T), type="n",
         xlab="Calendar Date", ylab="", axes=F)
    axis(1, 1:nPeriods, x$dates[1:nPeriods]); axis(2); axis(4)
    title("Retention for Different Durations")
    for (i in 1:nDurations) {
      # NOTE(review): suspected bug -- the loop plots column `i`
      # (1..nDurations) while the legend below labels columns
      # `durations` (e.g. 1, 2, 5, 10); d[1:len, durations[i]] was
      # probably intended.  The `len` formula also mirrors the cohort
      # loops rather than the duration offsets.  Confirm before fixing.
      len = nPeriods - (i - 1)
      lines(1:len, d[1:len, i], col=colors[i])
    }
    legend("left", legend=colnames(d)[durations],
           lty=rep(1, nDurations), col=colors[1:nDurations], bty="n")
  }
}
# Summarize significant-gene counts across a list of contrast data frames.
#
# Args:
#   contrastList  : named list of data.frames (one per contrast), each with
#                   p-value-like columns and (optionally) a logFC column.
#   columns       : candidate significance columns; those absent from the
#                   data are dropped together with their thresholds.
#   sigThresholds : per-column significance cutoffs, same length and order
#                   as columns.
#   fcThreshold   : optional fold-change ratio; when > 0 a row must also
#                   satisfy |logFC| >= log2(fcThreshold).
#
# Returns a matrix of counts: one row per contrast, one column per
# significance measure present in the data.
summarizeSigCounts <- function(contrastList,
                               columns = c("P.Value", "adj.P.Val", "Qvalue", "qvalue.lfdr", "ihw.adj_pvalue"),
                               sigThresholds = c(0.01, 0.05, 0.05, 0.05, 0.05),
                               fcThreshold = 0) {
  # Base-R replacement for assertthat::assert_that (same message, no
  # third-party dependency).
  stopifnot("Supplied sigThresholds should be same length as supplied columns." =
              length(columns) == length(sigThresholds))

  # Count rows passing each column's threshold (and the fold-change
  # filter, when requested).
  getSigCounts <- function(df, columns, thresholds, fcThreshold) {
    counts <- list()
    for (i in seq_along(columns)) {
      idx <- df[columns[i]] <= thresholds[i]
      if (fcThreshold > 0) {
        fcidx <- abs(df$logFC) >= log2(fcThreshold)
        idx <- idx & fcidx
      }
      counts[columns[i]] <- sum(idx)
    }
    unlist(counts)
  }

  # Keep only columns present in the data and subset the thresholds with
  # the SAME mask.  (Previously the mask was recomputed AFTER `columns`
  # had been filtered, so sigThresholds was never filtered and became
  # misaligned with columns whenever a requested column was absent.)
  present <- columns %in% colnames(contrastList[[1]])
  columns <- columns[present]
  sigThresholds <- sigThresholds[present]

  myrows <- list()
  for (i in seq_along(contrastList)) {
    myrows[[i]] <- getSigCounts(contrastList[[i]], columns, sigThresholds, fcThreshold)
  }

  DF <- do.call("rbind", myrows)
  rownames(DF) <- names(contrastList)
  colnames(DF) <- columns
  DF
}
# Accessor for the package-local `package_data$last_handle` (the handle
# stored by the most recent check run -- see where it is assigned).
last_check <- function() {
  package_data$last_handle
}
# Johnson SU (JSU) distribution as a gamlss.family object, in the gamlss
# mean/variance parameterisation: mu = location (mean), sigma = scale,
# nu = skewness, tau = tail-weight (tau > 0; normal limit as tau -> Inf).
# Wires up link functions, score functions (dld*), quasi-Newton information
# terms (d2ld*, approximated by minus the squared score and capped away from
# zero), the deviance increment, randomised quantile residuals and starting
# values consumed by gamlss().
#
# Shared intermediates recomputed inside every derivative closure below
# (each closure must be self-contained for gamlss, hence the duplication):
#   rtau  = 1/tau
#   w     = exp(rtau^2)   (set to 1 when rtau is tiny: the normal limit)
#   omega = -nu*rtau
#   c     = moment-matching scale factor so sigma acts as the sd
#   z     = standardised y;  r = standard-normal equivalent of y
JSU <- function (mu.link="identity", sigma.link="log", nu.link ="identity", tau.link="log")
{
# Validate each requested link against the set allowed for this family.
mstats <- checklink( "mu.link", "Johnson SU", substitute(mu.link),
c("inverse", "log", "identity", "own"))
dstats <- checklink("sigma.link", "Johnson SU", substitute(sigma.link),
c("inverse", "log", "identity", "own"))
vstats <- checklink( "nu.link", "Johnson SU", substitute(nu.link),
c("inverse", "log", "identity", "own"))
tstats <- checklink( "tau.link", "Johnson SU", substitute(tau.link),
c("inverse", "log", "identity", "own"))
# Assemble the gamlss.family structure.
structure(
list(family = c("JSU", "Johnson SU"),
parameters = list(mu=TRUE, sigma=TRUE, nu=TRUE, tau=TRUE),
nopar = 4,
type = "Continuous",
mu.link = as.character(substitute(mu.link)),
sigma.link = as.character(substitute(sigma.link)),
nu.link = as.character(substitute(nu.link)),
tau.link = as.character(substitute(tau.link)),
mu.linkfun = mstats$linkfun,
sigma.linkfun = dstats$linkfun,
nu.linkfun = vstats$linkfun,
tau.linkfun = tstats$linkfun,
mu.linkinv = mstats$linkinv,
sigma.linkinv = dstats$linkinv,
nu.linkinv = vstats$linkinv,
tau.linkinv = tstats$linkinv,
mu.dr = mstats$mu.eta,
sigma.dr = dstats$mu.eta,
nu.dr = vstats$mu.eta,
tau.dr = tstats$mu.eta,
# Score (first derivative of the log-likelihood) w.r.t. mu.
dldm = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldm <- (z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))/(c*sigma)
dldm
},
# Negative information w.r.t. mu, approximated by -score^2 and capped at
# -1e-15 so the working weights stay strictly negative.
d2ldm2 = function(y,mu,sigma,nu,tau){
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldm <- (z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))/(c*sigma)
d2ldm2 <- -dldm*dldm
d2ldm2 <- ifelse(d2ldm2 < -1e-15, d2ldm2,-1e-15)
d2ldm2
},
# Score w.r.t. sigma.
dldd = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldd <- (z+w^(0.5)*sinh(omega))*(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))
dldd <- (dldd-1)/sigma
dldd
} ,
# Negative information w.r.t. sigma (-score^2, capped).
d2ldd2 = function(y,mu,sigma,nu,tau){
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldd <- (z+w^(0.5)*sinh(omega))*(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))
dldd <- (dldd-1)/sigma
d2ldd2 <- -dldd*dldd
d2ldd2 <- ifelse(d2ldd2 < -1e-15, d2ldd2,-1e-15)
d2ldd2
},
# Score w.r.t. nu (skewness); needs the chain-rule terms dlogc/dnu, dz/dnu.
dldv = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dlogcdv <- (rtau*w*sinh(2*omega))/(w*cosh(2*omega)+1)
dzdv <- -(z+w^(.5)*sinh(omega))*dlogcdv+(rtau*w^(.5)*cosh(omega))
dldv <- -dlogcdv-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdv+r
dldv
} ,
# Negative information w.r.t. nu (-score^2, capped at -1e-4).
d2ldv2 = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dlogcdv <- (rtau*w*sinh(2*omega))/(w*cosh(2*omega)+1)
dzdv <- -(z+w^(.5)*sinh(omega))*dlogcdv+(rtau*w^(.5)*cosh(omega))
dldv <- -dlogcdv-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdv+r
d2ldv2 <- -dldv*dldv
d2ldv2 <- ifelse(d2ldv2 < -1e-4, d2ldv2,-1e-4)
d2ldv2
},
# Score w.r.t. tau; derived via rtau = 1/tau, hence the final -rtau^2 factor.
dldt = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dlogcdt <- -rtau*w*((1/(w-1))+((cosh(2*omega))/(w*cosh(2*omega)+1)))
dlogcdt <- dlogcdt+((nu*w*sinh(2*omega))/(w*cosh(2*omega)+1))
dzdt <- -(z+w^(.5)*sinh(omega))*dlogcdt-rtau*w^(.5)*sinh(omega)+nu*w^(.5)*cosh(omega)
dldt <- -dlogcdt-(1/rtau)-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdt+(r*(r+nu))/rtau
dldt <- -dldt*rtau*rtau
dldt
} ,
# Negative information w.r.t. tau (-score^2, capped at -1e-4).
d2ldt2 = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dlogcdt <- -rtau*w*((1/(w-1))+((cosh(2*omega))/(w*cosh(2*omega)+1)))
dlogcdt <- dlogcdt+((nu*w*sinh(2*omega))/(w*cosh(2*omega)+1))
dzdt <- -(z+w^(.5)*sinh(omega))*dlogcdt-rtau*w^(.5)*sinh(omega)+nu*w^(.5)*cosh(omega)
dldt <- -dlogcdt-(1/rtau)-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdt+(r*(r+nu))/rtau
dldt <- -dldt*rtau*rtau
d2ldt2 <- -dldt*dldt
d2ldt2 <- ifelse(d2ldt2 < -1e-4, d2ldt2,-1e-4)
d2ldt2
} ,
# Cross-derivative terms below are likewise approximated by minus the
# product of the two corresponding scores.
d2ldmdd = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldm <- (z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))/(c*sigma)
dldd <- (z+w^(0.5)*sinh(omega))*(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))
dldd <- (dldd-1)/sigma
d2ldmdd <- -(dldm*dldd)
d2ldmdd
},
d2ldmdv = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldm <- (z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))/(c*sigma)
dlogcdv <- (rtau*w*sinh(2*omega))/(w*cosh(2*omega)+1)
dzdv <- -(z+w^(.5)*sinh(omega))*dlogcdv+(rtau*w^(.5)*cosh(omega))
dldv <- -dlogcdv-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdv+r
d2ldmdv <- -(dldm*dldv)
d2ldmdv
},
d2ldmdt = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldm <- (z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))/(c*sigma)
dlogcdt <- -rtau*w*((1/(w-1))+((cosh(2*omega))/(w*cosh(2*omega)+1)))
dlogcdt <- dlogcdt+((nu*w*sinh(2*omega))/(w*cosh(2*omega)+1))
dzdt <- -(z+w^(.5)*sinh(omega))*dlogcdt-rtau*w^(.5)*sinh(omega)+nu*w^(.5)*cosh(omega)
dldt <- -dlogcdt-(1/rtau)-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdt+(r*(r+nu))/rtau
dldt <- -dldt*rtau*rtau
d2ldmdt <- -(dldm*dldt)
d2ldmdt
},
d2ldddv = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldd <- (z+w^(0.5)*sinh(omega))*(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))
dldd <- (dldd-1)/sigma
dlogcdv <- (rtau*w*sinh(2*omega))/(w*cosh(2*omega)+1)
dzdv <- -(z+w^(.5)*sinh(omega))*dlogcdv+(rtau*w^(.5)*cosh(omega))
dldv <- -dlogcdv-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdv+r
d2ldddv <- -(dldd*dldv)
d2ldddv
},
d2ldddt = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dldd <- (z+w^(0.5)*sinh(omega))*(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))
dldd <- (dldd-1)/sigma
dlogcdt <- -rtau*w*((1/(w-1))+((cosh(2*omega))/(w*cosh(2*omega)+1)))
dlogcdt <- dlogcdt+((nu*w*sinh(2*omega))/(w*cosh(2*omega)+1))
dzdt <- -(z+w^(.5)*sinh(omega))*dlogcdt-rtau*w^(.5)*sinh(omega)+nu*w^(.5)*cosh(omega)
dldt <- -dlogcdt-(1/rtau)-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdt+(r*(r+nu))/rtau
dldt <- -dldt*rtau*rtau
d2ldddt <- -(dldd*dldt)
d2ldddt
},
d2ldvdt = function(y,mu,sigma,nu,tau) {
rtau <- 1/tau
w <- ifelse(rtau<0.0000001,1,exp(rtau^2))
omega <- -nu*rtau
c <- (.5*(w-1)*(w*cosh(2*omega)+1))^(-0.5)
z <- (y-(mu+c*sigma*w^(.5)*sinh(omega)))/(c*sigma)
r <- -nu + asinh(z)/rtau
dlogcdv <- (rtau*w*sinh(2*omega))/(w*cosh(2*omega)+1)
dzdv <- -(z+w^(.5)*sinh(omega))*dlogcdv+(rtau*w^(.5)*cosh(omega))
dldv <- -dlogcdv-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdv+r
dlogcdt <- -rtau*w*((1/(w-1))+((cosh(2*omega))/(w*cosh(2*omega)+1)))
dlogcdt <- dlogcdt+((nu*w*sinh(2*omega))/(w*cosh(2*omega)+1))
dzdt <- -(z+w^(.5)*sinh(omega))*dlogcdt-rtau*w^(.5)*sinh(omega)+nu*w^(.5)*cosh(omega)
dldt <- -dlogcdt-(1/rtau)-(z/(z*z+1)+(r/(rtau*(z*z+1)^(.5))))*dzdt+(r*(r+nu))/rtau
dldt <- -dldt*rtau*rtau
d2ldvdt <- -(dldv*dldt)
d2ldvdt
},
# Global deviance increment: -2 * log-likelihood per observation.
G.dev.incr = function(y,mu,sigma,nu,tau,...)
{
-2*dJSU(y,mu,sigma,nu,tau,log=TRUE)
} ,
# Randomised quantile residuals via the JSU CDF.
rqres = expression(
rqres(pfun="pJSU", type="Continuous", y=y, mu=mu, sigma=sigma, nu=nu, tau=tau)) ,
# Starting values: shrink y towards its mean; flat sigma/nu/tau starts.
mu.initial = expression(mu <- (y+mean(y))/2),
sigma.initial = expression(sigma<- rep(sd(y)/4, length(y))),
nu.initial = expression(nu <- rep(0, length(y))),
tau.initial = expression(tau <-rep(1, length(y))),
# Parameter-space checks used by the fitting algorithm.
mu.valid = function(mu) TRUE,
sigma.valid = function(sigma) all(sigma > 0),
nu.valid = function(nu) TRUE ,
tau.valid = function(tau) all(tau > 0),
y.valid = function(y) TRUE,
# By construction of this reparameterisation, mu and sigma^2 are the
# distribution's mean and variance.
mean = function(mu, sigma, nu, tau) mu,
variance = function(mu, sigma, nu, tau) sigma^2
),
class = c("gamlss.family","family"))
}
dJSU <- function(x, mu = 0, sigma = 1, nu = 1, tau = 1, log = FALSE)
{
  # Density of the Johnson SU distribution in the gamlss mean/variance
  # parameterisation (mu = location, sigma = scale, nu = skewness,
  # tau = tail-weight; normal limit as tau -> Inf).
  #
  # Args:
  #   x: quantiles (vectorised).
  #   mu, sigma: location and scale; sigma must be strictly positive.
  #   nu, tau: shape parameters; tau must be strictly positive.
  #   log: if TRUE, return the log-density.
  #
  # Returns: (log-)density values, same length as x after recycling.
  #
  # BUGFIX: the guards previously tested `sigma < 0` / `tau < 0`, so
  # sigma == 0 or tau == 0 slipped through and produced NaN/Inf even though
  # the message says "must be positive" (and rJSU already rejects
  # sigma <= 0). Non-positive values are now rejected outright.
  if (any(sigma <= 0)) stop(paste("sigma must be positive", "\n", ""))
  if (any(tau <= 0)) stop(paste("tau must be positive", "\n", ""))
  rtau <- 1/tau
  # w -> 1 as tau -> Inf; the tiny-rtau guard avoids a degenerate exp() in
  # the normalising constant below.
  if (length(tau) > 1) {
    w <- ifelse(rtau < 0.0000001, 1, exp(rtau^2))
  } else {
    w <- if (rtau < 0.0000001) 1 else exp(rtau^2)
  }
  omega <- -nu * rtau
  # Moment-matching scale factor so that sigma acts as the sd.
  c <- (.5 * (w - 1) * (w * cosh(2 * omega) + 1))^(-0.5)
  z <- (x - (mu + c * sigma * w^(.5) * sinh(omega))) / (c * sigma)
  r <- -nu + asinh(z) / rtau
  # log f(x) = log(tau) - log(c*sigma) - 0.5*log(z^2+1) + log(dnorm(r)),
  # written out explicitly (note -log(rtau) == log(tau)).
  loglik <- -log(sigma) - log(c) - log(rtau) - .5 * log(z * z + 1) -
    .5 * log(2 * pi) - .5 * r * r
  if (log) loglik else exp(loglik)
}
pJSU <- function(q, mu = 0, sigma = 1, nu = 1, tau = 1, lower.tail = TRUE, log.p = FALSE)
{
  # Cumulative distribution function of the Johnson SU distribution: map q
  # onto its standard-normal equivalent r and evaluate the normal CDF (pNO).
  if (any(sigma < 0)) stop(paste("sigma must be positive", "\n", ""))
  if (any(tau < 0)) stop(paste("tau must be positive", "\n", ""))
  rtau <- 1/tau
  # Normal limit: w collapses to 1 when rtau is effectively zero.
  w <- ifelse(rtau < 0.0000001, 1, exp(rtau^2))
  omega <- -nu * rtau
  # Moment-matching scale factor so sigma acts as the sd.
  cc <- (0.5 * (w - 1) * (w * cosh(2 * omega) + 1))^(-0.5)
  z <- (q - (mu + cc * sigma * sqrt(w) * sinh(omega))) / (cc * sigma)
  r <- asinh(z) / rtau - nu
  cdf <- pNO(r, 0, 1)
  if (!lower.tail) cdf <- 1 - cdf
  if (log.p) cdf <- log(cdf)
  cdf
}
qJSU <- function(p, mu=0, sigma=1, nu=1, tau=1, lower.tail = TRUE, log.p = FALSE)
{
  # Quantile function of the Johnson SU distribution: the exact inverse of
  # pJSU, sending p through the standard-normal quantile (qNO) and back
  # through the sinh transform.
  if (any(sigma < 0)) stop(paste("sigma must be positive", "\n", ""))
  if (any(tau < 0)) stop(paste("tau must be positive", "\n", ""))
  if (log.p) p <- exp(p)
  if (any(p <= 0) | any(p >= 1)) stop(paste("p must be between 0 and 1", "\n", ""))
  if (!lower.tail) p <- 1 - p
  rtau <- 1/tau
  zn <- qNO(p, 0, 1)
  sinhTerm <- sinh(rtau * (zn + nu))
  w <- ifelse(rtau < 0.0000001, 1, exp(rtau^2))
  omega <- -nu * rtau
  cc <- (0.5 * (w - 1) * (w * cosh(2 * omega) + 1))^(-0.5)
  (mu + cc * sigma * sqrt(w) * sinh(omega)) + cc * sigma * sinhTerm
}
rJSU <- function(n, mu=0, sigma=1, nu=1, tau=1)
{
  # Random generation from the Johnson SU distribution by inversion
  # sampling: uniform draws pushed through the quantile function.
  if (any(sigma <= 0)) stop(paste("sigma must be positive", "\n", ""))
  qJSU(runif(ceiling(n)), mu = mu, sigma = sigma, nu = nu, tau = tau,
       lower.tail = TRUE, log.p = FALSE)
}
Hitem <- function(X,index=NULL,EO,O){
  # Item-level scalability coefficients H_j (Mokken-style), defined as
  # 1 - sum(observed errors) / sum(expected errors) over all item triples
  # that contain the item of interest.
  #
  # Args:
  #   X: data matrix; its column names identify the items.
  #   index: optional item name. When NULL, H_j is computed for every item.
  #   EO, O: expected and observed error counts, indexable by the triples
  #     produced by gtools::combinations().
  #
  # Returns: numeric vector of H_j values (one per item, or one for index).
  items <- colnames(X)
  nItems <- length(items)
  # All unordered item triples, one per row of a character matrix.
  triples <- combinations(n = nItems, r = 3, v = items, set = FALSE, repeats.allowed = FALSE)
  # H_j for one target item: restrict to triples mentioning it.
  hjOne <- function(target) {
    rows <- which(apply(triples, 1, function(trip) target %in% trip))
    sel <- triples[rows, ]
    1 - sum(O[sel]) / sum(EO[sel])
  }
  if (is.null(index)) {
    Hj <- vapply(items, hjOne, numeric(1), USE.NAMES = FALSE)
  } else {
    Hj <- hjOne(index)
  }
  return(Hj)
}
# Factory returning the closures needed to fit a bounded/constrained Poisson
# regression by penalised maximum likelihood: starting values, bound
# preparation, the (penalised) negative log-likelihood, fitted/predict
# helpers, a variance-covariance estimate and an error metric.
#
# NOTE(review): `theta`, `components` and `residuals` are accepted but not
# used by the factory itself, and `constraints = constraints` is a
# self-referential default (a promise that errors if forced without the
# caller supplying it) -- the inner `objective` has its own
# `constraints = NULL` default, so these look like leftover plumbing;
# confirm against the callers before tidying.
poisson.objective = function(theta, x, y, lower,
components, constraints = constraints,
upper, penalty, residuals){
# Starting coefficients from an unpenalised Poisson GLM fit; any coefficient
# outside [lower, upper] is replaced by a uniform draw within the bounds.
ini_vars <- function(x,y,upper, lower){
coef_reg = stats::coef(stats::glm.fit(x = x, y = y, family = stats::poisson()))
coef_reg <- ifelse(data.table::between(coef_reg,
upper = upper,
lower = lower), coef_reg,
stats::runif(1, min = lower, max = upper))
names(coef_reg) = c(colnames(x))
return(coef_reg)
}
# Normalise upper/lower bounds to vectors of length ncol(x): NULL becomes
# +/-Inf, scalars are recycled, short vectors are padded, long ones trimmed.
prepareBounds <- function(upper, lower, x){
total_params = ncol(x)
if(is.null(upper)) upper = rep(Inf, total_params)
if(length(upper) == 1) upper = rep(upper, total_params)
if(length(upper) != total_params)
upper = c(upper, rep(Inf,total_params-length(upper)))
if(is.null(lower))lower = rep(-Inf, total_params)
if(length(lower) == 1)lower = rep(lower, total_params)
if(length(lower) != total_params)
lower = c(lower, rep(-Inf,total_params-length(lower)))
lower = lower[1:total_params]
upper = upper[1:total_params]
return(list(upper = upper, lower = lower))
}
# Penalised negative Poisson log-likelihood. `constraints` is a single
# comma-separated string of R expressions over the named coefficients; any
# violated expression (or bound violation) adds `penalty` to the objective.
objective = function(theta, x, y, LOWER, UPPER, penalty, constraints = NULL,
nms){
names(theta) = nms
if(is.null(constraints)){
z = 0
}else{
text_split = strsplit(constraints, ',')[[1]]
TH <- data.table::setDT(as.list(theta))
z = sapply(text_split, function(cond){
with(TH, !eval(rlang::parse_expr(cond)))
})
z = any(z) * 1
}
k <- ncol(x)
beta <- theta[1:k]
expected_y <- exp(x %*% beta)
N = nrow(x)
LL <- - sum(stats::dpois(y, lambda = expected_y, log = T)) +
any(beta > UPPER[1:k]) * penalty +
any(beta < LOWER[1:k]) * penalty + z * penalty
return(LL)
}
# No post-processing of the optimised coefficients for this family.
final_coef <- function(coef){
return(coef)
}
# In-sample fitted values; log-link, so type = 'response' exponentiates the
# linear predictor. With components = TRUE, per-regressor contributions are
# appended as extra columns next to the 'Total'.
fitted = function(coef, x, components = F, type = c('response')){
expected_y <- x %*% coef
if(components){
MatCoef = diag(coef)
colnames(MatCoef) = names(coef)
com = x %*% MatCoef
expected_y = cbind(expected_y, com)
colnames(expected_y)[1] = 'Total'
}
if(type == 'response'){
expected_y = exp(expected_y)
return(expected_y)
}
if(type == 'link'){
return(expected_y)
}
}
# Out-of-sample predictions on a new design matrix; mirrors fitted().
predict = function(coef, newxreg, components = F,
type = c('response')){
expected_y <- newxreg %*% coef
if(components){
MatCoef = diag(coef)
colnames(MatCoef) = names(coef)
com = newxreg %*% MatCoef
expected_y = cbind(expected_y, com)
colnames(expected_y)[1] = 'Total'
}
if(type == 'response'){
expected_y = exp(expected_y)
}
if(type == 'link'){
return(expected_y)
}
return(expected_y)
}
# Fisher-information-based covariance (X' W X)^-1 with W = diag(fitted).
# NOTE(review): the result is returned invisibly via the final assignment;
# an explicit return(XtX_inv) would be clearer -- behaviour is the same.
vcov = function(coef, x, fitted, data, residuals){
w <- as.numeric(fitted)
v <- diag(w, length(w), length(w))
XtX_inv <- solve(t(x) %*% v %*% x)
}
# Goodness-of-fit metric reported for the fitted model.
error_fun <- function(y, fitted){
data.frame(
LogLik = sum(stats::dpois(y, lambda = fitted, log = T))
)
}
return(list(objective = objective,
ini_vars = ini_vars,
prepareBounds = prepareBounds,
final_coef = final_coef,
fitted = fitted,
predict = predict,
error_fun = error_fun,
vcov = vcov))
}
# NOTE(review): this chunk appears to be R code extracted (purl'd) from the
# madrat fingerprinting vignette with every '#' character stripped by the
# text extraction: `comment = "` on the knitr line and the bare `"!` lines
# were presumably `comment = "#>"` and `"#!..."` markers originally. The
# resulting unbalanced quote characters make the chunk unparseable as-is;
# it is preserved verbatim below and should be restored from the original
# vignette source before any use.
knitr::opts_chunk$set(collapse = TRUE, comment = "
library(madrat, quietly = TRUE)
getConfig("cachefolder", verbose = FALSE)
getDependencies("calcTauTotal")
setConfig(verbosity=3)
fp <- madrat:::fingerprint("calcTauTotal", details = TRUE)
setConfig(globalenv = TRUE)
readData <- function() return(1)
readData2 <- function() return(2)
calcExample <- function() {
a <- readSource("Data")
}
calcExample2 <- function() {
a <- readSource("Data")
if(FALSE) b <- readSource("Data2")
}
fp <- madrat:::fingerprint("calcExample", details = TRUE)
fp2 <- madrat:::fingerprint("calcExample2", details = TRUE)
readData <- function() return(99)
fp <- madrat:::fingerprint("calcExample", details = TRUE)
fp2 <- madrat:::fingerprint("calcExample2", details = TRUE)
readData2 <- function() {
"!
return(99)
}
fp <- madrat:::fingerprint("calcExample", details = TRUE)
fp2 <- madrat:::fingerprint("calcExample2", details = TRUE)
calcExample2 <- function() {
"!
a <- readSource("Data")
if(FALSE) b <- readSource("Data2")
}
calcExample3 <- function() {
a <- calcOutput("Example2")
}
fp2 <- madrat:::fingerprint("calcExample2", details = TRUE)
fp3 <- madrat:::fingerprint("calcExample3", details = TRUE)
[
{
"title": "Rose plot using Deducers ggplot2 plot builder",
"href": "https://www.r-statistics.com/2010/08/rose-plot-using-deducers-ggplot2-plot-builder/"
},
{
"title": "Updated climate projections by cities",
"href": "https://blog.snap.uaf.edu/2015/04/02/updated-climate-projections-by-cities/"
},
{
"title": "Building a custom database of country time-series data using Quandl",
"href": "http://stevepowell.blot.im/building-a-custom-database-using-quandl/"
},
{
"title": "Universal portfolio, part 10",
"href": "http://optimallog.blogspot.com/2012/08/universal-portfolio-part-10.html"
},
{
"title": "HOW TO: Package vignettes in plain LaTeX",
"href": "http://www.jottr.org/2015/02/latex-vignettes.html"
},
{
"title": "Maximum likelihood estimates for multivariate distributions",
"href": "http://blog.free.fr/"
},
{
"title": "A probability exercise on the Bernoulli distribution",
"href": "http://statistic-on-air.blogspot.com/2009/07/probability-exercise-on-bernoulli.html"
},
{
"title": "Variance matrix differences",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/DZHwlFLXyKo/"
},
{
"title": "Optimizing a multivariable function parameters using a random method, genetic algorithm and simulated annealing in R",
"href": "http://tata-box-blog.blogspot.com/2013/10/optimizing-multivariable-function.html"
},
{
"title": "Package intergraph goes 2.0",
"href": "http://bc.bojanorama.pl/2013/05/package-intergraph-goes-2-0/"
},
{
"title": "Hadley Wickham goes behind the scenes on ggplot2",
"href": "http://blog.revolutionanalytics.com/2012/01/hadley-wickham-goes-behind-the-scenes-on-ggplot2.html"
},
{
"title": "Multiseat setup via Userful",
"href": "http://dirk.eddelbuettel.com/blog/2009/01/03/"
},
{
"title": "Part 2: Make your R figures legible in Powerpoint/Keynote presentations",
"href": "http://lukemiller.org/index.php/2014/01/part-2-make-your-r-figures-legible-in-powerpointkeynote-presentations/"
},
{
"title": "Practical Implementation of Neural Network based time series (stock) prediction – PART 1",
"href": "http://intelligenttradingtech.blogspot.com/2010/01/systems.html"
},
{
"title": "RMarkdown and Metropolis/Mtheme",
"href": "http://dirk.eddelbuettel.com/blog/2016/06/30/"
},
{
"title": "For those interested in knitr with Rmarkdown to beamer slides",
"href": "https://web.archive.org/web/http://r.andrewredd.us/?p=63"
},
{
"title": "Geolocate IP addresses in R",
"href": "https://heuristically.wordpress.com/2013/05/20/geolocate-ip-addresses-in-r/"
},
{
"title": "Extended (Simple) ASN Graph Visualization Example [R to D3]",
"href": "http://rud.is/b/2013/02/08/extended-simple-example-asn-graph-visualization-example-r-to-d3/"
},
{
"title": "How to Remove State Abbreviations from a Choroplethr Map",
"href": "http://www.arilamstein.com/blog/2015/08/20/how-to-remove-state-abbreviations-from-a-choroplethr-map/"
},
{
"title": "Market predictions for years 2011 and 2012",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/uDaLFCWTqYk/"
},
{
"title": "the vim cheat sheet",
"href": "https://xianblog.wordpress.com/2015/03/18/the-vim-cheat-sheet/"
},
{
"title": "Exporting data from R into WinBUGS’s “R/S-plus list format”",
"href": "https://robertgrantstats.wordpress.com/2012/11/27/exporting-data-from-r-into-winbugss-rs-plus-list-format/"
},
{
"title": "How to show explained variance in a multilevel model",
"href": "http://ww1.danielmarcelino.com/how-to-show-explained-variance-in-a-multilevel-model/"
},
{
"title": "R package for Computational Actuarial Science",
"href": "http://freakonometrics.hypotheses.org/17074"
},
{
"title": "Alternate R Markdown Templates",
"href": "http://rud.is/b/2016/02/04/alternate-r-markdown-templates/"
},
{
"title": "Presenting your findings with R",
"href": "https://grollchristian.wordpress.com/2013/09/07/presentations-in-r/"
},
{
"title": "Thoughts on the Ljung-Box test",
"href": "http://robjhyndman.com/hyndsight/ljung-box-test/"
},
{
"title": "Special issue of TOMACS",
"href": "https://xianblog.wordpress.com/2011/03/10/special-issue-of-tomacs/"
},
{
"title": "How UpStream uses R for Attribution Analysis",
"href": "http://blog.revolutionanalytics.com/2013/04/upstream-attribution-analysis.html"
},
{
"title": "New Revolution Analytics office in Singapore",
"href": "http://blog.revolutionanalytics.com/2012/08/new-revolution-analytics-office-in-singapore.html"
},
{
"title": "Rによるモンテカルロ法入門",
"href": "https://xianblog.wordpress.com/2013/05/14/%EF%BD%92%E3%81%AB%E3%82%88%E3%82%8B%E3%83%A2%E3%83%B3%E3%83%86%E3%82%AB%E3%83%AB%E3%83%AD%E6%B3%95%E5%85%A5%E9%96%80/"
},
{
"title": "Programming languages, ranked by popularity",
"href": "http://blog.revolutionanalytics.com/2010/12/programming-languages-ranked-by-popularity.html"
},
{
"title": "Extending accessibility of open-source statistical software to the masses A shiny case study",
"href": "http://educate-r.org//2016/10/07/canam.html"
},
{
"title": "Example 7.36: Propensity score stratification",
"href": "https://feedproxy.google.com/~r/SASandR/~3/6ZjuluboPF4/example-736-propensity-score.html"
},
{
"title": "dvn – Sharing Reproducible Research from R",
"href": "http://ropensci.org/blog/2014/02/20/dvn-dataverse-network/"
},
{
"title": "useR 2010 at NIST in Gaithersburg",
"href": "http://dirk.eddelbuettel.com/blog/2010/07/24/"
},
{
"title": "Slouching towards simulating investment skill",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/-Ske3KEykkc/"
},
{
"title": "Documentation and tutorial roundup",
"href": "https://web.archive.org/web/https://dataninja.wordpress.com/2007/06/04/documentation-and-tutorial-roundup/"
},
{
"title": "Streamline your analyses linking R to SAS: the workfloweR experiment",
"href": "http://datascienceplus.com/streamline-your-analyses-linking-r-to-sas/"
},
{
"title": "Migrating from SPSS/Excel to R, Part 3: Preparing your Data",
"href": "https://psychwire.wordpress.com/2011/10/29/migrating-from-spssexcel-to-r-part-3-preparing-your-data/"
},
{
"title": "cricketr adapts to the Twenty20 International!",
"href": "https://gigadom.wordpress.com/2015/08/08/cricketr-adapts-to-the-twenty20-international/"
},
{
"title": "sample(): “Monkey’s Paw” style programming in R",
"href": "http://www.win-vector.com/blog/2016/03/sample-monkeys-paw-style-programming-in-r/"
},
{
"title": "Optimizing My R Code",
"href": "https://web.archive.org/web/http://blog.quanttrader.org/2011/04/optimizing-my-r-code/"
},
{
"title": "An Integrated Shiny App for a Course on Repeated Measurements Analysis",
"href": "http://iprogn.blogspot.com/2015/10/an-integrated-shiny-app-for-course-on.html"
},
{
"title": "Determine your Fitbit stride length using a GPS watch",
"href": "http://blog.tafkas.net/2014/07/15/determine-your-fitbit-stride-length-using-a-gps-watch/"
},
{
"title": "Stan 1.2.0 and RStan 1.2.0",
"href": "http://andrewgelman.com/2013/03/06/stan-1-2-0-and-rstan-1-2-0/"
},
{
"title": "Six of One (Plot), Half-Dozen of the Other",
"href": "http://badhessian.org/2014/07/six-of-one-plot-half-dozen-of-the-other/"
},
{
"title": "bayesboot: An R package for doing the Bayesian bootstrap",
"href": "http://www.sumsar.net/blog/2016/02/bayesboot-an-r-package/"
},
{
"title": "Visualising a Classification in High Dimension, part 2",
"href": "http://freakonometrics.hypotheses.org/19536"
},
{
"title": "R Markdown v0.9.5",
"href": "https://blog.rstudio.org/2016/03/21/rmarkdown-v0-9-5/"
}
] |
# --- Script setup -----------------------------------------------------------
# NOTE(review): cat("\014") clears the RStudio console, rm(list = ls()) wipes
# the global environment, and setwd() hard-codes a personal path; these make
# the script non-portable and unsafe to source from other code. Left as-is
# because downstream code relies on this working directory.
cat("\014")
rm(list = ls())
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))
library(MASS)
library(tidyr)
library(dplyr)
# Simulation controls: 10k Monte Carlo paths, $1M starting capital, 40 years.
n_simulations <- 10000
initial_client_capital <- 1 * 10^6
n_years <- 40
set.seed(12345)
# Market assumptions: annual mean return and volatility.
mu_market <- 0.1
sd_market <- 0.2
# Simulate n_years of hedge-fund vs. passive-market wealth for n_simulations
# paths and return the share of paths where the hedge fund ends ahead net of
# fees. Reads the globals n_simulations, n_years, initial_client_capital,
# mu_market and sd_market from the enclosing script.
#
# Args:
#   hf_outperformance: hedge fund's gross annual excess return over the market.
#   hf_management_fee, hf_performance_fee: annual fee rates.
#   hf_performance_above_benchmark: 1 = performance fee applies only to
#     return above the market benchmark; 0 = to any positive fund return.
#   management_and_performance_fee: 1 = charge both fees ("2-and-20" style);
#     0 = charge only the larger of the two fees.
#   hf_deduct_fees: 1 = in years with no performance fee, credit the
#     management fee back against the following year's fee.
#   market_management_fee: annual fee on the passive market portfolio.
#   hf_corr_to_market: correlation between fund and market annual returns.
run_sim <- function(hf_outperformance,
hf_management_fee,
hf_performance_fee,
hf_performance_above_benchmark,
management_and_performance_fee,
hf_deduct_fees,
market_management_fee,
hf_corr_to_market
){
mu_hf <- mu_market + hf_outperformance
sd_hf <- sd_market
rho <- hf_corr_to_market
# 2x2 covariance matrix for (market, hedge fund) annual returns.
cor_matrix <- matrix(c(1, rho,
rho, 1),
ncol = 2, byrow = TRUE)
var_matrix <- c(sd_market, sd_hf) %*% t(c(sd_market, sd_hf))
cov_matrix <- var_matrix * cor_matrix
# Wealth paths: one row per simulation, one column per year.
client_hf_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years)
client_market_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years)
for (i in 1:n_years){
# Correlated annual returns for all simulations at once; empirical = TRUE
# forces the sample moments to match mu/Sigma exactly.
ret <- mvrnorm(n_simulations, mu = c(mu_market, mu_hf),
Sigma = cov_matrix,
empirical = TRUE)
if (hf_performance_above_benchmark == 1){
ret_above_benchmark <- pmax(0, ret[, 2] - ret[, 1])
} else {
ret_above_benchmark <- pmax(0, ret[, 2])
}
if (i == 1){
# Year 1: fees are charged on the initial capital.
client_market_matrix[, i] <- initial_client_capital * (1 + ret[, 1]) * (1 - market_management_fee)
management_fee <- initial_client_capital * hf_management_fee
if (management_and_performance_fee == 0){
performance_fee <- initial_client_capital * ret_above_benchmark * hf_performance_fee
hf_final_fee <- pmax(management_fee, performance_fee)
} else {
performance_fee <- pmax(0, ((initial_client_capital * ret_above_benchmark - management_fee) * hf_performance_fee))
hf_final_fee <- management_fee + performance_fee
}
client_hf_matrix[, i] <- (initial_client_capital * (1 + ret[, 2])) - hf_final_fee
# Optionally credit the management fee back next year when no
# performance fee was earned this year.
if (hf_deduct_fees == 1){
fee_deduction <- sapply(performance_fee, function(x){ ifelse(x == 0, management_fee, 0)})
}else {
fee_deduction <- 0
}
} else {
# Later years: fees are charged on the prior year's ending capital.
client_market_matrix[, i] <- client_market_matrix[, (i - 1)] * (1 + ret[, 1]) * (1 - market_management_fee)
management_fee <- client_hf_matrix[, (i - 1)] * hf_management_fee
if (management_and_performance_fee == 0){
performance_fee <- client_hf_matrix[, (i - 1)] * ret_above_benchmark * hf_performance_fee
hf_final_fee <- pmax(0, pmax(management_fee, performance_fee) - fee_deduction)
} else {
performance_fee <- pmax(0, ((client_hf_matrix[, (i - 1)] * ret_above_benchmark - management_fee) * hf_performance_fee))
hf_final_fee <- management_fee + performance_fee
}
client_hf_matrix[, i] <- (client_hf_matrix[, (i - 1)] * (1 + ret[, 2])) - hf_final_fee
# NOTE(review): the fee_deduction computed in the final year is never
# consumed; harmless but dead work.
if (hf_deduct_fees == 1){
fee_deduction <- sapply(performance_fee, function(x){ ifelse(x == 0, management_fee, 0)})
}else {
fee_deduction <- 0
}
}
}
# Report and return the fraction of paths where the fund beats the market.
print(paste0("The hedge fund outperformed the market (net of fees) in ", sum(client_hf_matrix[, n_years] > client_market_matrix[, n_years])/n_simulations * 100, "% of simulations"))
return(sum(client_hf_matrix[, n_years] > client_market_matrix[, n_years])/n_simulations)
}
# Accumulator: one row per (outperformance, correlation, fee scenario) combo,
# recording the inputs and the resulting outperformance probability.
results <- data.frame(hf_outperformance = numeric(),
hf_management_fee = numeric(),
hf_performance_fee = numeric(),
hf_performance_above_benchmark = integer(),
management_and_performance_fee = integer(),
hf_deduct_fees = integer(),
market_management_fee = numeric(),
hf_corr_to_market = numeric(),
hf_outperform_pct = numeric(),
scenario = integer(),
mu_market = numeric(),
sd_market = numeric()
)
i <- 1
# Sweep gross outperformance (0-4%), fund/market correlation, and three fee
# structures, running the simulation for each combination.
for (o in seq(0, 0.04, by = 0.01)){
for (c in c(0, 0.5, 0.9)){
for (scenario in seq(1, 3, by = 1)){
if (scenario == 1){
# Scenario 1: 1% mgmt OR 30% performance fee above benchmark,
# whichever is larger, with fee credit in no-performance years.
mf <- 0.01
pf <- 0.3
pab <- 1
map <- 0
df <- 1
} else if (scenario == 2){
# Scenario 2: classic 2-and-20 (both fees, performance on any gain).
mf <- 0.02
pf <- 0.2
pab <- 0
map <- 1
df <- 0
} else if (scenario == 3){
# Scenario 3: flat 1% management fee only.
mf <- 0.01
pf <- 0.0
pab <- 0
map <- 1
df <- 0
}
results[i, "hf_outperformance"] <- o
results[i, "hf_management_fee"] <- mf
results[i, "hf_performance_fee"] <- pf
results[i, "hf_performance_above_benchmark"] <- pab
results[i, "management_and_performance_fee"] <- map
results[i, "hf_deduct_fees"] <- df
results[i, "market_management_fee"] <- 0.0005
results[i, "hf_corr_to_market"] <- c
results[i, "scenario"] <- scenario
results[i, "mu_market"] <- mu_market
results[i, "sd_market"] <- sd_market
results[i, "hf_outperform_pct"] <- run_sim(
hf_outperformance = o,
hf_management_fee = mf,
hf_performance_fee = pf,
hf_performance_above_benchmark = pab,
management_and_performance_fee = map,
hf_deduct_fees = df,
market_management_fee = 0.0005,
hf_corr_to_market = c
)
i <- i + 1
print(i)
}
}
}
# Persist the full sweep for downstream plotting/analysis.
saveRDS(results, paste0(localdir, "13-hf-correlation-results.Rds"))
annotatemsbatch <- function(msbatch,
                            ppm_precursor = 5,
                            ppm_products = 10,
                            rttol = 5,
                            coelCutoff = 0.8,
                            lipidClassesPos = c("MG", "LPC", "LPE", "PC", "PCo",
                                                "PCp", "PE", "PEo", "PEp", "PG",
                                                "Sph", "SphP", "Cer", "CerP", "AcylCer",
                                                "SM", "Carnitines", "CE", "DG", "TG"),
                            lipidClassesNeg = c("FA", "FAHFA", "LPC", "LPE",
                                                "LPG", "LPI", "LPS", "PC", "PCo",
                                                "PCp", "PE", "PEo", "PEp", "PG",
                                                "PI", "PS", "Sph", "SphP",
                                                "Cer", "CerP", "AcylCer", "CL", "BA"),
                            dbs,
                            simplifyAnnotations = FALSE,
                            parallel = FALSE,
                            ncores){
  # Run lipid annotation (idPOS/idNEG) on every DIA/DDA msobject of an
  # msbatch, optionally in parallel, then merge the per-sample results.
  #
  # Args:
  #   msbatch: batch object holding msobjects plus acquisition metadata.
  #   ppm_precursor, ppm_products: m/z tolerances (ppm) for precursor and
  #     product ions.
  #   rttol: retention-time tolerance passed to the annotators.
  #   coelCutoff: minimum coelution score between precursor and fragments.
  #   lipidClassesPos, lipidClassesNeg: lipid classes searched in positive
  #     and negative ionisation mode, respectively.
  #   dbs: database list; defaults to assignDB() when missing.
  #   simplifyAnnotations: forwarded to joinAnnotationResults().
  #   parallel, ncores: annotate samples in parallel over ncores workers.
  #
  # Returns: the msbatch with annotated msobjects and joined annotations.
  #
  # BUGFIX: ppm_precursor, ppm_products, rttol and coelCutoff were accepted
  # but silently ignored -- the idPOS()/idNEG() calls hard-coded 5/10/5/0.8.
  # They are now forwarded; backward-compatible because the defaults equal
  # the previously hard-coded values.
  if (parallel){
    if (missing(ncores)){
      stop("ncores argument is required if parallel is TRUE")
    }
    if (ncores > parallel::detectCores()){
      ncores <- parallel::detectCores() - 1
      # (typo fix: "availables" -> "available")
      message("ncores is greater than available cores. ", ncores, " will be used.")
    }
  }
  if (missing(dbs)){
    dbs <- assignDB()
  }
  # Only DIA/DDA acquisitions carry the MS2 data needed for annotation.
  toannotate <- which(msbatch$metaData$acquisitionmode %in% c("DIA", "DDA"))
  if (parallel) {
    cl <- makePSOCKcluster(ncores)
    doParallel::registerDoParallel(cl)
    `%d%` <- `%dopar%`
  } else {
    `%d%` <- `%do%`
  }
  x <- c()  # placate R CMD check about the foreach iterator variable
  msbatch$msobjects[toannotate] <- foreach::foreach(x = 1:length(toannotate)) %d% {
    if (msbatch$msobjects[[toannotate[x]]]$metaData$generalMetadata$polarity == "positive"){
      idPOS(msobject = msbatch$msobjects[[toannotate[x]]],
            ppm_precursor = ppm_precursor,
            ppm_products = ppm_products,
            rttol = rttol,
            coelCutoff = coelCutoff,
            lipidClasses = lipidClassesPos,
            dbs = dbs)
    } else if (msbatch$msobjects[[toannotate[x]]]$metaData$generalMetadata$polarity == "negative"){
      idNEG(msobject = msbatch$msobjects[[toannotate[x]]],
            ppm_precursor = ppm_precursor,
            ppm_products = ppm_products,
            rttol = rttol,
            coelCutoff = coelCutoff,
            lipidClasses = lipidClassesNeg,
            dbs = dbs)
    }
  }
  if (parallel){
    parallel::stopCluster(cl)
  }
  msbatch <- joinAnnotationResults(msbatch,
                                   simplifyAnnotations = simplifyAnnotations)
  return(msbatch)
}
# Demo script for the sp23design package: simulate and summarise a seamless
# Phase II/III trial design.
library(sp23design)
# True (data-generating) parameters: response rates under H0/H1, log-hazard
# model coefficients, baseline event rate and dropout rate.
trueParameters <- list(p0 = 0.3,
p1 = 0.6,
pdiffHyp=0.3,
theta = list(
alpha = log(.17),
beta = log(1.0),
gamma = log(1.0)),
baselineLambda = 0.35,
etaHyp = 0.25)
# Trial operating characteristics: accrual, follow-up, interim look times
# and error rates for the response and survival hypotheses.
trialParameters <- list(minimumNumberOfEvents = 20,
minimumIncreaseInV = 0.2,
numberRecruitedEachYear = c(80, 120, 160, 160),
followupTime = 3,
adminCensoringTime = 7,
interimLookTime = c(1, 2, 3, 5, 7),
type1ErrorForResponse = 0.05,
type2ErrorForResponse = 0.01,
glrBoundarySidedness = "one",
type1Error = 0.05,
type2Error = 0.10,
epsType1 = 1/3,
epsType2 = 1/3)
# Build the design, simulate 25 trials with a fixed seed, and summarise.
sp23Design <- generateSP23Design(trueParameters, trialParameters)
trialHistory <- exploreSP23Design(sp23Design, numberOfSimulations=25, rngSeed=7325543)
result <- analyzeSP23Design(sp23Design, trialHistory)$designSummary
# Print the headline operating characteristics from the simulation summary.
cat("numberOfTimesH0RIsRejectedAtFirstLook", result[["numberOfTimesH0RIsRejectedAtFirstLook"]], "\n")
cat("numberOfTimesH0RIsRejected", result[["numberOfTimesH0RIsRejected"]], "\n")
cat("numberOfTimesStoppedForFutility", result[["numberOfTimesStoppedForFutility"]], "\n")
cat("numberOfTimesH0SIsAccepted", result[["numberOfTimesH0SIsAccepted"]], "\n")
cat("numberOfTimesH0SIsRejected", result[["numberOfTimesH0SIsRejected"]], "\n")
cat("numberOfTimesFutilityDecidedAtLastLook", result[["numberOfTimesFutilityDecidedAtLastLook"]], "\n")
cat("numberOfTimesTrialEndedAtLook", result[["numberOfTimesTrialEndedAtLook"]], "\n")
cat("avgExitTime", result[["avgExitTime"]], "\n")
# testthat tests for xml2's XSD schema validation (xml_validate).
context("xml_schema")
# A well-formed order document should validate against its schema.
test_that("xml schema validates", {
doc <- read_xml(system.file("extdata/order-doc.xml", package = "xml2"))
schema <- read_xml(system.file("extdata/order-schema.xml", package = "xml2"))
expect_true(xml_validate(doc, schema))
})
# Corrupt three fields (empty quantity, non-numeric zip, missing partNum
# attribute) and expect validation to fail, with the validator's messages
# attached as a character "errors" attribute (4 messages for 3 defects).
test_that("xml schema errors", {
str <- readLines(system.file("extdata/order-doc.xml", package = "xml2"))
str <- sub("<quantity>1", "<quantity>", str)
str <- sub("95819", "ABC95819", str)
str <- sub('partNum="926-AA"', "", str)
doc <- read_xml(paste(str, collapse = "\n"))
schema <- read_xml(system.file("extdata/order-schema.xml", package = "xml2"))
out <- xml_validate(doc, schema)
expect_false(out)
errors <- attr(out, "errors")
expect_is(errors, "character")
expect_length(errors, 4)
})
# EM estimation for a mixture of factor analyzers (MFA).
#
# Args:
#   init_para: list of initial parameter estimates.
#   Y: n x p data matrix.
#   itmax: maximum number of EM iterations.
#   tol: convergence tolerance on the log-likelihood.
#   conv_measure: "diff" (absolute change in logL) or "ratio" (relative).
#   ...: ignored; kept for call-compatibility with sibling estimators.
#
# Returns: fitted parameter list of class "mfa" on success, or a character
#   message of class "error" describing where estimation failed.
est.mfa <- function (init_para, Y, itmax, tol, conv_measure, ...) {
p <- ncol(Y)
n <- nrow(Y)
fit <- init_para
# Log-likelihood and posterior probabilities (tau) at the initial values.
loglikeNtau <- try(do.call('logL_tau.mfa', c(list(Y = Y), fit)),
silent = TRUE)
if ((any(class(loglikeNtau) %in% "try-error")) ||
(any(class(loglikeNtau) %in% 'character'))) {
FIT <- paste('in computing the log-likelihood before EM-steps')
class(FIT) <- "error"
return(FIT)
}
fit <- append(fit, loglikeNtau)
for (niter in 1 : itmax) {
# M-step: update parameters given current posteriors.
FIT <- do.call('Mstep.mfa', c(list(Y = Y), fit))
if (any(class(FIT) %in% 'error')) {
FIT <- paste('in ', niter,
'iteration of the M-step', FIT)
class(FIT) <- "error"
return(FIT)
}
# E-step side: recompute log-likelihood and posteriors at new parameters.
loglikeNtau <- try(do.call('logL_tau.mfa', c(list(Y = Y), FIT)),
silent = TRUE)
# NOTE(review): this message pastes FIT$logL, but at this point FIT is the
# raw M-step output and may not contain logL yet (paste() silently drops
# NULL) -- confirm intended diagnostic content.
if ((any(class(loglikeNtau) %in% "try-error")) ||
(any(class(loglikeNtau) %in% 'character'))) {
FIT <- paste('in computing the log-likelihood after the ', niter,
'th the M-step', FIT$logL, sep = '')
class(FIT) <- "error"
return(FIT)
}
FIT <- append(FIT, loglikeNtau)
# Guard against a missing or malformed log-likelihood component.
if ((any(class(FIT$logL) == "NULL")) ||
(any(class(FIT$logL) == 'character'))) {
FIT <- paste('in computing the log-likelihood after the ', niter,
'th the M-step', FIT$logL, sep = '')
class(FIT) <- "error"
return(FIT)
} else {
if ((FIT$logL == -Inf) | is.na(FIT$logL)) {
FIT <- paste('the log-likelihood computed after the ', niter,
'th iteration of the M-step is not finite', sep = '')
class(FIT) <- "error"
return(FIT)
}
}
# Convergence: absolute or relative improvement below tol stops the EM loop.
if ((conv_measure == "diff") && (abs(FIT$logL - fit$logL) < tol))
break
if ((conv_measure == "ratio") && (abs((FIT$logL - fit$logL) / FIT$logL) < tol))
break
fit <- FIT
}
class(FIT) <- "mfa"
return(FIT)
}
isNonZeroNumberOrInfScalar <- function(argument, default = NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL) {
  # Check that `argument` is a single, non-NULL, non-zero, non-NA/NaN number;
  # non-integers, negative values and +/-Inf are all allowed. Thin wrapper
  # delegating to checkarg() in numeric ("N") mode.
  checkarg(
    argument, "N",
    default = default,
    stopIfNot = stopIfNot,
    nullAllowed = FALSE,
    n = 1,
    zeroAllowed = FALSE,
    negativeAllowed = TRUE,
    positiveAllowed = TRUE,
    nonIntegerAllowed = TRUE,
    naAllowed = FALSE,
    nanAllowed = FALSE,
    infAllowed = TRUE,
    message = message,
    argumentName = argumentName
  )
}
# NOTE(review): this chunk is garbled by text extraction -- the hex colour
# strings after `createPalette(24, c("` / `createPalette(24, "` were almost
# certainly "#RRGGBB" seed colours whose '#' characters (and the rest of the
# vectors) were stripped, leaving unbalanced quotes. The chunk does not
# parse as-is; restore the seed-colour arguments from the original
# Polychrome data-raw script before running.
library(Polychrome)
set.seed(9641)
Dark24 <- createPalette(24, c("
M = 100000)
set.seed(701138)
Light24 <- createPalette(24, "
ddir <- file.path("..", "..", "data")
save(Dark24, file = file.path(ddir, "Dark24.rda"))
save(Light24, file = file.path(ddir, "Light24.rda"))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.