add.echogram <-
function(echogram1, echogram2, operator = c("plus", "minus"),
domain = c("linear", "dB")) {
echo1 <- echogram1
echo2 <- echogram2
if ( !inherits(echo1, "echogram") || !inherits(echo2, "echogram") )
stop ("need objects of class 'echogram'")
m1 <- echo1$Sv
m2 <- echo2$Sv
if (dim(m1)[1] != dim(m2)[1] | dim(m1)[2] != dim(m2)[2])
stop("non-conformable echograms, run match.echogram(echogram1, echogram2)")
dB2linear <- function(X) 10^(X/10)
linear2dB <- function(X) 10*log10(X)
if ( missing(domain) )
domain <- "dB"
if (domain == "linear"){
m1 <- dB2linear(m1)
m2 <- dB2linear(m2)
}
ans <- echo1
operator <- match.arg(operator)
ans0 <- switch(operator,
plus = `+`(m1, m2),
minus = `-`(m1, m2))
if (domain == "linear")
ans0 <- linear2dB(ans0)
ans$Sv <- ans0
class(ans) <- "echogram"
ans
} |
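# Usage sketch (not from the original source): add.echogram() only touches the
# Sv matrix, so two minimal mock objects of class "echogram" with conformable
# Sv matrices are enough to exercise it. Real echogram objects would carry
# additional components (e.g. depth and time vectors).
e1 <- structure(list(Sv = matrix(-70, nrow = 3, ncol = 4)), class = "echogram")
e2 <- structure(list(Sv = matrix(-65, nrow = 3, ncol = 4)), class = "echogram")
sum_db  <- add.echogram(e1, e2, operator = "plus")                     # summed in dB
sum_lin <- add.echogram(e1, e2, operator = "plus", domain = "linear")  # summed in linear domain
range(sum_db$Sv)   # -135 everywhere (plain dB sum)
range(sum_lin$Sv)  # about -63.8 everywhere (linear sum, back-transformed to dB)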
"panel.bwplot.intermediate.hh" <-
function (x, y,
horizontal = TRUE,
pch,
col,
lwd,
...
)
{
fac.levels <- if (horizontal) levels(y) else levels(x)
box.par <- list(box.dot=trellis.par.get("box.dot"),
box.rectangle=trellis.par.get("box.rectangle"),
box.umbrella=trellis.par.get("box.umbrella"),
plot.symbol=trellis.par.get("plot.symbol"))
old.box.par <- box.par
on.exit(trellis.par.set(old.box.par))
tpg <- trellis.par.get("superpose.line")
tpg.col <- rep(tpg$col, length=length(fac.levels))
if (!missing(pch)) pch <- rep(pch, length=length(fac.levels))
if (!missing(col)) tpg.col <- rep(col, length=length(fac.levels))
if (!missing(lwd)) {
box.par$box.rectangle$lwd <- rep(lwd, length=length(fac.levels))
box.par$box.umbrella$lwd <- rep(lwd, length=length(fac.levels))
}
for (i in seq(along=fac.levels)) {
if (!missing(pch)) {
box.par$box.dot$pch <- pch[i]
box.par$plot.symbol$pch <- pch[i]
}
for (j in names(box.par)) {
box.par[[j]]$col <- tpg.col[i]
trellis.par.set(j, box.par[[j]])
}
if (horizontal) {
ii <- as.position(y[y == fac.levels[i]])
xy <- x[y == fac.levels[i]]
panel.bwplot(xy, ii, horizontal=horizontal, ...)
}
else {
yx <- y[x == fac.levels[i]]
ii <- as.position(x[x == fac.levels[i]])
panel.bwplot(ii, yx, horizontal=horizontal, ...)
}
}
} |
landmark_frm2amira<-function(path_folder_frm,path_amira_folder){
dir.create(path_amira_folder)
for(j in 1:length(list.files(path_folder_frm))){
first<-readLines(paste(path_folder_frm,list.files(path_folder_frm)[j],sep="/"))
first_pos<-which(substr(first,1,9)=="<pointset")+1
first_range<-which(first[(first_pos+1)]=="</pointset>")
out_land<-matrix(NA,ncol=3,nrow=length(first_range))
for(i in 1:length(first_range)){
second<-strsplit(first[first_pos[first_range]][i],' ')
third<-unlist(strsplit(second[[1]],"<locations>"))
fourth<-unlist(strsplit(third,"</locations>"))
landmark<-as.numeric(fourth)
out_land[i,]<-landmark
}
ref_set<-out_land
lista<-list(ref_set)
names(lista)<-list.files(path_folder_frm)[j]
export_amira(lista,path_amira_folder)
}
} |
pdsoft <-
function(s, lam, tau=1e-4, init=c("soft", "diag", "dense", "user"),
s0=NULL, i0=NULL, standard=TRUE,tolin=1e-8, tolout=1e-8, maxitin=1e4,
maxitout=1e3, quiet=TRUE)
{
p=dim(s)[1]
init=match.arg(init)
if(sum(lam)==0) init="dense"
if(standard)
{
dhat = sqrt(diag(s))
dhat.inv = 1/dhat
S=dhat.inv * s * rep(dhat.inv, each = p)
if(init=="diag")
{
s0=diag(p)
i0=diag(p)
} else if( init=="soft")
{
tmp=abs(S) - lam
tmp=tmp*(tmp > 0)
S.soft=sign(S)*tmp
diag(S.soft)=diag(S)
oe=eigen(S.soft, symmetric=TRUE)
evs=oe$val/2 + sqrt( oe$val^2 + 4*tau) /2
s0=tcrossprod(oe$vec*rep(evs, each=p), oe$vec)
i0=tcrossprod(oe$vec*rep(1/evs, each=p), oe$vec)
} else if( init=="dense")
{
oe=eigen(S, symmetric=TRUE)
evs=oe$val/2 + sqrt( oe$val^2 + 4*tau) /2
s0=tcrossprod(oe$vec*rep(evs, each=p), oe$vec)
i0=tcrossprod(oe$vec*rep(1/evs, each=p), oe$vec)
} else
{
if( is.null(s0) & (!is.null(i0)) )
s0=qr.solve(i0)
if( (!is.null(s0)) & is.null(i0) )
i0=qr.solve(s0)
if(is.null(s0) & is.null(i0) )
stop("Error: must specify either s0 or i0 with user initialization")
}
}else
{
S=s
if(init=="diag")
{
s0=diag(diag(S))
i0=diag(1/diag(S))
} else if( init=="soft")
{
tmp=abs(S) - lam
tmp=tmp*(tmp > 0)
S.soft=sign(S)*tmp
diag(S.soft)=diag(S)
oe=eigen(S.soft, symmetric=TRUE)
evs=oe$val/2 + sqrt( oe$val^2 + 4*tau) /2
s0=tcrossprod(oe$vec*rep(evs, each=p), oe$vec)
i0=tcrossprod(oe$vec*rep(1/evs, each=p), oe$vec)
} else if( init=="dense")
{
oe=eigen(S, symmetric=TRUE)
evs=oe$val/2 + sqrt( oe$val^2 + 4*tau) /2
s0=tcrossprod(oe$vec*rep(evs, each=p), oe$vec)
i0=tcrossprod(oe$vec*rep(1/evs, each=p), oe$vec)
} else
{
if( is.null(s0) & (!is.null(i0)) )
s0=qr.solve(i0)
if( (!is.null(s0)) & is.null(i0) )
i0=qr.solve(s0)
if(is.null(s0) & is.null(i0) )
stop("Error: must specify either s0 or i0 with user initialization")
}
}
if(is.null(dim(lam)[1]))
{
lam = matrix(lam, nrow=p, ncol=p)
}
if( sum(lam) > 0 )
{
Soff=S
diag(Soff)=0
tolmult=sum(abs(Soff))/2
S = as.double(S)
i0 = as.double(i0)
s0 = as.double(s0)
lam = as.double(lam)
b = as.double(tau)
tolin = as.double(tolin*(tolmult/p))
tolout = as.double(tolout*tolmult)
totalout=1
mode(totalout) = "integer"
mode(p) = "integer"
mode(maxitin) = "integer"
mode(maxitout) = "integer"
mode(quiet) = "integer"
tmp=matrix(0, nrow=p, ncol=p)
tmp=as.double(tmp)
tmp2=matrix(0, nrow=p, ncol=p)
tmp2=as.double(tmp2)
tosoft=as.double(rep(0,p))
coutput=.C("pdsc",S=S, Sigma=s0, Omega=i0, tosoft=tosoft, pin=p, lam=lam, tauin=b, tolin=tolin,
maxitin=maxitin, tolout=tolout, maxitout=maxitout, totalout=totalout)
sigma = matrix(coutput$Sigma, nrow=p, ncol=p)
omega = matrix(coutput$Omega, nrow=p, ncol=p)
if(!quiet)
{
cat("Total outer iterations = ", coutput$totalout, "\n")
}
} else
{
sigma=s0
omega=i0
}
if(standard)
{
theta=sigma
theta.inv=omega
sigma = dhat*theta*rep(dhat, each=p)
omega = dhat.inv *theta.inv * rep(dhat.inv, each=p)
} else
{
theta=NULL
theta.inv=NULL
}
return(list(omega=omega, sigma=sigma, theta=theta, theta.inv=theta.inv))
} |
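# Usage sketch (assumed, not part of the original source): with lam = 0 the
# penalised C routine ("pdsc") is skipped, so this runs with the R code above
# alone and returns a positive-definite covariance estimate and its inverse.
set.seed(1)
x   <- matrix(rnorm(40 * 5), nrow = 40, ncol = 5)
s   <- cov(x)
fit <- pdsoft(s, lam = 0)
min(eigen(fit$sigma, symmetric = TRUE)$values) > 0   # positive definite
max(abs(fit$sigma %*% fit$omega - diag(5)))          # omega is numerically the inverse of sigma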
network_analysis <-
function(...) {
lifecycle::deprecate_warn("0.2.0",
"network_analysis()",
"analyse_functional_network()",
details = "This function has been renamed."
)
analyse_functional_network(...)
}
analyse_functional_network <- function(data,
protein_id,
string_id,
organism_id,
score_threshold = 900,
binds_treatment = NULL,
halo_color = NULL,
plot = TRUE) {
if (!requireNamespace("STRINGdb", quietly = TRUE)) {
stop(strwrap("Package \"STRINGdb\" is needed for this function to work. Please install it.",
prefix = "\n", initial = ""
), call. = FALSE)
}
STRINGdb <- get("STRINGdb", envir = loadNamespace("STRINGdb"))
data <- data %>%
dplyr::distinct({{ protein_id }}, {{ string_id }}, {{ binds_treatment }})
if (length(unique(dplyr::pull(data, !!ensym(protein_id)))) != nrow(data)) {
stop(strwrap("Please provide unique annotations for each protein! The number of proteins
does not match the number of rows in your data.", prefix = "\n", initial = ""))
}
string_db <- STRINGdb$new(
version = "11",
species = organism_id,
score_threshold = score_threshold,
input_directory = ""
)
input <- data %>%
dplyr::mutate({{ string_id }} := stringr::str_extract({{ string_id }}, pattern = ".+[^;]")) %>%
tidyr::drop_na({{ string_id }})
string_ids <- dplyr::pull(input, !!ensym(string_id))
payload_id <- NULL
if (!missing(binds_treatment)) {
if (missing(halo_color)) {
coloring <- input %>%
dplyr::filter({{ binds_treatment }}) %>%
dplyr::mutate(color = "
} else {
coloring <- input %>%
dplyr::filter({{ binds_treatment }}) %>%
dplyr::mutate(color = halo_color)
}
payload_id <- string_db$post_payload(coloring$database_string,
colors = coloring$color
)
}
if (plot == TRUE) {
if (length(unique(dplyr::pull(data, !!ensym(protein_id)))) > 400) {
stop(strwrap("Please only provide the top 400 significant proteins for plots! String
cannot plot more at once.", prefix = "\n", initial = ""))
}
string_db$plot_network(string_ids, payload_id = payload_id)
} else {
mapping <- input %>%
dplyr::distinct({{ protein_id }}, {{ string_id }})
interactions <- string_db$get_interactions(string_ids) %>%
dplyr::left_join(mapping, by = c("from" = rlang::as_name(rlang::enquo(string_id)))) %>%
dplyr::rename(from_protein = {{ protein_id }}) %>%
dplyr::left_join(mapping, by = c("to" = rlang::as_name(rlang::enquo(string_id)))) %>%
dplyr::rename(to_protein = {{ protein_id }}) %>%
dplyr::distinct()
return(interactions)
}
} |
fill_empties_Q <- function(data, obj,
row_min_num = 10,
row_num_to_move = 5) {
data <- as.matrix(data)
empty_protos <- which(colSums(obj) == 0)
unassigned <- which(rowSums(obj) == 0)
if(length(unassigned) == 0 & length(empty_protos) == 0) {
return(obj)
} else if(length(unassigned) == 0 & length(empty_protos) > 0) {
num_to_fill <- length(empty_protos)
if(all(colSums(obj, na.rm = TRUE) < row_min_num)) {
stop(
paste0(
"No row groups with at least row_min_num = ", row_min_num,
" rows. Specify a smaller row_min_num value."
)
)
}
for(j in 1:num_to_fill) {
protos_to_choose_from <- which(colSums(obj, na.rm = TRUE) >= row_min_num)
num_in_each <- colSums(matrix(obj[, protos_to_choose_from]))
sampling_frame <- rep(protos_to_choose_from, num_in_each)
proto_to_use <- sample(sampling_frame, 1)
dummy_var <- rep(0, ncol(obj))
dummy_var[empty_protos[j]] <- 1
chosen_proto_members <- which(obj[, proto_to_use] == 1)
mean_row <- mean(rowMeans(as.matrix(data[chosen_proto_members,]), na.rm = TRUE), na.rm = TRUE)
row_means <- rowMeans(as.matrix(data[chosen_proto_members,]), na.rm = TRUE)
similarity <- (row_means - mean_row) ^ 2
to_move <- which(similarity %in% head(sort(similarity, decreasing = TRUE),
n = row_num_to_move))
for(i in 1:row_num_to_move) {
if(is.na(chosen_proto_members[to_move[i]])) {
stop(
"No row clusters with at least row_num_to_move - 1 rows. Specify a smaller row_num_to_move value."
)
}
obj[chosen_proto_members[to_move[i]],] <- dummy_var
}
}
return(obj)
} else {
num_to_assign <- length(unassigned)
for(i in 1:num_to_assign) {
empty_protos <- colSums(obj)
obj[unassigned[i], which.min(empty_protos)] <- 1
}
return(obj)
}
} |
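# Usage sketch (assumed): two row groups of 10 rows each plus one empty third
# prototype. fill_empties_Q() moves row_num_to_move = 5 of the least typical
# rows from a sampled non-empty group into the empty one.
set.seed(2)
toy_data <- matrix(rnorm(60), nrow = 20, ncol = 3)
toy_obj  <- matrix(0, nrow = 20, ncol = 3)
toy_obj[1:10, 1]  <- 1
toy_obj[11:20, 2] <- 1
filled <- fill_empties_Q(toy_data, toy_obj)
colSums(filled)   # the third column is no longer empty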
ccinter <- CCInter <- function( x,
cases,
exposure,
by,
table = FALSE,
full = FALSE
) UseMethod("CCInter", x)
CCInter.data.frame <- function( x,
cases,
exposure,
by,
table = FALSE,
full = FALSE
)
{
L_LABELS1 <- c()
L_TAB <- c()
L_CASES <- c()
L_CONTROLS <- c()
L_CIL <- c()
L_CIH <- c()
L_STATS <- c()
L_ESTIMATE <- c()
NB_TOTAL <- 0
T.Controls <- c()
T.Cases <- c()
T.OR <- c()
T.Marks <- c("++","+-","-+","reference --", "Total")
T.TCA <- 0
T.TCO <- 0
.strate <- as.factor(x[,by])
.strateError = "One of your strata has zero cases in the cells."
.df <- x
getColnames <- function() {
.Col1Label = sprintf("CCInter %s - %s by(%s)", cases, exposure, by)
c(.Col1Label, c("Cases","Controls","P.est.","Stats","95%CI-ll","95%CI-ul"))
}
getColnames2 <- function() {
c("P.estimate","Stats","95%CI-ll","95%CI-ul")
}
getPestNames <- function(ODD) {
sprintf("getPestNames:ODD : %4.4f", ODD)
if (ODD > 1.0) {
c("Odds ratio", "Attrib.risk.exp", "Attrib.risk.pop", "", "", "")
} else {
c("Odds ratio", "Prev. frac. ex.", "Prev. frac. pop", "", "", "")
}
}
getCrudeOR <- function(d) {
.T <- table(d[,cases], d[,exposure])
.r = or(.T)
.r
}
getRisksLabels <- function(.level) {
.label = sprintf("%s = %s", by, .level);
c(.label, "Exposed", "Unexposed", "Total", "Exposed %", "______________")
}
getMHLabels <- function() {
label2 = sprintf("Crude OR for %s", exposure);
label3 = sprintf("MH OR %s adjusted for %s", exposure, by);
c("MH test of Homogeneity (p-value)",
label2, label3, "Adjusted/crude relative change")
}
getRRStats <- function() {
if (!is.factor(x[, cases])) {
.T = table(!x[, exposure], !x[, cases], .strate)
} else {
.d <- x
.d[, cases] <- 1 - (as.numeric(x[, cases])-1)
.d[, exposure] <- 1 - (as.numeric(x[, exposure])-1)
.T = table(.d[, exposure], .d[, cases], .strate)
}
.loop = length(levels(.strate))
.Compute = TRUE
.T <- .T1 <- toNumeric(.T, .loop)
retrieveLast <- function(.T) {
i <- length(.T[1,2,])
if (.T[1,1, i] == 0 | .T[2,1, i] == 0 | .T[1,2, i] == 0 | .T[2,2, i] == 0) {
msg <- sprintf("Stratum %d has values = 0 and has been removed", i)
warning(msg)
.T <- .T[, , -i]
.T <- retrieveLast(.T)
}
.T
}
.T <- retrieveLast(.T)
S_ <- summary(epi.2by2(.T, method = "case.control", outcome="as.columns"))
R <- S_$massoc.detail
.loop = length(.T[1,2,])
NB_LEVELS = .loop
.ind <- .loop:1
for (i in .loop:1) {
j <- .ind[[i]]
.level <- levels(.strate)[i]
A_CE = .T[1,1, i] ;
C_CU = .T[2,1, i] ;
B_HE = .T[1,2, i] ;
D_HU = .T[2,2, i] ;
T_EX <- A_CE + B_HE
T_UN <- C_CU + D_HU
T_CT <- B_HE + D_HU ;
L_LABELS1 <- c(L_LABELS1, getRisksLabels(.level))
L_CASES <- c(L_CASES, NA, A_CE, C_CU);
TOTAL <- A_CE + C_CU;
NB_TOTAL = NB_TOTAL + TOTAL;
EXPOSED_PC <-sprintf("%3.1f%%", (A_CE / TOTAL) * 100)
L_CASES <- c(L_CASES, TOTAL, EXPOSED_PC, NA);
L_CONTROLS <- c(L_CONTROLS, NA, B_HE, D_HU);
TOTAL <- B_HE + D_HU;
NB_TOTAL = NB_TOTAL + TOTAL;
EXPOSED_PC <- sprintf("%3.1f%%", (B_HE / TOTAL) * 100)
L_CONTROLS <- c(L_CONTROLS, TOTAL, EXPOSED_PC, NA);
if (i < 3) {
T.Cases <- c(T.Cases, A_CE, C_CU)
T.Controls <- c(T.Controls, B_HE, D_HU)
T.OR <- c(T.OR, NA, NA)
T.TCA <- T.TCA + A_CE + C_CU
T.TCO <- T.TCO + B_HE + D_HU
}
num <- NULL
.d <- R$OR.strata.wald
.d <- .d %>% mutate(num = 1:nrow(.d)) %>% arrange(desc(num))
ODD <- .d[j, "est"]
.d <- R$OR.strata.mle
.d <- .d %>% mutate(num = 1:nrow(.d)) %>% arrange(desc(num))
.CIL <- .d[j, "lower"]
.CIH <- .d[j, "upper"]
L_STATS <- c(L_STATS, S2(ODD));
L_CIL = c(L_CIL, S2(.CIL));
L_CIH = c(L_CIH, S2(.CIH));
L_ESTIMATE <- c(L_ESTIMATE, getPestNames(round(ODD, 8)))
if (ODD >= 1.0) {
.d <- R$AFest.strata.wald
.d <- .d %>% mutate(num = 1:nrow(.d)) %>% arrange(desc(num))
V_AR = .d[j, "est"]
V_CIL = .d[j, "lower"]
V_CIH = .d[j, "upper"]
L_STATS <- c(L_STATS, S2(V_AR));
L_CIL = c(L_CIL, S2(V_CIL), NA, NA, NA, NA);
L_CIH = c(L_CIH, S2(V_CIH), NA, NA, NA, NA);
.d <- R$PAFest.strata.wald
.d <- .d %>% mutate(num = 1:nrow(.d)) %>% arrange(desc(num))
AFP <- .d[j, "est"]
L_STATS <- c(L_STATS, S2(AFP), NA, NA, NA);
} else {
V_AR <- 1 - ODD
V_CIL <- 1 - .CIH
V_CIH <- 1 - .CIL
L_STATS <- c(L_STATS, S2(V_AR));
L_CIL = c(L_CIL, S2(V_CIL), NA, NA, NA, NA);
L_CIH = c(L_CIH, S2(V_CIH), NA, NA, NA, NA);
Pe <- B_HE / T_CT
AFP <- Pe * (1-ODD)
L_STATS <- c(L_STATS, S2(AFP), NA, NA, NA)
}
}
if (table == TRUE) {
T.Cases <- c(T.Cases, T.TCA)
T.Controls <- c(T.Controls, T.TCO)
T.OR <- c(T.OR, NA)
}
L_CASES = c(L_CASES, NB_TOTAL);
.nrow <- nrow(x)
MIS_TO = .nrow - NB_TOTAL;
MIS_PC = sprintf("%3.2f%s", (MIS_TO / .nrow)*100, '%');
L_CASES = c(L_CASES, MIS_TO);
L_LABELS1 <- c(L_LABELS1, "Number of obs", "Missing")
L_CONTROLS <- c(L_CONTROLS, NA, NA)
L_ESTIMATE <- c(L_ESTIMATE, NA, NA)
L_STATS <- c(L_STATS, NA, NA)
L_CIL <- c(L_CIL, NA, NA)
L_CIH <- c(L_CIH, NA, NA)
DF1 <- data.frame(L_LABELS1, L_CASES, L_CONTROLS, L_ESTIMATE, L_STATS, L_CIL, L_CIH, stringsAsFactors=TRUE)
colnames(DF1) <- getColnames()
df <- x[!is.na(x[,exposure]),]
df <- df[!is.na(df[,by]),]
df <- df[!is.na(df[,cases]),]
.T <- table(df[,cases], df[,exposure], df[,by]);
.T <- toNumeric(.T, .loop)
R <- CC_STATS(.T);
STAT = R$OR.homog.woolf$p.value;
L_STATS <- c(STAT);
.ror <- getCrudeOR(df)
STAT = .ror[1]
CIL = .ror[2]
CIH = .ror[3]
L_STATS <- c(L_STATS, STAT);
L_CIL = c("", S2(CIL));
L_CIH = c("", S2(CIH));
OR.crude = STAT
STAT = R$OR.mh.wald$est;
CIL = R$OR.mh.wald$lower
CIH = R$OR.mh.wald$upper
OR.mh = STAT
L_STATS <- c(L_STATS, STAT);
L_CIL = c(L_CIL, S2(CIL), "_");
L_CIH = c(L_CIH, S2(CIH), "_");
STAT = 100 * ((OR.mh - OR.crude)/OR.crude);
L_STATS <- c(L_STATS, STAT);
L_LABELS1 = getMHLabels()
DF2 <- data.frame(L_LABELS1, S2(L_STATS), L_CIL, L_CIH)
colnames(DF2) <- getColnames2()
if (table == TRUE) {
.Col1 <- sprintf("%s / %s", by, exposure)
T.Col <- c(.Col1, "Cases", "Controls", "OR")
P11 <- T.Cases[1] / (T.Cases[1]+T.Controls[1])
P10 <- T.Cases[2] / (T.Cases[2]+T.Controls[2])
P01 <- T.Cases[3] / (T.Cases[3]+T.Controls[3])
P00 <- T.Cases[4] / (T.Cases[4]+T.Controls[4])
OR11 <- (P11/(1-P11)) / (P00/(1-P00))
OR10 <- (P10/(1-P10)) / (P00/(1-P00))
OR01 <- (P01/(1-P01)) / (P00/(1-P00))
T.OR <- c(round(OR11,2), round(OR10,2), round(OR01,2), NA, NA)
DF3 <- data.frame(T.Marks, T.Cases, T.Controls, T.OR)
colnames(DF3) <- T.Col
.Labs <- c("Observed OR when exposed to both",
"Expected OR if exposed to both and no interaction",
"Interaction")
S.OBOR <- OR11
S.EXOR <- (OR10 - 1) + (OR01 - 1) + 1
S.INTR <- OR11 - S.EXOR
DF4 = data.frame(.Labs, c(round(S.OBOR,2), round(S.EXOR,2), round(S.INTR,2)))
colnames(DF4) <- c("Statistic","Value")
}
if (full == TRUE) {
if (.Compute == TRUE) {
ret <- list(df1 = DF1, df2=DF2, df1.align="lccrrrr", df2.align="lrcc")
} else {
ret <- list(df1 = DF1, df2=.strateError, df1.align="lccrrrr", df2.align="lrcc")
}
if (table == TRUE) {
if (.Compute == TRUE) {
ret <- list(df1 = DF1, df2=DF2, df1.align="lccrrrr", df2.align="lrcc",
df3 = DF3, df4 = DF4)
} else {
ret <- list(df1 = DF1, df2=.strateError, df1.align="lccrrrr", df2.align="lrcc",
df3 = DF3, df4 = DF4)
}
}
} else {
if (.Compute == TRUE) {
ret <- list(df1 = DF1, df2=DF2)
} else {
ret <- list(df1 = DF1, df2=.strateError)
}
if (table == TRUE) {
if (.Compute == TRUE) {
ret <- list(df1 = DF1, df2=DF2, df3 = DF3, df4 = DF4)
} else {
ret <- list(df1 = DF1, df2=.strateError, df3 = DF3, df4 = DF4)
}
}
}
ret
}
getRRStats()
} |
dfCompare <- function(dfOld,dfNew,key) {
dfJoined <- merge(dfOld,dfNew,key)
dfDeletes <- dfOld[is.na(match(dfOld[,key],dfNew[,key])),]
dfAdds <- dfNew[is.na(match(dfNew[,key],dfOld[,key])),]
dfJoined <- sapply(dfJoined,as.character)
isChanged <- function(x,idx,colCount)
{
j <- x[2:(idx - 1)]   # old (.x) value columns; column 1 is the key
k <- x[idx:colCount]  # new (.y) value columns
identical(unname(j),unname(k))
}
idx <- grep(".y",colnames(dfJoined))[1]
colCount <- ncol(dfJoined)
ident <- apply(dfJoined,1,function(x) isChanged(x,idx,colCount))
dfJoined <- as.data.frame(dfJoined)
dfJoined <- cbind(dfJoined,ident)
dfChanged <- dfJoined[dfJoined$ident == FALSE, !grepl("\\.x$", colnames(dfJoined))]
rm(dfJoined,ident)
gc()
return(list(DFDeletes = dfDeletes, DFAdds = dfAdds, DFChanges = dfChanged))
} |
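# Usage sketch (assumed): compare two keyed data frames. Row 1 is deleted,
# row 4 is added, and row 3 has a changed value.
old <- data.frame(id = 1:3, val = c("a", "b", "c"), stringsAsFactors = FALSE)
new <- data.frame(id = 2:4, val = c("b", "C", "d"), stringsAsFactors = FALSE)
cmp <- dfCompare(old, new, key = "id")
cmp$DFDeletes   # id 1
cmp$DFAdds      # id 4
cmp$DFChanges   # id 3, new value "C"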
sample_strata <- function(data, strata, id, already_sampled = NULL,
design_data, design_strata = "strata",
n_allocated = "n_to_sample") {
if (is.matrix(data) | is.matrix(design_data)) {
data <- as.data.frame(data)
design_data <- as.data.frame(design_data)
}
if (is.data.frame(data) == FALSE | is.data.frame(design_data) == FALSE) {
stop("'data' and 'design_data' must be a dataframe or matrix
with named columns")
}
if (any(c(strata, id) %in% names(data) == FALSE)) {
stop("'strata' and 'id' must be strings matching a column name of
'data'")
}
if (any(c(design_strata, n_allocated) %in% names(design_data) == FALSE)) {
stop("'design_strata' and 'n_allocated' must be strings matching a
column name of 'design_data'")
}
if (length(unique(design_data[, design_strata])) !=
length(design_data[, design_strata])) {
stop("'design_data' may only contain one row per stratum")
}
if (any(design_data[, design_strata] %in% data[, strata] == FALSE)) {
stop("strata names in 'design_data' must all match strata names
in 'data'.")
}
if (is.numeric(design_data[, n_allocated]) == FALSE) {
stop("'n_allocated' must specify a numeric column in 'design_data'
containing only whole number values")
}
if (is.numeric(design_data[, n_allocated]) == TRUE &
any(design_data[, n_allocated] %% 1 != 0)) {
stop("'n_allocated' must specify a numeric column in 'design_data'
containing only whole number values")
}
nsample <- sum(design_data[, n_allocated])
if (is.null(already_sampled) == FALSE) {
if (already_sampled %in% names(data) == FALSE) {
stop("If not NULL, 'already_sampled' must be a character string matching
a column name of 'data'.")
}
if (length(table(data[, already_sampled])) != 2) {
stop("'already_sampled' must be a character string matching a column
in 'data' that has a binary indicator for whether each
unit was already sampled.")
}
if (("Y" %in% data[, already_sampled] == FALSE & 1 %in%
data[, already_sampled] == FALSE) | anyNA(data[, already_sampled])) {
stop("'already_sampled' column must contain '1' (numeric) or 'Y'
(string) as indicators that a unit was sampled in a
previous wave and cannot contain NAs")
}
if (nsample + sum(data[, already_sampled] == "Y") +
sum(data[, already_sampled] == 1) > length(data[, already_sampled])) {
stop("Total sample size across waves, taken as nsampled in
already_sampled + n to allocate in this sample, is larger than
the population size.")
}
}
sampled_ids <- list()
if (is.null(already_sampled) == TRUE) {
for (i in seq_len(nrow(design_data))) {
stratum <- design_data[, design_strata][i]
strata_data <- data[data[, strata] == stratum, c(id, strata)]
sampled_ids[[i]] <- sample(
x = strata_data[, id],
size = design_data[, n_allocated][i]
)
}
}
if (is.null(already_sampled) == FALSE) {
for (i in seq_len(nrow(design_data))) {
stratum <- design_data[, design_strata][i]
strata_data <- data[
data[, strata] == stratum
& data[, already_sampled] != "Y"
& data[, already_sampled] != 1,
c(id, strata)
]
sampled_ids[[i]] <- sample(
x = strata_data[, id],
size = design_data[, n_allocated][i]
)
}
}
sampled_ids <- unlist(sampled_ids)
names(data)[names(data) == id] <- "id"
output_df <- data %>%
dplyr::mutate(sample_indicator = ifelse(id %in% sampled_ids, 1, 0))
return(output_df)
} |
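# Usage sketch (assumed): sample 3 ids from stratum "a" and 2 from stratum "b".
# The pipe inside sample_strata() needs dplyr (or magrittr) to be attached.
library(dplyr)
set.seed(3)
pop    <- data.frame(id = 1:20, strata = rep(c("a", "b"), each = 10))
design <- data.frame(strata = c("a", "b"), n_to_sample = c(3, 2))
out <- sample_strata(pop, strata = "strata", id = "id", design_data = design)
table(out$strata, out$sample_indicator)   # 3 sampled in "a", 2 in "b"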
setClass("Kepler", slots = c(
GM = "numeric",
odeSolver = "Euler",
counter = "numeric"
),
contains = c("ODE")
)
setMethod("initialize", "Kepler", function(.Object, ...) {
.Object@GM <- 4 * pi * pi
.Object@state <- vector("numeric", 5)
.Object@odeSolver <- Euler(.Object)
.Object@counter <- 0
return(.Object)
})
setMethod("doStep", "Kepler", function(object, ...) {
object@odeSolver <- step(object@odeSolver)
object@state <- object@odeSolver@ode@state
object
})
setMethod("getTime", "Kepler", function(object, ...) {
return(object@state[5])
})
setMethod("getEnergy", "Kepler", function(object, ...) {
ke <- 0.5 * (object@state[2] * object@state[2] +
object@state[4] * object@state[4])
pe <- -object@GM / sqrt(object@state[1] * object@state[1] +
object@state[3] * object@state[3])
return(pe+ke)
})
setMethod("init", "Kepler", function(object, initState, ...) {
object@state <- initState
object@odeSolver <- init(object@odeSolver, getStepSize(object@odeSolver))
object@counter <- 0
object
})
setReplaceMethod("init", "Kepler", function(object, ..., value) {
object@state <- value
object@odeSolver <- init(object@odeSolver, getStepSize(object@odeSolver))
object@counter <- 0
object
})
setMethod("getRate", "Kepler", function(object, state, ...) {
r2 <- state[1] * state[1] + state[3] * state[3]
r3 <- r2 * sqrt(r2)
object@rate[1] <- state[2]
object@rate[2] <- (- object@GM * state[1]) / r3
object@rate[3] <- state[4]
object@rate[4] <- (- object@GM * state[3]) / r3
object@rate[5] <- 1
object@counter <- object@counter + 1
object@rate
})
setMethod("getState", "Kepler", function(object, ...) {
return(object@state)
})
Kepler <- function() {
kepler <- new("Kepler")
return(kepler)
} |
addreg.smooth.allref <- function(object, data = environment(object), type = c("cem", "em"),
mono, family, addreg.smooth.spec, num.knots) {
type <- match.arg(type)
t <- if(missing(data))
terms(object)
else terms(object, data = data)
termlist <- attr(t, "term.labels")
nvar <- length(termlist)
smoothlist <- sapply(addreg.smooth.spec$smooth.spec,"[[","term")
smoothtype <- sapply(addreg.smooth.spec$smooth.spec,class)
names(smoothtype) <- smoothlist
nsmvar <- length(smoothlist)
if (length(num.knots) != nsmvar)
stop(gettextf("num.knots has length %d should equal %d (number of smooth terms)",
length(num.knots), nsmvar), domain = NA)
num.knots <- as.vector(num.knots, mode = "integer")
names(num.knots) <- smoothlist
if (missing(mono)) mono <- rep(FALSE, nvar)
if (is.null(mono)) mono <- rep(FALSE, nvar)
monotonic <- rep(FALSE, nvar)
names(monotonic) <- termlist
monotonic[mono] <- TRUE
allref <- list()
for (smth in smoothlist) {
allref[[smth]] <- list()
if (smoothtype[smth] == "Iso.smooth") {
allref[[smth]][[1]] <- 1
} else if (smoothtype[smth] == "B.smooth") {
if (monotonic[smth]) allref[[smth]][[1]] <- 1:(num.knots[smth]+3)
else {
if (family$family != "binomial")
if (type == "cem") allref[[smth]] <- as.list(1:(num.knots[smth]+3))
else allref[[smth]][[1]] <- 0
else
if (type == "cem") allref[[smth]] <- combinat::permn(1:(num.knots[smth]+3))
else allref[[smth]][[1]] <- -(num.knots[smth]+3)
}
} else
stop("smooth type not recognized. Only B() and Iso() are supported by addreg.smooth")
}
list(allref = allref, terms = t, data = data, monotonic = monotonic)
} |
immer_summary_print_package_rsession <- function(pack)
{
sirt::sirt_summary_print_package_rsession(pack=pack)
} |
NULL
getDesignSet <- function(...) {
return(TrialDesignSet(...))
}
summary.TrialDesignSet <- function(object, ..., type = 1, digits = NA_integer_) {
.warnInCaseOfUnknownArguments(functionName = "summary.TrialDesignSet", ...)
.assertIsTrialDesignSet(object)
if (object$isEmpty()) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "cannot create summary because the design set is empty")
}
summaries <- list()
for (design in object$designs) {
s <- .createSummary(design, digits = digits)
summaries <- c(summaries, s)
}
return(summaries)
}
TrialDesignSet <- setRefClass("TrialDesignSet",
contains = "FieldSet",
fields = list(
.plotSettings = "PlotSettings",
designs = "list",
variedParameters = "character"
),
methods = list(
initialize = function(...) {
.plotSettings <<- PlotSettings()
designs <<- list()
variedParameters <<- character(0)
if (length(list(...)) > 0) {
add(...)
}
if (length(designs) > 0) {
masterDesign <- designs[[1]]
if (inherits(masterDesign, "ParameterSet")) {
.self$.plotSettings <<- masterDesign$.plotSettings
}
}
},
getPlotSettings = function() {
return(.plotSettings)
},
show = function(showType = 1, digits = NA_integer_) {
.show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
},
.show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
'Method for automatically printing trial design sets'
.resetCat()
.cat("Trial design set with ", length(designs), " designs\n\n", heading = 1,
consoleOutputEnabled = consoleOutputEnabled)
for (design in designs) {
design$.show(showType = showType, consoleOutputEnabled = consoleOutputEnabled)
}
},
isEmpty = function() {
return(length(designs) == 0)
},
getSize = function() {
return(length(designs))
},
getDesignMaster = function() {
if (length(designs) == 0) {
stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no design master defined")
}
return(designs[[1]])
},
.validateDesignsArgument = function(designsToAdd, args) {
if (!is.list(designsToAdd)) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designsToAdd' must be a list")
}
if (length(designsToAdd) == 0) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designsToAdd' must not be empty")
}
designsToAddValidated <- list()
for (d in designsToAdd) {
if (.isTrialDesign(d)) {
designsToAddValidated <- c(designsToAddValidated, d)
} else {
parentDesign <- d[[".design"]]
if (is.null(parentDesign)) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
"'designsToAdd' must be a list of trial designs (found '", class(d), "')")
}
warning("Only the parent design of ", class(d),
" was added to trial design set", call. = FALSE)
designsToAddValidated <- c(designsToAddValidated, parentDesign)
}
}
varPar <- args[["variedParameters"]]
if (!is.null(varPar) && length(varPar) > 0) {
variedParameters <<- c(variedParameters, varPar)
}
args <- args[!(names(args) %in% c("designs", "variedParameters"))]
if (length(args) > 0) {
warning("Argument", ifelse(length(args) > 1, "s", ""), " ",
.arrayToString(args, encapsulate = TRUE), " will be ignored ",
"because for 'designs' only argument 'variedParameters' will be respected", call. = FALSE)
}
designs <<- c(designs, designsToAddValidated)
},
addVariedParameters = function(varPar) {
if (is.null(varPar) || !is.character(varPar)) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'varPar' must be a valid character vector")
}
variedParameters <<- c(variedParameters, varPar)
},
.validateOptionalArguments = function(...) {
args <- list(...)
designsToAdd <- .getOptionalArgument(optionalArgumentName = "designs", ...)
if (!is.null(designsToAdd)) {
.validateDesignsArgument(designsToAdd = designsToAdd, args = args)
return(NULL)
}
design <- .getOptionalArgument(optionalArgumentName = "design", ...)
optionalArgumentsDefined = (length(args) > 0)
if (is.null(design) && !optionalArgumentsDefined) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
"please specify a 'design' to add and/or a design parameter, ",
"e.g., deltaWT = c(0.1, 0.3, 0.4)")
}
if (is.null(design) && optionalArgumentsDefined && length(designs) == 0) {
stop(C_EXCEPTION_TYPE_INCOMPLETE_ARGUMENTS,
"at least one design (master) must be defined in this ",
"design set to respect any design parameters")
}
if (!is.null(design)) {
designs <<- c(designs, design)
}
else if (length(designs) > 0) {
design <- designs[[1]]
}
if (!.isTrialDesign(design)) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
"'design' (", class(design), ") must be an instance of class 'TrialDesign'")
}
.getArgumentNames(validatedDesign = design, ...)
invisible(design)
},
.getArgumentNames = function(validatedDesign, ...) {
args <- list(...)
if (length(args) == 0) {
return(character(0))
}
argumentNames <- names(args)
if (length(argumentNames) == 0) {
warning("No argument names available for ", paste(args, collapse = ", "), call. = FALSE)
return(character(0))
}
argumentNames <- argumentNames[nchar(argumentNames) != 0]
argumentNames <- argumentNames[!(argumentNames %in% c("design", "designs", "singleDesign"))]
visibleFieldNames <- validatedDesign$.getVisibleFieldNames()
for (arg in argumentNames) {
if (!(arg %in% visibleFieldNames)) {
stop(sprintf(paste0(C_EXCEPTION_TYPE_RUNTIME_ISSUE,
"'%s' does not contain a field with name '%s'"), class(validatedDesign), arg))
}
}
invisible(argumentNames)
},
add = function(...) {
"Adds 'designs' OR a 'design' and/or a design parameter, e.g., deltaWT = c(0.1, 0.3, 0.4)"
design <- .validateOptionalArguments(...)
args <- list(...)
singleDesign <- args[["singleDesign"]]
if (!is.null(singleDesign) && is.logical(singleDesign) && singleDesign) {
return(invisible())
}
if (!is.null(design)) {
d <- .createDesignVariants(validatedDesign = design, ...)
designs <<- c(designs, d)
}
},
assertHaveEqualSidedValues = function() {
if (length(designs) == 0) {
return(invisible())
}
sided = getDesignMaster()$sided
for (design in designs) {
if (sided != design$sided) {
stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
"designs have different directions of alternative (design master is ",
ifelse(sided == 1, "one", "two"), " sided)")
}
}
},
.createDesignVariants = function(validatedDesign, ...) {
.assertIsTrialDesign(validatedDesign)
argumentNames <- .getArgumentNames(validatedDesign = validatedDesign, ...)
if (length(argumentNames) == 0) {
warning("Creation of design variants stopped: no valid design parameters found", call. = FALSE)
return(list())
}
if (length(argumentNames) > 2) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
"too many arguments (", .arrayToString(argumentNames, encapsulate = TRUE),
"): up to 2 design parameters are allowed")
}
designVariants <- .createDesignVariantsRecursive(designMaster = validatedDesign,
args = list(...), argumentIndex = 1, argumentNames = argumentNames)
return(designVariants)
},
.designSettingExists = function(parameterName, parameterValue, numberOfArguments = 1,
parameterNameBefore = NULL, parameterValueBefore = NULL) {
if (length(designs) == 0) {
return(FALSE)
}
for (design in designs) {
if (!is.null(parameterNameBefore) && !is.null(parameterValueBefore)) {
if (design[[parameterNameBefore]] == parameterValueBefore &&
design[[parameterName]] == parameterValue) {
return(TRUE)
}
} else if (numberOfArguments == 1) {
if (design[[parameterName]] == parameterValue) {
return(TRUE)
}
}
}
return(FALSE)
},
.createDesignVariantsRecursive = function(designMaster, args, argumentIndex, argumentNames,
parameterNameBefore = NULL, parameterValueBefore = NULL) {
if (argumentIndex > length(argumentNames)) {
return(list())
}
designVariants <- list()
argumentName <- argumentNames[argumentIndex]
variedParameters <<- unique(c(variedParameters, argumentName))
argumentValues <- args[[argumentName]]
for (argumentValue in argumentValues) {
if (.designSettingExists(argumentName, argumentValue,
numberOfArguments = length(argumentNames),
parameterNameBefore, parameterValueBefore)) {
if (!is.null(parameterNameBefore) && !is.null(parameterValueBefore)) {
warning(sprintf("Argument ignored: there exists already a design with %s = %s (%s = %s)",
argumentName, argumentValue, parameterNameBefore, parameterValueBefore), call. = FALSE)
} else {
warning(sprintf("Argument ignored: there exists already a design with %s = %s",
argumentName, argumentValue), call. = FALSE)
}
} else {
designMaster2 <- .createDesignVariant(designMaster = designMaster,
argumentName = argumentName, argumentValue = argumentValue)
if (argumentIndex == length(argumentNames)) {
if (is.null(parameterNameBefore) || is.null(parameterValueBefore)) {
.logDebug("Create design variant %s = %s", argumentName, argumentValue)
} else {
.logDebug("Create design variant %s = %s (%s = %s)", argumentName, argumentValue,
parameterNameBefore, parameterValueBefore)
}
designVariants <- c(designVariants, designMaster2)
}
designCopies2 <- .createDesignVariantsRecursive(designMaster = designMaster2,
args = args, argumentIndex = argumentIndex + 1, argumentNames = argumentNames,
parameterNameBefore = argumentName, parameterValueBefore = argumentValue)
if (length(designCopies2) > 0) {
designVariants <- c(designVariants, designCopies2)
}
}
}
return(designVariants)
},
.createDesignVariant = function(designMaster, argumentName, argumentValue) {
if (.isTrialDesignGroupSequential(designMaster)) {
defaultValues <- .getDesignGroupSequentialDefaultValues()
}
else if (.isTrialDesignInverseNormal(designMaster)) {
defaultValues <- .getDesignInverseNormalDefaultValues()
}
else if (.isTrialDesignFisher(designMaster)) {
defaultValues <- .getDesignFisherDefaultValues()
}
for (userDefinedParamName in designMaster$.getUserDefinedParameters()) {
defaultValues[[userDefinedParamName]] <- designMaster[[userDefinedParamName]]
}
defaultValues[[argumentName]] <- argumentValue
if (.isTrialDesignGroupSequential(designMaster)) {
result <- getDesignGroupSequential(
kMax = defaultValues$kMax,
alpha = defaultValues$alpha,
beta = defaultValues$beta,
sided = defaultValues$sided,
informationRates = defaultValues$informationRates,
futilityBounds = defaultValues$futilityBounds,
typeOfDesign = defaultValues$typeOfDesign,
deltaWT = defaultValues$deltaWT,
optimizationCriterion = defaultValues$optimizationCriterion,
gammaA = defaultValues$gammaA,
typeBetaSpending = defaultValues$typeBetaSpending,
userAlphaSpending = defaultValues$userAlphaSpending,
userBetaSpending = defaultValues$userBetaSpending,
gammaB = defaultValues$gammaB,
tolerance = defaultValues$tolerance)
}
else if (.isTrialDesignInverseNormal(designMaster)) {
result <- getDesignInverseNormal(
kMax = defaultValues$kMax,
alpha = defaultValues$alpha,
beta = defaultValues$beta,
sided = defaultValues$sided,
informationRates = defaultValues$informationRates,
futilityBounds = defaultValues$futilityBounds,
typeOfDesign = defaultValues$typeOfDesign,
deltaWT = defaultValues$deltaWT,
optimizationCriterion = defaultValues$optimizationCriterion,
gammaA = defaultValues$gammaA,
typeBetaSpending = defaultValues$typeBetaSpending,
userAlphaSpending = defaultValues$userAlphaSpending,
userBetaSpending = defaultValues$userBetaSpending,
gammaB = defaultValues$gammaB,
tolerance = defaultValues$tolerance)
}
else if (.isTrialDesignFisher(designMaster)) {
result <- getDesignFisher(
kMax = defaultValues$kMax,
alpha = defaultValues$alpha,
method = defaultValues$method,
userAlphaSpending = defaultValues$userAlphaSpending,
informationRates = defaultValues$informationRates,
alpha0Vec = defaultValues$alpha0Vec,
sided = defaultValues$sided,
tolerance = defaultValues$tolerance,
iterations = defaultValues$iterations,
seed = defaultValues$seed)
}
result$.plotSettings <- designMaster$.plotSettings
return(result)
}
)
)
setMethod("[", "TrialDesignSet",
function(x, i, j = NA_character_, ...) {
if (length(x$designs) == 0) {
return(NULL)
}
design <- x$designs[[i]]
if (!missing(j) && !is.na(j) && is.character(j)) {
return(design[[j]])
}
return(design)
}
)
names.TrialDesignSet <- function(x) {
return(x$.getVisibleFieldNames())
}
length.TrialDesignSet <- function(x) {
return(length(x$designs))
}
as.data.frame.TrialDesignSet <- function(x, row.names = NULL,
optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE,
addPowerAndAverageSampleNumber = FALSE, theta = seq(-1, 1, 0.02), nMax = NA_integer_, ...) {
.assertIsTrialDesignSet(x)
if (x$isEmpty()) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "cannot create data.frame because the design set is empty")
}
fCall = match.call(expand.dots = FALSE)
theta <- .assertIsValidThetaRange(thetaRange = theta, thetaAutoSeqEnabled = (as.character(fCall$theta)[1] != "seq"))
if (addPowerAndAverageSampleNumber) {
.assertAssociatedArgumentsAreDefined(
addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber,
theta = theta, nMax = nMax)
}
fisherDesignEnabled <- .isTrialDesignFisher(x$getDesignMaster())
dataFrame <- NULL
for (design in x$designs) {
if (fisherDesignEnabled != .isTrialDesignFisher(design)) {
stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "all trial designs must be from the same type ",
"('", class(x$designs[[1]]), "' != '", class(design), ")'")
}
df <- as.data.frame(design, niceColumnNamesEnabled = niceColumnNamesEnabled,
includeAllParameters = includeAllParameters)
if (.isTrialDesignWithValidFutilityBounds(design)) {
futilityBoundsName <- "futilityBounds"
if (niceColumnNamesEnabled) {
futilityBoundsName <- .getTableColumnNames(design = design)[["futilityBounds"]]
}
kMax <- design$kMax
df[[futilityBoundsName]][kMax] <- design$criticalValues[kMax]
}
if (.isTrialDesignWithValidAlpha0Vec(design)) {
alpha0VecName <- "alpha0Vec"
if (niceColumnNamesEnabled) {
alpha0VecName <- .getTableColumnNames(design = design)[["alpha0Vec"]]
}
kMax <- design$kMax
df[[alpha0VecName]][kMax] <- design$criticalValues[kMax]
}
if (addPowerAndAverageSampleNumber) {
results <- PowerAndAverageSampleNumberResult(design, theta = theta, nMax = nMax)
df2 <- as.data.frame(results, niceColumnNamesEnabled = niceColumnNamesEnabled,
includeAllParameters = includeAllParameters)
df <- merge(df, df2, all.y = TRUE)
}
if (is.null(dataFrame)) {
if (niceColumnNamesEnabled) {
dataFrame <- cbind("Design number" = rep(1, nrow(df)), df)
} else {
dataFrame <- cbind(designNumber = rep(1, nrow(df)), df)
}
} else {
if (niceColumnNamesEnabled) {
df <- cbind("Design number" = rep(max(dataFrame$"Design number") + 1, nrow(df)), df)
} else {
df <- cbind(designNumber = rep(max(dataFrame$designNumber) + 1, nrow(df)), df)
}
dataFrame <- rbind(dataFrame, df)
}
}
return(dataFrame)
}
plot.TrialDesignSet <- function(x, y, ..., type = 1L, main = NA_character_,
xlab = NA_character_, ylab = NA_character_, palette = "Set1",
theta = seq(-1, 1, 0.02), nMax = NA_integer_, plotPointsEnabled = NA,
legendPosition = NA_integer_, showSource = FALSE,
grid = 1, plotSettings = NULL) {
fCall = match.call(expand.dots = FALSE)
designSetName <- deparse(fCall$x)
.assertIsSingleInteger(grid, "grid", validateType = FALSE)
typeNumbers <- .getPlotTypeNumber(type, x)
if (is.null(plotSettings)) {
plotSettings <- .getGridPlotSettings(x, typeNumbers, grid)
}
p <- NULL
plotList <- list()
for (typeNumber in typeNumbers) {
p <- .plotTrialDesignSet(x = x, y = y, type = typeNumber, main = main,
xlab = xlab, ylab = ylab, palette = palette,
theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled,
legendPosition = .getGridLegendPosition(legendPosition, typeNumbers, grid),
showSource = showSource, designSetName = designSetName,
plotSettings = plotSettings, ...)
.printPlotShowSourceSeparator(showSource, typeNumber, typeNumbers)
if (length(typeNumbers) > 1) {
caption <- .getPlotCaption(x, typeNumber, stopIfNotFound = TRUE)
plotList[[caption]] <- p
}
}
if (length(typeNumbers) == 1) {
return(p)
}
return(.createPlotResultObject(plotList, grid))
}
.plotTrialDesignSet <- function(..., x, y, type = 1L, main = NA_character_,
xlab = NA_character_, ylab = NA_character_, palette = "Set1",
theta = seq(-1, 1, 0.02), nMax = NA_integer_, plotPointsEnabled = NA,
legendPosition = NA_integer_, showSource = FALSE,
designSetName = NA_character_, plotSettings = NULL) {
.assertGgplotIsInstalled()
if (!is.call(main) && !isS4(main)) {
.assertIsSingleCharacter(main, "main", naAllowed = TRUE)
}
.assertIsSingleCharacter(xlab, "xlab", naAllowed = TRUE)
.assertIsSingleCharacter(ylab, "ylab", naAllowed = TRUE)
.assertIsSingleCharacter(palette, "palette", naAllowed = TRUE)
theta <- .assertIsValidThetaRange(thetaRange = theta)
.assertIsSingleNumber(nMax, "nMax", naAllowed = TRUE)
.assertIsSingleLogical(plotPointsEnabled, "plotPointsEnabled", naAllowed = TRUE)
.assertIsValidLegendPosition(legendPosition)
.assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE)
parameterSet <- x
designMaster <- parameterSet$getDesignMaster()
.assertIsTrialDesign(designMaster)
if (type == 1) {
main <- if (!is.call(main) && !isS4(main) && is.na(main)) "Boundaries" else main
xParameterName <- "informationRates"
yParameterNames <- c("criticalValues")
if (designMaster$sided == 1 || (.isTrialDesignInverseNormalOrGroupSequential(designMaster) &&
designMaster$typeOfDesign == C_TYPE_OF_DESIGN_PT)) {
if (.isTrialDesignWithValidFutilityBounds(designMaster)) {
yParameterNames <- c("futilityBounds", "criticalValues")
}
if (.isTrialDesignWithValidAlpha0Vec(designMaster)) {
yParameterNames <- c("alpha0Vec", "criticalValues")
}
}
}
else if (type == 2) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "designs with undefined endpoint do not support plot type 2")
}
else if (type == 3) {
main <- if (!is.call(main) && !isS4(main) && is.na(main)) "Stage Levels" else main
xParameterName <- "informationRates"
yParameterNames <- "stageLevels"
}
else if (type == 4) {
main <- if (!is.call(main) && !isS4(main) && is.na(main)) "Error Spending" else main
xParameterName <- "informationRates"
yParameterNames <- c("alphaSpent")
if (!.isTrialDesignFisher(designMaster) &&
designMaster$typeBetaSpending != C_TYPE_OF_DESIGN_BS_NONE) {
yParameterNames <- c(yParameterNames, "betaSpent")
palette <- "Paired"
}
plotPointsEnabled <- ifelse(is.na(plotPointsEnabled), FALSE, plotPointsEnabled)
}
else if (type == 5) {
if (!is.call(main) && !isS4(main) && is.na(main)) {
main <- PlotSubTitleItems(title = "Power and Early Stopping")
main$add("N", nMax, "max")
}
xParameterName <- "theta"
yParameterNames <- c("overallEarlyStop", "calculatedPower")
}
else if (type == 6) {
if (!is.call(main) && !isS4(main) && is.na(main)) {
main <- PlotSubTitleItems(title = "Average Sample Size and Power / Early Stop")
main$add("N", nMax, "max")
}
xParameterName <- "theta"
yParameterNames <- c("averageSampleNumber", "overallEarlyStop", "calculatedPower")
}
else if (type == 7) {
if (!is.call(main) && !isS4(main) && is.na(main)) {
main <- PlotSubTitleItems(title = "Power")
main$add("N", nMax, "max")
}
xParameterName <- "theta"
yParameterNames <- "calculatedPower"
}
else if (type == 8) {
if (!is.call(main) && !isS4(main) && is.na(main)) {
main <- PlotSubTitleItems(title = "Early Stopping")
main$add("N", nMax, "max")
}
xParameterName <- "theta"
yParameterNames <- "overallEarlyStop"
}
else if (type == 9) {
if (!is.call(main) && !isS4(main) && is.na(main)) {
main <- PlotSubTitleItems(title = "Average Sample Size")
main$add("N", nMax, "max")
}
xParameterName <- "theta"
yParameterNames <- "averageSampleNumber"
}
else {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, ..., 9")
}
if (type >= 5 && type <= 9) {
designSetName <- paste0("getPowerAndAverageSampleNumber(", designSetName,
", theta = ", .reconstructSequenceCommand(theta), ", nMax = ", nMax, ")")
}
xValues <- NA_real_
if (xParameterName == "theta") {
xValues <- theta
}
srcCmd <- .showPlotSourceInformation(objectName = designSetName,
xParameterName = xParameterName,
yParameterNames = yParameterNames,
nMax = nMax,
type = type,
showSource = showSource,
xValues = xValues)
if (!is.null(srcCmd)) {
if (.isSpecialPlotShowSourceArgument(showSource)) {
return(invisible(srcCmd))
}
return(srcCmd)
}
return(.plotParameterSet(parameterSet = parameterSet, designMaster = designMaster,
xParameterName = xParameterName,
yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab,
palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled,
legendPosition = legendPosition, plotSettings = plotSettings, ...))
} |
calc_predictive <- function(y, n, p0, N,
direction = "greater", delta = NULL,
prior = c(0.5, 0.5), S = 5000,
theta = 0.95) {
if (length(y) != length(n))
stop("y and n must be the same length")
if ((is.null(p0) & is.null(delta)) | (!is.null(p0) & !is.null(delta)))
stop("Exactly one of delta or p0 must be specified for the two-sample and
one-sample case, respectively")
if (!direction %in% c("greater", "less"))
stop('direction must be either "greater" or "less"')
if (length(y) == 1 & is.null(p0))
stop("p0 must be specified for the one-sample case")
if (length(y) == 2 & is.null(delta))
stop("delta must be specified for the two-sample case")
if (length(y) != length(N))
stop("y and N must be the same length")
if (length(y) == 2) {
rb0 <- rbeta(S, prior[1] + y[1], prior[2] + n[1] - y[1])
rb1 <- rbeta(S, prior[1] + y[2], prior[2] + n[2] - y[2])
if (n[1] < N[1]) {
Y0 <- y[1] + map_dbl(rb0, rbinom, n = 1, size = N[1] - n[1])
} else {
Y0 <- rep(y[1], S)
N[1] <- n[1]
}
if (n[2] < N[2]) {
Y1 <- y[2] + map_dbl(rb1, rbinom, n = 1, size = N[2] - n[2])
} else {
Y1 <- rep(y[2], S)
N[2] <- n[2]
}
post <- map2_dbl(Y0, Y1, ~ calc_posterior(
y = c(.x, .y), n = N, direction = direction, p0 = p0, delta = delta,
prior = prior, S = S
))
} else if (length(y) == 1) {
rb1 <- rbeta(S, prior[1] + y, prior[2] + n - y)
if (n < N) {
Y <- y + map_dbl(rb1, rbinom, n = 1, size = N - n)
} else {
Y <- rep(y, S)
N <- n
}
post <- map_dbl(Y, calc_posterior,
n = N,
direction = direction, p0 = p0,
delta = delta, prior = prior, S = S
)
}
return(mean(post > theta))
} |
makeRLearner.regr.kmforrester = function() {
makeRLearnerRegr(
cl = "regr.kmforrester",
package = "DiceKriging",
par.set = makeParamSet(
makeDiscreteLearnerParam(id = "covtype", default = "matern5_2",
values = list("gauss", "matern5_2", "matern3_2", "exp", "powexp")),
makeNumericVectorLearnerParam(id = "noise.var"),
makeDiscreteLearnerParam(id = "optim.method", default = "BFGS",
values = list("BFGS", "gen")),
makeNumericVectorLearnerParam(id = "lower"),
makeNumericVectorLearnerParam(id = "upper"),
makeUntypedLearnerParam(id = "control")
),
properties = c("numerics", "se"),
name = "Kriging-Reinterpolation",
short.name = "kmforrester",
note = "In predict, we currently always use type = 'SK'."
)
}
trainLearner.regr.kmforrester = function(.learner, .task, .subset, ...) {
d = getTaskData(.task, .subset, target.extra = TRUE)
m = DiceKriging::km(design = d$data, response = d$target, nugget.estim = TRUE, ...)
p = predict(m, d$data, type = "SK")$mean
m = DiceKriging::km(design = d$data, response = p, nugget.estim = FALSE,
coef.trend = [email protected], coef.var = m@covariance@sd2, coef.cov = m@[email protected])
return(m)
}
predictLearner.regr.kmforrester = function(.learner, .model, .newdata, ...) {
se = (.learner$predict.type != "response")
p = predict(.model$learner.model, newdata = .newdata, type = "SK", se.compute = se)
if(!se)
return(p$mean)
else
cbind(p$mean, p$sd)
} |
library(stringr)
library(tableschema.r)
library(testthat)
library(foreach)
library(config)
context("types.castArray")
TESTS <- list(
list('default', list(), list()),
list('default', "[]", list()),
list('default', list('key', 'value'), list('key', 'value')),
list('default', '["key", "value"]', list('key', 'value')),
list('default', 'string', config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r"))),
list('default', 1, config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r"))),
list('default', '3.14', config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r"))),
list('default', '', config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r")))
)
foreach(j = seq_along(TESTS) ) %do% {
TESTS[[j]] <- setNames(TESTS[[j]], c("format", "value", "result"))
test_that(str_interp('format "${TESTS[[j]]$format}" should check "${TESTS[[j]]$value}" as "${TESTS[[j]]$result}"'), {
expect_equal(types.castArray(TESTS[[j]]$format, TESTS[[j]]$value), TESTS[[j]]$result)
})
} |
.AIC=function(mse,df,n)
{
return(log(mse) + df*2/n)
}
.BIC=function(mse,df,n)
{
return(log(mse) + df*log(n)/n)
}
.IC=function(mse,df,n,Method="BIC")
{
if(Method == "BIC")
{
return(.BIC(mse,df,n))
}
else
{
return(.AIC(mse,df,n))
}
}
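# Usage sketch (assumed values): the information criteria above are
# log(mse) + 2*df/n (AIC) and log(mse) + df*log(n)/n (BIC).
.IC(mse = 0.5, df = 3, n = 100, Method = "BIC")  # log(0.5) + 3*log(100)/100 ~ -0.555
.IC(mse = 0.5, df = 3, n = 100, Method = "AIC")  # log(0.5) + 6/100         ~ -0.633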
plsim.lam = function(...)
{
args = list(...)
if (is(args[[1]],"formula"))
UseMethod("plsim.lam",args[[1]])
else
UseMethod("plsim.lam")
}
plsim.lam.formula = function(formula,data,...)
{
mf = match.call(expand.dots = FALSE)
m = match(c("formula","data"),
names(mf), nomatch = 0)
mf = mf[c(1,m)]
mf.xf = mf
mf[[1]] = as.name("model.frame")
mf.xf[[1]] = as.name("model.frame")
chromoly = deal_formula(mf[["formula"]])
if (length(chromoly) != 3)
stop("invoked with improper formula, please see plsim.lam documentation for proper use")
bronze = lapply(chromoly, paste, collapse = " + ")
mf.xf[["formula"]] = as.formula(paste(" ~ ", bronze[[2]]),
env = environment(formula))
mf[["formula"]] = as.formula(paste(bronze[[1]]," ~ ", bronze[[3]]),
env = environment(formula))
formula.all = terms(as.formula(paste(" ~ ",bronze[[1]]," + ",bronze[[2]], " + ",bronze[[3]]),
env = environment(formula)))
orig.class = if (missing(data))
sapply(eval(attr(formula.all, "variables"), environment(formula.all)),class)
else sapply(eval(attr(formula.all, "variables"), data, environment(formula.all)),class)
arguments.mfx = chromoly[[2]]
arguments.mf = c(chromoly[[1]],chromoly[[3]])
mf[["formula"]] = terms(mf[["formula"]])
mf.xf[["formula"]] = terms(mf.xf[["formula"]])
if(all(orig.class == "ts")){
arguments = (as.list(attr(formula.all, "variables"))[-1])
attr(mf[["formula"]], "predvars") = bquote(.(as.call(c(quote(as.data.frame),as.call(c(quote(ts.intersect), arguments)))))[,.(match(arguments.mf,arguments)),drop = FALSE])
attr(mf.xf[["formula"]], "predvars") = bquote(.(as.call(c(quote(as.data.frame),as.call(c(quote(ts.intersect), arguments)))))[,.(match(arguments.mfx,arguments)),drop = FALSE])
}else if(any(orig.class == "ts")){
arguments = (as.list(attr(formula.all, "variables"))[-1])
arguments.normal = arguments[which(orig.class != "ts")]
arguments.timeseries = arguments[which(orig.class == "ts")]
ix = sort(c(which(orig.class == "ts"),which(orig.class != "ts")),index.return = TRUE)$ix
attr(mf[["formula"]], "predvars") = bquote((.(as.call(c(quote(cbind),as.call(c(quote(as.data.frame),as.call(c(quote(ts.intersect), arguments.timeseries)))),arguments.normal,check.rows = TRUE)))[,.(ix)])[,.(match(arguments.mf,arguments)),drop = FALSE])
attr(mf.xf[["formula"]], "predvars") = bquote((.(as.call(c(quote(cbind),as.call(c(quote(as.data.frame),as.call(c(quote(ts.intersect), arguments.timeseries)))),arguments.normal,check.rows = TRUE)))[,.(ix)])[,.(match(arguments.mfx,arguments)),drop = FALSE])
}
mf = tryCatch({
eval(mf,parent.frame())
},error = function(e){
NULL
})
mf.xf = tryCatch({
eval(mf.xf,parent.frame())
},error = function(e){
NULL
})
if(is.null(mf)){
cat( blue$bold("\n Z (")
%+% black$bold("z")
%+% blue$bold(") should not be NULL.\n")
%+% blue$bold(" If Z is NULL, please use a linear model such as the ")
%+% black$bold("lm() ")
%+% blue$bold("function. \n\n")
)
return(NULL)
}
else{
ydat = model.response(mf)
}
xdat = mf.xf
zdat = mf[, chromoly[[3]], drop = FALSE]
ydat = data.matrix(ydat)
if(!is.null(xdat) & is.null(dim(xdat[,1]))){
xdat = data.matrix(xdat)
}
else if(!is.null(dim(xdat[,1]))){
xdat = xdat[,1]
}
if(is.null(dim(zdat[,1]))){
zdat = data.matrix(zdat)
}
else{
zdat = zdat[,1]
}
result = plsim.lam(xdat = xdat, ydat = ydat, zdat = zdat, ...)
return(result)
}
plsim.lam.default = function(xdat=NULL,ydat,zdat,h,zetaini=NULL,penalty="SCAD",lambdaList=NULL,
l1_ratio_List=NULL,lambda_selector="BIC",verbose=TRUE,seed=0,...)
{
data = list(x=xdat,y=ydat,z=zdat)
x = data$x
y = data$y
z = data$z
if ( is.null( .assertion_for_variables(data)) ) return(NULL)
if(is.data.frame(x))
x = data.matrix(x)
if(is.data.frame(z))
z = data.matrix(z)
if(is.data.frame(y))
y = data.matrix(y)
if(is.null(zetaini))
{
zetaini = plsim.ini(x, z, y, verbose = verbose)
}
if(is.null(h))
{
cat("Please input a value for the bandwidth (h).")
return(NULL)
}
n = nrow(y)
if(is.null(x))
{
dx = 0
}
else
{
dx = ncol(x)
}
dz = ncol(z)
a = zetaini[1:dz]
if(is.null(x))
{
b = zetaini[1:dz]
}
else
{
b = zetaini[(dz+1):(dz+dx)]
}
if(is.null(x))
{
x_tmp = matrix()
etandder_result = .etandder(a,b,h,y,z,x_tmp,0)
}
else
{
etandder_result = .etandder(a,b,h,y,z,x)
}
eta = etandder_result$eta
if(is.null(x))
{
r = y - eta
}
else
{
r = y - eta - x%*%b
}
sigma = sqrt(sum(r^2)/n)
if(is.null(lambdaList))
{
lambdaList = sigma * seq(0.1/sqrt(n),2*sqrt(log(n)/n),length=30)
lambdaList = c(0, lambdaList)
}
if(is.null(l1_ratio_List))
{
l1_ratio_List = seq(0,1,length=11)
}
if(penalty != "ElasticNet")
{
goodness = matrix(0,length(lambdaList),1)
for(k in 1:length(lambdaList))
{
plsim_result = plsim.vs.soft(x,z,y,h,zetaini,lambdaList[k],NULL,1,penalty,verbose,seed=seed)
if(is.null(plsim_result))
return(NULL)
beta = plsim_result$zeta
mse = plsim_result$mse
df = sum(beta!=0)
goodness[k] = .IC(mse,df,n,lambda_selector)
if(verbose)
{
cat(paste(black$bold("\n lambda"),lambdaList[k],sep = "="))
cat("\n ")
cat(paste(black$bold(lambda_selector),goodness[k],sep = "="))
cat("\n\n")
}
}
goodness_min = min(goodness)
index_selectorMin = which.min(goodness)
lambda_best = lambdaList[index_selectorMin]
result = list()
result$goodness_best = goodness_min
result$lambda_best = lambda_best
result$lambdaList = lambdaList
}
else
{
goodness = matrix(0,length(lambdaList),length(l1_ratio_List))
cat(paste( cat(blue$bold("\n Select ")
%+%black$bold("lambda")
%+% blue$bold(" and ")
%+%black$bold("l1_ratio")
%+%blue$bold(" for penalized plsim according to")),
black$bold(lambda_selector), sep = " "))
cat("\n")
for(k in 1:length(lambdaList))
{
for(t in 1:length(l1_ratio_List))
{
plsim_result = plsim.vs.soft(x,z,y,h,zetaini,lambdaList[k],l1_ratio_List[t],1,penalty,verbose,seed=seed)
beta = plsim_result$zeta
mse = plsim_result$mse
df = sum(beta!=0)
goodness[k,t] = .IC(mse,df,n,lambda_selector)
if(verbose)
{
cat(paste(black$bold("\n lambda"),lambdaList[k],sep = "="))
cat(paste(black$bold("\n l1_ratio"),l1_ratio_List[t],sep = "="))
cat("\n ")
cat(paste(black$bold(lambda_selector),goodness[k,t],sep = "="))
cat("\n\n")
}
}
}
goodness_min = min(goodness)
index_selectorMin = arrayInd( which.min(goodness) ,dim(goodness),dimnames(goodness))
lambda_best = lambdaList[index_selectorMin[1]]
l1_ratio_best = l1_ratio_List[index_selectorMin[2]]
result = list()
result$goodness_best = goodness_min
result$lambda_best = lambda_best
result$l1_ratio_best = l1_ratio_best
result$lambdaList = lambdaList
class(result) = "lamsel"
}
return(result)
} |
wh.predict <- function(x)
{
M1 <- 30269
M2 <- 30307
M3 <- 30323
y <- round(M1*M2*M3*x)
s1 <- y %% M1
s2 <- y %% M2
s3 <- y %% M3
s1 <- (171*26478*s1) %% M1
s2 <- (172*26070*s2) %% M2
s3 <- (170*8037*s3) %% M3
(s1/M1 + s2/M2 + s3/M3) %% 1
}
RNGkind("Wichmann-Hill")
xnew <- runif(1)
maxerr <- 0
for (i in 1:1000) {
xold <- xnew
xnew <- runif(1)
err <- abs(wh.predict(xold) - xnew)
maxerr <- max(err, maxerr)
}
print(maxerr)
library(randtoolbox)
options(width = 40)
congruRand(10)
options( width =40)
setSeed(1)
congruRand(10)
options( width =40)
setSeed(1)
congruRand(10, echo=TRUE)
options( width =40)
setSeed(1614852353)
congruRand(5, echo=TRUE)
options( width =30)
setSeed(12)
congruRand(5, mod = 2^8, mult = 25, incr = 16, echo= TRUE)
options( width =40)
SFMT(10)
SFMT(5, 2)
options( width =40)
SFMT(10, mexp = 607)
options( width =40)
halton(10)
halton(10, 2)
options( width =40)
halton(5)
halton(5, init=FALSE)
options( width =40)
sobol(10)
sobol(10, scramb=3)
par(mfrow = c(2,1))
plot(sobol(1000, 2), xlab ="u", ylab="v", main="Sobol (no scrambling)")
plot(sobol(10^3, 2, scram=1), xlab ="u", ylab="v", main="Sobol (Owen)")
options( width =40)
torus(10)
options( width =40)
torus(5, use =TRUE)
options( width =40)
torus(5, p =7)
options( width =40)
torus(5, mixed =TRUE)
par(mfrow = c(2,1))
acf(torus(10^5))
acf(torus(10^5, mix=TRUE))
par(mfrow = c(2,1))
plot(SFMT(1000, 2), xlab ="u", ylab="v", main="SFMT")
plot(torus(1000, 2), xlab ="u", ylab="v", main="Torus")
par(mfrow = c(2,1))
plot(WELL(1000, 2), xlab ="u", ylab="v", main="WELL 512a")
plot(sobol(10^3, 2, scram=2), xlab ="u", ylab="v", main="Sobol (Faure-Tezuka)")
options( width =40)
I25 <- -1356914
nb <- c(1200, 14500, 214000)
ans <- NULL
for(i in 1:3)
{
tij <- sobol(nb[i], dim=25, scramb=2, norm=TRUE )
Icos <- mean(cos(sqrt( apply( tij^2/2, 1, sum ) ))) * pi^(25/2)
ans <- rbind(ans, c(n=nb[i], I25=Icos, Delta=(Icos-I25)/I25 ))
}
data.frame(ans)
par(mfrow = c(2,1))
hist(SFMT(10^3), 100)
hist(torus(10^3), 100)
options( width =40)
res <- gap.test(runif(1000), echo=FALSE)
stat <- res$statistic
pvalue <- res$p.value
df <- res$parameter
obsnum <- res$observed
expnum <- res$expected
cat("\n\t Gap test\n")
cat("\nchisq stat = ", stat, ", df = ",df, "\n, p-value = ", pvalue, "\n", sep="")
cat("\n(sample size : ",1000,")\n\n", sep="")
cat("length observed freq theoretical freq\n")
for(i in 1:(df+1))
cat(i,"\t", obsnum[i],"\t", expnum[i],"\n")
options( width =40)
res <- order.test(runif(4000), d=4, echo=FALSE)
stat <- res$statistic
pvalue <- res$p.value
df <- res$parameter
obsnum <- res$observed
expnum <- res$expected
cat("\n\t Order test\n")
cat("\nchisq stat = ", stat, ", df = ",df, "\n, p-value = ", pvalue, "\n", sep="")
cat("\n (sample size : ", 1000,")\n\n", sep="")
cat("observed number ",obsnum[1:6],"\n",obsnum[7:18],"\n", obsnum[19:24],"\n")
cat("expected number ",expnum,"\n")
options( width =40)
res <- freq.test(runif(1000), 1:4, echo=FALSE)
stat <- res$statistic
pvalue <- res$p.value
df <- res$parameter
obsnum <- res$observed
expnum <- res$expected
cat("\n\t Frequency test\n")
cat("\nchisq stat = ", stat, ", df = ",df, "\n, p-value = ", pvalue, "\n", sep="")
cat("\n (sample size : ",1000,")\n\n", sep="")
cat("observed number ",obsnum,"\n")
cat("expected number ",expnum,"\n")
options( width =40)
res <- serial.test(runif(3000), 3, echo=FALSE)
stat <- res$statistic
pvalue <- res$p.value
df <- res$parameter
obsnum <- res$observed
expnum <- res$expected
cat("\n\t Serial test\n")
cat("\nchisq stat = ", stat, ", df = ",df, "\n, p-value = ", pvalue, "\n", sep="")
cat("\n (sample size : ",3000,")\n\n", sep="")
cat("observed number ",obsnum[1:4],"\n", obsnum[5:9])
cat("expected number ",expnum,"\n")
options( width =40)
res <- coll.test(runif, 2^7, 2^10, 1, echo=FALSE)
stat <- res$statistic
pvalue <- res$p.value
df <- res$parameter
obsnum <- res$observed
expnum <- res$expected
cat("\n\t Collision test\n")
cat("\nchisq stat = ", stat, ", df = ", df, "\n, p-value = ", pvalue, "\n", sep="")
cat("\n exact distribution \n(sample number : ", 1000,"/sample size : ", 128,"\n / cell number : ", 1024,")\n\n", sep="")
cat("collision observed expected\n", "number count count\n", sep="")
for(i in 1:(df + 1) )
cat(" ", i," ", obsnum[i]," ", expnum[i],"\n")
options( width =40)
res <- coll.test(congruRand, 2^8, 2^14, 1, echo=FALSE)
stat <- res$statistic
pvalue <- res$p.value
df <- res$parameter
obsnum <- res$observed
expnum <- res$expected
cat("\n\t Collision test\n")
cat("\nchisq stat = ", stat, ", df = ", df, "\n, p-value = ", pvalue, "\n", sep="")
cat("\n Poisson approximation \n(sample number : ", 1000,"/sample size : ", 256,"\n / cell number : ", 16384,")\n\n", sep="")
cat("collision observed expected\n", "number count count\n", sep="")
for(i in 1:(df + 1) )
cat(" ", i-1," ", obsnum[i]," ", expnum[i],"\n")
options( width =40)
res <- poker.test(SFMT(10000), echo=FALSE)
stat <- res$statistic
pvalue <- res$p.value
df <- res$parameter
obsnum <- res$observed
expnum <- res$expected
cat("\n\t Poker test\n")
cat("\nchisq stat = ", stat, ", df = ", df, "\n, p-value = ", pvalue, "\n", sep="")
cat("\n (sample size : ", 10000,")\n\n", sep="")
cat("observed number ", obsnum,"\n")
cat("expected number ", expnum,"\n")
wh.predict <- function(x)
{
M1 <- 30269
M2 <- 30307
M3 <- 30323
y <- round(M1*M2*M3*x)
s1 <- y %% M1
s2 <- y %% M2
s3 <- y %% M3
s1 <- (171*26478*s1) %% M1
s2 <- (172*26070*s2) %% M2
s3 <- (170*8037*s3) %% M3
(s1/M1 + s2/M2 + s3/M3) %% 1
}
RNGkind("Wichmann-Hill")
xnew <- runif(1)
err <- 0
for (i in 1:1000)
{
xold <- xnew
xnew <- runif(1)
err <- max(err, abs(wh.predict(xold)-xnew))
}
print(err) |
"data.DF2011" |
approxTime1 <- function (x, xout, rule = 1) {
if (!is.matrix(x)) x <- as.matrix(x)
if ((!is.numeric(xout)) | (length(xout) != 1))
stop("xout must be a scalar numeric value")
if ((!is.numeric(rule)) | (length(rule) != 1))
stop("rule must be a scalar numeric value")
n <- nrow(x)
if (xout >= x[n, 1]) {
y <- c(xout, x[n, -1])
if (rule == 1 & (xout > x[n, 1]))  # compare against the last time value, not a linear index
y[2:length(y)] <- NA
}
else if (xout <= x[1, 1]) {
y <- c(xout, x[1, -1])
if (rule == 1 & (xout < x[1, 1]))
y[2:length(y)] <- NA
}
else {
i <- which.max(x[, 1] > xout)
x1 <- x[i - 1, 1]
x2 <- x[i, 1]
y1 <- x[i - 1, ]
y2 <- x[i, ]
y <- y1 + (y2 - y1) * (xout - x1)/(x2 - x1)
}
names(y) <- dimnames(x)[[2]]
y
} |
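# Illustrative use of approxTime1() above (values made up for this sketch):
# interpolate one row of a small time/state matrix at an intermediate time and
# show the effect of 'rule' outside the observed time range.
tab.demo <- cbind(time = 1:5, A = c(10, 20, 30, 40, 50), B = c(1, 4, 9, 16, 25))
approxTime1(tab.demo, xout = 2.5)            # linear interpolation between rows 2 and 3
approxTime1(tab.demo, xout = 7, rule = 1)    # outside the range: state columns set to NA
approxTime1(tab.demo, xout = 7, rule = 2)    # outside the range: last row carried forward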
plot_borders <- function(data, window_length, skip_window, test01_res, chaos_borders_final, reg_borders_final) {
test01_res <- test01_res$test01_res
plot(seq(1, length(data) - window_length, skip_window) + window_length/2, test01_res, col = "green", xlab = "",
ylim = c(-0.1, 1))
lines(data, col = "red")
lines(seq(1, length(data) - window_length, skip_window) + window_length/2, test01_res, col = "green")
if (length(chaos_borders_final[[1]]) > 0) {
points(chaos_borders_final[[1]], matrix(1, length(chaos_borders_final[[1]]), 1), col = "blue", pch = 16)
points(chaos_borders_final[[2]], matrix(1, length(chaos_borders_final[[2]]), 1), col = "blue", pch = 16)
}
if (length(reg_borders_final[[1]]) > 0) {
points(reg_borders_final[[1]], matrix(0, length(reg_borders_final[[1]]), 1), col = "blue", pch = 16)
points(reg_borders_final[[2]], matrix(0, length(reg_borders_final[[2]]), 1), col = "blue", pch = 16)
}
} |
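# Hedged, self-contained sketch of calling plot_borders() above. The input shapes
# (a list holding 'test01_res' plus two two-element border lists) are inferred from
# the function body; the signal and 0-1 test values below are purely synthetic.
demo.data <- (sin(seq(0, 20 * pi, length.out = 2000)) + 1) / 2
demo.window <- 200
demo.skip <- 50
demo.centers <- seq(1, length(demo.data) - demo.window, demo.skip) + demo.window / 2
demo.test01 <- list(test01_res = runif(length(demo.centers)))   # stand-in 0-1 test statistics
plot_borders(demo.data, demo.window, demo.skip, demo.test01,
             chaos_borders_final = list(integer(0), integer(0)),
             reg_borders_final   = list(integer(0), integer(0)))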
panel_node_events <- function(data = NULL,
legend = data$config$node_events$legend,
base_size = data$config$base_size,
expand_x = data$config$expand,
x_start = data$config$limits$start,
x_end = data$config$limits$end) {
starvz_check_data(data, tables = list("Events" = c("Value")))
if (is.null(legend) || !is.logical(legend)) {
legend <- TRUE
}
if (is.null(base_size) || !is.numeric(base_size)) {
base_size <- 22
}
if (is.null(expand_x) || !is.numeric(expand_x)) {
expand_x <- 0.05
}
if (is.null(x_start) || (!is.na(x_start) && !is.numeric(x_start))) {
x_start <- NA
}
if (is.null(x_end) || (!is.na(x_end) && !is.numeric(x_end))) {
x_end <- NA
}
data$Events %>% filter(.data$Type == "program event type") -> program_events
program_events %>%
group_by(.data$Container) %>%
filter(.data$Value == "fxt_start_flush" | .data$Value == "fxt_stop_flush") %>%
mutate(Last_Value = lag(.data$Value), Last_Start = lag(.data$Start)) %>%
filter(.data$Value == "fxt_stop_flush") %>%
mutate(Name = "fxt_flush") -> matched_fxt_flush_events
matched_fxt_flush_events %>%
filter(.data$Value == .data$Last_Value) %>%
nrow() -> is_wrong
if (is_wrong > 0) {
starvz_warn("Something wrong matching fxt flush events")
}
matched_fxt_flush_events %>%
select(.data$Container) %>%
distinct() %>%
arrange(.data$Container) %>%
mutate(Id = as.numeric(.data$Container) + 0.5) -> cont_all_names
cont_all_names %>% .$Container -> cont_names
cont_all_names %>% .$Id -> cont_breaks
matched_fxt_flush_events %>%
mutate(
y = as.numeric(.data$Container) + 0.1,
ymax = .data$y + 0.9
) %>%
ggplot(aes(
ymin = .data$y, ymax = .data$ymax,
xmin = .data$Start, xmax = .data$Last_Start,
fill = .data$Name
)) +
default_theme(base_size, expand_x) +
geom_rect() +
scale_y_continuous(breaks = cont_breaks, labels = cont_names, expand = c(expand_x, 0)) +
ylab("Events") +
coord_cartesian(
xlim = c(x_start, x_end)
) -> panel
if (!legend) {
panel <- panel + theme(legend.position = "none")
} else {
panel <- panel + theme(legend.position = "top")
}
return(panel)
} |
anisotropyT <- function(id, ...) {
.args <- list(...)
do.call(paste0('anisotropyT', id), args = .args)
}
anisotropyT2 <- function(id, arglist) {
do.call(paste0('anisotropyT', id), args = arglist)
}
anisotropyTaffine <- function(spacepoints, phi1, phi2, phi12, theta) {
The <- matrix(c(cos(theta), sin(theta), -sin(theta), cos(theta)), ncol = 2)
Phi <- matrix(c(phi1, phi12, phi12, phi2), ncol = 2)
return( t(Phi %*% The %*% t(spacepoints)) )
}
anisotropyTswirl <- function(spacepoints, x0, y0, b, alpha) {
x <- spacepoints[,1]
y <- spacepoints[,2]
r <- sqrt((x - x0)^2 + (y - y0)^2)
The <- alpha * exp(-(r / b)^2)
xani <- (x - x0) * cos(The) - (y - y0) * sin(The) + x0
yani <- (x - x0) * sin(The) + (y - y0) * cos(The) + y0
return(cbind(xani, yani))
}
anisotropyTwave <- function(spacepoints, phi1, phi2, beta, theta) {
x <- spacepoints[,1]
y <- spacepoints[,2]
xani <- phi1 * x * cos(theta) - y * sin(theta)
yani <- x * sin(theta) + phi1 * y * cos(theta) + beta * sin(x)
return(cbind(xani, yani))
} |
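# Small usage sketch for the anisotropy transformations defined above; all parameter
# values are arbitrary and chosen only to make the deformation visible.
pts.demo <- as.matrix(expand.grid(x = seq(-1, 1, by = 0.25), y = seq(-1, 1, by = 0.25)))
aff.demo <- anisotropyT("affine", spacepoints = pts.demo,
                        phi1 = 1.5, phi2 = 1, phi12 = 0, theta = pi / 6)
swl.demo <- anisotropyT("swirl", spacepoints = pts.demo,
                        x0 = 0, y0 = 0, b = 1, alpha = pi / 4)
plot(pts.demo, asp = 1, pch = 16, cex = 0.5)
points(aff.demo, col = "red", pch = 16, cex = 0.5)
points(swl.demo, col = "blue", pch = 16, cex = 0.5)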
epoce <- function(fit, pred.times, newdata = NULL, newdata.Longi = NULL){
if (missing(fit)) stop("The argument fit must be specified")
if (class(fit)!="jointPenal" & class(fit)!="longiPenal" & class(fit)!="trivPenal" & class(fit)!="trivPenalNL") stop("The argument fit must be a class 'jointPenal', 'longiPenal' or 'trivPenal'")
if (missing(pred.times)) stop("The argument pred.times must be specified")
if (class(pred.times)!="numeric") stop("pred.times must contain numerical values")
if(!missing(newdata) & (class(newdata)!="data.frame")) stop("The argument newdata must be a 'data.frame'")
if(!missing(newdata.Longi) & (class(newdata.Longi)!="data.frame")) stop("The argument newdata must be a 'data.frame'")
if(class(fit)== "jointPenal" & !missing(newdata.Longi))warning("The argument newdata.Longi is not required and thus ignored")
if(class(fit)== "longiPenal" & !missing(newdata.Longi) & missing(newdata))warning("For an object of class 'longiPenal' both datasets should be given")
if(class(fit)== "trivPenal" & !missing(newdata.Longi) & missing(newdata))warning("For an object of class 'trivPenal' both datasets should be given")
if(class(fit)== "trivPenalNL" & missing(newdata.Longi) & !missing(newdata))warning("For an object of class 'trivPenalNL' both datasets should be given")
nt <- length(pred.times)
vopt <- fit$varHtotal
b <- fit$b
np <- length(fit$b)
typeof <- fit$typeof
nva <- fit$nvar
if (typeof == 0){
nz <- fit$n.knots
zi <- fit$zi
}else{
nz <- 0
zi <- 0
}
if (typeof == 1){
nbintervR <- fit$nbintervR
nbintervDC <- fit$nbintervDC
ttt <- fit$time
tttdc <- fit$timedc
}else{
nbintervR <- 0
nbintervDC <- 0
ttt <- 0
tttdc <- 0
}
m <- fit$call
m0 <- match.call()
if (!missing(newdata)){
if (length(colnames(eval(m$data)))!=length(colnames(eval(m0$newdata)))) stop("Your new dataset must have the same number of columns as the dataset used in the 'fit'")
if (any(colnames(eval(m$data))!=colnames(eval(m0$newdata)))) stop("Your new dataset must have exactly the same variables as the dataset used in the 'fit'")
}
if (!missing(newdata.Longi)){
if (length(colnames(eval(m$data.Longi)))!=length(colnames(eval(m0$newdata.Longi)))) stop("Your new dataset for longitudinal data must have the same number of columns as the dataset used in the 'fit'")
if (any(colnames(eval(m$data.Longi))!=colnames(eval(m0$newdata.Longi)))) stop("Your new dataset for longitudinal data must have exactly the same variables as the dataset used in the 'fit'")
}
if (is.null(m$recurrentAG)) recurrentAG <- FALSE
else recurrentAG <- TRUE
if(class(fit)=="jointPenal" | class(fit)=="trivPenal" | class(fit) == "trivPenalNL"){
m$formula.LongitudinalData <- m$formula.terminalEvent <- m$recurrentAG <- m$data.Longi <- m$n.knots <- m$random <- m$link <- m$id <- m$kappa <- m$maxit <- m$hazard <- m$nb.int <- m$betaorder <- m$betaknots <- m$init.B <- m$LIMparam <- m$LIMlogl <- m$LIMderiv <- m$left.censoring <- m$print.times <- m$init.Random <- m$init.Eta <- m$init.Alpha <- m$method.GH <- m$intercept <- m$n.nodes <- m$biomarker <- m$formula.KG <- m$formula.KD <- m$dose <- m$time.biomarker <- m$BoxCox <- m$init.Biomarker <- m$... <- m$RandDist <- NULL
m[[1]] <- as.name("model.frame")
if (!missing(newdata)) m[[3]] <- as.name(m0$newdata)
dataset <- eval(m, sys.parent())
typeofY <- attr(model.extract(dataset, "response"),"type")
Y <- model.extract(dataset, "response")
if (typeofY=="right"){
tt0 <- rep(0,dim(dataset)[1])
tt1 <- Y[,1]
c <- Y[,2]
}else{
tt0 <- Y[,1]
tt1 <- Y[,2]
c <- Y[,3]
}
tt0 <- as.numeric(tt0)
tt1 <- as.numeric(tt1)
c <- as.numeric(c)
class(m$formula) <- "formula"
special <- c("strata", "cluster", "subcluster", "terminal", "num.id", "timedep")
Terms <- terms(m$formula, special)
m$formula <- Terms
dropx <- NULL
tempc <- untangle.specials(Terms, "cluster", 1:10)
cluster <- strata(dataset[, tempc$vars], shortlabel = TRUE)
dropx <- c(dropx,tempc$terms)
tempterm <- untangle.specials(Terms, "terminal", 1:10)
terminal <- strata(dataset[, tempterm$vars], shortlabel = TRUE)
terminal <- as.numeric(as.character(terminal))
dropx <- c(dropx,tempterm$terms)
if (!is.null(dropx)) newTerms <- Terms[-dropx]
else newTerms <- Terms
X <- model.matrix(newTerms, dataset)
if (ncol(X) > 1) X <- X[, -1, drop = FALSE]
nva1 <- ncol(X)
if (!missing(newdata)){
nobs <- nrow(newdata)
nsujet <- length(unique(cluster))
}else{
nobs <- fit$n
nsujet <- fit$groups
}
if (!recurrentAG){
tt0dc <- aggregate(tt1,by=list(cluster),FUN=sum)[,2]
tt1dc <- aggregate(tt1,by=list(cluster),FUN=sum)[,2]
}else{
tt0dc <- rep(0,nsujet)
tt1dc <- aggregate(tt1,by=list(cluster),FUN=function(x) x[length(x)])[,2]
}
cdc <- aggregate(terminal,by=list(cluster),FUN=function(x) x[length(x)])[,2]
m2 <- fit$call
m2$formula.LongitudinalData <- m2$n.knots <- m2$recurrentAG <- m2$cross.validation <- m2$kappa <- m2$maxit <- m2$hazard <- m2$nb.int1 <-m2$nb.int2 <- m2$RandDist <- m2$betaorder <- m2$betaknots <- m2$init.B <- m2$LIMparam <- m2$LIMlogl <- m2$LIMderiv <- m2$print.times <- m2$init.Theta <- m2$init.Alpha <- m2$Alpha <- m2$method.GH <- m2$intercept <- m2$init.Eta <- m2$data.Longi <- m2$init.Random <- m2$left.censoring <- m2$random <- m2$link <- m2$id <- m2$n.nodes <- m2$biomarker <- m2$formula.KG <- m2$formula.KD <- m2$dose <- m2$time.biomarker <- m2$BoxCox <- m2$init.Biomarker <- m2$... <- NULL
m2$formula[[3]] <- m2$formula.terminalEvent[[2]]
m2$formula.terminalEvent <- NULL
m2[[1]] <- as.name("model.frame")
if (!missing(newdata)) m2[[3]] <- as.name(m0$newdata)
datasetdc <- eval(m2, sys.parent())
class(m2$formula) <- "formula"
special2 <- c("strata", "timedep")
Terms2 <- terms(m2$formula, special2)
X2 <- model.matrix(Terms2, datasetdc)
if (ncol(X2) > 1) X2 <- X2[, -1, drop = FALSE]
nva2 <- ncol(X2)
if (!is.null(ncol(X2))){
Xdc <- aggregate(X2[,1],by=list(cluster), FUN=function(x) x[length(x)])[,2]
if (ncol(X2)>1){
for (i in 2:ncol(X2)){
Xdc.i <- aggregate(X2[,i],by=list(cluster), FUN=function(x) x[length(x)])[,2]
Xdc <- cbind(Xdc,Xdc.i)
}
}
}else{
Xdc <- aggregate(X2,by=list(cluster), FUN=function(x) x[length(x)])[,2]
}
if (!missing(newdata) & length(fit$coef[1:(fit$nvarEnd+fit$nvarRec)])!=(ncol(X)+ncol(X2))) stop("Different covariates in model and newdata. Verify your dataset, be careful to the factor variables.")
}
if(class(fit) == "trivPenal" | class(fit) == "longiPenal"){
m2 <- fit$call
m2$formula <- m2$formula.terminalEvent <- m2$data <- m2$random <- m2$id <- m2$link <- m2$n.knots <- m2$kappa <- m2$maxit <- m2$hazard <- m2$nb.int <- m2$betaorder <- m2$betaknots <- m2$init.B <- m2$LIMparam <- m2$LIMlogl <- m2$LIMderiv <- m2$print.times <- m2$left.censoring <- m2$init.Random <- m2$init.Eta <- m2$method.GH <- m2$intercept <- m2$... <- NULL
if (!missing(newdata.Longi)){m2[[3]] <- as.name(m0$newdata.Longi)
data.Longi <- newdata.Longi
}else{data.Longi <- eval(m2$data.Longi) }
special <- c("strata", "cluster", "subcluster", "terminal","num.id","timedep")
class(m2$formula.LongitudinalData) <- "formula"
TermsY <- terms(m2$formula.LongitudinalData, special, data = data.Longi)
llY <- attr(TermsY, "term.labels")
ord <- attr(TermsY, "order")
name.Y <- as.character(attr(TermsY, "variables")[[2]])
yy <- data.Longi[,which(names(data.Longi)==name.Y)]
ind.placeY <- which(llY%in%names(which(lapply(data.Longi[,which(names(data.Longi)%in%llY)],function(x) length(levels(x)))>2)))
vec.factorY <- NULL
vec.factorY <- c(vec.factorY,llY[ind.placeY])
mat.factorY <- matrix(vec.factorY,ncol=1,nrow=length(vec.factorY))
vec.factorY <-apply(mat.factorY,MARGIN=1,FUN=function(x){
if (length(grep("as.factor",x))>0){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
return(substr(x,start=pos1,stop=pos2))
}else{
return(x)
}})
ind.placeY <- grep(paste(vec.factorY,collapse="|"),llY)
if(is.factor(data.Longi[,names(data.Longi)==llY[1]]))X_L<- as.numeric(data.Longi[,names(data.Longi)==llY[1]])-1
else X_L<- data.Longi[,names(data.Longi)==llY[1]]
if(length(llY)>1){
for(i in 2:length(llY)){
if(is.factor(data.Longi[,names(data.Longi)==llY[i]]))X_L<- cbind(X_L,as.numeric(data.Longi[,names(data.Longi)==llY[i]])-1)
else X_L<- cbind(X_L,data.Longi[,names(data.Longi)==llY[i]])
}}
if(sum(ord)>length(ord)){
for(i in 1:length(ord)){
if(ord[i]>1){
v1 <- strsplit(as.character(llY[i]),":")[[1]][1]
v2 <- strsplit(as.character(llY[i]),":")[[1]][2]
if(is.factor(data.Longi[,names(data.Longi)==v1]) && length(levels(data.Longi[,names(data.Longi)==v1]))>2)stop("Interactions not allowed for factors with 3 or more levels (yet)")
if(is.factor(data.Longi[,names(data.Longi)==v2]) && length(levels(data.Longi[,names(data.Longi)==v2]))>2)stop("Interactions not allowed for factors with 3 or more levels (yet)")
if(is.factor(data.Longi[,names(data.Longi)==v1]) || !is.factor(data.Longi[,names(data.Longi)==v2])){
X_L <- cbind(X_L,(as.numeric(data.Longi[,names(data.Longi)==v1])-1)*data.Longi[,names(data.Longi)==v2])
llY[i]<-paste(llY[i],levels(data.Longi[,names(data.Longi)==v1])[2],sep="")
}else if(!is.factor(data.Longi[,names(data.Longi)==v1]) || is.factor(data.Longi[,names(data.Longi)==v2])){
X_L <- cbind(X_L,data.Longi[,names(data.Longi)==v1]*(as.numeric(data.Longi[,names(data.Longi)==v2])-1))
llY[i]<-paste(llY[i],levels(data.Longi[,names(data.Longi)==v2])[2],sep="")
}else{
X_L <- cbind(X_L,data.Longi[,names(data.Longi)==v1]*data.Longi[,names(data.Longi)==v2])
}
}
}
}
if(dim(X_L)[2]!=length(llY))stop("The variables in the longitudinal part must be in the data.Longi")
X_L <- as.data.frame(X_L)
names(X_L) <- llY
Intercept <- rep(1,dim(X_L)[1])
if(fit$intercept)X_L <- cbind(Intercept,X_L)
X_Lall<- X_L
"%+%"<- function(x,y) paste(x,y,sep="")
if(length(vec.factorY) > 0){
for(i in 1:length(vec.factorY)){
X_L <- cbind(X_L[,-(which(names(X_L)==vec.factorY[i]))],model.matrix(as.formula("~"%+%0%+%"+"%+%paste(vec.factorY[i], collapse= "+")), data.Longi)[,-1])
}
vect.factY<-names(X_L)[which(!(names(X_L)%in%llY))]
occurY <- rep(0,length(vec.factorY))
for(i in 1:length(vec.factorY)){
occurY[i] <- length(grep(vec.factorY[i],vect.factY))
}
}
if (ncol(X_L) == 0){
noVarY <- 1
}else{
noVarY <- 0
}
clusterY <- data.Longi$id
maxy_rep <- max(table(clusterY))
uni.cluster<-as.factor(unique(clusterY))
npred <- length(uni.cluster)
nva3<-ncol(X_L)
varY <- as.matrix(sapply(X_L, as.numeric))
if(length(vec.factorY) > 0){
k <- 0
for(i in 1:length(vec.factorY)){
ind.placeY[i] <- ind.placeY[i]+k
k <- k + occurY[i]-1
}
}
if(fit$link=="Random-effects")link <- 1
if(fit$link=="Current-level") link <- 2
matzy <- NULL
names.matzy <- fit$names.re
matzy <- data.matrix(X_Lall[,which(names(X_Lall)%in%names.matzy)])
if(fit$leftCensoring==FALSE){s_cag_id = 0
s_cag = 0}else{
s_cag_id = 1
s_cag = fit$leftCensoring.threshold
}
}
if(class(fit)== "longiPenal"){
m$formula.LongitudinalData <- m$formula.terminalEvent <- m$recurrentAG <- m$data.Longi <- m$n.knots <- m$random <- m$link <- m$id <- m$kappa <- m$maxit <- m$hazard <- m$nb.int <- m$betaorder <- m$betaknots <- m$init.B <- m$LIMparam <- m$LIMlogl <- m$LIMderiv <- m$left.censoring <- m$print.times <- m$init.Random <- m$init.Eta <- m$init.Alpha <- m$method.GH <- m$intercept <- m$n.nodes <- m$... <- NULL
m[[1]] <- as.name("model.frame")
if (!missing(newdata)) m[[3]] <- as.name(m0$newdata)
dataset <- eval(m, sys.parent())
typeofY <- attr(model.extract(dataset, "response"),"type")
Y <- model.extract(dataset, "response")
if (typeofY=="right"){
tt0dc <- rep(0,dim(dataset)[1])
tt1dc <- Y[,1]
cdc <- Y[,2]
} else {
tt0dc <- Y[,1]
tt1dc <- Y[,2]
cdc <- Y[,3]
}
tt0dc <- as.numeric(tt0dc)
tt1dc <- as.numeric(tt1dc)
cdc <- as.numeric(cdc)
class(m$formula) <- "formula"
special <- c("strata", "cluster", "subcluster", "num.id", "timedep")
Terms <- terms(m$formula, special)
m$formula <- Terms
dropx <- NULL
newTerms <- Terms
Xdc <- model.matrix(newTerms, dataset)
if (ncol(Xdc) > 1) Xdc <- Xdc[, -1, drop = FALSE]
nva2 <- ncol(Xdc)
if (!missing(newdata)){
nsujet <- dim(newdata)[1]
}else{
nsujet <- fit$groups
}
}
if(class(fit) == "trivPenalNL"){
nva3 <- fit$nvarKG
nva4 <- fit$nvarKD
m3 <- fit$call
m3$formula <- m3$formula.terminalEvent <- m3$biomarker <- m3$formula.KD <- m3$dose <- m3$data <- m3$recurrentAG <- m3$random <- m3$id <- m3$link <- m3$n.knots <- m3$kappa <- m3$maxit <- m3$hazard <- m3$init.B <- m3$LIMparam <- m3$LIMlogl <- m3$LIMderiv <- m3$print.times <- m3$left.censoring <- m3$init.Random <- m3$init.Eta <- m3$init.Alpha <- m3$method.GH <- m3$n.nodes <- m3$init.GH <- m3$time.biomarker <- m3$BoxCox <- m3$... <- NULL
Names.data.Longi <- m3$data.Longi
if (!missing(newdata.Longi)){m3[[3]] <- as.name(m0$newdata.Longi)
data.Longi <- newdata.Longi
}else{data.Longi <- eval(m3$data.Longi) }
formula.KG <- fit$formula.KG
m4 <- fit$call
m4$formula <- m4$formula.terminalEvent <- m4$biomarker <- m4$formula.KG <- m4$dose <- m4$data <- m4$recurrentAG <- m4$random <- m4$id <- m4$link <- m4$n.knots <- m4$kappa <- m4$maxit <- m4$hazard <- m4$init.B <- m4$LIMparam <- m4$LIMlogl <- m4$LIMderiv <- m4$print.times <- m4$left.censoring <- m4$init.Random <- m4$init.Eta <- m4$init.Alpha <- m4$method.GH <- m4$n.nodes <- m4$init.GH <- m4$time.biomarker <- m4$BoxCox <- m4$... <- NULL
Y <- data.Longi[,which(names(data.Longi)==fit$biomarker)]
if(!is.null(formula.KG[3]) && formula.KG[3] != "1()"){
TermsKG <- if (missing(data.Longi)){
terms(formula.KG, special)
}else{
terms(formula.KG, special, data = data.Longi)
}
ord <- attr(TermsKG, "order")
m2$formula.KG <- TermsKG
m2[[1]] <- as.name("model.frame")
if (NROW(m3) == 0)stop("No (non-missing) observations")
llKG <- attr(TermsKG, "term.labels")
name.KG <- as.character(attr(TermsKG, "variables")[[2]])
KG <- data.Longi[,which(names(data.Longi)==name.KG)]
ind.placeKG <- which(lapply(as.data.frame(data.Longi[,which(names(data.Longi)%in%llKG)]),function(x) length(levels(x)))>2)
defined.factor <- llKG[grep("factor",llKG)]
vec.factorKG.tmp <- NULL
if(length(defined.factor)>0){
mat.factorKG.tmp <- matrix(defined.factor,ncol=1,nrow=length(defined.factor))
vec.factorKG.tmp <-apply(mat.factorKG.tmp,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0){
if(length(grep(":",x))>0){
if(grep('\\(',unlist(strsplit(x,split="")))[1]<grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep(":",unlist(strsplit(x,split="")))[1]
pos4 <- length(unlist(strsplit(x,split="")))
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos1,stop=pos2))])))>2)return(paste(substr(x,start=pos1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
else return(NaN)
}else if(grep("\\(",unlist(strsplit(x,split="")))[1]>grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[1]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos3,stop=pos4))])))>2)return(paste(substr(x,start=1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
else return(NaN)
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[2]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos1,stop=pos2))])))>2 || length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos3,stop=pos4))])))>2)return(paste(substr(x,start=pos1,stop=pos2),":",substr(x,start=pos3,stop=pos4),sep=""))
else return(NaN)
}
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos1,stop=pos2))])))>2)return(substr(x,start=pos1,stop=pos2))
else return(NaN)
}
}else{
return(x)
}})
vec.factorKG.tmp <- vec.factorKG.tmp[which(vec.factorKG.tmp!="NaN")]
if(length(vec.factorKG.tmp)>0){
for(i in 1:length(vec.factorKG.tmp)){
if(length(grep(":",vec.factorKG.tmp[i]))==0){
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==vec.factorKG.tmp[i])])))>2)ind.placeKG <- c(ind.placeKG,which(llKG%in%paste("as.factor(",vec.factorKG.tmp[i],")",sep="")))
}
}}
}
ind.placeKG <- sort(ind.placeKG)
mat.factorKG2 <- matrix(llKG,ncol=1,nrow=length(llKG))
llKG2 <-apply(mat.factorKG2,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0 && length(grep(":",x))==0 && unlist(strsplit(x,split=""))[length(unlist(strsplit(x,split="")))]==")"){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
x<-substr(x,start=pos1,stop=pos2)
return(paste(x,levels(as.factor(data.Longi[,which(names(data.Longi)==x)]))[2],sep=""))
}else{
return(x)
}})
llKG3 <-apply(mat.factorKG2,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0 && length(grep(":",x))==0 && unlist(strsplit(x,split=""))[length(unlist(strsplit(x,split="")))]==")"){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
return(substr(x,start=pos1,stop=pos2))
}else{
return(x)
}})
llKG.real.names <- llKG3
llKG3 <- llKG3[!llKG2%in%llKG]
if(is.factor(data.Longi[,names(data.Longi)==llKG.real.names[1]]))X_L<- as.numeric(data.Longi[,names(data.Longi)==llKG.real.names[1]])-1
else X_L<- data.Longi[,names(data.Longi)==llKG.real.names[1]]
if(length(llKG) == 1)X_L <- as.data.frame(X_L)
if(length(llKG)>1){
for(i in 2:length(llKG.real.names)){
if(is.factor(data.Longi[,names(data.Longi)==llKG.real.names[i]]))X_L<- cbind(X_L,as.numeric(data.Longi[,names(data.Longi)==llKG.real.names[i]])-1)
else X_L<- cbind(X_L,data.Longi[,names(data.Longi)==llKG.real.names[i]])
}
}
llKG.fin <- llKG.real.names
llKG <- llKG.real.names
if(sum(ord)>length(ord)){
for(i in 1:length(ord)){
if(ord[i]>1){
name_v1 <- strsplit(as.character(llKG[i]),":")[[1]][1]
name_v2 <- strsplit(as.character(llKG[i]),":")[[1]][2]
if(length(grep("factor",name_v1))>0){name_v1<-substring(name_v1,11,nchar(name_v1)-1)
v1 <- as.factor(data.Longi[,names(data.Longi)==name_v1])}
else{v1 <- data.Longi[,names(data.Longi)==name_v1]}
if(length(grep("factor",name_v2))>0){name_v2<-substring(name_v2,11,nchar(name_v2)-1)
v2 <- as.factor(data.Longi[,names(data.Longi)==name_v2])}
else{v2 <- data.Longi[,names(data.Longi)==name_v2]}
llKG[i] <- paste(name_v1,":",name_v2,sep="")
if(is.factor(v1) && !is.factor(v2)){
dummy <- model.matrix( ~ v1 - 1)
for(j in 2:length(levels(v1))){
X_L <- cbind(X_L,dummy[,j]*v2)
if(i>1 && i<length(llKG.fin))llKG.fin <- c(llKG.fin[1:(i-1+j-2)],paste(name_v1,".",levels(v1)[j],":",name_v2,sep=""),llKG.fin[(i+1+j-2):length(llKG.fin)])
else if(i==length(llKG.fin))llKG.fin <- c(llKG.fin[1:(i-1+j-2)],paste(name_v1,".",levels(v1)[j],":",name_v2,sep=""))
else llKG.fin <- c(paste(name_v1,".",levels(v1)[j],":",name_v2,sep=""),llKG.fin[(2+j-2):length(llKG.fin)])
}
}else if(!is.factor(v1) && is.factor(v2)){
dummy <- model.matrix( ~ v2 - 1)
for(j in 2:length(levels(v2))){
X_L <- cbind(X_L,dummy[,j]*v1)
if(i>1 && i<length(llKG.fin))llKG.fin <- c(llKG.fin[1:(i-1+j-2)],paste(name_v1,":",name_v2,levels(v2)[j],sep=""),llKG.fin[(i+1+j-2):length(llKG.fin)])
else if(i==length(llKG.fin))llKG.fin <- c(llKG.fin[1:(i-1+j-2)],paste(name_v1,":",name_v2,levels(v2)[j],sep=""))
else llKG.fin <- c(paste(name_v1,":",name_v2,levels(v2)[j],sep=""),llKG.fin[(2+j-2):length(llKG.fin)])
}
}else if(is.factor(v1) && is.factor(v2)){
dummy1 <- model.matrix( ~ v1 - 1)
dummy2 <- model.matrix( ~ v2 - 1)
for(j in 2:length(levels(v1))){
for(k in 2:length(levels(v2))){
X_L <- cbind(X_L,dummy1[,j]*dummy2[,k])
if(i>1 && i<length(llKG.fin))llKG.fin <- c(llKG.fin[1:(i-1+j-2+k-2)],paste(name_v1,levels(v1)[j],":",name_v2,levels(v2)[k],sep=""),llKG.fin[(i+1+j-2+k-2):length(llKG.fin)])
else if(i==length(llKG.fin))llKG.fin <- c(llKG.fin[1:(i-1+j-2+k-2)],paste(name_v1,levels(v1)[j],":",name_v2,levels(v2)[k],sep=""))
else llKG.fin <- c(paste(name_v1,levels(v1)[j],":",name_v2,levels(v2)[k],sep=""),llKG.fin[(2+j-2+k-2):length(llKG.fin)])
}
}
}else{
X_L <- cbind(X_L,v1*v2)
}
}
}
}
if(length(grep(":",llKG))>0){
for(i in 1:length(grep(":",llKG))){
if(length(levels(data.Longi[,which(names(data.Longi)%in%strsplit(llKG[grep(":",llKG)[i]],":")[[1]])[1]]))>2 || length(levels(data.Longi[,which(names(data.Longi)%in%strsplit(llKG[grep(":",llKG)[i]],":")[[1]])[2]]))>2){
ind.placeKG <- c(ind.placeKG,grep(":",llKG)[i])
}
}
}
vec.factorKG <- NULL
if(length(vec.factorKG.tmp)>0)vec.factorKG <- c(llKG[ind.placeKG],vec.factorKG.tmp)
else vec.factorKG <- c(vec.factorKG,llKG[ind.placeKG])
vec.factorKG <- unique(vec.factorKG)
mat.factorKG <- matrix(vec.factorKG,ncol=1,nrow=length(vec.factorKG))
vec.factorKG <-apply(mat.factorKG,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0){
if(length(grep(":",x))>0){
if(grep('\\(',unlist(strsplit(x,split="")))[1]<grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep(":",unlist(strsplit(x,split="")))[1]
pos4 <- length(unlist(strsplit(x,split="")))
return(paste(substr(x,start=pos1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
}else if(grep("\\(",unlist(strsplit(x,split="")))[1]>grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[1]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
return(paste(substr(x,start=1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[2]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
return(paste(substr(x,start=pos1,stop=pos2),":",substr(x,start=pos3,stop=pos4),sep=""))
}
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
return(substr(x,start=pos1,stop=pos2))}
}else{
return(x)
}})
for(i in 1:length(llKG.fin)){
if(sum(names(data.Longi)==llKG.fin[i])>0){
if(is.factor(data.Longi[,names(data.Longi)==llKG.fin[i]]) && length(levels(data.Longi[,names(data.Longi)==llKG.fin[i]]))==2){
llKG.fin[i] <- paste(llKG.fin[i],levels(data.Longi[,names(data.Longi)==llKG.fin[i]])[2],sep="")}
}
}
X_L <- as.data.frame(X_L)
if(dim(X_L)[2]!=length(llKG.fin))stop("The variables in the longitudinal part must be in the data.Longi")
names(X_L) <- llKG.fin
X_Lall<- X_L
"%+%"<- function(x,y) paste(x,y,sep="")
if(length(vec.factorKG) > 0){
for(i in 1:length(vec.factorKG)){
if(length(grep(":",vec.factorKG[i]))==0){
factor.spot <- which(names(X_L)==vec.factorKG[i])
if(factor.spot<ncol(X_L)) X_L <- cbind(X_L[1:(factor.spot-1)],model.matrix(as.formula("~"%+%0%+%"+"%+%paste(vec.factorKG[i], collapse= "+")), model.frame(~.,data.Longi,na.action=na.pass))[,-1],X_L[(factor.spot+1):ncol(X_L)])
else X_L <- cbind(X_L[1:(factor.spot-1)],model.matrix(as.formula("~"%+%0%+%"+"%+%paste(vec.factorKG[i], collapse= "+")), model.frame(~.,data.Longi,na.action=na.pass))[,-1])
} }
}
varKG <- as.matrix(sapply(X_L, as.numeric))
nsujety<-nrow(X_L)
}
clusterY <- data.Longi$id
max_rep <- max(table(clusterY))
uni.clusterY<-as.factor(unique(clusterY))
if(is.null(formula.KG[3]) | formula.KG[3] == "1()"){
varKG <- c()
nsujety <- length(Y)
}
formula.KD <- fit$formula.KD
if(!is.null(formula.KD[3]) && formula.KD[3] != "1()"){
TermsKD <- if (missing(data.Longi)){
terms(formula.KD, special)
}else{
terms(formula.KD, special, data = data.Longi)
}
ord <- attr(TermsKD, "order")
m4$formula.KD <- TermsKD
m4[[1]] <- as.name("model.frame")
if (NROW(m4) == 0)stop("No (non-missing) observations")
llKD <- attr(TermsKD, "term.labels")
name.KD <- as.character(attr(TermsKD, "variables")[[2]])
KD <- data.Longi[,which(names(data.Longi)==name.KD)]
ind.placeKD <- which(lapply(as.data.frame(data.Longi[,which(names(data.Longi)%in%llKD)]),function(x) length(levels(x)))>2)
defined.factor <- llKD[grep("factor",llKD)]
vec.factorKD.tmp <- NULL
if(length(defined.factor)>0){
mat.factorKD.tmp <- matrix(defined.factor,ncol=1,nrow=length(defined.factor))
vec.factorKD.tmp <-apply(mat.factorKD.tmp,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0){
if(length(grep(":",x))>0){
if(grep('\\(',unlist(strsplit(x,split="")))[1]<grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep(":",unlist(strsplit(x,split="")))[1]
pos4 <- length(unlist(strsplit(x,split="")))
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos1,stop=pos2))])))>2)return(paste(substr(x,start=pos1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
else return(NaN)
}else if(grep("\\(",unlist(strsplit(x,split="")))[1]>grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[1]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos3,stop=pos4))])))>2)return(paste(substr(x,start=1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
else return(NaN)
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[2]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos1,stop=pos2))])))>2 || length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos3,stop=pos4))])))>2)return(paste(substr(x,start=pos1,stop=pos2),":",substr(x,start=pos3,stop=pos4),sep=""))
else return(NaN)
}
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==substr(x,start=pos1,stop=pos2))])))>2)return(substr(x,start=pos1,stop=pos2))
else return(NaN)
}
}else{
return(x)
}})
vec.factorKD.tmp <- vec.factorKD.tmp[which(vec.factorKD.tmp!="NaN")]
if(length(vec.factorKD.tmp)>0){
for(i in 1:length(vec.factorKD.tmp)){
if(length(grep(":",vec.factorKD.tmp[i]))==0){
if(length(levels(as.factor(data.Longi[,which(names(data.Longi)==vec.factorKD.tmp[i])])))>2)ind.placeKD <- c(ind.placeKD,which(llKD%in%paste("as.factor(",vec.factorKD.tmp[i],")",sep="")))
}
}}
}
ind.placeKD <- sort(ind.placeKD)
mat.factorKD2 <- matrix(llKD,ncol=1,nrow=length(llKD))
llKD2 <-apply(mat.factorKD2,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0 && length(grep(":",x))==0 && unlist(strsplit(x,split=""))[length(unlist(strsplit(x,split="")))]==")"){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
x<-substr(x,start=pos1,stop=pos2)
return(paste(x,levels(as.factor(data.Longi[,which(names(data.Longi)==x)]))[2],sep=""))
}else{
return(x)
}})
llKD3 <-apply(mat.factorKD2,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0 && length(grep(":",x))==0 && unlist(strsplit(x,split=""))[length(unlist(strsplit(x,split="")))]==")"){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
return(substr(x,start=pos1,stop=pos2))
}else{
return(x)
}})
llKD.real.names <- llKD3
llKD3 <- llKD3[!llKD2%in%llKD]
if(is.factor(data.Longi[,names(data.Longi)==llKD.real.names[1]]))X_L<- as.numeric(data.Longi[,names(data.Longi)==llKD.real.names[1]])-1
else X_L<- data.Longi[,names(data.Longi)==llKD.real.names[1]]
if(length(llKD)>1){
for(i in 2:length(llKD.real.names)){
if(is.factor(data.Longi[,names(data.Longi)==llKD.real.names[i]]))X_L<- cbind(X_L,as.numeric(data.Longi[,names(data.Longi)==llKD.real.names[i]])-1)
else X_L<- cbind(X_L,data.Longi[,names(data.Longi)==llKD.real.names[i]])
}}
llKD.fin <- llKD.real.names
llKD <- llKD.real.names
if(sum(ord)>length(ord)){
for(i in 1:length(ord)){
if(ord[i]>1){
name_v1 <- strsplit(as.character(llKD[i]),":")[[1]][1]
name_v2 <- strsplit(as.character(llKD[i]),":")[[1]][2]
if(length(grep("factor",name_v1))>0){name_v1<-substring(name_v1,11,nchar(name_v1)-1)
v1 <- as.factor(data.Longi[,names(data.Longi)==name_v1])}
else{v1 <- data.Longi[,names(data.Longi)==name_v1]}
if(length(grep("factor",name_v2))>0){name_v2<-substring(name_v2,11,nchar(name_v2)-1)
v2 <- as.factor(data.Longi[,names(data.Longi)==name_v2])}
else{v2 <- data.Longi[,names(data.Longi)==name_v2]}
llKD[i] <- paste(name_v1,":",name_v2,sep="")
if(is.factor(v1) && !is.factor(v2)){
dummy <- model.matrix( ~ v1 - 1)
for(j in 2:length(levels(v1))){
X_L <- cbind(X_L,dummy[,j]*v2)
if(i>1 && i<length(llKD.fin))llKD.fin <- c(llKD.fin[1:(i-1+j-2)],paste(name_v1,".",levels(v1)[j],":",name_v2,sep=""),llKD.fin[(i+1+j-2):length(llKD.fin)])
else if(i==length(llKD.fin))llKD.fin <- c(llKD.fin[1:(i-1+j-2)],paste(name_v1,".",levels(v1)[j],":",name_v2,sep=""))
else llKD.fin <- c(paste(name_v1,".",levels(v1)[j],":",name_v2,sep=""),llKD.fin[(2+j-2):length(llKD.fin)])
}
}else if(!is.factor(v1) && is.factor(v2)){
dummy <- model.matrix( ~ v2 - 1)
for(j in 2:length(levels(v2))){
X_L <- cbind(X_L,dummy[,j]*v1)
if(i>1 && i<length(llKD.fin))llKD.fin <- c(llKD.fin[1:(i-1+j-2)],paste(name_v1,":",name_v2,levels(v2)[j],sep=""),llKD.fin[(i+1+j-2):length(llKD.fin)])
else if(i==length(llKD.fin))llKD.fin <- c(llKD.fin[1:(i-1+j-2)],paste(name_v1,":",name_v2,levels(v2)[j],sep=""))
else llKD.fin <- c(paste(name_v1,":",name_v2,levels(v2)[j],sep=""),llKD.fin[(2+j-2):length(llKD.fin)])
}
}else if(is.factor(v1) && is.factor(v2)){
dummy1 <- model.matrix( ~ v1 - 1)
dummy2 <- model.matrix( ~ v2 - 1)
for(j in 2:length(levels(v1))){
for(k in 2:length(levels(v2))){
X_L <- cbind(X_L,dummy1[,j]*dummy2[,k])
if(i>1 && i<length(llKD.fin))llKD.fin <- c(llKD.fin[1:(i-1+j-2+k-2)],paste(name_v1,levels(v1)[j],":",name_v2,levels(v2)[k],sep=""),llKD.fin[(i+1+j-2+k-2):length(llKD.fin)])
else if(i==length(llKD.fin))llKD.fin <- c(llKD.fin[1:(i-1+j-2+k-2)],paste(name_v1,levels(v1)[j],":",name_v2,levels(v2)[k],sep=""))
else llKD.fin <- c(paste(name_v1,levels(v1)[j],":",name_v2,levels(v2)[k],sep=""),llKD.fin[(2+j-2+k-2):length(llKD.fin)])
}
}
}else{
X_L <- cbind(X_L,v1*v2)
}
}
}
}
if(length(grep(":",llKD))>0){
for(i in 1:length(grep(":",llKD))){
if(length(levels(data.Longi[,which(names(data.Longi)%in%strsplit(llKD[grep(":",llKD)[i]],":")[[1]])[1]]))>2 || length(levels(data.Longi[,which(names(data.Longi)%in%strsplit(llKD[grep(":",llKD)[i]],":")[[1]])[2]]))>2){
ind.placeKD <- c(ind.placeKD,grep(":",llKD)[i])
}
}
}
vec.factorKD <- NULL
if(length(vec.factorKD.tmp)>0)vec.factorKD <- c(llKD[ind.placeKD],vec.factorKD.tmp)
else vec.factorKD <- c(vec.factorKD,llKD[ind.placeKD])
vec.factorKD <- unique(vec.factorKD)
mat.factorKD <- matrix(vec.factorKD,ncol=1,nrow=length(vec.factorKD))
vec.factorKD <-apply(mat.factorKD,MARGIN=1,FUN=function(x){
if (length(grep("factor",x))>0){
if(length(grep(":",x))>0){
if(grep('\\(',unlist(strsplit(x,split="")))[1]<grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep(":",unlist(strsplit(x,split="")))[1]
pos4 <- length(unlist(strsplit(x,split="")))
return(paste(substr(x,start=pos1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
}else if(grep("\\(",unlist(strsplit(x,split="")))[1]>grep(":",unlist(strsplit(x,split="")))[1] && length(grep('\\(',unlist(strsplit(x,split=""))))==1){
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[1]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
return(paste(substr(x,start=1,stop=pos2),substr(x,start=pos3,stop=pos4),sep=""))
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- grep(":",unlist(strsplit(x,split="")))[1]-2
pos3 <- grep("\\(",unlist(strsplit(x,split="")))[2]+1
pos4 <- length(unlist(strsplit(x,split="")))-1
return(paste(substr(x,start=pos1,stop=pos2),":",substr(x,start=pos3,stop=pos4),sep=""))
}
}else{
pos1 <- grep("r",unlist(strsplit(x,split="")))[1]+2
pos2 <- length(unlist(strsplit(x,split="")))-1
return(substr(x,start=pos1,stop=pos2))}
}else{
return(x)
}})
for(i in 1:length(llKD.fin)){
if(sum(names(data.Longi)==llKD.fin[i])>0){
if(is.factor(data.Longi[,names(data.Longi)==llKD.fin[i]]) && length(levels(data.Longi[,names(data.Longi)==llKD.fin[i]]))==2){
llKD.fin[i] <- paste(llKD.fin[i],levels(data.Longi[,names(data.Longi)==llKD.fin[i]])[2],sep="")}
}
}
X_L <- as.data.frame(X_L)
if(dim(X_L)[2]!=length(llKD.fin))stop("The variables in the longitudinal part must be in the data.Longi")
names(X_L) <- llKD.fin
X_Lall<- X_L
"%+%"<- function(x,y) paste(x,y,sep="")
if(length(vec.factorKD) > 0){
for(i in 1:length(vec.factorKD)){
if(length(grep(":",vec.factorKD[i]))==0){
factor.spot <- which(names(X_L)==vec.factorKD[i])
if(factor.spot<ncol(X_L)) X_L <- cbind(X_L[1:(factor.spot-1)],model.matrix(as.formula("~"%+%0%+%"+"%+%paste(vec.factorKD[i], collapse= "+")), model.frame(~.,data.Longi,na.action=na.pass))[,-1],X_L[(factor.spot+1):ncol(X_L)])
else X_L <- cbind(X_L[1:(factor.spot-1)],model.matrix(as.formula("~"%+%0%+%"+"%+%paste(vec.factorKD[i], collapse= "+")), model.frame(~.,data.Longi,na.action=na.pass))[,-1])
} }
}
varKD <- as.matrix(sapply(X_L, as.numeric))
}else{
varKD <- c()
}
matzy <- NULL
matzy <- cbind(data.Longi[,which(colnames(data.Longi)==fit$time.biomarker)],data.Longi[,which(colnames(data.Longi)==fit$dose)])
if(dim(matzy)[2] != 2)stop("Both information on the biomarker measurement times and dose must be included in the data.")
matzy <- as.matrix(matzy)
if(fit$link=="Random-effects")link <- 1
if(fit$link=="Current-level") link <- 2
if(fit$leftCensoring==FALSE){
s_cag_id = 0
s_cag = 0
}else{
s_cag_id = 1
s_cag = fit$leftCensoring.threshold
}
}
cat("\n")
cat("Calculating ... \n")
if(class(fit)== 'jointPenal'){
if(fit$logNormal==0){
ans <- .Fortran(C_cvpl,
as.integer(nobs),
as.integer(nsujet),
as.integer(cluster),
as.integer(c),
as.integer(cdc),
as.integer(nva1),
as.integer(nva2),
as.double(X),
as.double(Xdc),
as.integer(typeof),
as.integer(nz),
as.double(zi),
as.double(ttt),
as.double(tttdc),
as.integer(nbintervR),
as.integer(nbintervDC),
as.integer(np),
as.double(b),
as.double(vopt),
as.double(tt0),
as.double(tt1),
as.double(tt0dc),
as.double(tt1dc),
as.integer(nt),
as.double(pred.times),
rl_cond=as.double(rep(0,nt)),
epoir=as.double(rep(0,nt)),
contribt=as.double(rep(0,nt*nsujet)),
atrisk=as.double(rep(0,nt))
)
}else{
ans <- .Fortran(C_cvpl_logn,
as.integer(nobs),
as.integer(nsujet),
as.integer(cluster),
as.integer(c),
as.integer(cdc),
as.integer(nva1),
as.integer(nva2),
as.double(X),
as.double(Xdc),
as.integer(typeof),
as.integer(nz),
as.double(zi),
as.double(ttt),
as.double(tttdc),
as.integer(nbintervR),
as.integer(nbintervDC),
as.integer(np),
as.double(b),
as.double(vopt),
as.double(tt0),
as.double(tt1),
as.double(tt0dc),
as.double(tt1dc),
as.integer(nt),
as.double(pred.times),
rl_cond=as.double(rep(0,nt)),
epoir=as.double(rep(0,nt)),
contribt=as.double(rep(0,nt*nsujet)),
atrisk=as.double(rep(0,nt))
)
}}else if(class(fit) == "longiPenal"){
ans <- .Fortran(C_cvpl_long,
as.integer(nsujet),
as.integer(1),
as.integer(length(clusterY)),
as.integer(0),
as.integer(clusterY),
as.integer(0),
as.integer(cdc),
as.double(yy),
as.integer(1),
as.integer(nva2),
as.integer(nva3),
as.integer(fit$ne_re),
as.integer(0),
as.integer(fit$netadc),
as.integer(link),
as.double(matrix(0,nrow=1,ncol=1)),
as.double(Xdc),
as.double(as.matrix(varY)),
as.double(matzy),
as.double(s_cag),
as.integer(s_cag_id),
as.integer(typeof),
as.integer(2),
as.integer(nz),
as.double(zi),
as.integer(np),
as.double(b),
as.double(vopt),
as.double(0),
as.double(0),
as.double(tt0dc),
as.double(tt1dc),
as.integer(nt),
as.double(pred.times),
rl_cond=as.double(rep(0,nt)),
epoir=as.double(rep(0,nt)),
contribt=as.double(rep(0,nt*nsujet)),
atrisk=as.double(rep(0,nt))
)
}else if(class(fit) == "trivPenal"){
ans <- .Fortran(C_cvpl_long,
as.integer(nsujet),
as.integer(nobs),
as.integer(length(clusterY)),
as.integer(cluster),
as.integer(clusterY),
as.integer(c),
as.integer(cdc),
as.double(yy),
as.integer(nva1),
as.integer(nva2),
as.integer(nva3),
as.integer(fit$ne_re),
as.integer(fit$netar),
as.integer(fit$netadc),
as.integer(link),
as.double(X),
as.double(Xdc),
as.double(as.matrix(varY)),
as.double(matzy),
as.double(s_cag),
as.integer(s_cag_id),
as.integer(typeof),
as.integer(3),
as.integer(nz),
as.double(zi),
as.integer(np),
as.double(b),
as.double(vopt),
as.double(tt0),
as.double(tt1),
as.double(tt0dc),
as.double(tt1dc),
as.integer(nt),
as.double(pred.times),
rl_cond=as.double(rep(0,nt)),
epoir=as.double(rep(0,nt)),
contribt=as.double(rep(0,nt*nsujet)),
atrisk=as.double(rep(0,nt))
)
}else if(class(fit) == "trivPenalNL"){
GH <- c(0,0)
GH[1] <- ifelse(fit$methodGH == "Standard", 0, 1)
GH[2] <- fit$n.nodes
box_cox <- c(0,1)
box_cox[1] <- ifelse(fit$BoxCox == TRUE, 1, 0)
if(!is.null(fit$BoxCox_parameter))box_cox[2] <- fit$BoxCox_parameter
lappend <- function (lst, ...){
lst <- c(lst, list(...))
return(lst)
}
nodes <- gauss.quad(20,kind="hermite")$nodes
weights <- gauss.quad(20,kind="hermite")$weights*exp(nodes^2)
nodes2 <- gauss.quad(20,kind="hermite")$nodes
weights2 <- gauss.quad(20,kind="hermite")$weights*exp(nodes2^2)
tmp <- rep(list(nodes), fit$ne_re)
tmp <- lappend(tmp, nodes2)
nodes <- as.data.frame(do.call(expand.grid,tmp))
tmp <- rep(list(weights), fit$ne_re)
tmp <- lappend(tmp, weights2)
weights <- as.data.frame(do.call(expand.grid,tmp))
nodes <- sapply(nodes, as.double)
weights <- sapply(weights, as.double)
ans <- .Fortran(C_cvplnl,
as.integer(nsujet),
as.integer(nobs),
as.integer(length(clusterY)),
as.integer(cluster),
as.integer(clusterY),
as.integer(c),
as.integer(cdc),
as.double(Y),
as.integer(nva1),
as.integer(nva2),
as.integer(nva3),
as.integer(nva4),
as.integer(fit$ne_re),
as.integer(fit$random.which),
as.double(box_cox),
as.integer(fit$netar),
as.integer(fit$netadc),
as.integer(link),
as.double(X),
as.double(Xdc),
as.double(cbind(varKG,varKD)),
as.double(matzy),
as.double(s_cag),
as.integer(s_cag_id),
as.integer(typeof),
as.integer(nz),
as.double(zi),
as.integer(np),
as.double(b),
as.double(vopt),
as.double(tt0),
as.double(tt1),
as.double(tt0dc),
as.double(tt1dc),
as.integer(nt),
as.double(pred.times),
rl_cond=as.double(rep(0,nt)),
epoir=as.double(rep(0,nt)),
contribt=as.double(rep(0,nt*nsujet)),
atrisk=as.double(rep(0,nt)),
as.integer(GH),
as.double(fit$b_pred),
as.double(fit$weights),
as.double(fit$nodes),
as.integer(fit$n.nodes^fit$ne_re*20)
)
}
out <- NULL
if (!missing(newdata)) out$data <- m0$newdata
else out$data <- fit$data
out$new.data <- !is.null(newdata)
out$pred.times <- pred.times
out$mpol <- ans$rl_cond
if(!missing(newdata) && any(out$mpol<0))stop("The program stopped abnormally. This may be related to the new datasets with insufficient information")
if (missing(newdata)) out$cvpol <- ans$epoir
out$IndivContrib <- matrix(ans$contribt,nrow=nsujet,ncol=nt)
out$AtRisk <- ans$atrisk
cat("Estimators of EPOCE computed for",length(pred.times),"times \n")
class(out) <- c("epoce")
out
} |
dEnricherView <- function(eTerm, top_num=10, sortBy=c("adjp","pvalue","zscore","nAnno","nOverlap","none"), decreasing=NULL, details=F)
{
if(is.logical(eTerm)){
stop("There is no enrichment in the 'eTerm' object.\n")
}
if (class(eTerm) != "eTerm" ){
stop("The function must apply to a 'eTerm' object.\n")
}
sortBy <- match.arg(sortBy)
if( is.null(top_num) ){
top_num <- length(eTerm$set_info$setID)
}
if ( top_num > length(eTerm$set_info$setID) ){
top_num <- length(eTerm$set_info$setID)
}
tab <- data.frame( name = eTerm$set_info$name,
 nAnno = sapply(eTerm$gs,length),
 nOverlap = sapply(eTerm$overlap,length),
 zscore = eTerm$zscore,
 pvalue = eTerm$pvalue,
 adjp = eTerm$adjp,
 namespace = eTerm$set_info$namespace,
 distance = eTerm$set_info$distance,
 members = sapply(eTerm$overlap, function(x) paste(names(x),collapse=','))
 )
rownames(tab) <- eTerm$set_info$setID
if(details == T){
res <- tab[,c(1:9)]
}else{
res <- tab[,c(1:6)]
}
if(is.null(decreasing)){
if(sortBy=="zscore" | sortBy=="nAnno" | sortBy=="nOverlap"){
decreasing <- T
}else{
decreasing <- F
}
}
switch(sortBy,
adjp={res <- res[order(res[,6], decreasing=decreasing)[1:top_num],]},
pvalue={res <- res[order(res[,5], decreasing=decreasing)[1:top_num],]},
zscore={res <- res[order(res[,4], decreasing=decreasing)[1:top_num],]},
nAnno={res <- res[order(res[,2], decreasing=decreasing)[1:top_num],]},
nOverlap={res <- res[order(res[,3], decreasing=decreasing)[1:top_num],]},
none={res <- res[order(rownames(res), decreasing=decreasing)[1:top_num],]}
)
if(sortBy=='none'){
suppressWarnings(flag <- all(!is.na(as.numeric(rownames(res)))))
if(flag){
res <- res[order(as.numeric(rownames(res)), decreasing=decreasing)[1:top_num],]
}
}
res
} |
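# Minimal toy example for dEnricherView() above. The 'eTerm' object here is hand-built
# purely to illustrate the slots the function reads (set_info, gs, overlap, zscore,
# pvalue, adjp); it is not real enrichment output.
eTerm.demo <- structure(list(
 set_info = data.frame(setID = c("S1","S2"), name = c("term 1","term 2"),
 namespace = "toy", distance = c(1, 2), stringsAsFactors = FALSE),
 gs = list(S1 = letters[1:5], S2 = letters[3:9]),
 overlap = list(S1 = c(a = 1, b = 2), S2 = c(c = 3)),
 zscore = c(2.1, 1.3),
 pvalue = c(0.01, 0.10),
 adjp = c(0.02, 0.10)
), class = "eTerm")
dEnricherView(eTerm.demo, top_num = 2, sortBy = "adjp", details = TRUE)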
.pparametric = function(mfit, zres)
{
m = dim(zres)[2]
n = dim(zres)[1]
ures = matrix(NA, ncol = m, nrow = n)
for(i in 1:m){
gdist = mfit@fit[[i]]@model$modeldesc$distribution
lambda = ifelse(mfit@fit[[i]]@model$modelinc[18]>0, mfit@fit[[i]]@fit$ipars["ghlambda",1], 0)
skew = ifelse(mfit@fit[[i]]@model$modelinc[16]>0, mfit@fit[[i]]@fit$ipars["skew",1], 0)
shape = ifelse(mfit@fit[[i]]@model$modelinc[17]>0, mfit@fit[[i]]@fit$ipars["shape",1], 0)
ures[,i] = pdist(gdist, zres[,i], mu = 0, sigma = 1, lambda = lambda, skew = skew, shape = shape)
}
return(ures)
}
.pparametric.filter = function(mflt, zres)
{
m = dim(zres)[2]
n = dim(zres)[1]
ures = matrix(NA, ncol = m, nrow = n)
for(i in 1:m){
gdist = mflt@filter[[i]]@model$modeldesc$distribution
lambda = ifelse(mflt@filter[[i]]@model$modelinc[18]>0, mflt@filter[[i]]@model$pars["ghlambda",1], 0)
skew = ifelse(mflt@filter[[i]]@model$modelinc[16]>0, mflt@filter[[i]]@model$pars["skew",1], 0)
shape = ifelse(mflt@filter[[i]]@model$modelinc[17]>0, mflt@filter[[i]]@model$pars["shape",1], 0)
ures[,i] = pdist(gdist, zres[,i], mu = 0, sigma = 1, lambda = lambda, skew = skew, shape = shape)
}
return(ures)
}
.pempirical = function(zres)
{
m = dim(zres)[2]
n = dim(zres)[1]
ures = matrix(NA, ncol = m, nrow = n)
for(i in 1:m){
fn = ecdf(sort(zres[,i]))
ures[,i] = fn(zres[,i])
}
return(ures)
}
.pempirical.filter = function(zres, dcc.old)
{
m = dim(zres)[2]
n = dim(zres)[1]
ures = matrix(NA, ncol = m, nrow = n)
for(i in 1:m){
fn = ecdf(sort(zres[1:dcc.old,i]))
ures[,i] = fn(zres[,i])
}
return(ures)
}
.pspd = function(zres, spd.control)
{
m = dim(zres)[2]
n = dim(zres)[1]
ures = matrix(NA, ncol = m, nrow = n)
sfit = vector(mode = "list", length = m)
sfit = lapply(as.list(1:m), function(i) spdfit(zres[,i], upper = spd.control$upper, lower = spd.control$lower,
tailfit = "GPD", type = spd.control$type, kernelfit = spd.control$kernel, information = "observed"))
for(i in 1:m){
ures[,i] = pspd(zres[,i], sfit[[i]])
}
return(list(ures = ures, sfit = sfit))
}
.pspd.filter = function(zres, spd.control, dcc.old)
{
m = dim(zres)[2]
n = dim(zres)[1]
ures = matrix(NA, ncol = m, nrow = n)
sfit = vector(mode = "list", length = m)
sfit = lapply(as.list(1:m), function(i) spdfit(zres[1:dcc.old,i], upper = spd.control$upper, lower = spd.control$lower,
tailfit = "GPD", type = spd.control$type, kernelfit = spd.control$kernel, information = "observed"))
for(i in 1:m){
ures[,i] = pspd(zres[,i], sfit[[i]])
}
return(list(ures = ures, sfit = sfit))
}
.qparametric = function(ures, modelinc, pars)
{
m = dim(ures)[2]
zres = matrix(NA, ncol = m, nrow = dim(ures)[1])
distn = c("norm", "snorm", "std", "sstd","ged", "sged", "nig", "ghyp", "jsu")
for(i in 1:m){
gdist = distn[modelinc[21,i]]
lambda = ifelse(modelinc[18,i]>0, pars["ghlambda",i], 0)
skew = ifelse(modelinc[16,i]>0, pars["skew",i], 0)
shape = ifelse(modelinc[17,i]>0, pars["shape",i], 0)
zres[,i] = qdist(gdist, ures[,i], mu = 0, sigma = 1, lambda = lambda, skew = skew, shape = shape)
}
return(zres)
}
.qempirical = function(ures, oldz)
{
zres = matrix(NA, ncol = dim(ures)[2], nrow = dim(ures)[1])
for(i in 1:dim(ures)[2]){
zres[,i] = quantile(oldz[,i], ures[,i], type = 1)
}
return(zres)
}
.qspd = function(ures, sfit)
{
zres = matrix(NA, ncol = dim(ures)[2], nrow = dim(ures)[1])
for(i in 1:dim(ures)[2]){
zres[,i] = qspd(ures[,i], sfit[[i]])
}
return(zres)
} |
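# Quick sketch of the empirical PIT helpers above (.pempirical / .qempirical):
# transform a matrix of stand-in standardized residuals to (0,1] margins and back.
# The simulated matrix is illustrative only.
set.seed(1)
z.demo <- matrix(rnorm(200), ncol = 2)        # stand-in standardized residuals
u.demo <- .pempirical(z.demo)                 # empirical CDF transform, values in (0, 1]
summary(as.vector(u.demo))                    # roughly uniform margins
z.back <- .qempirical(u.demo, oldz = z.demo)  # maps back onto observed residual values
                                              # (up to the discreteness of the empirical quantile)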
data("merzbach", package = "folio")
keep <- apply(X = merzbach, MARGIN = 2, FUN = function(x) max(x) >= 50)
counts <- as_count(merzbach[, keep])
dates <- as.numeric(utils::as.roman(rownames(counts)))
plot_time(counts, dates)
plot_time(counts, dates, facet = TRUE) |
PMLE.Normal <-
function(l.trunc,x.trunc,testimator=FALSE,GOF=TRUE){
m=length(l.trunc)
bar.l=mean(l.trunc);bar.x=mean(x.trunc)
s.l=var(l.trunc);s.x=var(x.trunc);s.lx=cov(l.trunc,x.trunc)
l.func=function(theta){
mul=theta[1];mux=theta[2];varl=theta[3];varx=theta[4];covlx=theta[5]
delta=(mux-mul)/sqrt(varx+varl-2*covlx)
prop=pnorm(delta)
R=varl*varx-covlx^2
D.vec=varx*(l.trunc-mul)^2-2*covlx*(l.trunc-mul)*(x.trunc-mux)+varl*(x.trunc-mux)^2
D=mean(D.vec)/R
-( -m*log(prop)-m/2*log((2*pi)^2)-m/2*log(R)-m*D/2 )
}
res=nlm(l.func,p=c(bar.l,bar.x,s.l,s.x,s.lx),hessian=TRUE)
est=res$estimate
l.max=-res$minimum
Info=res$hessian/m
I_inv=solve(Info)
l0.func=function(theta0){
l.func(c(theta0,0))
}
res0=nlm(l0.func,p=c(bar.l,bar.x,s.l,s.x),hessian=T)
est0=res0$estimate
l0.max=-res0$minimum
LR=2*(l.max-l0.max)
est_test=c(est0,0)+(est-c(est0,0))*(LR>qchisq(0.95,df=1))
if(testimator==TRUE){est=est_test}
mul=est[1];mux=est[2];varl=est[3];varx=est[4];covlx=est[5]
se.mul=sqrt(I_inv[1,1]/m);se.mux=sqrt(I_inv[2,2]/m);se.varl=sqrt(I_inv[3,3]/m)
se.varx=sqrt(I_inv[4,4]/m);se.covlx=sqrt(I_inv[5,5]/m)
Vlx=sqrt(varx-covlx^2/varl)
prop.func=function(mul,mux,varl,varx,covlx){
delta=(mux-mul)/sqrt(varx+varl-2*covlx)
pnorm(delta)
}
prop.est=prop.func(mul,mux,varl,varx,covlx)
h=0.00000001
prop_dmul=(prop.func(mul+h,mux,varl,varx,covlx)-prop.est)/h
prop_dmux=(prop.func(mul,mux+h,varl,varx,covlx)-prop.est)/h
prop_dvarl=(prop.func(mul,mux,varl+h,varx,covlx)-prop.est)/h
prop_dvarx=(prop.func(mul,mux,varl,varx+h,covlx)-prop.est)/h
prop_dcovlx=(prop.func(mul,mux,varl,varx,covlx+h)-prop.est)/h
c_dot=c(prop_dmul,prop_dmux,prop_dvarl,prop_dvarx,prop_dcovlx)
se.prop=sqrt(t(c_dot)%*%(I_inv/m)%*%c_dot)
mu_L=c(estim=mul,se=se.mul)
mu_X=c(estim=mux,se=se.mux)
var_L=c(estim=varl,se=se.varl)
var_X=c(estim=varx,se=se.varx)
cov_LX=c(estim=covlx,se=se.covlx)
prop=c(estim=prop.est,se=se.prop)
LR_test=c(LR=LR,pvalue=1-pchisq(LR,df=1))
AIC_res = round(-2*l.max+2*5,2)
BIC_res = round(-2*l.max+5*log(m),2)
C.test=K.test=NULL
F_par=F_emp=NULL
if(GOF==TRUE){
F.func=function(ll,xx){
f_par=function(s){
A=covlx/sqrt(varl)*s
B=sqrt(varx-covlx^2/varl)
C=pnorm((xx-mux-A)/B)-pnorm((mul-mux+sqrt(varl)*s-A)/B)
C*dnorm(s)
}
Up_par=(ll-mul)/sqrt(varl)
integrate(f_par,-Inf,Up_par)$value
}
F_par=F_emp=numeric(m)
for(i in 1:m){
F_par[i]=F.func(l.trunc[i],x.trunc[i])/prop.est
F_emp[i]=mean( (l.trunc<=l.trunc[i])&(x.trunc<=x.trunc[i]) )
}
C.test=sum( (F_emp-F_par)^2 )
K.test=max( abs( F_emp-F_par ) )
plot(F_emp,F_par,xlab="F_empirical",ylab="F_parametric",xlim=c(0,1),ylim=c(0,1))
lines(x = c(0,1), y = c(0,1))
}
list(mu_L=mu_L,mu_X=mu_X,var_L=var_L,var_X=var_X,cov_LX=cov_LX,
c=prop,test=LR_test,
ML=l.max,AIC=AIC_res,BIC=BIC_res,
C=C.test,K=K.test,F_empirical=F_emp,F_parametric=F_par)
} |
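# Hedged demo of PMLE.Normal() above on simulated data. The sampling scheme assumed
# here (keep only pairs with L <= X, i.e. X left-truncated by L) and all parameter
# values are illustrative assumptions, not taken from the original source.
set.seed(1)
n.pop <- 500
L.pop <- rnorm(n.pop, mean = 0, sd = 1)
X.pop <- rnorm(n.pop, mean = 1, sd = 1)
obs <- L.pop <= X.pop                      # only pairs satisfying the truncation are observed
pmle.demo <- PMLE.Normal(L.pop[obs], X.pop[obs], testimator = FALSE, GOF = FALSE)
pmle.demo$mu_L; pmle.demo$mu_X; pmle.demo$c   # estimates with standard errors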
fit_float <- function (all.samples, all.standards, LC.vals,
float, ex.smaller = NULL,
file.output = NULL, best.fits = NULL) {
if (is.null(ex.smaller)) {
ex.smaller <- 0
} else {
if (ex.smaller >= 1 | ex.smaller < 0) stop("You can only exclude portions between 0 and 1, e.g. 0.02 for 2 %")
}
if(is.null(file.output)) {
file.output <- FALSE
}
if(is.null(best.fits)) {
best.fits <- 1
}
try(pb <- txtProgressBar(min = 1, max = length(all.samples), style = 3), silent = TRUE)
float.list <- as.list(as.data.frame(t(float)))
float.results <- NULL
for (i in 1:length(all.samples)) {
cat(paste("Overall progress:\n", sep = ""))
try(setTxtProgressBar(pb, i), silent = TRUE)
cat(paste("\nFitting sample: ", all.samples[[i]]$name, ", with ", length(float[,1]), " combinations, starting: ", Sys.time(), "\n", sep = ""))
fit.standards <- std_df(sample = all.samples[[i]], all.standards = all.standards)
new.results <- lapply(float.list, function(X) LC_fit(sample = all.samples[[i]],
float = as.numeric(X),
standards = fit.standards,
LC.vals = LC.vals,
ex.smaller = ex.smaller,
E.zero = all.samples[[i]]$data$E0)
)
new.results <- do.call(rbind.data.frame, new.results)
new.results <- cbind(new.results, float)
new.results.sorted <- new.results[order(new.results$R.fac),]
if (file.output == TRUE) {
write.csv2(x = new.results.sorted, file = paste("LCF.float", all.samples[[i]]$name, "csv", sep = "."), row.names = FALSE)
}
best.fit <- head(new.results.sorted, best.fits)
row.names(best.fit) <- NULL
row.names(best.fit)[1] <- all.samples[[i]]$name
float.results <- rbind(float.results, best.fit)
write.csv2(float.results, "temp.float.csv", row.names = TRUE)
cat(paste("Sample finished: ", Sys.time(), "\n", sep = ""))
}
try(close(pb), silent = TRUE)
return(float.results)
}
dcAncestralMP <- function(data, phy, output.detail=F, parallel=T, multicores=NULL, verbose=T)
{
startT <- Sys.time()
if(verbose){
message(paste(c("Start at ",as.character(startT)), collapse=""), appendLF=T)
message("", appendLF=T)
}
if (class(phy) != "phylo"){
stop("The input 'phy' must belong to the class 'phylo'!")
}
Ntip <- ape::Ntip(phy)
Nnode <- ape::Nnode(phy)
Ntot <- Ntip+Nnode
phy <- ape::reorder.phylo(phy, "postorder")
e1 <- phy$edge[, 1]
e2 <- phy$edge[, 2]
connectivity <- suppressMessages(dcTreeConnectivity(phy, verbose=verbose))
if (Nnode != Ntip-1){
stop("The input 'phy' is not binary and rooted!")
}
if(is.vector(data)){
tmp_data <- matrix(data, ncol=1)
if(!is.null(names(data))){
rownames(tmp_data) <- names(data)
}
data <- tmp_data
}
if(is.data.frame(data)){
data <- as.matrix(data)
}
if (!is.null(rownames(data))) {
ind <- match(rownames(data), phy$tip.label)
data <- data[!is.na(ind),]
if(nrow(data) != Ntip){
stop(message(sprintf("The row names of input 'data' do not contain all of the tip labels of the input 'phy': %d NOT FOUND!", Ntip-nrow(data)), appendLF=T))
}
ind <- match(phy$tip.label, rownames(data))
data <- data[ind,]
}else{
if(nrow(data) != Ntip){
stop(message(sprintf("The row number of input 'data' do not equal the tip number of the input 'phy'!"), appendLF=T))
}
}
if(verbose){
message(sprintf("The input tree has '%d' tips.", Ntip), appendLF=T)
}
doReconstruct <- function(x, Ntot, Ntip, Nnode, connectivity, e1, e2, output.detail, verbose){
if (!is.factor(x)){
x_tmp <- base::factor(x)
}else{
x_tmp <- x
}
nl <- base::nlevels(x_tmp)
lvls <- base::levels(x_tmp)
x_tmp <- as.integer(x_tmp)
if(nl==1){
if(verbose){
message(sprintf("\tNote, there is only one state '%s' in tips", lvls), appendLF=T)
}
if(output.detail){
res <- list()
res$states <- rep(lvls, Ntot)
}else{
res <- rep(lvls, Ntot)
}
return(invisible(res))
}
if(verbose){
message(sprintf("\tFirst, do maximum parsimony-modified Fitch algorithm in a bottom-up manner (%s) ...", as.character(Sys.time())), appendLF=T)
}
Cx <- matrix(NA, nrow=Ntot, ncol=nl, dimnames=list(1:Ntot, lvls))
Cx[cbind(1:Ntip, x_tmp)] <- 1
Cx_final <- Cx
for (i in seq(from=1, by=2, length.out=Nnode)) {
j <- i + 1L
cur <- e1[i]
all_children <- which(connectivity[cur-Ntip,]==1)
if(0){
ind <- match(1:Ntip, all_children)
all_children <- all_children[ind[!is.na(ind)]]
}
tmp <- Cx[all_children, ]
ttmp <- apply(tmp, 2, function(x) sum(x,na.rm=T))
ind <- which(ttmp==max(ttmp))
if(length(ind)==1){
Cx[cur, ind] <- 1
Cx_final[cur, ind] <- 1
}else{
Cx[cur, ind] <- NA
Cx_final[cur, ind] <- Inf
}
}
anc <- apply(Cx, 1, function(x){
tmp <- lvls[which(x==1)]
if(length(tmp)==0){
return("tie")
}else{
return(tmp)
}
})
if(verbose){
message(sprintf("\tSecond, resolve unknown states being the same as its direct parent in a top-down manner (%s) ...", as.character(Sys.time())), appendLF=T)
}
anc_final <- anc
ties <- which(anc_final=='tie')
if(length(ties) > 0){
if(verbose){
message(sprintf("\t\tbreak %d tie(s)", length(ties)), appendLF=T)
}
for(i in 1:length(ties)){
child_ind <- ties[i]
if(child_ind==Ntip+1){
ind <- which(is.infinite(Cx_final[child_ind,]))
anc_final[child_ind] <- lvls[ind[length(ind)]]
Cx_final[child_ind, ind[length(ind)]] <- 1
}else{
child <- names(child_ind)
parent <- e1[match(child, e2)]
parent_ind <- match(parent, names(anc))
anc_final[child_ind] <- anc_final[parent_ind]
Cx_final[child_ind, match(anc_final[parent_ind],lvls)] <- 1
}
}
}
Cx_final[is.infinite(Cx_final)] <- NA
p2c <- cbind(anc_final[e1], anc_final[e2])
p2c_final <- p2c
if(nl==2){
if(anc_final[e1[2*Nnode-1]] == lvls[nl]){
p2c_final <- rbind(p2c_final, lvls)
}
}
if(verbose){
all <- paste("\t", p2c_final[,1], "->", p2c_final[,2], sep='')
changes <- sapply(unique(all), function(x){
sum(x==all)
})
changes <- sort(changes)
msg <- paste(names(changes),changes, sep=": ", collapse="\n")
message(sprintf("\tIn summary, the number of between-state changes:\n%s\n", msg), appendLF=T)
}
if(output.detail){
Lx <- matrix(0, nrow=Ntot, ncol=nl, dimnames=list(1:Ntot, lvls))
Lx[cbind(1:Ntip, x_tmp)] <- 1
for (i in seq(from=1, by=2, length.out=Nnode)) {
j <- i + 1L
cur <- e1[i]
all_children <- which(connectivity[cur-Ntip,]==1)
if(0){
ind <- match(1:Ntip, all_children)
all_children <- all_children[ind[!is.na(ind)]]
}
tmp <- Cx_final[all_children, ]
ttmp <- apply(tmp, 2, function(x) sum(x,na.rm=T))
Lx[cur,] <- ttmp
}
rp <- t(apply(Lx, 1, function(x) x/sum(x)))
rate <- matrix(0, nl, nl)
ind <- matrix(match(p2c_final, lvls),ncol=2)
for(i in 1:nrow(ind)){
rate[ind[i,1], ind[i,2]] <- rate[ind[i,1], ind[i,2]] + 1
}
colnames(rate) <- rownames(rate) <- lvls
}
if(output.detail){
res <- list()
res$states <- anc_final
res$transition <- rate
res$relative <- signif(rp, digits=4)
}else{
res <- anc_final
}
return(invisible(res))
}
progress_indicate <- function(i, B, step, flag=F){
if(i %% ceiling(B/step) == 0 | i==B | i==1){
if(flag & verbose){
message(sprintf("\t%d out of %d (%s)", i, B, as.character(Sys.time())), appendLF=T)
}
}
}
integer_vec <- suppressMessages(dcDuplicated(data, pattern.wise="column", verbose=verbose))
ind_unique <- sort(unique(integer_vec))
data_unique <- as.matrix(data[, ind_unique], ncol=length(ind_unique))
if(verbose){
message(sprintf("The input data has %d characters/columns (with %d distinct patterns).", ncol(data), ncol(data_unique)), appendLF=T)
}
flag_parallel <- F
if(parallel){
flag_parallel <- dnet::dCheckParallel(multicores=multicores, verbose=verbose)
if(flag_parallel){
j <- 1
res_list <- foreach::`%dopar%` (foreach::foreach(j=1:ncol(data_unique), .inorder=T), {
progress_indicate(i=j, B=ncol(data_unique), 10, flag=T)
suppressMessages(doReconstruct(x=data_unique[,j], Ntot=Ntot, Ntip=Ntip, Nnode=Nnode, connectivity=connectivity, e1=e1, e2=e2, output.detail=output.detail, verbose=verbose))
})
ind <- match(integer_vec, ind_unique)
res_list <- res_list[ind]
if(!is.null(colnames(data))){
names(res_list) <- colnames(data)
}else{
names(res_list) <- 1:ncol(data)
}
if(!output.detail){
res <- do.call(base::cbind, res_list)
if(is.numeric(data)){
res_tmp <- matrix(as.numeric(res), ncol=ncol(res), nrow=nrow(res))
if(!is.null(rownames(res))){
rownames(res_tmp) <- rownames(res)
}
if(!is.null(colnames(res))){
colnames(res_tmp) <- colnames(res)
}
res <- res_tmp
}
}else{
res <- res_list
}
}
}
if(flag_parallel==F){
res_list <- lapply(1:ncol(data_unique),function(j) {
progress_indicate(i=j, B=ncol(data_unique), 10, flag=T)
suppressMessages(doReconstruct(x=data_unique[,j], Ntot=Ntot, Ntip=Ntip, Nnode=Nnode, connectivity=connectivity, e1=e1, e2=e2, output.detail=output.detail, verbose=verbose))
})
ind <- match(integer_vec, ind_unique)
res_list <- res_list[ind]
if(!is.null(colnames(data))){
names(res_list) <- colnames(data)
}else{
names(res_list) <- 1:ncol(data)
}
if(!output.detail){
res <- do.call(base::cbind, res_list)
if(is.numeric(data)){
res_tmp <- matrix(as.numeric(res), ncol=ncol(res), nrow=nrow(res))
if(!is.null(rownames(res))){
rownames(res_tmp) <- rownames(res)
}
if(!is.null(colnames(res))){
colnames(res_tmp) <- colnames(res)
}
res <- res_tmp
}
}else{
res <- res_list
}
}
endT <- Sys.time()
if(verbose){
message("", appendLF=T)
message(paste(c("Finish at ",as.character(endT)), collapse=""), appendLF=T)
}
runTime <- as.numeric(difftime(strptime(endT, "%Y-%m-%d %H:%M:%S"), strptime(startT, "%Y-%m-%d %H:%M:%S"), units="secs"))
message(paste(c("Runtime in total is: ",runTime," secs\n"), collapse=""), appendLF=T)
invisible(res)
}
gpg <- function(inc, gender = NULL, method = c("mean", "median"),
weights = NULL, sort = NULL, years = NULL, breakdown = NULL,
design = NULL, cluster = NULL, data = NULL, var = NULL,
alpha = 0.05, na.rm = FALSE, ...) {
if(is.null(gender)) stop("'gender' must be supplied")
byYear <- !is.null(years)
byStratum <- !is.null(breakdown)
if(!is.null(data)) {
inc <- data[, inc]
gender <- data[, gender]
if(!is.null(weights)) weights <- data[, weights]
if(!is.null(sort)) sort <- data[, sort]
if(byYear) years <- data[, years]
if(byStratum) breakdown <- data[, breakdown]
if(!is.null(var)) {
if(!is.null(design)) design <- data[, design]
if(!is.null(cluster)) cluster <- data[, cluster]
}
}
if(!is.numeric(inc)) stop("'inc' must be a numeric vector")
method <- match.arg(method)
if(!is.factor(gender)) stop("'gender' must be a factor.")
if(length(levels(gender)) != 2) stop("'gender' must have exactly two levels")
if(!all(levels(gender) == c("female", "male"))) {
gender <- factor(gender, labels=c("female","male"))
warning("The levels of gender were internally recoded - your first level has to correspond to females")
}
if(!is.null(years)) {
if(!is.factor(years)) stop("'years' should be a factor")
nage <- length(levels(years))
if(nage > 12) warning(paste("Too small sample sizes may occur by using ", nage, " age classes"))
}
n <- length(inc)
if(is.null(weights)) weights <- rep.int(1, n)
else if(!is.numeric(weights)) stop("'weights' must be a numeric vector")
if(!is.null(sort) && !is.vector(sort) && !is.ordered(sort)) {
stop("'sort' must be a vector or ordered factor")
}
if(byYear && !is.numeric(years)) {
stop("'years' must be a numeric vector")
}
if(byStratum) {
if(!is.vector(breakdown) && !is.factor(breakdown)) {
stop("'breakdown' must be a vector or factor")
} else breakdown <- as.factor(breakdown)
}
if(is.null(data)) {
if(length(weights) != n) {
stop("'weights' must have the same length as 'x'")
}
if(!is.null(sort) && length(sort) != n) {
stop("'sort' must have the same length as 'x'")
}
if(byYear && length(years) != n) {
stop("'years' must have the same length as 'x'")
}
if(byStratum && length(breakdown) != n) {
stop("'breakdown' must have the same length as 'x'")
}
}
if(byYear) {
ys <- sort(unique(years))
gp <- function(y, inc, weights, sort, years, na.rm) {
i <- years == y
genderGap(inc[i], gender[i], method, weights[i], sort[i], na.rm=na.rm)
}
value <- sapply(ys, gp, inc=inc, weights=weights,
sort=sort, years=years, na.rm=na.rm)
names(value) <- ys
} else {
ys <- NULL
value <- genderGap(inc, gender, method, weights, sort, na.rm=na.rm)
}
if(byStratum) {
gpR <- function(i, inc, weights, sort, na.rm) {
genderGap(inc[i], gender[i], method, weights[i], sort[i], na.rm=na.rm)
}
valueByStratum <- aggregate(1:n,
if(byYear) list(year=years, stratum=breakdown)
else list(stratum=breakdown),
gpR, inc=inc, weights=weights,
sort=sort, na.rm=na.rm)
names(valueByStratum)[ncol(valueByStratum)] <- "value"
rs <- levels(breakdown)
} else valueByStratum <- rs <- NULL
res <- constructGpg(value=value,
valueByStratum=valueByStratum,
years=ys, strata=rs)
if(!is.null(var)) {
res <- variance(inc, weights, years, breakdown, design, cluster,
indicator=res, alpha=alpha, na.rm=na.rm, type=var,
gender=gender, method=method, ...)
}
return(res)
}
genderGap <- function(x, gend, method = 'mean', weights = NULL,
sort = NULL, na.rm = FALSE) {
if(is.null(gend)) stop("'gender' must be supplied")
if(isTRUE(na.rm)){
indices <- !is.na(x)
x <- x[indices]
gend <- gend[indices]
if(!is.null(weights)) weights <- weights[indices]
if(!is.null(sort)) sort <- sort[indices]
} else if(any(is.na(x))) return(NA)
female <- levels(gend)[1]
male <- levels(gend)[2]
if(is.null(weights)) weights <- rep.int(1, length(x))
incgendmale <- x[gend=="male"]
incgendmaleWeights <- weights[gend=="male"]
incgendfemale <- x[gend=="female"]
incgendfemaleWeights <- weights[gend=="female"]
if(method == 'mean') {
wM <- weighted.mean(x=incgendmale, w=incgendmaleWeights)
wF <- weighted.mean(x=incgendfemale, w=incgendfemaleWeights)
return((wM - wF) / wM)
} else {
wM <- weightedMedian(incgendmale, incgendmaleWeights)
wF <- weightedMedian(incgendfemale, incgendfemaleWeights)
return((wM - wF)/wM)
}
}
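## Usage sketch for genderGap() above (the simulated wages are an assumption for
## illustration). The "mean" method needs only base R; method = "median" also
## relies on weightedMedian(), e.g. from the laeken package.
set.seed(1)
income <- c(rlnorm(200, meanlog = 9.8, sdlog = 0.4),
            rlnorm(200, meanlog = 10.0, sdlog = 0.4))
sex <- factor(rep(c("female", "male"), each = 200), levels = c("female", "male"))
gap <- genderGap(income, sex, method = "mean")
gap  # share of the male mean wage not earned by women, positive by construction here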
pcbic.subpatterns <-
function(eigenvals, n, pattern0) {
b <- NULL
pts <- NULL
k <- length(pattern0)
if (k == 1) {
return(F)
}
for (i in 1:(k - 1)) {
p1 <- pcbic.unite(pattern0, i)
b2 <- pcbic(eigenvals, n, p1)
b <- c(b, b2$BIC)
pts <- cbind(pts, p1)
}
list(bic = b, pattern = pts)
}
multiview_samps <- reactive({
validate(
need(input$param, message = FALSE),
need(!is.null(input$multiview_warmup), message = "Loading...")
)
if (!input$multiview_warmup)
par_samps_post_warmup()
else
par_samps_all()
})
dynamic_trace_plot_multiview <- reactive({
if (input$param == "")
return()
stack <- FALSE
chain <- 0
do.call(
".param_trace_dynamic",
args = list(
param_samps = multiview_samps(),
chain = chain,
stack = stack,
warmup_val = N_WARMUP,
warmup_shade = isTRUE(input$multiview_warmup) && N_WARMUP > 0,
x_lab = "Iteration",
y_lab = input$param
)
)
})
autocorr_plot_multiview <- reactive({
lags <- min(25, round((N_ITER - N_WARMUP) / 2))
do.call(
".autocorr_single_plot",
args = list(
samps = multiview_samps(),
lags = lags
)
)
})
density_plot_multiview <- reactive({
do.call(
".param_dens",
args = list(
param = input$param,
dat = multiview_samps(),
chain = 0,
chain_split = FALSE,
fill_color = base_fill,
line_color = vline_base_clr,
point_est = "None",
CI = "None",
x_breaks = "Some",
title = FALSE
)
)
})
output$multiview_param_name <-
renderUI(strong(style = "font-size: 250%; color:
output$multiview_trace_out <-
dygraphs::renderDygraph(dynamic_trace_plot_multiview())
output$multiview_density_out <-
renderPlot(density_plot_multiview(), bg = "transparent")
output$multiview_autocorr_out <-
  renderPlot(autocorr_plot_multiview(), bg = "transparent")
x = nuclearPed(1)
test_that("simple marker getters work", {
m = marker(x, name="m1", chrom=1, posMb=1e7)
x = setMarkers(x, m)
expect_equal(name(m), "m1")
expect_equal(name(x, 1), "m1")
expect_equal(chrom(m), "1")
expect_equal(chrom(x, markers=1), "1")
expect_equal(chrom(x, markers="m1"), "1")
expect_equal(posMb(m), 1e7)
expect_equal(posMb(x, markers=1), 1e7)
expect_equal(posMb(x, markers="m1"), 1e7)
})
test_that("alleles() accessor works", {
als = c("p","e","d")
m1 = marker(x, alleles=1:3, name="m1")
m2 = marker(x, alleles=als, name="m2")
x = setMarkers(x, list(m1,m2))
expect_equal(alleles(m1), as.character(1:3))
expect_equal(alleles(x, marker=1), as.character(1:3))
expect_equal(alleles(x, marker="m1"), as.character(1:3))
expect_equal(alleles(m2), sort(als))
expect_equal(alleles(x, marker=2), sort(als))
expect_equal(alleles(x, marker="m2"), sort(als))
})
test_that("afreq() accessor works", {
afr = c(.2,.3,.5)
m1 = marker(x, name="m1")
m2 = marker(x, alleles=1:3, afreq=afr, name="m2")
m3 = marker(x, alleles=3:1, afreq=afr, name="m3")
x = setMarkers(x, list(m1,m2,m3))
ans1 = c('1'=0.5, '2'=0.5)
expect_equal(afreq(m1), ans1)
expect_equal(afreq(x, marker=1), ans1)
expect_equal(afreq(x, marker="m1"), ans1)
names(afr) = 1:3
expect_equal(afreq(m2), afr)
expect_equal(afreq(x, marker=2), afr)
afr_rev = rev(afr); names(afr_rev) = 1:3
expect_equal(afreq(m3), afr_rev)
expect_equal(afreq(x, marker=3), afr_rev)
})
test_that("afreq replacement works", {
m = marker(x, alleles=c("a", "b"), name="m1")
x = setMarkers(x, list(m))
afr = c(a=.1, b=.9)
afreq(x, "m1") = afr
expect_equal(afreq(x, 1), afr)
afreq(x, 1) = rev(afr)
expect_equal(afreq(x, "m1"), afr)
})
test_that("afreq replacement gives correct error messages", {
m = marker(x, alleles=c("a"), name="m1")
x = setMarkers(x, list(m))
expect_error({afreq(x, "m2") = c(a=1)}, "Unknown marker name: m2")
expect_error({afreq(x, 2) = c(a=1)}, "Marker index out of range: 2")
expect_error({afreq(x, 1:2) = c(a=1)}, "Frequency replacement can only be done for a single marker")
expect_error({afreq(x, "m1") = 1}, "Frequency vector must be named")
expect_error({afreq(x, "m1") = c(b=1)}, "Unknown allele: b")
expect_error({afreq(x, "m1") = c(a=1)[0]}, "Alleles missing from frequency vector: a")
expect_error({afreq(x, "m1") = c(a=0.1)}, "Frequencies must sum to 1")
})
test_that("genotype() works", {
x = nuclearPed(children="boy")
m1 = marker(x, name="m1")
m2 = marker(x, boy="1/2", name="m2")
m3 = marker(x, "1"="17.2/17.2", name="m3")
x = setMarkers(x, list(m1,m2,m3))
genoNA = c(NA_character_, NA_character_)
expect_equal(genotype(m1, "boy"), genoNA)
expect_equal(genotype(x, marker=1, id="boy"), genoNA)
expect_equal(genotype(x, marker="m1", id="boy"), genoNA)
genoHet = as.character(1:2)
expect_equal(genotype(m2, id="boy"), genoHet)
expect_equal(genotype(x, marker=2, id="boy"), genoHet)
genoSTR = c("17.2", "17.2")
expect_equal(genotype(m3, 1), genoSTR)
expect_equal(genotype(m3, "1"), genoSTR)
expect_equal(genotype(x, marker="m3", id=1), genoSTR)
})
test_that("genotype replacement works", {
x = nuclearPed(father=101, mother=102, children="boy")
m1 = marker(x, name="m1", alleles=1:2)
m2 = marker(x, name="m2", alleles=c("a", "b"))
x = setMarkers(x, list(m1, m2))
genotype(x, 1, id=101) = "2/2"
genotype(x, "m1", "boy") = "2/1"
expect_equal(genotype(x, "m1", 101), c("2", "2"))
expect_equal(genotype(x, 1, "boy"), c("2", "1"))
genotype(x, 2, id=101) = 'b/b'
genotype(x, "m2", "boy") = 'b/a'
expect_equal(genotype(x, "m2", 101), c("b", "b"))
expect_equal(genotype(x, 2, "boy"), c("b", "a"))
})
test_that("genotype replacement gives correct error messages", {
x = nuclearPed(father=101, mother=102, children="boy")
x = addMarker(x, name="m1", alleles=1:2)
expect_error({genotype(x, "m2", 101) = 3}, "Unknown marker name: m2")
expect_error({genotype(x, 2, 101) = 3}, "Marker index out of range: 2")
expect_error({genotype(x, 1:2, 101) = 3}, "Genotype replacement can only be done for a single marker")
expect_error({genotype(x, "m1", 100) = 3}, "Unknown ID label: 100")
expect_error({genotype(x, "m1", "girl") = 3}, "Unknown ID label: girl")
expect_error({genotype(x, "m1", 101) = 3}, "Unknown allele for this marker: 3")
expect_error({genotype(x, "m1", 101) = 1:3}, "Number of alleles must be 1 or 2")
})
test_that("genotype replacement works with partial genotypes", {
x = nuclearPed(father=101, mother=102, children=1:2)
x = addMarker(x, name="m1", alleles=c('a','b'))
genotype(x, "m1", id=101) = c("a", NA)
genotype(x, "m1", id=102) = c("a", "")
genotype(x, "m1", id=1) = c("b", 0)
genotype(x, "m1", id=2) = c("b", "-")
expect_equal(x$MARKERS[[1]][,1], c(1,1,2,2))
expect_equal(x$MARKERS[[1]][,2], c(0,0,0,0))
expect_equal(genotype(x, 1, 101), c("a", NA_character_))
expect_equal(genotype(x, 1, 102), c("a", NA_character_))
expect_equal(genotype(x, 1, 1), c("b", NA_character_))
expect_equal(genotype(x, 1, 2), c("b", NA_character_))
})
test_that("read_gtfs", {
expect_error(read_gtfs("xyz123.zip"))
poa <- read_gtfs(system.file("extdata/poa.zip", package="gtfs2gps"))
expect_type(poa, "list")
expect_equal(length(poa), 7)
expect_true(length(poa$agency) >= 1)
expect_equal(length(poa$routes), 3)
expect_equal(length(poa$stops), 6)
expect_equal(length(poa$stop_times), 5)
expect_equal(length(poa$shapes), 4)
expect_equal(length(poa$trips), 4)
expect_equal(length(poa$calendar), 10)
expect_type(poa$stop_times$arrival_time, "integer")
expect_type(poa$stop_times$departure_time, "integer")
expect_equal(dim(poa$stop_times)[1], 23040)
expect_equal(dim(poa$shapes)[1], 1265)
expect_equal(dim(poa$trips)[1], 387)
sp <- read_gtfs(system.file("extdata/saopaulo.zip", package="gtfs2gps"))
expect_type(sp$frequencies$start_time, "integer")
expect_type(sp$frequencies$end_time, "integer")
file.copy(system.file("extdata/poa.zip", package="gtfs2gps"), "poa.zip")
unzip("poa.zip")
files <- c("stops.txt", "stop_times.txt", "shapes.txt", "trips.txt", "calendar.txt", "agency.txt", "routes.txt")
})
coxsnell.bc <- function(density, logdensity, n, parms, mle, lower = '-Inf', upper = 'Inf', ...)
{
{p <- length(parms); l <- length(mle)};
if(p != l) stop("The arguments 'parms' and 'mle' must have the same size")
kappa_ij <- matrix(NA_real_, ncol = p, nrow = p)
kappa_ijl <- array(NA_real_, dim = c(p, p, p))
kappa_ij_l <- kappa_ijl
{colnames(kappa_ij) <- parms; rownames(kappa_ij) <- parms};
{integrand <- function(x){}; snd <- integrand}
for(i in 1:p)
{
assign(parms[i], mle[i])
}
first <- sapply(1:p, function(i) D(logdensity, parms[i]))
for(i in 1:p)
{
for(j in 1:p)
{
second <- D(first[[i]], parms[j])
body(integrand) <- bquote(.(second) * .(density))
aux <- tryCatch(integrate(integrand, lower, upper, stop.on.error = FALSE)[c("message", "value")], error = function(e) list(message = "fails"))
if(aux$message != 'OK') stop('The integrate function failed')
kappa_ij[i, j] <- -n * aux$value
for(l in 1:p)
{
third <- D(second, parms[l])
body(integrand) <- bquote(.(third) * .(density))
aux <- tryCatch(integrate(integrand, lower, upper, stop.on.error = FALSE)[c("message", "value")], error = function(e) list(message = "fails"))
if(aux$message != 'OK') stop('The integrate function failed')
kappa_ijl[i, j, l] <- n * aux$value
body(integrand) <- bquote(.(second) * .(first[[l]]) * .(density))
aux <- tryCatch(integrate(integrand, lower, upper, stop.on.error = FALSE)[c("message", "value")], error = function(e) list(message = "fails"))
if(aux$message != 'OK') stop('The integrate function failed')
kappa_ij_l[i, j, l] <- n * aux$value
}
}
}
if(any(eigen(kappa_ij)$values < 0)) stop("The final Hessian matrix has at least one negative eigenvalue")
inv_kappa_ij <- solve(kappa_ij)
bc <- vector(length = p, mode = 'numeric')
names(bc) <- parms
for(s in 1:p)
{
bc[s] <- 0
for(i in 1:p)
{
for(j in 1:p)
{
for(l in 1:p)
{
bc[s] <- bc[s] + inv_kappa_ij[s, i] * inv_kappa_ij[j, l] * (0.5 * kappa_ijl[i, j, l] + kappa_ij_l[i, j, l])
}
}
}
}
{mle.bc <- mle - bc; varcov.bc <- expected.varcov(density, logdensity, n, parms, mle.bc, lower, upper, ...)$varcov}
{names(mle) <- parms; names(mle.bc) <- parms};
return(list(mle = mle, varcov = inv_kappa_ij, mle.bc = mle.bc, varcov.bc = varcov.bc, bias = bc))
}
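## Usage sketch for coxsnell.bc() above, kept as comments because it calls
## expected.varcov(), which is not defined in this file (it is assumed to come
## from the same package, e.g. mle.tools); the normal-density expressions and
## data below are assumptions for illustration.
# pdf  <- quote(1 / (sqrt(2 * pi) * sigma) * exp(-0.5 * ((x - mu) / sigma)^2))
# lpdf <- quote(-log(sigma) - 0.5 * ((x - mu) / sigma)^2)
# set.seed(1); y <- rnorm(100, mean = 2, sd = 1)
# coxsnell.bc(density = pdf, logdensity = lpdf, n = length(y),
#             parms = c("mu", "sigma"), mle = c(mean(y), sd(y)),
#             lower = -Inf, upper = Inf)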
step_set <- function(x, y, style) {
stopifnot(is_step(x))
stopifnot(is_step(y))
stopifnot(is.character(style))
new_step(
parent = x,
parent2 = y,
locals = utils::modifyList(x$locals, y$locals),
style = style,
class = "dtplyr_step_set",
)
}
dt_sources.dtplyr_step_set <- function(x) {
dt_sources.dtplyr_step_join(x)
}
dt_call.dtplyr_step_set <- function(x, needs_copy = x$needs_copy) {
lhs <- dt_call(x$parent, needs_copy)
rhs <- dt_call(x$parent2)
call <- switch(x$style,
intersect = call2("fintersect", lhs, rhs),
union = call2("funion", lhs, rhs),
union_all = call2("funion", lhs, rhs, all = TRUE),
setdiff = call2("fsetdiff", lhs, rhs),
)
call
}
intersect.dtplyr_step <- function(x, y, ...) {
if (!is_step(y)) {
y <- lazy_dt(y)
}
step_set(x, y, style = "intersect")
}
intersect.data.table <- function(x, y, ...) {
x <- lazy_dt(x)
intersect(x, y, ...)
}
union.dtplyr_step <- function(x, y, ...) {
if (!is_step(y)) {
y <- lazy_dt(y)
}
step_set(x, y, style = "union")
}
union.data.table <- function(x, y, ...) {
x <- lazy_dt(x)
union(x, y, ...)
}
union_all.dtplyr_step <- function(x, y, ...) {
if (!is_step(y)) {
y <- lazy_dt(y)
}
step_set(x, y, style = "union_all")
}
union_all.data.table <- function(x, y, ...) {
x <- lazy_dt(x)
union_all(x, y, ...)
}
setdiff.dtplyr_step <- function(x, y, ...) {
if (!is_step(y)) {
y <- lazy_dt(y)
}
step_set(x, y, style = "setdiff")
}
setdiff.data.table <- function(x, y, ...) {
x <- lazy_dt(x)
setdiff(x, y, ...)
}
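## Usage sketch for the set-operation steps above, kept as comments because they
## need the dtplyr step machinery (new_step(), lazy_dt(), dt_call()) and the
## data.table/dplyr generics, none of which are defined in this file:
# library(data.table); library(dtplyr); library(dplyr)
# dt1 <- lazy_dt(data.table(x = 1:4))
# dt2 <- lazy_dt(data.table(x = 3:6))
# intersect(dt1, dt2)  # step translating to data.table::fintersect()
# union_all(dt1, dt2)  # step translating to funion(..., all = TRUE)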
library(EnvStats)
windows()
plotPredIntNormTestPowerCurve(n = 10, k = 1,
range.delta.over.sigma = c(0, 5),
pi.type = "upper", conf.level = 0.99,
ylim = c(0, 1),
xlab = expression(paste(Delta, " (SDs above Background)")),
ylab = "Power", cex.main = 1.1,
main = "Figure 6-2. Normal Power Curve (n = 10) for 99% Prediction Limit Test")
windows()
plotPredIntNormTestPowerCurve(n = 10, k = 1,
range.delta.over.sigma = c(0, 5),
pi.type = "upper", conf.level = 0.99,
ylim = c(0, 1),
xlab = "SD Units Above BG",
ylab = "Power",
main = "Figure 6-3. EPA Reference Power Curves")
plotPredIntNormTestPowerCurve(n = 10, k = 2, conf.level = 0.99,
add = TRUE, plot.col = "red", plot.lty = 2)
plotPredIntNormTestPowerCurve(n = 10, k = 4, conf.level = 0.99,
add = TRUE, plot.col = "blue", plot.lty = 3)
legend("topleft", c("Quarterly", "Semi-Annual", "Annual"), lty = 3:1,
lwd = 3 * par("cex"), col = c("blue", "red", "black"))
predIntNormTestPower(n = 10, k = 1,
delta.over.sigma = (15 - 6) / 2,
pi.type = "upper", conf.level = 0.99)
stats <- summaryStats(Sulfate.ppm ~ 1, data = EPA.09.Ex.6.3.sulfate.df,
data.name = "Sulfate", digits = 1)
stats
n <- stats[, "N"]
SD <- stats[, "SD"]
delta <- c(0, 200)
windows()
plotPredIntNormTestPowerCurve(n = n, k = 1,
axes = F,
range.delta.over.sigma = range(delta / SD),
pi.type = "upper", conf.level = 0.99,
ylim = c(0, 1),
xlab = "Sulfate Conc. Increase (ppm)",
ylab = "Power",
main = "Figure 6-3. Approximate s-Based Power Curve for Sulfate")
axis(2)
x.axis.ticks <- seq(0, 200, by = 50)
axis(1, at = x.axis.ticks / SD, labels = x.axis.ticks)
box()
abline(v = 75 / SD, lty = 2, lwd = 2)
predIntNormTestPower(n = n, k = 1, delta.over.sigma = 75 / SD,
pi.type = "upper", conf.level = 0.99)
rm(stats, n, SD, delta, x.axis.ticks)
predict.BayesSUR <- function(object, newx, type="response", beta.type="marginal", Pmax=0, ...){
if( length(type) > 1 ){
warning("'type' has length > 1 and only the first element will be used")
type <- type[1]
}
if( !(type %in% c("response", "coefficients", "nonzero")) )
stop("Please specify correct 'type'!")
if( Pmax<0 | Pmax>1 )
stop("Please specify correct argument 'Pmax' in [0,1]!")
if( !(beta.type %in% c("marginal", "conditional")) )
stop("Please specify acorrect 'beta.type'!")
if( (type %in% c("response", "coefficients")) & (Pmax>0) & (beta.type=="marginal"))
stop("Pmax > 0 is valid only if the arguments type='coefficients' and beta.type='conditional'!")
gamma_hat <- getEstimator( object, estimator="gamma", Pmax = Pmax, ...)
beta_hat <- getEstimator( object, estimator="beta", Pmax = Pmax, beta.type=beta.type, ...)
object$output[-1] <- paste(object$output$outFilePath,object$output[-1],sep="")
X <- as.matrix( read.table(object$output$X,header=T) )
if( "X0" %in% names(object$output) ){
X0 <- as.matrix( read.table(object$output$X0) )
}else{
X0 <- NULL
}
if( missing(newx) ){
y.pred <- cbind(X0, X) %*% beta_hat
}else{
y.pred <- newx %*% beta_hat
}
gamma_out <- which(gamma_hat==1, arr.ind=TRUE)
colnames(gamma_out) <- c("predictors", "response")
if(type == "response")
return( y.pred )
if(type == "coefficients")
return( beta_hat )
if(type == "nonzero")
return( gamma_out )
}
pwMoment <-
function (x, j = 0, k = 0, method = "unbiased", plot.pos.cons = c(a = 0.35,
b = 0), na.rm = FALSE)
{
if (length(j) != 1 || !is.vector(j, mode = "numeric") ||
j != trunc(j) || j < 0 || length(k) != 1 || !is.vector(k,
mode = "numeric") || k != trunc(k) || k < 0)
stop("'j' and 'k' must be non-negative integers")
if (j > 0 && k > 0)
stop("Either 'j' or 'k' (or both) must be 0")
if (!is.vector(x, mode = "numeric") || is.factor(x))
stop("'x' must be a numeric vector")
wna <- which.na(x)
if (length(wna)) {
if (na.rm)
x <- x[-wna]
else return(NA)
}
n <- length(x)
if (n < 1)
stop("'x' must contain at least one non-missing, finite value")
if (j == 0 && k == 0) {
pwm <- mean(x)
}
else {
x <- sort(x)
method <- match.arg(method, c("unbiased", "plotting.position"))
if (method == "plotting.position") {
if (!is.vector(plot.pos.cons, mode = "numeric") ||
length(plot.pos.cons) != 2)
stop("'plot.pos.cons' must be a numeric vector of length 2")
if (any(is.na(match(c("a", "b"), names(plot.pos.cons)))))
names(plot.pos.cons) <- c("a", "b")
p <- ((1:n) - plot.pos.cons["a"])/(n + plot.pos.cons["b"])
}
if (j > 0) {
if (j > (n - 1))
stop("'j' must be between 0 and 'n-1'")
pwm <- switch(method, unbiased = {
index <- (j + 1):n
sum((x[index]/n) * exp(lchoose(index - 1, j) -
lchoose(n - 1, j)))
}, plotting.position = mean(p^j * x))
}
else {
if (k > (n - 1))
stop("'k' must be between 0 and 'n-1'")
pwm <- switch(method, unbiased = {
index <- 1:(n - k)
sum((x[index]/n) * exp(lchoose(n - index, k) -
lchoose(n - 1, k)))
}, plotting.position = mean((1 - p)^k * x))
}
}
pwm
}
tmpdir <- tempdir()
vcr_configure(dir = tmpdir, write_disk_path = file.path(tmpdir, "files"))
context("use_cassette: works as expected")
test_that("use_cassette works as expected", {
skip_on_cran()
library(crul)
mydir <- file.path(tempdir(), "asdfasdfsd")
invisible(vcr_configure(dir = mydir))
unlink(file.path(vcr_c$dir, "testing1.yml"))
aa <- use_cassette(name = "testing1", {
res <- crul::HttpClient$new("https://eu.httpbin.org/get")$get()
})
expect_output(print(aa), "<vcr - Cassette>")
expect_output(print(aa), "Record method: once")
expect_output(print(aa), "Serialize with: yaml")
expect_output(print(aa), "Persist with: FileSystem")
expect_output(print(aa), "preserve_exact_body_bytes")
expect_is(aa, "Cassette")
expect_is(aa$name, "character")
expect_equal(aa$name, "testing1")
expect_false(aa$allow_playback_repeats)
expect_is(aa$args, "list")
expect_is(aa$call_block, "function")
expect_is(res, "HttpResponse")
expect_is(res$content, "raw")
cas <- readLines(file.path(vcr_c$dir, "testing1.yml"))
expect_is(cas, "character")
})
context("use_cassette fails well")
test_that("use_cassette fails well", {
unlink(file.path(vcr_c$dir, "foobar333.yml"))
expect_error(
sw(sm(use_cassette("foobar333"))),
"`vcr::use_cassette` requires a code block"
)
expect_error(use_cassette(), "argument \"name\" is missing")
expect_error(
suppressMessages(use_cassette("newbar", {}, record = "stuff")),
"'record' value of 'stuff' is not in the allowed set"
)
expect_error(
suppressMessages(use_cassette("newbar", {}, match_requests_on = "stuff")),
"'match_requests_on' values \\(stuff\\) is not in the allowed set"
)
expect_error(
suppressMessages(use_cassette("newbar3", {}, update_content_length_header = 5)),
"update_content_length_header must be of class logical"
)
expect_error(
suppressMessages(use_cassette("newbar4", {}, preserve_exact_body_bytes = 5)),
"preserve_exact_body_bytes must be of class logical"
)
expect_error(
suppressMessages(use_cassette("newbar5", {}, persist_with = "jello")),
"The requested VCR cassette persister \\(jello\\) is not registered"
)
expect_error(
suppressMessages(use_cassette("newbar6", {}, serialize_with = "howdy")),
"The requested vcr cassette serializer \\(howdy\\) is not registered"
)
})
unlink(list.files(vcr_c$dir, pattern = "newbar", full.names = TRUE))
unlink(file.path(vcr_c$dir, "foobar333.yml"))
unlink("foobar333.yml")
unlink("testing1.yml")
vcr_configure_reset()
covML <- function(x,...){
(nrow(x)-1)/(nrow(x)) * stats::cov(x, ...)
}
covMLtoUB <- function(x,n,...){
n/(n-1) * x
}
covUBtoML <- function(x,n,...){
(n-1)/n * x
}
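## Usage sketch: covML() returns the maximum-likelihood (divide-by-n) covariance,
## and covMLtoUB()/covUBtoML() rescale between the ML and unbiased (divide-by-n-1)
## forms; the random data below are an assumption for illustration.
set.seed(1)
X <- matrix(rnorm(50 * 3), nrow = 50, ncol = 3)
S_ml <- covML(X)
all.equal(covMLtoUB(S_ml, n = nrow(X)), stats::cov(X))  # TRUE: rescaling recovers cov()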
top10_paths <- function() {
check_data()
trace_cnt <- data.frame(table(tashudata::tashu$RENT_STATION, tashudata::tashu$RETURN_STATION))
names(trace_cnt) <- c("RENT_STATION", "RETURN_STATION", "COUNT")
sort_trace_cnt <- head(trace_cnt[order(-trace_cnt$COUNT), ], 10)
ggplot() +
geom_point(
aes_string(x = "RENT_STATION", y = "RETURN_STATION", size = "COUNT"),
data = sort_trace_cnt)+
ggtitle("Most Popular paths In 2013 ~ 2015\n")
}
source("helper/helper.R")
f = backports:::isTRUE
expect_identical(f(TRUE), TRUE)
expect_identical(f(FALSE), FALSE)
expect_identical(f(1), FALSE)
expect_identical(f(iris), FALSE)
expect_identical(f(structure(TRUE, foo = "bar")), TRUE)
hardclasses <- function (x, classdim = 2L, soft.name = NA, tol = 1e-5, drop = TRUE){
if (ndim (x) == 0) {
warning ("Using hardclasses (cbind (x, 1 - x)) instead.")
x <- cbind (x, 1 - x)
colnames (x) <- 1 : 0
}
classdim <- numericindex (x = dim (x), i = classdim, n = names (dimnames (x)))
x <- aperm (x, c(seq_len (ndim (x))[-classdim], classdim))
x <- makeNd (x, -2)
olddims <- attr (x, "old")[[1]]
if (any (abs(1 - rowSums (x)) > tol, na.rm = TRUE))
warning ("Found samples with total membership != 1")
if (is.null (classes <- colnames (x)))
classes <- paste ("class", seq_len (ncol (x)), sep = "")
x <- x >= 1 - tol
cl <- apply (x, 1, function (x) match (TRUE, x))
if (! is.na (soft.name)){
classes <- c (classes, soft.name)
cl [is.na (cl)] <- length (classes)
}
cl <- structure (cl,
.Label = classes, class = "factor",
.Dim = head (olddims$dim, -1),
.Dimnames = lon (head (olddims$dimnames, -1)))
drop1d (cl, drop = drop)
}
.test (hardclasses) <- function (){
checkEquals (hardclasses (pred),
factor (rep (letters [c (1, 2, NA, NA, NA)], 2), levels = letters [1 : 3]))
checkEquals (hardclasses (pred, drop = FALSE), ensuredim (hardclasses (pred)))
tmp <- pred
dim (tmp) <- c (5, 2, 3)
checkEquals (hardclasses (tmp, 3),
structure (c (1L, 2L, NA, NA, NA, 1L, 2L, NA, NA, NA), .Dim = c(5L, 2L),
.Label = c("class1", "class2", "class3"), class = "factor"))
warn <- options(warn = 2)$warn
on.exit (options (warn = warn))
checkException (hardclasses (pred [,1]))
options(warn = -1)
checkEquals (hardclasses (pred [, 1]),
factor (rep (c ("1", "0", NA, NA, NA), 2), levels = c ("1", "0")))
options (warn = warn)
pred [2:3,] <- NA
checkEquals (hardclasses (pred),
factor (letters [c (1, NA, NA, NA, NA, 1, 2, NA, NA, NA)],
levels = letters [1 : 3]))
pred [1,1] <- NA
checkEquals (hardclasses (pred),
factor (letters [c (NA, NA, NA, NA, NA, 1, 2, NA, NA, NA)],
levels = letters [1 : 3]))
pred [6,2] <- NA
checkEquals (hardclasses (pred),
factor (letters [c (NA, NA, NA, NA, NA, 1, 2, NA, NA, NA)],
levels = letters [1 : 3]))
}
hard <- function (op)
attr (op, "hard")
"hard<-" <- function (op, value){
stopifnot (is.logical (value), !is.na (value))
attr (op, "hard") <- value
op
}
.test (hard) <- function (){
myop <- function (){}
checkTrue (is.null (hard (myop)))
hard (myop) <- TRUE
checkTrue (hard (myop))
hard (myop) <- FALSE
checkTrue (!hard (myop))
checkException (hard (myop) <- NULL)
checkException (hard (myop) <- NA)
}
harden <- function (x, classdim = 2L, tol = 1e-6, closed = TRUE){
x <- .make01 (x, tol = tol)
if (closed && dim (x) [classdim] > 1L){
nas <- colSums (aperm (x, c (classdim, seq_len (ndim (x)) [-classdim])))
nas <- which (is.na (nas) | nas == 0, arr.ind = TRUE)
nas <- as.matrix (nas)
if (length (nas) > 0L)
for (i in seq_len (dim (x)[classdim])){
tmp <- cbind (nas [,seq_len (classdim - 1)],
i,
nas [,seq_len (ncol (nas) - classdim + 1) + classdim - 1])
x [tmp] <- NA
}
}
x
}
.test (harden) <- function (){
checkEquals (harden (as.matrix (v)),
structure(c(0, NA, NA, 1, NA),
.Dim = c(5L, 1L),
.Dimnames = list(c("a", "b", "c", "d", "e"), NULL)))
checkEquals (harden (m),
structure(c(1, NA, NA, NA, 0, NA, NA, NA, 0, NA, NA, NA),
.Dim = c(4L, 3L),
.Dimnames = list(c("a", "b", "c", "d"), c("A", "B", "C")))
)
checkEquals (harden (pred.array),
structure(c(1, 0, NA, NA, NA, 1, 0, NA, NA, NA,
0, 1, NA, NA, NA, 0, 1, NA, NA, NA,
0, 0, NA, NA, NA, 0, 0, NA, NA, NA,
1, 1, 1, 1, 1, NA, NA, NA, NA, NA,
0, 0, 0, 0, 0, NA, NA, NA, NA, NA,
0, 0, 0, 0, 0, NA, NA, NA, NA, NA),
.Dim = c(10L, 3L, 2L),
.Dimnames = list(NULL, c("a", "b", "c"), c("1", "2")))
)
checkEquals (harden (pred.array, closed = FALSE),
structure(c(1, 0, NA, NA, NA, 1, 0, NA, NA, NA,
0, 1, NA, NA, NA, 0, 1, NA, NA, NA,
0, 0, 0, NA, NA, 0, 0, 0, NA, NA,
1, 1, 1, 1, 1, NA, NA, NA, NA, NA,
0, 0, 0, 0, 0, NA, NA, NA, NA, NA,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
.Dim = c(10L, 3L, 2L),
.Dimnames = list(NULL, c("a", "b", "c"), c("1", "2")))
)
checkEquals (harden (pred.array, classdim = 3L),
structure(c( 1, 0, NA, NA, NA, NA, NA, NA, NA, NA,
NA, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, 0, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
.Dim = c(10L, 3L, 2L),
.Dimnames = list(NULL, c("a", "b", "c"), c("1", "2")))
)
}
plot_cumhaz <- function(data, dist, time = "time", censor = "censor", by = "") {
fit <- fit_data(data, dist, time, censor, by)
pfunc <- match.fun(paste("p", dist, sep = ""))
qfunc <- match.fun(paste("q", dist, sep = ""))
if (nchar(by) > 0) {
f <- fit[[1]]
l <- c(p = .01, f$estimate)
s <- split(unname(l), names(l))
l <- c(p = .99, f$estimate)
e <- split(unname(l), names(l))
start <- floor(do.call(qfunc, s))
end <- ceiling(do.call(qfunc, e))
y <- NULL
for (i in 1:length(fit)) {
f <- fit[[i]]
args <- c(f$estimate)
pargs <- split(unname(args), names(args))
pargs$q <- start:end
y <- rbind(y, cbind(matrix(-log(1 - do.call(pfunc, pargs)), ncol = 1),
levels(as.factor(data[[by]]))[i]))
}
df <- data.frame(y)
df$x <- rep(start:end, length(fit))
df$X1 <- as.numeric(as.character(df$X1))
p <- ggplot(df, aes_string(x = "x", y = "X1", group = "X2", color = factor(df[["X2"]]))) + geom_line() +
scale_x_continuous(name = "T") +
scale_y_continuous(name = "H(t)") +
ggtitle(paste(dist, "cumulative hazard function")) +
theme(axis.text.x = element_text(size = rel(1.5)),
axis.text.y = element_text(size = rel(1.5)),
axis.title.y = element_text(size = rel(1.5)),
axis.title.x = element_text(size = rel(1.5)),
plot.title = element_text(size = rel(2))) +
scale_colour_discrete(name = "Group")
plot(p)
}
else {
l <- c(p = .01, fit$estimate)
s <- split(unname(l), names(l))
l <- c(p = .99, fit$estimate)
e <- split(unname(l), names(l))
start <- floor(do.call(qfunc, s))
end <- ceiling(do.call(qfunc, e))
args <- c(fit$estimate)
pargs <- split(unname(args), names(args))
pargs$q <- start:end
args <- c(fit$estimate)
dargs <- split(unname(args), names(args))
dargs$x <- start:end
y <- NULL
y <- rbind(y, cbind(matrix(-log(1 - do.call(pfunc, pargs)), ncol = 1)))
df <- data.frame(y)
df$x <- start:end
df$y <- as.numeric(as.character(df$y))
p <- ggplot(df, aes_string(x = "x", y = "y")) + geom_line() +
scale_x_continuous(name = "T") +
scale_y_continuous(name = "H(t)") +
ggtitle(paste(dist, "cumulative hazard function")) +
theme(axis.text.x = element_text(size = rel(1.5)),
axis.text.y = element_text(size = rel(1.5)),
axis.title.y = element_text(size = rel(1.5)),
axis.title.x = element_text(size = rel(1.5)),
plot.title = element_text(size = rel(2)))
plot(p)
}
}
consider <- function (part)
{
if (!inherits(part,'partana')) stop('Must pass argument of class partana')
ctc <- part$ctc
vals <- part$ctc[row(part$ctc)>col(part$ctc)]
mus <- as.numeric(table(part$clustering))
n <- length(part$clustering)
row <- rep(0,length(vals))
col <- rep(0,length(vals))
pnt <- 0
for (i in 1:(ncol(ctc)-1)) {
for (j in (i+1):nrow(ctc)) {
pnt <- pnt + 1
row[pnt] <- j
col[pnt] <- i
}
}
row <- row[rev(order(vals))]
col <- col[rev(order(vals))]
vals <- rev(sort(vals))
out <- data.frame(row,col,vals)
out
}
lqs <- function(x, ...) UseMethod("lqs")
lqs.formula <-
function(formula, data, ...,
method = c("lts" ,"lqs", "lms", "S", "model.frame"),
subset, na.action,
model = TRUE, x.ret = FALSE, y.ret = FALSE, contrasts = NULL)
{
method <- match.arg(method)
mf <- match.call(expand.dots = FALSE)
mf$method <- mf$contrasts <- mf$model <- mf$x.ret <- mf$y.ret <- mf$... <- NULL
mf[[1L]] <- quote(stats::model.frame)
mf <- eval.parent(mf)
if (method == "model.frame") return(mf)
mt <- attr(mf, "terms")
y <- model.extract(mf, "response")
offset <- model.offset(mf)
if(!is.null(offset)) y <- y - offset
x <- model.matrix(mt, mf, contrasts)
contr <- attr(x, "contrasts")
xint <- match("(Intercept)", colnames(x), nomatch = 0L)
if(xint) x <- x[, -xint, drop = FALSE]
fit <- lqs.default(x, y, intercept = (xint > 0), method = method, ...)
fit$terms <- mt
fit$call <- match.call()
fit$contrasts <- contr
fit$xlevels <- .getXlevels(mt, mf)
fit$na.action <- attr(mf, "na.action")
if(model) fit$model <- mf
if(x.ret) fit$x <- x
if(y.ret) fit$y <- y
fit
}
lqs.default <-
function(x, y, intercept = TRUE, method = c("lts", "lqs", "lms", "S"),
quantile, control = lqs.control(...), k0 = 1.548, seed, ...)
{
lqs.control <- function(psamp = NA, nsamp = "best", adjust = TRUE)
list(psamp = psamp, nsamp = nsamp, adjust = adjust)
n <- length(y)
nmx <- deparse(substitute(x))
if(is.null(dim(x))) {
x <- as.matrix(x)
colnames(x) <- nmx
} else x <- as.matrix(x)
p <- ncol(x)
if(any(is.na(x)) || any(is.na(y)))
stop("missing values are not allowed")
nm <- colnames(x)
if(is.null(nm))
nm <- if(p > 1) paste("X", 1L:p, sep="") else if(p == 1) "X" else NULL
if(intercept) {
att <- attr(x, "contrasts")
x <- cbind(1, x)
nm <- c("(Intercept)", nm)
attr(x, "contrasts") <- att
}
p <- ncol(x)
if(nrow(x) != n) stop("'x' and 'y' must have the same number of rows")
method <- match.arg(method)
lts <- 0; beta <- 0
if(method == "lqs" && missing(quantile)) quantile <- floor((n+p+1)/2)
if(method == "lms") quantile <- floor((n+1)/2)
if(method == "lts") {
lts <- 1
if(missing(quantile)) quantile <- floor(n/2) + floor((p+1)/2)
}
if(method == "S") {
lts <- 2
beta <- 0.5
quantile <- ceiling(n/2)
chi <- function(u, k0)
{ u <- (u/k0)^2; ifelse(u < 1, 3*u - 3*u^2 + u^3, 1) }
}
if(quantile > n-1)
stop(gettextf("'quantile' must be at most %d", n-1),
domain = NA)
ps <- control$psamp
if(is.na(ps)) ps <- p
if(ps < p) {
ps <- p
warning("'ps' must be at least 'p'")
}
adj <- control$adjust & intercept
nsamp <- eval(control$nsamp)
nexact <- choose(n, ps)
if(is.character(nsamp) && nsamp == "best") {
nsamp <- if(nexact < 5000) "exact" else "sample"
} else if(is.numeric(nsamp) && nsamp > nexact) {
warning(sprintf(ngettext(nexact,
"only %d set, so all sets will be tried",
"only %d sets, so all sets will be tried"),
nexact), domain = NA)
nsamp <- "exact"
}
samp <- nsamp != "exact"
if(samp) {
if(nsamp == "sample") nsamp <- min(500*ps, 3000)
} else
nsamp <- nexact
if(samp && !missing(seed)) {
if(exists(".Random.seed", envir=.GlobalEnv, inherits=FALSE)) {
seed.keep <- get(".Random.seed", envir=.GlobalEnv, inherits=FALSE)
on.exit(assign(".Random.seed", seed.keep, envir=.GlobalEnv))
}
assign(".Random.seed", seed, envir=.GlobalEnv)
}
z <- .C(lqs_fitlots,
as.double(x), as.double(y), as.integer(n), as.integer(p),
as.integer(quantile), as.integer(lts), as.integer(adj),
as.integer(samp), as.integer(ps), as.integer(nsamp),
crit=double(1), sing=integer(1L), bestone=integer(ps),
coefficients=double(p), as.double(k0), as.double(beta)
)[c("crit", "sing", "coefficients", "bestone")]
if(z$sing == nsamp)
stop("'lqs' failed: all the samples were singular", call.=FALSE)
z$sing <- paste(z$sing, "singular samples of size", ps, "out of", nsamp)
z$bestone <- sort(z$bestone)
names(z$coefficients) <- nm
fitted <- drop(x %*% z$coefficients)
z$fitted.values <- fitted
z$residuals <- y - fitted
c1 <- 1/qnorm((n + quantile)/(2*n))
s <-
if(lts == 1)
sqrt(z$crit/quantile)/sqrt(1 - 2*n*dnorm(1/c1)/(quantile*c1))
else if(lts == 0) sqrt(z$crit)*c1 else z$crit
res <- z$residuals
ind <- abs(res) <= 2.5*s
s2 <- sum(res[ind]^2)/(sum(ind) - p)
z$scale <- c(s, sqrt(s2))
if(method == "S") {
psi <- function(u, k0) (1 - pmin(1, abs(u/k0))^2)^2
resid <- z$residuals
scale <- s
for(i in 1L:30L) {
w <- psi(resid/scale, k0)
temp <- lm.wfit(x, y, w, method="qr")
resid <- temp$residuals
s2 <- scale*sqrt(sum(chi(resid/scale, k0))/((n-p)*beta))
if(abs(s2/scale - 1) < 1e-5) break
scale <- s2
}
    z$coefficients <- temp$coefficients
z$fitted.values <- temp$fitted.values
z$residuals <- resid
z$scale <- scale
}
class(z) <- "lqs"
z
}
print.lqs <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
if(!is.null(cl <- x$call)) {
cat("Call:\n")
dput(cl, control=NULL)
cat("\n")
}
cat("Coefficients:\n")
print.default(format(coef(x), digits = digits), print.gap = 2,
quote = FALSE)
cat("\nScale estimates", format(x$scale, digits = digits) ,"\n\n")
invisible(x)
}
predict.lqs <- function (object, newdata, na.action = na.pass, ...)
{
if (missing(newdata)) return(fitted(object))
Terms <- delete.response(terms(object))
m <- model.frame(Terms, newdata, na.action = na.action,
xlev = object$xlevels)
if(!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m)
X <- model.matrix(Terms, m, contrasts = object$contrasts)
drop(X %*% object$coefficients)
}
cov.rob <- function(x, cor = FALSE, quantile.used = floor((n+p+1)/2),
method = c("mve", "mcd", "classical"), nsamp = "best", seed)
{
method <- match.arg(method)
x <- as.matrix(x)
if(any(is.na(x)) || any(is.infinite(x)))
stop("missing or infinite values are not allowed")
n <- nrow(x); p <- ncol(x)
if(n < p+1)
stop(gettextf("at least %d cases are needed", p+1), domain = NA)
if(method == "classical") {
ans <- list(center = colMeans(x), cov = var(x))
} else {
if(quantile.used < p+1)
stop(gettextf("'quantile' must be at least %d", p+1), domain = NA)
if(quantile.used > n-1)
stop(gettextf("'quantile' must be at most %d", n-1), domain = NA)
divisor <- apply(x, 2, IQR)
if(any(divisor == 0)) stop("at least one column has IQR 0")
x <- x /rep(divisor, rep(n,p))
qn <- quantile.used
ps <- p + 1
nexact <- choose(n, ps)
if(is.character(nsamp) && nsamp == "best")
nsamp <- if(nexact < 5000) "exact" else "sample"
if(is.numeric(nsamp) && nsamp > nexact) {
warning(sprintf(ngettext(nexact,
"only %d set, so all sets will be tried",
"only %d sets, so all sets will be tried"),
nexact), domain = NA)
nsamp <- "exact"
}
samp <- nsamp != "exact"
if(samp) {
if(nsamp == "sample") nsamp <- min(500*ps, 3000)
} else nsamp <- nexact
if (nsamp > 2147483647) {
if(samp)
stop(sprintf("Too many samples (%.3g)", nsamp))
else
stop(sprintf('Too many combinations (%.3g) for nsamp = "exact"', nsamp))
}
if(samp && !missing(seed)) {
if(exists(".Random.seed", envir=.GlobalEnv, inherits=FALSE)) {
seed.keep <- get(".Random.seed", envir=.GlobalEnv, inherits=FALSE)
on.exit(assign(".Random.seed", seed.keep, envir=.GlobalEnv))
}
assign(".Random.seed", seed, envir=.GlobalEnv)
}
z <- .C(mve_fitlots,
as.double(x), as.integer(n), as.integer(p),
as.integer(qn), as.integer(method=="mcd"),
as.integer(samp), as.integer(ps), as.integer(nsamp),
crit=double(1), sing=integer(1L), bestone=integer(n))
z$sing <- paste(z$sing, "singular samples of size", ps,
"out of", nsamp)
crit <- z$crit + 2*sum(log(divisor)) +
if(method=="mcd") - p * log(qn - 1) else 0
best <- seq(n)[z$bestone != 0]
if(!length(best)) stop("'x' is probably collinear")
means <- colMeans(x[best, , drop = FALSE])
rcov <- var(x[best, , drop = FALSE]) * (1 + 15/(n - p))^2
dist <- mahalanobis(x, means, rcov)
cut <- qchisq(0.975, p) * quantile(dist, qn/n)/qchisq(qn/n, p)
cov <- divisor * var(x[dist < cut, , drop = FALSE]) *
rep(divisor, rep(p, p))
attr(cov, "names") <- NULL
ans <- list(center =
colMeans(x[dist < cut, , drop = FALSE]) * divisor,
cov = cov, msg = z$sing, crit = crit, best = best)
}
if(cor) {
sd <- sqrt(diag(ans$cov))
ans <- c(ans, list(cor = (ans$cov/sd)/rep(sd, rep(p, p))))
}
ans$n.obs <- n
ans
}
lmsreg <- function(...)
{
oc <- sys.call()
oc$method <- "lms"
oc[[1L]] <- quote(MASS::lqs)
eval.parent(oc)
}
ltsreg <- function(...)
{
oc <- sys.call()
oc$method <- "lts"
oc[[1L]] <- quote(MASS::lqs)
eval.parent(oc)
}
cov.mve <- function(...)
{
oc <- sys.call()
oc$method <- "mve"
oc[[1L]] <- quote(MASS::cov.rob)
eval.parent(oc)
}
cov.mcd <- function(...)
{
oc <- sys.call()
oc$method <- "mcd"
oc[[1L]] <- quote(MASS::cov.rob)
eval.parent(oc)
}
compute_benjamini_hochberg <- function(statistic = NULL, pvalue){
n <- length(pvalue)
i <- n:1L
o <- order(pvalue, decreasing = TRUE)
ro <- order(o)
pvalue <- pmin(1, cummin(n/i * pvalue[o]))[ro]
out <- list(main = cbind(statistic = statistic,pvalue = pvalue))
return(out)
}
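## Usage sketch: the adjustment above reproduces base R's Benjamini-Hochberg
## correction; the p-values are an assumption for illustration.
pvals <- c(0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205, 0.212, 0.216)
bh <- compute_benjamini_hochberg(statistic = seq_along(pvals), pvalue = pvals)
all.equal(unname(bh$main[, "pvalue"]), p.adjust(pvals, method = "BH"))  # TRUE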
NULL
IDXERR <- paste("Indexing of",sQuote("rle"),"objects by numeric or logical indexes is not implemented at this time.")
`[.rle` <- function(x, i, ...){
if(is.character(i) || (!is.null(rle_unclass_index <- getOption("rle.unclass_index")) && rle_unclass_index)) NextMethod()
else stop(IDXERR)
}
`[<-.rle` <- function(x, i, ..., value){
if(is.character(i) || (!is.null(rle_unclass_index <- getOption("rle.unclass_index")) && rle_unclass_index)) NextMethod()
else stop(IDXERR)
}
`[[.rle` <- function(x, i, ...){
if(is.character(i) || (!is.null(rle_unclass_index <- getOption("rle.unclass_index")) && rle_unclass_index)) NextMethod()
else stop(IDXERR)
}
`[[<-.rle` <- function(x, i, ..., value){
if(is.character(i) || (!is.null(rle_unclass_index <- getOption("rle.unclass_index")) && rle_unclass_index)) NextMethod()
else stop(IDXERR)
}
`$.rle` <- function(x, name){
NextMethod()
}
`$<-.rle` <- function(x, name, value){
NextMethod()
}
validation.ordPmat <-
function(ordPmat){
ncatvec = numeric(0)
w1=ordPmat>1
w2=ordPmat<0
w3=ordPmat==1
w4=is.na(ordPmat)
if(sum(w4)>0){stop("NAs are not permitted in ordPmat")}
if(sum(w1)>0){stop("The probabilities cannot be greater than 1")}
if(sum(w2)>0){stop("The probabilities cannot be less than 0")}
if(sum(w3)>0){stop("The probabilities cannot be equal to 1")}
noutcome = ncol(ordPmat)
for(i in 1:noutcome){
if (sum(ordPmat[,i])<1 | sum(ordPmat[,i])>1) {
stop("\n Invalid probabilities for variable", i,".", "\n Sum of probabilities for each variable must be exactly 1.")
}
ncatvec[i]= length(ordPmat[ordPmat[,i]>0,i])
}
return(list(J=noutcome, K=ncatvec) )
}
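## Usage sketch: each column of ordPmat holds the category probabilities of one
## ordinal variable and must sum to exactly 1; the probabilities below are an
## assumption for illustration.
P <- cbind(c(0.25, 0.25, 0.5), c(0.5, 0.5, 0))
validation.ordPmat(P)  # returns J = 2 variables with K = c(3, 2) categories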
collection_reload <- function(conn, name, raw = FALSE, callopts) {
conn$collection_reload(name, raw, callopts)
}
"mollusk" |
check_dirs <- function(x, exists = TRUE, x_name = NULL) {
if (is.null(x_name)) x_name <- deparse_backtick_chk((substitute(x)))
chk_string(x_name)
chk_character(x, x_name = x_name)
chk_vector(x, x_name = x_name)
chk_not_any_na(x, x_name = x_name)
chk_flag(exists)
dirs <- vapply(x, vld_file, TRUE)
if (any(dirs)) {
abort_chk(x_name, " must specify directories ('", x[dirs][1], "' is a file)", x = x)
}
x <- x[vapply(x, vld_dir, TRUE) != exists]
if (!length(x)) {
return(invisible(x))
}
x <- x[1]
if (exists) {
abort_chk(x_name, " must specify existing directories ('", x, "' can't be found)", x = x)
}
abort_chk(x_name, " must not specify existing directories ('", x, "' exists)", x = x)
}
lag2.plot <-
function(series1,series2,max.lag=0,corr=TRUE,smooth=TRUE,col=gray(.1),
lwl=1, bgl ='white', box.col=8, ...){
as.ts = stats::as.ts
par = graphics::par
plot = graphics::plot
lines= graphics::lines
ts.intersect = stats::ts.intersect
legend = graphics::legend
name1=paste(deparse(substitute(series1)),"(t-",sep="")
name2=paste(deparse(substitute(series2)),"(t)",sep="")
series1=as.ts(series1)
series2=as.ts(series2)
max.lag=as.integer(max.lag)
m1=max.lag+1
prow=ceiling(sqrt(m1))
pcol=ceiling(m1/prow)
a=stats::ccf(series1,series2,max.lag,plot=FALSE)$acf
old.par <- par(no.readonly = TRUE)
par(mfrow=c(prow,pcol))
for(h in 0:max.lag){
tsplot(stats::lag(series1,-h), series2, xy.labels=FALSE, type='p', xlab=paste(name1,h,")",sep=""), ylab=name2, col=col, ...)
if (smooth==TRUE)
lines(stats::lowess(ts.intersect(stats::lag(series1,-h),series2)[,1],
ts.intersect(stats::lag(series1,-h),series2)[,2]), col=2, lwd=lwl)
if (corr==TRUE)
legend("topright", legend=round(a[m1-h], digits=2), text.col=4, bg=bgl, adj=.25, cex = 0.85, box.col=box.col)
on.exit(par(old.par))
}
}
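## Usage sketch for lag2.plot() above, kept as comments because it assumes the
## astsa package (for tsplot() used in the body and for the soi/rec series):
# library(astsa)
# lag2.plot(soi, rec, max.lag = 8)  # rec(t) against soi(t-h) for h = 0, ..., 8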
run_ssm <- function(N,I,J,Y=NULL,D=NULL,Z=NULL,sigmax=1,lambda=1,y_T=pi/4,y_D=(3*pi)/4,priors="default",gfunction=c("logistic","gompertz"),kappa_bnds=c(5,300),nchains=1,niter=2000,nwarmup=500,ncores="AUTO",stan_object=FALSE,...){
if(I<1| N<1 | J<1)
stop("Positive integers should be provided for I, J, N, M")
if(is.null(Z))
stop("The design matrix Z must be provided")
if(is.null(Y))
stop("The observed matrix Y must be provided")
if(is.null(D))
stop("The matrix of distances D must be provided")
if(ncores=="AUTO")
ncores <- parallel::detectCores()
if(length(priors)==1 && priors=="default")
priors <- rep(list(NULL),dim(Z)[2])
gfunction=match.arg(gfunction)
lb <- 0.1
datastan <- list(
I = I,
N = N,
J = J,
KK = dim(Z)[2],
Y = Y,
DY = D,
sigmaz = rep(sigmax,I),
bnds = matrix(1,I*J,1)%*%matrix(c(lb,pi-lb,(pi-lb)-lb),1,3),
D = Z,
a = rep(1,I*J),
lambda_vec = rep(lambda,I*J),
Am = kronecker(diag(1,I),rep(1/J,J)),
kappa_lb = kappa_bnds[1],
kappa_ub = kappa_bnds[2],
priors_matrix = check_prior(priors)
)
if(gfunction=="logistic"){
out <- rstan::sampling(stanmodels$fit_model_log,chains=nchains,iter=niter,warmup=nwarmup,data=datastan,cores=ncores,...)
}else if(gfunction=="gompertz"){
out <- rstan::sampling(stanmodels$fit_model_gomp,chains=nchains,iter=niter,warmup=nwarmup,data=datastan,cores=ncores,...)
}
data_out <- rstan::extract(out,pars=c("b","z_pred","y_star","gamma","z_s_upd"))
gamma_out = data.frame(data_out$gamma)
names(gamma_out) = paste("gamma",seq(1,dim(gamma_out)[2]),sep="")
datafit <- list(
I = I,
N = N,
J = J,
Gfunction = gfunction,
    params = list(sigmax=sigmax,lambda=lambda,kappa_bnds=kappa_bnds,gamma=gamma_out,beta=data_out$b),
data = list(Y=Y,X=data_out$z_pred,MU=data_out$y_star,D=D,Z=Z,X_smooth=data_out$z_s_upd),
stan_table = rstan::monitor(out)
)
if(stan_object==TRUE){
save(out,file = paste(getwd(),"/stan_object.rda",sep=""),compress = TRUE)
}
return(datafit)
}
"cor2dprime" <-
function(mat, probs){
nloci <- ncol(mat)
ldmat <- matrix(0, ncol=nloci, nrow=nloci)
for (i in 1:(nloci-1)){
for (j in i:nloci){
p1 <- probs[i]
q1 <- 1-p1
p2 <- probs[j]
q2 <- 1-p2
D <- mat[i,j]*sqrt(p1*p2*q1*q2)
      if (D<0) D <- D/max(-p1*p2,-q1*q2) else D <- D/min(p1*q2,q1*p2)
ldmat[i,j] <- D
ldmat[j,i] <- ldmat[i,j]
}
}
return(ldmat)
}
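## Usage sketch: convert a pairwise allele-correlation matrix plus allele
## frequencies into Lewontin's D'; the numbers below are assumptions for illustration.
r <- matrix(c(1.0, 0.3, 0.1,
              0.3, 1.0, 0.2,
              0.1, 0.2, 1.0), nrow = 3)
cor2dprime(r, probs = c(0.2, 0.4, 0.5))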
gbart=function(
x.train, y.train,
x.test=matrix(0,0,0), type='wbart',
ntype=as.integer(
factor(type, levels=c('wbart', 'pbart', 'lbart'))),
sparse=FALSE, theta=0, omega=1,
a=0.5, b=1, augment=FALSE, rho=NULL,
xinfo=matrix(0,0,0), usequants=FALSE,
rm.const=TRUE,
sigest=NA, sigdf=3, sigquant=0.90,
k=2, power=2, base=0.95,
lambda=NA, tau.num=c(NA, 3, 6)[ntype],
offset=NULL, w=rep(1, length(y.train)),
ntree=c(200L, 50L, 50L)[ntype], numcut=100L,
ndpost=1000L, nskip=100L,
keepevery=c(1L, 10L, 10L)[ntype],
printevery=100L, transposed=FALSE,
hostname=FALSE,
mc.cores = 1L, nice = 19L, seed = 99L
)
{
if(is.na(ntype))
stop("type argument must be set to either 'wbart', 'pbart' or 'lbart'")
n = length(y.train)
if(!transposed) {
temp = bartModelMatrix(x.train, numcut, usequants=usequants,
xinfo=xinfo, rm.const=rm.const)
x.train = t(temp$X)
numcut = temp$numcut
xinfo = temp$xinfo
if(length(x.test)>0) {
x.test = bartModelMatrix(x.test)
x.test = t(x.test[ , temp$rm.const])
}
rm.const <- temp$rm.const
grp <- temp$grp
rm(temp)
}
else {
rm.const <- NULL
grp <- NULL
}
if(n!=ncol(x.train))
stop('The length of y.train and the number of rows in x.train must be identical')
p = nrow(x.train)
np = ncol(x.test)
if(length(rho)==0) rho=p
if(length(rm.const)==0) rm.const <- 1:p
if(length(grp)==0) grp <- 1:p
check <- unique(sort(y.train))
if(length(check)==2) {
if(!all(check==0:1))
stop('Binary y.train must be coded as 0 and 1')
if(type=='wbart')
stop("The outcome is binary so set type to 'pbart' or 'lbart'")
}
if(length(offset)==0) {
offset=mean(y.train)
if(type=='pbart') offset=qnorm(offset)
else if(type=='lbart') offset=qlogis(offset)
}
if(type=='wbart') {
y.train = y.train-offset
if(!is.na(sigest) && !is.na(lambda) && lambda==0) {
}
else if(is.na(lambda)) {
if(is.na(sigest)) {
if(p < n)
sigest = summary(lm(y.train~.,
data.frame(t(x.train),y.train)))$sigma
else sigest = sd(y.train)
}
qchi = qchisq(1-sigquant, sigdf)
lambda = (sigest^2)*qchi/sigdf
} else {
sigest=sqrt(lambda)
}
if(is.na(tau.num)) {
tau=(max(y.train)-min(y.train))/(2*k*sqrt(ntree))
} else {
tau=tau.num/(k*sqrt(ntree))
}
} else {
lambda=1
sigest=1
tau=tau.num/(k*sqrt(ntree))
}
check=(np>0 && np==n)
for(i in 1:n)
for(j in 1:p) {
if(check) check=((is.na(x.train[j, i]) && is.na(x.test[j, i])) ||
(!is.na(x.train[j, i]) && !is.na(x.test[j, i]) &&
x.train[j, i]==x.test[j, i]))
while(is.na(x.train[j, i])) {
h=sample.int(n, 1)
x.train[j, i]=x.train[j, h]
}
}
if(check) x.test=x.train
else if(np>0) {
for(i in 1:np)
for(j in 1:p)
while(is.na(x.test[j, i])) {
h=sample.int(np, 1)
x.test[j, i]=x.test[j, h]
}
}
if(.Platform$OS.type!='unix') hostname <- FALSE
else if(hostname)
hostname <- system('hostname', intern=TRUE)
ptm <- proc.time()
res = .Call("cgbart",
ntype,
n,
p,
np,
x.train,
y.train,
x.test,
ntree,
numcut,
ndpost*keepevery,
nskip,
keepevery,
power,
base,
offset,
tau,
sigdf,
lambda,
sigest,
w,
sparse,
theta,
omega,
grp,
a,
b,
rho,
augment,
printevery,
xinfo
)
res$proc.time <- proc.time()-ptm
res$hostname <- hostname
Y=t(matrix(y.train, nrow=n, ncol=ndpost))
if(type=='wbart') {
res$yhat.train.mean <- apply(res$yhat.train, 2, mean)
SD=matrix(res$sigma[-(1:nskip)], nrow=ndpost, ncol=n)
log.pdf=dnorm(Y, res$yhat.train, SD, TRUE)
res$sigma.mean=mean(SD[ , 1])
}
else {
if(type=='pbart') res$prob.train = pnorm(res$yhat.train)
else if(type=='lbart') res$prob.train = plogis(res$yhat.train)
log.pdf=dbinom(Y, 1, res$prob.train, TRUE)
res$prob.train.mean <- apply(res$prob.train, 2, mean)
}
min.log.pdf=t(matrix(apply(log.pdf, 2, min), nrow=n, ncol=ndpost))
log.CPO=log(ndpost)+min.log.pdf[1, ]-
log(apply(exp(min.log.pdf-log.pdf), 2, sum))
res$LPML=sum(log.CPO)
if(np>0) {
if(type=='wbart')
res$yhat.test.mean <- apply(res$yhat.test, 2, mean)
else {
if(type=='pbart') res$prob.test = pnorm(res$yhat.test)
else if(type=='lbart') res$prob.test = plogis(res$yhat.test)
res$prob.test.mean <- apply(res$prob.test, 2, mean)
}
}
res$ndpost = ndpost
res$offset = offset
names(res$treedraws$cutpoints) = dimnames(x.train)[[1]]
dimnames(res$varcount)[[2]] = as.list(dimnames(x.train)[[1]])
dimnames(res$varprob)[[2]] = as.list(dimnames(x.train)[[1]])
res$varcount.mean <- apply(res$varcount, 2, mean)
res$varprob.mean <- apply(res$varprob, 2, mean)
res$rm.const <- rm.const
attr(res, 'class') <- type
return(res)
} |
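## A minimal usage sketch (assumption: the compiled BART package providing the
## "cgbart" routine is installed); simulated data, continuous outcome, other arguments left at their defaults.
if (interactive()) {
  set.seed(99)
  n <- 200
  x <- matrix(runif(n * 2), n, 2)
  y <- sin(2 * pi * x[, 1]) + 0.2 * rnorm(n)
  fit <- gbart(x.train = x, y.train = y, ndpost = 200L, nskip = 50L)
  plot(fit$yhat.train.mean, y)  # in-sample fit vs. observed outcome
}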
gc_correct <- function(nipt_object, method = "LOESS", include_XY = FALSE, span = 0.75,
                       ref_genome = "hg37"){
  if (class(nipt_object)[1] == NIPT_sample_class){
    if (method == "LOESS"){
      return(gc_correct_NIPTSample_loess(nipt_object, span = span,
                                         include_XY = include_XY,
                                         ref_genome = ref_genome))
    }
    if (method == "bin"){
      return(gc_correct_NIPTSample_bin(nipt_object,
                                       include_XY = include_XY, ref_genome = ref_genome))
    }
    else{
      stop("Unknown method; use \"LOESS\" or \"bin\"")
    }
  }
  if (class(nipt_object)[1] == NIPT_control_group_class){
    if (method == "LOESS"){
      return(gc_correct_NIPTControlGroup_loess(nipt_object, span = span, include_XY = include_XY,
                                               ref_genome = ref_genome))
    }
    if (method == "bin"){
      return(gc_correct_NIPTControlGroup_bin(nipt_object, include_XY = include_XY,
                                             ref_genome = ref_genome))
    }
    else{
      stop("Unknown method; use \"LOESS\" or \"bin\"")
    }
  }
}
myFun <- structure(
function
(
x
)
{
x * 2
},
ex=function()
{
x <- 5
myFun(x)
})
.result <-
list(myFun = list(definition = "myFun <- structure(\n\nfunction
description = "Fun. description", `item{x}` = "an argument",
title = "My function", format = "", examples = "\nx <- 5\nmyFun(x)\n")) |
expect_pipeop_class = function(poclass, constargs = list()) {
skip_on_cran()
po = do.call(poclass$new, constargs)
expect_pipeop(po)
poclone = po$clone(deep = TRUE)
expect_deep_clone(po, poclone)
in_nop = rep(list(NO_OP), po$innum)
in_nonnop = rep(list(NULL), po$innum)
out_nop = rep(list(NO_OP), po$outnum)
names(out_nop) = po$output$name
expect_false(po$is_trained)
expect_equal(po$train(in_nop), out_nop)
expect_equal(po$predict(in_nop), out_nop)
expect_true(is_noop(po$state))
expect_true(po$is_trained)
expect_error(po$predict(in_nonnop), "Pipeop .* got NO_OP during train")
expect_pipeop(po)
poclone = po$clone(deep = TRUE)
expect_deep_clone(po, poclone)
}
expect_pipeop = function(po) {
label = sprintf("pipeop '%s'", po$id)
expect_class(po, "PipeOp", label = label)
expect_string(po$id, label = label)
expect_class(po$param_set, "ParamSet", label = label)
expect_list(po$param_set$values, names = "unique", label = label)
expect_flag(po$is_trained, label = label)
expect_output(print(po), "PipeOp:", label = label)
expect_character(po$packages, any.missing = FALSE, unique = TRUE, label = label)
expect_function(po$train, nargs = 1)
expect_function(po$predict, nargs = 1)
expect_function(po$.__enclos_env__$private$.train, nargs = 1)
expect_function(po$.__enclos_env__$private$.predict, nargs = 1)
expect_data_table(po$input, any.missing = FALSE)
expect_names(names(po$input), permutation.of = c("name", "train", "predict"))
expect_data_table(po$output, any.missing = FALSE)
expect_names(names(po$output), permutation.of = c("name", "train", "predict"))
expect_int(po$innum, lower = 1)
expect_int(po$outnum, lower = 1)
testthat::expect_true(every(po$param_set$tags, function(x) {
length(intersect(c("train", "predict"), x)) > 0
}))
}
expect_deep_clone = function(one, two) {
expect_equal(one, two)
visited = new.env()
visited_b = new.env()
expect_references_differ = function(a, b, path) {
force(path)
addr_a = data.table::address(a)
addr_b = data.table::address(b)
if (!is.null(visited[[addr_a]])) {
return(invisible(NULL))
}
visited[[addr_a]] = path
visited_b[[addr_b]] = path
if (utils::tail(path, 1) != "[attributes]" && !is.null(base::attributes(a))) {
expect_references_differ(base::attributes(a), base::attributes(b), c(path, "[attributes]"))
}
if (!base::is.recursive(a)) {
return(invisible(NULL))
}
if (base::is.environment(a)) {
if (identical(a, baseenv()) || identical(a, globalenv()) || identical(a, emptyenv())) {
return(invisible(NULL))
}
if (length(path) > 1 && R6::is.R6(a) && "clone" %nin% names(a)) {
return(invisible(NULL))
}
if (identical(utils::tail(path, 1), c("[element train_task] 'train_task'"))) {
return(invisible(NULL))
}
label = sprintf("Object addresses differ at path %s", paste0(path, collapse = "->"))
expect_true(addr_a != addr_b, label = label)
expect_null(visited_b[[addr_a]], label = label)
}
if (base::is.function(a)) {
return(invisible(NULL))
}
objnames = base::names(a)
if (is.null(objnames) || anyDuplicated(objnames)) {
index = seq_len(base::length(a))
} else {
index = objnames
if (base::is.environment(a)) {
index = Filter(function(x) !bindingIsActive(x, a), index)
}
}
for (i in index) {
if (utils::tail(path, 1) == "[attributes]" && i %in% c("srcref", "srcfile", ".Environment")) {
next
}
expect_references_differ(base::`[[`(a, i), base::`[[`(b, i), c(path, sprintf(
"[element %s]%s", i,
if (!is.null(objnames)) {
sprintf(" '%s'", if (is.character(index)) i else objnames[[i]])
} else {
""
})))
}
}
expect_references_differ(one, two, "ROOT")
}
expect_shallow_clone = function(one, two) {
expect_equal(one, two)
if (base::is.environment(one)) {
addr_a = data.table::address(one)
addr_b = data.table::address(two)
expect_true(addr_a != addr_b, label = "Objects are shallow clones")
}
} |
library(shiny.fluent)
if (interactive()) {
shinyApp(
ui = div(
Toggle.shinyInput("toggle", value = TRUE),
textOutput("toggleValue")
),
server = function(input, output) {
output$toggleValue <- renderText({
sprintf("Value: %s", input$toggle)
})
}
)
} |
`%>%` <- magrittr::`%>%`
context("New layout API")
test_that("two step layouting works", {
g <- make_ring(10)
l1 <- layout_as_star(g)
l2 <- layout_(g, as_star())
expect_identical(l1, l2)
})
test_that("parameters go through", {
g <- make_ring(10)
l1 <- layout_as_star(g, center = 5)
l2 <- layout_(g, as_star(center = 5))
expect_identical(l1, l2)
})
test_that("parameters are evaluated early", {
g <- make_ring(10)
l1 <- layout_as_star(g, center = 5)
cc <- 5
spec <- as_star(center = cc)
cc <- 10
l2 <- layout_(g, spec)
expect_identical(l1, l2)
})
test_that("piping form is OK, too", {
g <- make_ring(10)
l1 <- layout_as_star(g, center = 5)
l2 <- g %>%
layout_(as_star(center = 5))
expect_identical(l1, l2)
})
test_that("add_layout_ works", {
g <- make_ring(10)
l1 <- layout_as_star(g, center = 5)
l2 <- add_layout_(g, as_star(center = 5))$layout
expect_identical(l1, l2)
l3 <- g %>%
add_layout_(as_star(center = 5)) %>%
graph_attr("layout")
expect_identical(l1, l3)
}) |
test_that("not_infinite returns FALSE if value is infinite", {
not_infinite(Inf) |> expect_false()
not_infinite(-Inf) |> expect_false()
})
test_that("not_infinite returns TRUE if value is not infinite", {
for_all(
a = any_vector(),
property = \(a) not_infinite(a) |> expect_true()
)
}) |
echo_rb <- function(tree, mrca_list, tip){
mrca_list <- append(mrca_list, tip)
quote_vec <-paste0('"', mrca_list, '"')
q_vec <-paste0(quote_vec[-length(quote_vec)], ',')
q_final <- append(q_vec, tail(quote_vec, n=1))
return(q_final)
} |
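## Usage sketch (assumption: the `tree` argument is not used inside echo_rb(), so NULL suffices);
## it shows the quoted, comma-separated taxon vector the function assembles.
if (interactive()) {
  echo_rb(tree = NULL, mrca_list = c("taxonA", "taxonB"), tip = "taxonC")
  # c('"taxonA",', '"taxonB",', '"taxonC"')
}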
splitgrp=function(n, m) {
x = seq_len(n)
if(n < m) {
list(`1`=x)
} else {
grp_ids = seq_len(floor(n/m))
grp_lens=sapply(
suppressWarnings(split(x, grp_ids))
, length
)
split(x
, rep(grp_ids, grp_lens)
)
}
} |
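## Usage sketch: splitgrp() cuts 1:n into floor(n/m) contiguous, roughly equal groups,
## folding any remainder into the existing groups; when n < m everything stays in one group.
if (interactive()) {
  splitgrp(7, 3)   # $`1` = 1:4, $`2` = 5:7
  splitgrp(2, 5)   # $`1` = 1:2
}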
ml3_neglog <-
function(param,dat,mlmax=1e+15,fixed=FALSE,...)
{
loglik = mlmax
lik = NULL
x = dat[,1]
y = dat[,2]
z = dat[,3]
if(fixed) param[1]=0
lik = try(dtgpd_neglog(x, y, z, mar1 = param[1:3],mar2 = param[4:6],mar3 = param[7:9],dep = param[10]))
if(!is.null(lik)){
loglik = -sum(log(lik))
if(min(1+param[3]*(x-param[1])/param[2])<0) loglik=mlmax
if(min(1+param[6]*(y-param[4])/param[5])<0) loglik=mlmax
if(min(1+param[9]*(z-param[7])/param[8])<0) loglik=mlmax}
loglik
} |
NULL
welfareDecisionAnalysis <- function(estimate, welfare, numberOfModelRuns,
randomMethod="calculate",
functionSyntax="data.frameNames",
relativeTolerance=0.05,
verbosity=0){
elPa <- function(netBenefitSample){
- mean( netBenefitSample*(netBenefitSample<0) )
}
elSq <- function(netBenefitSample){
mean( netBenefitSample*(netBenefitSample>0) )
}
eol <- function(netBenefitSample){
elPa_ <- elPa(netBenefitSample)
elSq_ <- elSq(netBenefitSample)
min(elPa_,elSq_)
}
thisAnalysis<-NULL
if ( is.function(welfare) ) {
mcResult<-mcSimulation( estimate=estimate,
model_function=welfare,
numberOfModelRuns=numberOfModelRuns,
randomMethod=randomMethod,
functionSyntax=functionSyntax,
relativeTolerance=relativeTolerance,
verbosity=verbosity)
enbPa_<-colMeans(mcResult$y)
elPa_<-apply(X=mcResult$y, MARGIN=2, FUN=elPa)
elSq_<-apply(X=mcResult$y, MARGIN=2, FUN=elSq)
eol_ <-pmin(elPa_,elSq_)
optimalChoice_<-ifelse( eol_==elPa_, "PA", "SQ")
thisAnalysis$call<-match.call()
thisAnalysis$mcResult<-mcResult
thisAnalysis$enbPa<-enbPa_
thisAnalysis$elPa<-elPa_
thisAnalysis$elSq<-elSq_
thisAnalysis$eol<-eol_
thisAnalysis$optimalChoice<-optimalChoice_
} else if ( is.list(welfare) ){
stop("The general case of two welfare functions for project approval and status quo,
respectively is not implemented, yet!")
} else {
stop("welfare must be either a function or a list of two functions.")
}
class(thisAnalysis) <- "welfareDecisionAnalysis"
return(thisAnalysis)
}
summary.welfareDecisionAnalysis <- function(object,
...,
digits = max(3, getOption("digits")-3),
probs=c(0.05, 0.5, 0.95)){
summaryDf<-if(!is.null(probs)){
data.frame( t(apply(X=object$mcResult$y, MARGIN=2, FUN=quantile, probs=probs)),
enbPa=object$enbPa,
elPa=object$elPa,
elSq=object$elSq,
eol=object$eol,
optimalChoice=object$optimalChoice,
check.names=FALSE)
}else{
data.frame(enbPa=object$enbPa,
elPa=object$elPa,
elSq=object$elSq,
eol=object$eol,
optimalChoice=object$optimalChoice)
}
summaryDf<-format(x=summaryDf, digits=digits, ...)
res<-list(summary=summaryDf,
call=object$call)
class(res)<-"summary.welfareDecisionAnalysis"
res
}
print.summary.welfareDecisionAnalysis <- function(x, ...){
cat("Call:\n")
print(x$call)
cat("\nSummary of decision analysis:\n")
print(x$summary,...)
}
hist.welfareDecisionAnalysis <- function(x, breaks=100, col=NULL, xlab=NULL, main=paste("Histogram of " , xlab), ...,
colorQuantile =c("GREY", "YELLOW", "ORANGE", "DARK GREEN", "ORANGE", "YELLOW", "GREY"),
colorProbability=c(1.00, 0.95, 0.75, 0.55, 0.45, 0.25, 0.05),
resultName=NULL){
hist(x$mcResult, breaks=breaks, col=col, xlab=xlab, main=main, ...,
colorQuantile =colorQuantile,
colorProbability=colorProbability,
resultName=resultName)
} |
NULL
setClass(Class = "Title", contains = "AmObject",
representation =
representation(text = "character", size = "numeric"))
setMethod(f = "initialize", signature = "Title",
definition = function(.Object, text, size, ...)
{
if (!missing(text)) {
.Object@text <- text
}
if (!missing(size)) {
.Object@size <- size
}
.Object <- setProperties(.Object, ...)
validObject(.Object)
return(.Object)
})
title <- function(text, size, ...) {
.Object <- new("Title", ...)
if (!missing(text)) .Object@text <- text
if (!missing(size)) .Object@size <- size
validObject(.Object)
return(.Object)
}
amTitle <- function(text, size, ...) {
.Object <- new("Title", ...)
if (!missing(text)) .Object@text <- text
if (!missing(size)) .Object@size <- size
validObject(.Object)
return(.Object)
}
setMethod(f = "setText", signature = c("Title", "character"),
definition = function(.Object, text)
{
.Object@text <- text
validObject(.Object)
return(.Object)
})
setGeneric(name = "setSize", def = function(.Object, size) { standardGeneric("setSize") })
setMethod(f = "setSize", signature = c("Title", "numeric"),
definition = function(.Object, size)
{
.Object@size <- size
validObject(.Object)
return(.Object)
}) |
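## Usage sketch (assumption: the surrounding package defines setProperties() and the
## setText/setSize generics used above): build a Title via the wrapper, then adjust its size.
if (interactive()) {
  t1 <- amTitle(text = "Monthly sales", size = 18)
  t1 <- setSize(t1, 20)
}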
mixe<-function (formula, r, R, dpn, delt, data, na.action, ...)
{
cal <- match.call(expand.dots = FALSE)
mat <- match(c("formula", "data", "na.action"), names(cal))
cal <- cal[c(1L, mat)]
cal[[1L]] <- as.name("model.frame")
cal <- eval(cal)
y <- model.response(cal)
md <- attr(cal, "terms")
x <- model.matrix(md, cal, contrasts)
s <- t(x) %*% x
xin <- solve(s)
r <- as.matrix(r)
RC <- matrix(R, NCOL(s))
RR <- t(RC)
if (is.matrix(R))
RR <- R
else RR <- RR
if (length(dpn) == 1L)
shi <- dpn
else if (is.matrix(dpn))
shi <- dpn
else shi <- diag(dpn)
de1 <- as.matrix(delt)
bb <- xin %*% t(x) %*% y
ev <- (t(y) %*% y - t(bb) %*% t(x) %*% y)/(NROW(x) - NCOL(x))
ev <- diag(ev)
w1 <- solve(s/ev + t(RR) %*% solve(shi) %*% RR)
w2 <- (t(x) %*% y)/ev + t(RR) %*% solve(shi) %*% r
bm <- w1 %*% w2
colnames(bm) <- c("Estimate")
dbd <- w1
Standard_error <- sqrt(diag(abs(dbd)))
dbd <- w1
rdel <- matrix(delt, NROW(RR))
lenr <- length(RR)
dlpt <- diag(RR %*% xin %*% t(RR))
if (lenr == ncol(RR))
ilpt <- sqrt(solve(abs(dlpt)))
else ilpt <- sqrt(solve(diag(abs(dlpt))))
upt <- RR %*% bm
tb <- t(upt)
t_statistic <- ((tb - t(rdel)) %*% ilpt)/sqrt(ev)
tst <- t(2L * pt(-abs(t_statistic), df = (NROW(x) - NCOL(x))))
pvalue <- c(tst, rep(NA, (NCOL(x) - NROW(RR))))
bibet <- xin %*% t(RR) %*% solve((shi/ev) + RR %*% xin %*%
t(RR)) %*% de1
bibets <- bibet %*% t(bibet)
mse <- dbd + bibets
mse1 <- sum(diag(mse))
mse1 <- round(mse1, digits <- 4L)
names(mse1) <- c("MSE")
t_statistic <- c(t_statistic, rep(NA, (NCOL(x) - NROW(RR))))
ans1 <- cbind(bm, Standard_error, t_statistic, pvalue)
ans <- round(ans1, digits <- 4L)
anw <- list(`*****Mixed Regression Estimator*****` = ans,
`*****Mean square error value*****` = mse1)
anw
} |
modtype <- function(model, measure, call.fn)
{
if (inherits(model, what = 'list') && length(model) > 1L)
stop("passing multiple objects at a time is not allowed", call. = FALSE)
mtype <- NULL
if (inherits(model, what = "glm")){
if (call.fn == "hosmerlem" && !(model$family$family=="binomial"))
stop("the intended test is only available for the binomial family",
call. = FALSE)
mtype <- "glm"
} else if (inherits(model, what = "vglm")){
cm <- familyname(model) == "cumulative"
ac <- familyname(model) == "acat"
cr <- familyname(model) == "cratio"
mn <- familyname(model) == "multinomial"
if (call.fn == "Rsquared"){
if (!(cm || mn))
stop("model family should be any of cumulative, propodds or " ,
"multinomial.", call. = FALSE)
}
if (call.fn =="brant"|| call.fn =="LRT"){
if (!familyname(model) == "cumulative")
stop("model family should be either cumulative or propodds.",
call. = FALSE)
if (model@misc$parallel != TRUE)
stop("the non-parallel model is not supported, consider adding ",
"parallel=TRUE in vglm call.", call. = FALSE)
}
if (call.fn == "hosmerlem"||call.fn =="lipsitz"||call.fn =="pulkroben"){
if (call.fn == "hosmerlem"){
if (!any(cm, ac, cr, mn)){
stop("model family should be any of cumulative, propodds, acat, cratio or ",
"multinomial.", call. = FALSE)
}
}
if (call.fn =="lipsitz"||call.fn =="pulkroben")
      if (!any(cm, ac, cr)) stop("model family should be either cumulative, propodds, ",
                                 "acat or cratio.", call. = FALSE)
nL <- ncol(model@y)
md <- names(model@coefficients)[-c(1:(nL-1))]
mf <- colnames(model@x)[-1L]
uncon <- c("test for the unconstrained model is not yet available, consider ",
"using parallel=TRUE in the model specification.")
if (cm && model@misc$parallel == FALSE) stop(uncon, call. = FALSE)
if (mn && model@misc$parallel == FALSE) stop(uncon, call. = FALSE)
if (ac && length(md)!=length(mf)) stop(uncon, call. = FALSE)
if (cr && length(md)!=length(mf)) stop(uncon, call. = FALSE)
}
mtype <- "vglm"
} else if (inherits(model, what = "serp")){
funs <- c("brant", "LRT", "hosmerlem", "lipsitz", "pulkroben")
if (any(call.fn == funs)){
if (model$slope != "parallel")
stop("the non-parallel model is not supported, consider adding ",
"slope='parallel' in serp call.", call. = FALSE)
}
mtype <- "serp"
} else if (inherits(model, what = "multinom")){
mtype <- "multinom"
} else if (inherits(model, what = "clm") || inherits(model, what = "clm2")){
mtype <- "clm"
} else if (inherits(model, what = "polr")){
mtype <- "polr"
} else if (inherits(model, what = "mlogit")){
mtype <- "mlogit"
} else return(NA)
if (call.fn == "lipsitz" || call.fn == "pulkroben") call.fn <- "lp_pk"
grpmod1 <- c("glm", "multinom", "mlogit")
grpmod2 <- c("glm", "vglm","serp", "polr", "clm", "mlogit", "multinom")
grpmod3 <- c("serp","vglm")
grpmod4 <- c("serp", "polr", "clm", "vglm")
grpmod5 <- c("glm", "vglm")
if (call.fn == "Rsquared"){
mx <- c("mckelvey", "efron", "tjur")
if (measure %in% mx && !mtype %in% grpmod5)
stop("requested measure is not available for the supplied model",
call. = FALSE)}
if (call.fn == "brant" && mtype %in% grpmod1) return(NA)
if (call.fn == "erroR" && !mtype %in% grpmod2) return(NA)
if (call.fn == "LRT" && !mtype %in% grpmod3) return(NA)
if (call.fn == "hosmerlem" && !mtype %in% grpmod2) return(NA)
if (call.fn == "lp_pk" && !mtype %in% grpmod4) return(NA)
mtype
} |
`%||%` <- function(lhs, rhs) {
  # Note: unlike the usual null-default operator, this version returns `rhs` when
  # `lhs` is non-NULL and returns the NULL `lhs` otherwise (see frost_csl() below).
  if (!is.null(lhs)) rhs
  else lhs
}
frost_csl <- function(parameter) {
parameter <- unique(parameter)
parameter %||% paste(parameter, collapse = ",")
} |
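## Behaviour sketch for the two helpers above, as written: a non-NULL parameter is
## deduplicated and collapsed, while a NULL parameter is passed through unchanged.
if (interactive()) {
  frost_csl(c("a", "b", "a"))  # "a,b"
  frost_csl(NULL)              # NULL
}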
expected <- eval(parse(text="c(-1.05715266611575, -0.873306430909872, -0.548705796690786, -0.288240908441576, -0.0649703574297026, 0.224762433374997, 0.3255545927283, 0.4813346401898, 0.530823516045489, 1.2699009772491)"));
test(id=0, code={
argv <- eval(parse(text="list(c(-1.05715266611575, -0.873306430909872, -0.548705796690786, -0.288240908441576, -0.0649703574297026, 0.224762433374997, 0.3255545927283, 0.4813346401898, 0.530823516045489, 1.2699009772491), c(1L, 3L, 4L, 5L, 6L, 7L, 8L, 10L))"));
.Internal(`psort`(argv[[1]], argv[[2]]));
}, o=expected); |
blockquoter <- function() add_multiline_prefix('> ', as_is = TRUE) |
optimsimplex.shrink <- function(this=NULL,fun=NULL,sigma=0.5,data=NULL){
nv <- this$nbve
mv1 <- matrix(rep(this$x[1,],nv-1),nrow=nv-1,byrow=TRUE)
newx <- (1.0-sigma)*mv1 + sigma*this$x[2:nv,,drop=FALSE]
this$x[2:nv,] <- newx[1:(nv-1),,drop=FALSE]
tmp <- optimsimplex.compsomefv(this=this,fun=fun,indices=2:nv,data=data)
this <- tmp$this
if (!is.null(data)) data <- tmp$data
varargout <- list(this=this,data=data)
return(varargout)
} |
geom_ribbon_pattern <- function(mapping = NULL, data = NULL,
stat = "identity", position = "identity",
...,
na.rm = FALSE,
orientation = NA,
show.legend = NA,
inherit.aes = TRUE,
outline.type = "both") {
outline.type <- match.arg(outline.type, c("both", "upper", "legacy"))
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomRibbonPattern,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
na.rm = na.rm,
orientation = orientation,
outline.type = outline.type,
...
)
)
}
GeomRibbonPattern <- ggproto(
"GeomRibbonPattern", GeomRibbon,
default_aes = augment_aes(
pattern_aesthetics,
ggplot2::aes(
colour = NA,
fill = "grey20",
size = 0.5,
linetype = 1,
alpha = NA
)
),
aspect_ratio = 1,
draw_key = function(self, ...) {
draw_key_polygon_pattern(..., aspect_ratio = self$aspect_ratio)
},
draw_group = function(self, data, panel_params, coord, na.rm = FALSE, flipped_aes = FALSE, outline.type = "both") {
data <- ggplot2::flip_data(data, flipped_aes)
if (na.rm) data <- data[stats::complete.cases(data[c("x", "ymin", "ymax")]), ]
data <- data[order(data$group), ]
aes_names <- c(
"colour", "fill", "size", "linetype", "alpha",
names(pattern_aesthetics)
)
aes <- unique(data[aes_names])
if (nrow(aes) > 1) {
abort("Aesthetics can not vary with a ribbon")
}
aes <- as.list(aes)
missing_pos <- !stats::complete.cases(data[c("x", "ymin", "ymax")])
ids <- cumsum(missing_pos) + 1
ids[missing_pos] <- NA
data <- unclass(data)
positions <- new_data_frame(list(
x = c(data$x, rev(data$x)),
y = c(data$ymax, rev(data$ymin)),
id = c(ids, rev(ids))
))
positions <- ggplot2::flip_data(positions, flipped_aes)
munched <- coord_munch(coord, positions, panel_params)
g_poly <- polygonGrob(
munched$x, munched$y, id = munched$id,
default.units = "native",
gp = gpar(
fill = scales::alpha(aes$fill, aes$alpha),
col = if (identical(outline.type, "legacy")) aes$colour else NA
)
)
stopifnot(!is.null(munched$id))
polygons <- split(munched, munched$id)
boundary_dfs <- lapply(polygons, function(polygon) {
create_polygon_df(
x = polygon$x,
y = polygon$y
)
})
first_idx <- !duplicated(munched$id)
first_rows <- munched[first_idx, ]
all_params <- first_rows
all_params <- cbind(all_params, aes)
self$aspect_ratio <- get_aspect_ratio()
pattern_grobs <- create_pattern_grobs(all_params, boundary_dfs, self$aspect_ratio)
if (identical(outline.type, "legacy")) {
warn(glue('outline.type = "legacy" is only for backward-compatibility ',
'and might be removed eventually'))
return(ggname("geom_ribbon", grobTree(g_poly, pattern_grobs)))
}
munched_lines <- munched
munched_lines$id <- switch(
outline.type,
both = munched_lines$id + rep(c(0, max(ids, na.rm = TRUE)), each = length(ids)),
upper = munched_lines$id + rep(c(0, NA), each = length(ids)),
abort(glue("invalid outline.type: {outline.type}"))
)
g_lines <- polylineGrob(
munched_lines$x, munched_lines$y, id = munched_lines$id,
default.units = "native",
gp = gpar(
col = aes$colour,
lwd = aes$size * .pt,
lty = aes$linetype)
)
ggname("geom_ribbon", grobTree(g_poly, pattern_grobs, g_lines))
}
)
geom_area_pattern <- function(mapping = NULL, data = NULL, stat = "identity",
position = "stack", na.rm = FALSE, orientation = NA,
show.legend = NA, inherit.aes = TRUE, ...,
outline.type = "upper") {
outline.type <- match.arg(outline.type, c("both", "upper", "legacy"))
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomAreaPattern,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
na.rm = na.rm,
orientation = orientation,
outline.type = outline.type,
...
)
)
}
GeomAreaPattern <- ggproto(
"GeomAreaPattern", GeomRibbonPattern,
default_aes = augment_aes(
pattern_aesthetics,
aes(
colour = NA,
fill = "grey20",
size = 0.5,
linetype = 1,
alpha = NA
)
),
required_aes = c("x", "y"),
setup_params = function(data, params) {
params$flipped_aes <- ggplot2::has_flipped_aes(data, params, ambiguous = TRUE)
params
},
setup_data = function(data, params) {
data$flipped_aes <- params$flipped_aes
data <- ggplot2::flip_data(data, params$flipped_aes)
data <- transform(data[order(data$PANEL, data$group, data$x), ], ymin = 0, ymax = y)
ggplot2::flip_data(data, params$flipped_aes)
}
) |
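## Usage sketch (assumes ggplot2 plus the pattern aesthetics and grob helpers referenced
## above, e.g. from a ggpattern-style package): a striped ribbon with a plain outline.
if (interactive()) {
  library(ggplot2)
  df <- data.frame(x = 1:10, lo = sin(1:10) - 1, hi = sin(1:10) + 1)
  ggplot(df, aes(x, ymin = lo, ymax = hi)) +
    geom_ribbon_pattern(pattern = "stripe", fill = "grey80", colour = "black")
}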
as_vegaspec.{{s3_class_name}} <- function(spec, ...) {
vegawidget::as_vegaspec(spec, ...)
}
print.{{s3_class_name}} <- function(x, ...) {
x <- as_vegaspec(x)
print(x, ...)
}
format.{{s3_class_name}} <- function(x, ...) {
x <- as_vegaspec(x)
format(x, ...)
}
knit_print.{{s3_class_name}} <- function(spec, ..., options = NULL) {
spec <- as_vegaspec(spec)
knitr::knit_print(spec, ..., options = options)
} |
ci2p <- function(
est, lower, upper,
log_transform = FALSE,
conf = 0.95,
qdist = stats::qnorm,
pdist = stats::pnorm) {
if (upper < lower) {
tmp <- lower
lower <- upper
upper <- tmp
rm(tmp)
ui_warn("upper < lower: they are considered reversed")
}
if (log_transform) {
est <- log(est)
lower <- log(lower)
upper <- log(upper)
}
se <- (upper - lower) / (2L * qdist(conf))
1L - pdist(est / se)
} |
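## Usage sketch: back out an approximate p-value from a reported 95% CI on a ratio
## (log_transform = TRUE moves estimate and bounds to the log scale first). Note that,
## as written above, the divisor is qdist(conf), i.e. qnorm(0.95) for the default conf = 0.95.
if (interactive()) {
  ci2p(est = 1.8, lower = 1.1, upper = 2.9, log_transform = TRUE)
}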
geom_violinhalf <- function(mapping = NULL,
data = NULL,
stat = "ydensity",
position = "dodge",
trim = TRUE,
flip = FALSE,
scale = c("area", "count", "width"),
show.legend = NA,
inherit.aes = TRUE,
...) {
scale <- match.arg(scale)
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomViolinHalf,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
trim = trim,
scale = scale,
flip = flip,
...
)
)
}
GeomViolinHalf <- ggproto("GeomViolinHalf", Geom,
extra_params = c("na.rm", "flip"),
setup_data = function(data, params) {
data$width <- data$width %||% params$width %||% (resolution(data$x, FALSE) * 0.9)
data <- do.call(rbind, lapply(split(data, data$group), function(.group) {
.group$ymin <- min(.group$y)
.group$ymax <- max(.group$y)
.group$xmin <- .group$x
.group$xmax <- .group$x + .group$width / 2
.group
}))
},
draw_group = function(data, panel_scales, coord, flip) {
data$xminv <- data$x
if (is.logical(flip)) {
if (flip) {
data$xmaxv <- data$x - data$violinwidth * (data$xmax - data$x)
} else {
data$xmaxv <- data$x + data$violinwidth * (data$xmax - data$x)
}
} else if (is.numeric(flip)) {
if (unique(data$group) %in% flip) {
data$xmaxv <- data$x - data$violinwidth * (data$xmax - data$x)
} else {
data$xmaxv <- data$x + data$violinwidth * (data$xmax - data$x)
}
}
mindata <- maxdata <- data
mindata$x <- mindata$xminv
mindata <- mindata[order(mindata$y), , drop = FALSE]
maxdata$x <- maxdata$xmaxv
maxdata <- maxdata[order(maxdata$y, decreasing = TRUE), , drop = FALSE]
newdata <- rbind(mindata, maxdata)
newdata <- rbind(newdata, newdata[1, ])
.grobName("geom_violinhalf", GeomPolygon$draw_panel(newdata, panel_scales, coord))
},
draw_key = draw_key_polygon,
default_aes = aes(
weight = 1,
colour = "grey20",
fill = "white",
size = 0.5,
alpha = NA,
linetype = "solid"
),
required_aes = c("x", "y")
)
.grobName <- function(prefix, grob) {
insight::check_if_installed("grid")
grob$name <- grid::grobName(grob, prefix)
grob
} |
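## Usage sketch (assumes ggplot2): half violins by group; `flip` mirrors either all
## groups (TRUE) or only the group indices listed, as handled in draw_group() above.
if (interactive()) {
  library(ggplot2)
  ggplot(iris, aes(x = Species, y = Sepal.Length, fill = Species)) +
    geom_violinhalf(flip = c(1, 3))
}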
prediction.mca <-
function(model,
data = find_data(model),
at = NULL,
calculate_se = FALSE,
...) {
if (is.null(at)) {
out <- data
} else {
out <- build_datalist(data, at = at, as.data.frame = TRUE)
at_specification <- attr(out, "at_specification")
}
tmp <- predict(model, newdata = out, ...)
pred <- make_data_frame(out, tmp)
pred[["fitted"]] <- NA_real_
pred[["se.fitted"]] <- NA_real_
vc <- NA_real_
structure(pred,
class = c("prediction", "data.frame"),
at = if (is.null(at)) at else at_specification,
type = NA_character_,
call = if ("call" %in% names(model)) model[["call"]] else NULL,
model_class = class(model),
row.names = seq_len(nrow(pred)),
vcov = vc,
jacobian = NULL,
weighted = FALSE)
} |
library("R.utils")
show <- methods::show
x <- letters[1:8]
x2 <- c(x[-1], "\n")
x3 <- x2[-1]
y <- as.list(x[1:3])
cat("mprint():\n")
print(x)
cprint(x)
print(y)
cprint(y)
cat("mcat():\n")
cat(x, "\n")
ccat(x, "\n")
cat(x2)
ccat(x2)
cat(x3, sep=",")
ccat(x3, sep=",")
cat(x3, sep="\n")
ccat(x3, sep="\n")
cat("mstr():\n")
str(x)
cstr(x)
str(y)
cstr(y)
cat("mshow():\n")
show(x)
cshow(x)
show(y)
cshow(y)
cat("mprintf():\n")
printf("x=%d\n", 1:3)
cprintf("x=%d\n", 1:3)
cat("mout():\n")
writeLines(x)
cout(writeLines(x))
cfoo <- function(a=1) {
cprintf("a=%s\n", a)
}
cbar <- function(...) {
cfoo(...)
}
a <- 2
cfoo(a)
cfoo(3)
cbar(a)
cbar(3)
res <- captureOutput({ ccat("Hello") })
str(res)
stopifnot(length(res) == 0L)
withSink({ ccat("Hello") }, file="foo.txt", type="message")
res <- readLines("foo.txt")
str(res)
stopifnot(length(res) == 0L) |
stress_VaR_ES <- function(x, alpha, q_ratio = NULL,
s_ratio = NULL, q = NULL, s = NULL, k = 1, normalise = FALSE, names = NULL, log = FALSE){
if (is.SWIM(x)) x_data <- get_data(x) else x_data <- as.matrix(x)
if (anyNA(x_data)) warning("x contains NA")
if (any(alpha <= 0) || any(alpha >= 1)) stop("Invalid alpha argument")
if (!is.null(q) && !is.null(q_ratio)) stop("Only provide q or q_ratio")
if (!is.null(s) && !is.null(s_ratio)) stop("Only provide s or s_ratio")
if (is.null(q) && is.null(q_ratio)) stop("no q or q_ratio defined")
if (is.null(s) && is.null(s_ratio)) stop("no s or s_ratio defined")
n <- length(x_data[, k])
max_length <- max(length(alpha), length(q), length(q_ratio), length(s), length(s_ratio))
VaR <- stats::quantile(x_data[, k], alpha, names = FALSE, type = 1)
if (is.null(q)){
if (!is.numeric(q_ratio)) stop("Invalid q_ratio argument")
if (any(VaR == 0)) warning("VaR is 0, define q instead if q_ratio.")
if (length(alpha) > 1 && length(q_ratio) > 1 && length(alpha) != length(q_ratio)) stop("Arguments alpha and q_ratio must have length one or equal length.")
q <- rep(q_ratio * VaR, length.out = max_length)
} else {
if (!is.numeric(q)) stop("invalid q argument")
if (length(alpha) > 1 && length(q) > 1 && length(alpha) != length(q)) stop("arguments alpha and q must have length one or equal length")
q <- rep(q, length.out = max_length)
}
VaR_achieved <- vector('numeric', length = max_length)
for (i in 1:max_length) {
VaR_achieved[i] <- max(x_data[, k][x_data[, k] <= q[i]])
if(q[i] != VaR_achieved[i])message(paste("Stressed VaR specified was", round(q[i], 4),", stressed VaR achieved is", round(VaR_achieved[i], 4)))
}
VaR_matrix <- matrix(rep(VaR_achieved, each = n), ncol = length(VaR_achieved))
ES <- colMeans((x_data[, k] - VaR_matrix) * (x_data[, k] > VaR_matrix)) / (1 - alpha) + VaR_achieved
if (is.null(s)){
if (!is.numeric(s_ratio)) stop("Invalid s_ratio argument")
   if (any(ES == 0)) warning("ES is 0, define s instead of s_ratio.")
if (length(alpha) > 1 && length(s_ratio) > 1 && length(alpha) != length(s_ratio)) stop("Arguments alpha and s_ratio must have length one or equal length.")
s <- rep(s_ratio * ES, length.out = max_length)
} else {
if (!is.numeric(s)) stop("Invalid s argument")
if (length(alpha) > 1 && length(s) > 1 && length(alpha) != length(s)) stop("Arguments alpha and s must have length one or equal length.")
s <- rep(s, length.out = max_length)
}
alpha <- rep(alpha, length.out = max_length)
ecdfx <- stats::ecdf(x_data[, k])
if (any(q > s)) stop("All q need to be smaller than s.")
if (any(VaR != q & ecdfx(VaR) == ecdfx(q))) stop("There are not enough data points, specifically, there is none between VaR and q.")
if (any(abs(ecdfx(q) - ecdfx(s)) <= 1/n)) stop("There are not enough data points, specifically, there is none between q and s.")
if (any(s >= max(x_data[, k])) || any(q <= min(x_data[, k]))) stop("all s need to be smaller than the largest and all q larger than the smallest data point.")
q_matrix <- matrix(rep(VaR_achieved, each = n), ncol = max_length)
constr <- cbind(alpha, "q"= VaR_achieved, s)
new_weights <- apply(X = constr, MARGIN = 1, FUN = .rn_VaR_ES, y = x_data[, k], normalise = normalise)
if (is.null(colnames(x_data))) colnames(x_data) <- paste("X", as.character(1:dim(x_data)[2]), sep = "")
if (is.null(names)) {
temp <- paste(rep("stress", max_length), 1:max_length)
} else {
temp <- names
}
if (length(temp) != max_length) stop("length of names are not the same as the number of models")
names(new_weights) <- temp
type <- rep(list("VaR ES"), length.out = max_length)
constr1 <- cbind("k" = rep(k, length.out = max_length), constr)
constr_ES <- list()
for(s in 1:max_length){
temp_list <- list(as.list(constr1[s, ]))
names(temp_list) <- temp[s]
constr_ES <- c(constr_ES, temp_list)
}
my_list <- SWIM("x" = x_data, "new_weights" = new_weights, "type" = type, "specs" = constr_ES)
if (is.SWIM(x)) my_list <- merge(x, my_list)
if (log) {
summary_weights(my_list)
}
return(my_list)
}
.rn_VaR_ES <- function(y, constraints, normalise){
.alpha <- constraints[1]
.q <- constraints[2]
.s <- constraints[3]
if(normalise == TRUE){
min.y <- min(y)
max.y <- max(y)
.q <- (.q - min.y) / (max.y - min.y)
.s <- (.s - min.y) / (max.y - min.y)
y <- .scale(y)
}
x_q <- 1 * (y > .q)
theta_sol <- function(theta){
mean((y - .s) * exp(theta * (y - .q)) * x_q)
}
theta <- stats::uniroot(theta_sol, lower = -10^-20, upper = 10^-20, tol = 10^-30, extendInt = "yes")$root
prob_q <- mean(y <= .q)
e <- mean(exp(theta * (y - .q)) * (y > .q))
if(normalise == FALSE){
rn_weights <- function(z){(.alpha / prob_q) * (z <= .q) + (1 - .alpha) / e * exp(theta * (z - .q)) * (z > .q)}
} else {
rn_weights <- function(z){
z1 <- (z - min.y) / (max.y - min.y)
(.alpha / prob_q) * (z1 <= .q) + (1 - .alpha) / e * exp(theta * (z1 - .q)) * (z1 > .q)
}
}
return(rn_weights)
} |
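## Usage sketch (assumption: the surrounding SWIM-style constructors SWIM(), is.SWIM()
## and summary_weights() are available): stress column 1 of simulated losses so that its
## 90% VaR rises by 5% and its 90% ES by 10%.
if (interactive()) {
  set.seed(1)
  x <- matrix(rexp(10000), ncol = 2)
  str_res <- stress_VaR_ES(x, alpha = 0.9, q_ratio = 1.05, s_ratio = 1.10, k = 1)
}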
library(PowerTOST)
sampsiz <- function(alpha=0.05, power, CV, GMR, theta1, logscale=TRUE,
design="2x2", method="exact")
{
data <- merge(CV, GMR)
names(data) <- c("CV","GMR")
tbl <- data.frame()
for (j in seq_along(power))
{
data$n <- 1
data$power <- power[j]
for (i in seq_along(data$n)) {
data$n[i] <- sampleN.TOST(alpha=alpha, CV=data[i,"CV"],
theta0=data$GMR[i], logscale=logscale,
targetpower=power[j], theta1=theta1,
design=design, method=method,
print=FALSE)[,"Sample size"]
}
data2 <- reshape(data, v.names="n", idvar=c("power","CV"), timevar="GMR",
direction="wide")
names(data2) <- gsub("n.","R",names(data2))
names(data2)[names(data2)=="R1"] <- "R1.0"
names(data2)[names(data2)=="R0"] <- "R0.0"
cat("Power",power[j],"\n")
print(data2[,-2],row.names=FALSE)
cat("\n")
tbl <- rbind(tbl, data2)
}
return(invisible(tbl))
}
cat(paste('Chow S.C., Liu J.P.\n',
'"Design and Analysis of Bioavailability\n',
'and Bioequvalence Studies", Third edition\n',
'CRC Press, Chapman & Hall, Boca Raton (2009)\n\n',sep=""))
cat("Table 9.6.2 2x2x3, BEL 0.8-1.20, additive model,\n")
cat("approximate via shifted central t-distribution\n")
CVs <- seq(from=0.1, to=0.4, by=0.02)
GMRs <- seq(from=0.0, to=0.15, by=0.05)
power <- c(0.8, 0.9)
t9.6.2 <- sampsiz(power=power, CV=CVs, GMR=GMRs, logscale=FALSE, theta1=-0.2,
method="shifted", design="2x2x3")
cat("Table 9.6.6 2x2x3, BEL 0.8-1.25, multiplicative model model,\n");
cat("approximate via shifted central t-distribution\n")
cat("Attention! Liu, Chow's CV is se!\n")
CVs <- seq(from=0.1, to=0.4, by=0.02)
CVs <- se2CV(CVs)
GMRs <- seq(from=0.85, to=1.2, by=0.05)
power <- c(0.8, 0.9)
t9.6.6 <- sampsiz(power=power, CV=CVs, GMR=GMRs, logscale=TRUE, theta1=0.8,
method="shift", design="2x2x3") |
expected <- eval(parse(text="structure(FALSE, .Label = FALSE)"));
test(id=0, code={
argv <- eval(parse(text="list(structure(FALSE, .Label = FALSE), FALSE)"));
do.call(`levels<-`, argv);
}, o=expected); |
context("ebirdhotspot")
test_that("ebirdhotspot works correctly", {
skip_on_cran()
expect_warning(out <- ebirdhotspot('L99381', max = 10, provisional = TRUE))
expect_is(out, "data.frame")
expect_is(out$comName, "character")
expect_warning(ebirdhotspot('L99381'))
expect_warning(expect_error(ebirdhotspot(locID = 'L99381', back = 40)))
}) |
linkedLives = function(currentData,sequenceUnits,lifecourseSequences){
courseLen = length(lifecourseSequences[1,])
bbcseq = lifecourseSequences
hen = length(lifecourseSequences[,1])
slen = length(sequenceUnits)
testStats = array(0,c(1000,1))
totalScore = score = 0
for(i in 1:length(bbcseq[,1])){
myseq = bbcseq[i,1:courseLen]
score = (mobility_index(myseq,sequenceUnits,slen))
totalScore = totalScore + score
}
testStatistic = totalScore/(courseLen*hen)
for(j in 1:1000){
totalScore = score = 0
Population = lifecourseSequences
thePopulation = currentData[order(currentData[,1]),]
selectYear = thePopulation[,1]
U = length(unique(selectYear))
UY = unique(selectYear)
for(g in 1:U){
candidates = which(selectYear%in%UY[g])
Population[candidates,] = sample(bbcseq[candidates[1],])
}
for(i in 1:hen){
myseq = Population[i,]
score = (mobility_index(myseq,sequenceUnits,courseLen))
totalScore = totalScore + score
}
testStats[j] = totalScore/(hen*courseLen)
}
LeftTailed_pval = length(which(testStats<=testStatistic))/1000
RightTailed_pval = length(which(testStats>=testStatistic))/1000
list(testStats,LeftTailed_pval,RightTailed_pval,testStatistic)
} |
library(matR)
xx <- lapply (demoSets(), readSet)
lapply (xx, scrubSet)
lapply (xx, scrapeSet)
lapply (list (
"mgm4440066.3",
"mgm4440066.3 mgm4440062.3",
"mgm4440066.3 mgm4440062.3 mgm4440055.3",
"mgm4440066.3",
"mgm4440066.3\tmgm4440062.3",
"mgm4440066.3\tmgm4440062.3 mgm4440055.3",
"mgm4440066.3",
"mgm4440066.3\t mgm4440062.3",
"mgm4440066.3\t mgm4440062.3\t mgm4440055.3",
"mgm4440066.3",
"mgm4440066.3\nmgm4440062.3",
"mgm4440066.3\nmgm4440062.3\nmgm4440055.3",
"4440066.3",
"4440066.3 4440062.3",
"4440066.3 4440062.3 4440055.3",
c("mgm4440066.3"),
c("mgm4440066.3", "mgm4440062.3"),
c("mgm4440066.3", "mgm4440062.3", "mgm4440055.3"),
c(4440066.3),
c(4440066.3, 4440062.3),
c(4440066.3, 4440062.3, 4440055.3),
c("mgm4440066.3 mgm4440062.3", "mgm4440055.3"),
c("4440066.3 mgm4440062.3", "mgm4440055.3"),
c("mgm4440066.3 4440062.3", "mgm4440055.3"),
c("mgm4440066.3 mgm4440062.3", "4440055.3"),
c("mgm4440066.3 4440062.3", "4440055.3"),
c("4440066.3 mgm4440062.3", "4440055.3"),
c("4440066.3 4440062.3", "mgm4440055.3"),
"mgp21",
"mgp21 mgp24",
"mgp21 mgp24 mgp30",
c("mgp21"),
c("mgp21", "mgp24"),
c("mgp21", "mgp24", "mgp30")),
scrubSet) |
liply <- function(lst,fun,axis,...)
{
purrr::map(lst,fun,...) %>% imappend(axis=axis)
}
ilply <- function(im,axis,fun,...)
{
imsplit(im,axis) %>% purrr::map(fun,...)
}
idply <- function(im,axis,fun,...)
{
imsplit(im,axis) %>% purrr::map_df(function(x, ...) as.data.frame(t(fun(x, ...))), .id = ".id")
}
iiply <- function(im,axis,fun,...)
{
imsplit(im,axis) %>% purrr::map(fun,...) %>% imappend(axis=axis)
}
imsplit <- function(im,axis,nb=-1)
{
if (is.cimg(im))
{
l <- im_split(im,axis,nb)
}
else if (is.pixset(im))
{
l <- px_split(im,axis,nb)
}
else
{
stop("im must be either an image or pixset")
}
d.ind <- index.coords[[axis]]
d <- dim(im)
if (nb!=-1)
{
b.end <- map_dbl(l,function(v) dim(v)[d.ind]) %>% cumsum
b.start <- c(1,b.end[-length(l)]+1)
b.str <- sprintf("= %i - %i",b.start,b.end)
names(l) <- paste(axis,b.str)
}
else
{
names(l) <- paste(axis,1:length(l),sep=" = ")
}
l
}
imsplit.recur <- function(im,spl,nb=-1)
{
if (length(spl) > 1)
{
imsplit.recur(im,spl[[1]]) %>% purrr::map(imsplit.recur,spl=spl[-1])
}
else
{
            axis <- spl[[1]]  # in the terminal case the single remaining entry of spl is the axis
            l <- im_split(im,axis,nb)
d.ind <- index.coords[[axis]]
d <- dim(im)
if (nb!=-1)
{
b.end <- map_dbl(l,function(v) dim(v)[d.ind]) %>% cumsum
b.start <- c(1,b.end[-length(l)]+1)
b.str <- sprintf("= %i - %i",b.start,b.end)
names(l) <- paste(axis,b.str)
}
else
{
names(l) <- paste(axis,1:length(l),sep=" = ")
}
l
}
}
NULL
check.reduce <- function(l)
{
l <- as.imlist(l)
if (length(l) > 1)
{
ok <- sapply(l,dim) %>% { apply(.,1,stats::sd) == 0 } %>% all
if (!ok)
{
stop("Images must all be the same size")
}
}
l
}
add <- function(x,na.rm=FALSE) {
x <- check.reduce(x)
reduce_wsum(x,rep(1,length(x)),na_rm=na.rm)
}
wsum <- function(x,w,na.rm=FALSE)
{
if (length(w)!=length(x)) stop("weights must the same length as input")
check.reduce(x) %>% reduce_wsum(w,na_rm=na.rm)
}
average <- function(x,na.rm=FALSE) check.reduce(x) %>% reduce_average(na_rm=na.rm)
mult <- function(x,na.rm=FALSE) check.reduce(x) %>% reduce_prod(na_rm=na.rm)
parmax <- function(x,na.rm=FALSE) check.reduce(x) %>% reduce_minmax(na_rm=na.rm,max=TRUE)
parmax.abs <- function(x) maxmin.abs(x,TRUE)
parmin.abs <- function(x) maxmin.abs(x,FALSE)
parmin <- function(x,na.rm=FALSE) check.reduce(x) %>% reduce_minmax(na_rm=na.rm,max=FALSE)
enorm <- function(x) check.reduce(x) %>% reduce_list(5)
parmed <- function(x,na.rm=FALSE) check.reduce(x) %>% reduce_med(na_rm=na.rm)
parvar <- function(x,na.rm=FALSE)
{
if (na.rm)
{
nValid <- map_il(x,px.na) %>% { length(x) - add(.) }
avg <- add(x,na.rm=TRUE)/nValid
map_il(x,~ (.-avg)^2) %>% add(na.rm=TRUE) %>% { ./(nValid-1) }
}
else
{
n <- length(x)
avg <- average(x)
map_il(x,~ (.-avg)^2) %>% add %>% { ./(n-1) }
}
}
parsd <- function(x,na.rm=FALSE) parvar(x,na.rm=na.rm) %>% sqrt
parall <- function(x) check.reduce(x) %>% Reduce(function(a,b) a & b,.)
parany <- function(x) check.reduce(x) %>% Reduce(function(a,b) a | b,.)
equal <- function(x)
{
if (length(x) == 1)
{
stop("x has only one element")
}
else
{
acc <- px.all(x[[1]])
v <- x[[1]]
x <- x[-1]
for (xv in x)
{
acc[xv!=v] <- FALSE
}
acc
}
}
which.parmax <- function(x) maxmin.ind(x,max=TRUE)
which.parmin <- function(x) maxmin.ind(x,max=FALSE)
parsort <- function(x,increasing=TRUE) check.reduce(x) %>% psort(increasing)
parorder <- function(x,increasing=TRUE) check.reduce(x) %>% porder(increasing)
parrank <- function(x,increasing=TRUE) check.reduce(x) %>% prank(increasing)
maxmin.abs <- function(L,max=TRUE)
{
n <- length(L)
cmax <- abs(L[[1]])
out <- L[[1]]
for (ind in 2:n)
{
aL <- abs(L[[ind]])
if (max)
{
v <- aL > cmax
}
else
{
v <- aL < cmax
}
out[v] <- L[[ind]][v]
cmax[v] <- aL[v]
}
out
}
maxmin.ind <- function(L,max=TRUE)
{
n <- length(L)
pind <- L[[1]]*0 + 1
cmax <- L[[1]]
for (ind in 2:n)
{
if (max)
{
v <- L[[ind]] > cmax
}
else
{
v <- L[[ind]] < cmax
}
pind[v] <- ind
cmax[v] <- L[[ind]][v]
}
pind
}
imappend <- function(imlist,axis)
{
if (all(map_lgl(imlist,is.cimg)))
{
im_append(imlist,axis)
}
else if (all(map_lgl(imlist,is.pixset)))
{
px_append(imlist,axis)
}
else
{
stop("List contains unknown image type (must be all images, or all pixsets)")
}
} |
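## Usage sketch (assumes the imager package and its built-in `boats` image): split along
## the colour axis, then reduce the resulting image list pixel-wise.
if (interactive()) {
  library(imager)
  chans <- imsplit(boats, "c")  # one image per colour channel
  plot(average(chans))          # pixel-wise mean across channels
  plot(parmax(chans))           # pixel-wise maximum across channels
}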
html_paged = function(
..., css = c('default-fonts', 'default-page', 'default'), theme = NULL,
template = pkg_resource('html', 'paged.html'), csl = NULL,
front_cover = NULL, back_cover = NULL
) {
html_format(
..., css = css, theme = theme, template = template, .pagedjs = TRUE,
.pandoc_args = c(
lua_filters('uri-to-fn.lua', 'loft.lua', 'footnotes.lua'),
if (!is.null(csl)) c('--csl', csl),
pandoc_chapter_name_args(),
pandoc_covers_args(front_cover, back_cover)
),
.dependencies = covers_dependencies(front_cover, back_cover)
)
}
html_letter = function(..., css = c('default', 'letter'), fig_caption = FALSE) {
html_paged(..., css = css, fig_caption = fig_caption)
}
book_crc = function(..., css = c('crc-page', 'default-page', 'default', 'crc')) {
html_paged(..., css = css)
}
jss_paged = function(
..., css = c('jss-fonts', 'jss-page', 'jss'),
template = pkg_resource('html', 'jss_paged.html'),
csl = pkg_resource('csl', 'journal-of-statistical-software.csl'),
highlight = NULL, pandoc_args = NULL
) {
jss_format = html_paged(
..., template = template, css = css,
csl = csl, highlight = highlight,
number_sections = FALSE,
pandoc_args = c(
lua_filters('jss.lua'),
'--metadata', 'link-citations=true',
pandoc_args
)
)
opts_jss = list(
prompt = TRUE, comment = NA, R.options = list(prompt = 'R> ', continue = 'R+ '),
fig.align = 'center', fig.width = 4.9, fig.height = 3.675,
class.source = 'r-chunk-code'
)
for (i in names(opts_jss)) {
jss_format$knitr$opts_chunk[[i]] = opts_jss[[i]]
}
jss_format
}
thesis_paged = function(
..., css = c('thesis'), template = pkg_resource('html', 'thesis.html')
) {
html_paged(..., css = css, template = template)
}
pagedown_dependency = function(css = NULL, js = FALSE, .test = FALSE) {
paged = if (.test) 'js/paged-latest.js' else c('js/paged.js', 'js/hooks.js')
list(htmltools::htmlDependency(
'paged', packageVersion('pagedown'), src = pkg_resource(),
script = if (js) c('js/config.js', paged),
stylesheet = file.path('css', css), all_files = .test
))
}
html_format = function(
..., self_contained = TRUE, anchor_sections = FALSE, mathjax = 'default', css, template, pandoc_args = NULL,
.dependencies = NULL, .pagedjs = FALSE, .pandoc_args = NULL, .test = FALSE
) {
if (!identical(mathjax, 'local')) {
if (identical(mathjax, 'default'))
mathjax = rmarkdown:::default_mathjax()
if (isTRUE(self_contained) && !is.null(mathjax)) {
pandoc_args = c(pandoc_args, paste0('--mathjax=', mathjax))
mathjax = NULL
}
}
css2 = grep('[.](?:sa|sc|c)ss$', css, value = TRUE, invert = TRUE)
css = setdiff(css, css2)
check_css(css2)
pandoc_args = c(
.pandoc_args,
pandoc_args,
if (isTRUE(.pagedjs)) pandoc_metadata_arg('newpage_html_class', 'page-break-after')
)
html_document2 = function(..., extra_dependencies = list()) {
bookdown::html_document2(..., extra_dependencies = c(
extra_dependencies, .dependencies,
pagedown_dependency(xfun::with_ext(css2, '.css'), .pagedjs, .test)
))
}
fmt = html_document2(
..., self_contained = self_contained, anchor_sections = anchor_sections, mathjax = mathjax, css = css,
template = template, pandoc_args = pandoc_args
)
if (isTRUE(.pagedjs)) {
fmt$knitr$opts_chunk[['ft.shadow']] = FALSE
}
fmt
}
chapter_name = function() {
config = bookdown:::load_config()
chapter_name = config[['chapter_name']] %n% bookdown:::ui_language('chapter_name')
if (is.null(chapter_name) || identical(chapter_name, '')) return()
if (!is.character(chapter_name)) stop(
'chapter_name in _bookdown.yml must be a character string'
)
if (length(chapter_name) > 2) stop('chapter_name must be of length 1 or 2')
chapter_name
}
pandoc_metadata_arg = function(name, value) {
if (!missing(value) && is.character(value)) {
value = deparse(value)
}
c('--metadata', if (missing(value)) name else paste0(name, '=', value))
}
pandoc_chapter_name_args = function() {
unlist(lapply(chapter_name(), pandoc_metadata_arg, name = 'chapter_name'))
}
pandoc_covers_args = function(front_cover, back_cover) {
build_links = function(name, img) {
if (length(img) == 0) return()
name = paste(name, seq_along(img), sep = '-')
links = paste0('<link id="', name,'-pagedown-attachment" rel="attachment" href="', img, '" />')
links[!file.exists(img)]
}
links = c(build_links('front-cover', front_cover), build_links('back-cover', back_cover))
if (length(links)) {
writeLines(links, f <- tempfile(fileext = ".html"))
rmarkdown::includes_to_pandoc_args(rmarkdown::includes(f))
}
}
covers_dependencies = function(front_cover, back_cover) {
html_dep = function(name, img) {
htmltools::htmlDependency(
name, packageVersion('pagedown'), dirname(path.expand(img)),
attachment = c(pagedown = basename(img)), all_files = FALSE
)
}
build_deps = function(name, img) {
if (length(img) == 0) return(list())
name = paste(name, seq_along(img), sep = '-')
name = name[file.exists(img)]
img = img[file.exists(img)]
mapply(name, img, FUN = html_dep, USE.NAMES = FALSE, SIMPLIFY = FALSE)
}
c(build_deps('front-cover', front_cover), build_deps('back-cover', back_cover))
} |
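## Usage sketch: these output formats are normally selected from an R Markdown YAML
## header, e.g. `output: pagedown::html_paged`; programmatic rendering (with a
## hypothetical input file) looks like this, assuming rmarkdown and pagedown are installed.
if (interactive()) {
  rmarkdown::render("report.Rmd", output_format = pagedown::html_paged())
}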
Fch <-
function(age, fabs, Eg, Eh, percCYPg, percCYPh){
res <- fabs*(1 - Eg*weightCYPsum(age, percCYPg))*(1 - Eh*weightCYPsum (age, percCYPh))
return(res)
} |
library("ACNE");
verbose <- Arguments$getVerbose(-8, timestamp=TRUE);
dataSet <- "HapMap270,100K,CEU,5trios";
chipType <- "Mapping50K_Hind240";
res <- doACNE(dataSet, chipType=chipType, verbose=verbose);
print(res);
ds <- res$total;
dfR <- getAverageFile(ds, verbose=verbose);
df <- getFile(ds, 1);
baf <- getFile(res$fracB, 1);
ugp <- getAromaUgpFile(ds);
fig <- sprintf("%s", getFullName(df));
if (!devIsOpen(fig)) {
devSet(fig, width=10, height=5);
subplots(2*3, nrow=2, byrow=FALSE);
par(mar=c(3,4,2,1)+0.1, pch=".");
for (chr in 1:3) {
units <- getUnitsOnChromosome(ugp, chr);
pos <- getPositions(ugp, units=units);
beta <- extractMatrix(baf, units=units, drop=TRUE);
fracB <- RawAlleleBFractions(beta, pos, chromosome=chr);
theta <- extractMatrix(df, units=units, drop=TRUE);
thetaR <- extractMatrix(dfR, units=units, drop=TRUE);
C <- 2 * theta/thetaR;
cn <- RawCopyNumbers(C, pos, chromosome=chr);
plot(cn, col="gray", cex=0.8, ylim=c(0,4));
xOut <- seq(xMin(cn), xMax(cn), by=0.5e6);
cnS <- gaussianSmoothing(cn, xOut=xOut, sd=1e6);
points(cnS, col="black");
stext(side=3, pos=0, getName(df));
stext(side=3, pos=1, sprintf("Chr%d", chr));
plot(fracB, ylim=c(0,1));
box(col="blue");
stext(side=3, pos=0, getTags(ds, collapse=","));
stext(side=3, pos=1, sprintf("Chr%d", chr));
}
devDone();
} |
`fnorm3` <-
function(dmax3, data, alterInt){
if(any(is.na(dmax3))) return(NaN)
N <- sum(data)
m <- colSums(data)
p<-m/N
p0<-p[1]
p1<-p[2]
p2<-p[3]
ro.0.0.5<-p0*(p1+2*p2)/{sqrt(p0*(1-p0))*sqrt((p1+2*p2)*p0+(p1+2*p0)*p2)}
ro.0.1<-p0*p2/{sqrt(p0*(1-p0))*sqrt(p2*(1-p2))}
ro.0.5.1<-p2*(p1+2*p0)/{sqrt(p2*(1-p2))*sqrt((p1+2*p2)*p0+(p1+2*p0)*p2)}
cov<-matrix(c(1,ro.0.0.5,ro.0.1,ro.0.0.5,1,ro.0.5.1,ro.0.1,ro.0.5.1,1),3,3,byrow=T)
d<-dmax3
lower <- switch(alterInt+1, rep(-d,3), rep(-Inf,3), rep(d,3))
upper <- switch(alterInt+1, rep( d,3), rep(d, 3), rep(Inf,3))
p.norm<-1-as.numeric(sadmvn(lower=lower ,upper=upper, mean=rep(0,3),varcov=cov))
return(p.norm)
} |
matrix_to_hyp <- function(hypothesis, param_names){
mapply(function(hyp_mat, n_ec){
hyps <- apply(hyp_mat[, -ncol(hyp_mat), drop = FALSE], 1, function(x){
include_term <- !x == 0
hyp_scalar <- x[include_term]
hyp_param <- param_names[include_term]
hyp <- paste(paste0("+", hyp_scalar), hyp_param, sep = "*", collapse = "")
hyp <- gsub("\\+-", "-", hyp)
hyp <- gsub("1\\*", "", hyp)
hyp <- gsub("^\\+", "", hyp)
hyp
})
paste(hyps,
paste0(c(rep("=", n_ec), rep(">", (nrow(hyp_mat) - n_ec))), hyp_mat[, ncol(hyp_mat)]),
sep = "",
collapse = "&"
)
}, hyp_mat = hypothesis$hyp_mat, n_ec = hypothesis$n_ec)
} |
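## Worked sketch (assumed inputs): two linear constraints over parameters a, b, c, the
## first an equality (n_ec = 1); rows hold coefficients, the last column the right-hand side.
if (interactive()) {
  hyp <- list(
    hyp_mat = list(matrix(c(1, -1,  0, 0,
                            0,  1, -1, 0), nrow = 2, byrow = TRUE)),
    n_ec = 1
  )
  matrix_to_hyp(hyp, param_names = c("a", "b", "c"))
  # "a-b=0&b-c>0"
}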
library(testit)
assert('detect_pattern() automatically detects syntax patterns', {
(detect_pattern('<<>>=') %==% 'rnw')
(detect_pattern('<<foo, bar=TRUE>>=') %==% 'rnw')
(detect_pattern('% begin.rcode') %==% 'tex')
(detect_pattern('<!--begin.rcode') %==% 'html')
(detect_pattern('``` {r}') %==% 'md')
(detect_pattern('asdf', 'rnw') %==% 'rnw')
(is.null(detect_pattern('foo')))
})
assert('group_pattern() checks if a pattern contains a group', {
(group_pattern('(.*)'))
(!group_pattern('()'))
(!group_pattern('abc'))
(!group_pattern(NULL))
})
ce_rnw = all_patterns$rnw$chunk.end
assert('patterns for Rnw', {
(grep(ce_rnw, ' @') %==% 1L)
(grep(ce_rnw, '@ ') %==% 1L)
(grep(ce_rnw, '@ %asdf') %==% 1L)
(grep(ce_rnw, '@ asdf') %==% integer())
(grep(ce_rnw, ' @ a% sdf') %==% integer())
})
cb_md = all_patterns$md$chunk.begin
assert('patterns for md', {
(grepl(cb_md, '```{r}'))
(grepl(cb_md, '```{r label}'))
(grepl(cb_md, '```{r, eval=FALSE}'))
(grepl(cb_md, '```{awk}'))
(!grepl(cb_md, '```{.class}'))
  (!grepl(cb_md, '```{#id}'))
(!grepl(cb_md, '```{style="color: red"}'))
(!grepl(cb_md, '```{=latex}'))
}) |
KnowB<-function(data, format="A", cell=60, curve= "Rational", estimator=1,
cutoff=1, cutoffCompleteness= 0, cutoffSlope= 1, largematrix=FALSE,
Area="World", extent=TRUE, minLon, maxLon, minLat, maxLat,
colbg="transparent", colcon="transparent", colf="black", pro = TRUE, inc = 0.005, exclude = NULL,
colexc = NULL, colfexc="black", colscale=c("
legend.pos="y", breaks=9, xl=0, xr=0, yb=0, yt=0, asp, lab = NULL, xlab = "Longitude", ylab = "Latitude",
main1="Observed richness", main2="Records",
main3="Completeness", main4="Slope", cex.main = 1.6, cex.lab = 1.4, cex.axis = 1.2, cex.legend=1.2, family = "sans", font.main = 2, font.lab = 1, font.axis = 1, lwdP=0.6, lwdC=0.1, trans=c(1,1),
log=c(0,0), ndigits=0, save="CSV", file1 = "Observed richness",
file2 = "List of species", file3 = "Species per site", file4 = "Estimators", file5 = "Species per record",
file6 = "Records", file7 = "Completeness", file8 = "Slope", file9 = "Standard error of the estimators",
na = "NA", dec = ",", row.names = FALSE, jpg=TRUE, jpg1="Observed richness.jpg",
jpg2="Records.jpg", jpg3="Completeness.jpg", jpg4="Slope.jpg",cex=1.5, pch=15,
cex.labels=1.5, pchcol="red", ask=FALSE){
method<-"accumulation"
options(digits=15,warn=-1)
SpR<-FALSE
if(jpg==FALSE) par(ask=ask) else yuret<-1
colscale<-append("transparent",colscale)
if(exists("adworld")==FALSE){
adworld<-1
stop("It is necessary to load data(adworld)")
}
if(Area!="World" & exists("adworld1")==FALSE){
stop("It is necessary to use RWizard and replace data(adworld) by @_Build_AdWorld_, for using administative areas")
}
if(!is.null(exclude)){
stop("It is necessary to use RWizard and replace data(adworld) by @_Build_AdWorld_, for using administative areas")
}
if(exists("adworld1")==FALSE){
adworld1<-1
}
if(exists("adworld2")==FALSE){
adworld2<-1
}
if(format=="B"){
data[is.na(data)]<-0
}
if(format=="A"){
data[is.na(data)]<-0
}
x<-na.exclude(data)
if(format=="B"){
format<-"A"
xLo<-x[,1]
xLa<-x[,2]
dimg<-dim(x[,c(-1,-2)])
replicas<-rep(dimg[1], dimg[2])
x2<-matrix(as.matrix(x[,c(-1,-2)]), ncol=1)
headers<-names(x[,c(-1,-2)])
sps<-rep(x=headers,times=replicas)
xLo<-rep(x=xLo,times=dimg[2])
xLa<-rep(x=xLa,times=dimg[2])
x<-data.frame(sps,xLo,xLa,x2)
names(x)<-c("Species","Longitude","Latitude", "Counts")
x<-x[x$Counts>0,]
x<-x[(x$Longitude!=0 & x$Latitude!=0),]
}
if(format=="A"){
if(method=="accumulation"){
b<-x[,4]
x<-x[rep(1:nrow(x[,1:3]), b), ]
x[,4]<-1
}
}
values1<-data.frame(1,2,3,4,5,6,7,8,9,10)
values2<-data.frame(1,2,3,4,5,6,7)
if(estimator==0){
values3<-data.frame(1,2,3,4,5,6,7,8,9,10,11)
sevalues3<-data.frame(1,2,3,4,5,6,7,8)
}
else{
values3<-data.frame(1,2,3,4,5,6,7,8)
sevalues3<-data.frame(1,2,3,4,5,6)
}
sevalues1<-data.frame(1,2,3,4,5,6,7)
sevalues2<-data.frame(1,2,3,4,5,6)
salA<-data.frame(c(2,5,6,1,0,6,5,8,7,4,9,8), nrow=4)
sal<-data.frame(c(2,5,6,1,0,6,5,8,7,4,9,8), nrow=4)
cu2<-NA;cu3<-NA;cu4<-NA;cu5<-NA
sp2<-NA;sp3<-NA;sp4<-NA;sp5<-NA
serandom<-NA;seexact<-NA;secoleman<-NA;serarefaction<-NA;R2exact<-NA;R2random<-NA
if(format=="A"){
re<-dim(x)
Records<-seq(1,re[1],1)
temp<-cbind(x,Records)
div<-x[,2]*cos(180*3.1416/180)+x[,3]*sin(90*3.1416/180)+(x[,2]+x[,3])*x[,2]+(x[,2]-x[,3])*x[,3]+
(x[,2]+x[,3])*x[,3]+(x[,2]-x[,3])*x[,2]+(x[,2]-x[,3])*x[,2]*x[,3]+(x[,2]+x[,3])*x[,2]*x[,3]
dt1<-cbind(x,div)
dt1<-dt1[order(dt1[,5]), ]
pp1<-subset(temp, !duplicated(temp[,1]))
dimtemp<-dim(temp)
dimpp1<-dim(pp1)
elements<-dimtemp[1]*dimpp1[1]
rm(pp1)
elements[is.na(elements)]<-0
if(elements>2000000000) elements<-0 else elements<-elements
if(SpR==FALSE) elements<-0 else elements<-elements
if(elements==0){
datosac<-x
datosf<-x
if(largematrix==FALSE){
}
else{
pp1<-subset(temp[,c(1,2,3)], !duplicated(temp[,1]))
pp1<-pp1[order(pp1[,1]), ]
pp2<-t(pp1)
coluSp<-dim(pp2)
datos3<-aggregate(x[,4],by=list(x[,1]),mean)
datos3<-datos3[order(datos3[,1]), ]
d3<-dim(datos3)
for (t in 1:d3[1]){
sele<-subset(temp,temp[,1] %in% datos3[t,1])
dimsele<-dim(sele)
matr1<-matrix(0, nrow=dimsele[1], ncol=coluSp[2]+2)
matr1[,1]<-sele[,2]
matr1[,2]<-sele[,3]
matr1[,t+2]<-1
if(t==1){
colnames(matr1)<-c("Longitude","Latitude",pp2[1,])
write.table(matr1,"Species per record.txt", row.names=FALSE)
}
else{
write.table(matr1,"Species per record.txt", row.names=FALSE, col.names=FALSE, append=TRUE)
}
}
rm(datos3)
}
}
else{
datosf<-with(dt1, table(dt1[,5],dt1[,1]))
datosac<-with(temp, table(temp[,5],temp[,1]))
}
if(elements==0){
}
else{
if(save=="RData"){
file3<-paste(file3,".RData", sep="")
file5<-paste(file5,".RData", sep="")
save(datosf, file=file3)
save(datosac, file=file5)
load(file3)
load(file5)
}
else{
file3<-paste(file3,".CSV", sep="")
file5<-paste(file5,".CSV", sep="")
if(dec=="."){
write.csv(x=datosf, file = file3, row.names=row.names,na=na)
write.csv(x=datosac, file = file5, row.names=row.names,na=na)
}
else{
write.csv2(x=datosf, file = file3, row.names=row.names,na=na)
write.csv2(x=datosac, file = file5, row.names=row.names,na=na)
}
if(dec=="."){
datosf<-read.csv(file3, header=T, check.names=FALSE)
datosac<-read.csv(file5, header=T, check.names=FALSE)
}
else{
datosf<-read.csv2(file3, header=T, check.names=FALSE)
datosac<-read.csv2(file5, header=T, check.names=FALSE)
}
}
}
if(elements==0){
datosf<-x
datosac<-x
}
else{
datosac<-cbind(temp[,2:3],datosac)
datos2<-dt1[,2:3]
datos2<-subset(datos2, !duplicated(datos2))
datosf<-cbind(datos2,datosf)
datosf<-datosf[order(datosf[,1], datosf[,2]), ]
}
datosf[is.na(datosf)]<-0
species<-aggregate(x[,4],by=list(x[,1]),FUN=sum,na.rm=TRUE)
species1<-aggregate(x[,4],by=list(x[,1]),FUN=mean,na.rm=TRUE)
species<-cbind(species,species1[,2])
colnames(species)<-c("Species","Sum", "Mean")
}
else{
datosf<-x
datosac<-replace(x[,-c(1,2)], x[,-c(1,2)]>1,1)
datosac<-cbind(x[,c(1,2)],datosac)
Sum<-colSums(x[,-c(1,2)])
Mean<-colMeans(x[,-c(1,2)])
species<-rbind(Sum,Mean)
colnames(species)<-colnames(x[,-c(1,2)])
species<-cbind(c("Sum","Mean"),species)
}
if(format=="B") elements<-1 else elements<-elements
if(elements==0){
}
else{
if(save=="RData"){
file3<-paste(file3,".RData", sep="")
file5<-paste(file5,".RData", sep="")
save(datosf, file=file3)
save(datosac, file=file5)
}
else{
file3<-paste(file3,".CSV", sep="")
file5<-paste(file5,".CSV", sep="")
if(dec=="."){
write.csv(x=datosf, file = file3, row.names=row.names,na=na)
write.csv(x=datosac, file = file5, row.names=row.names,na=na)
}
else{
write.csv2(x=datosf, file = file3, row.names=row.names,na=na)
write.csv2(x=datosac, file = file5, row.names=row.names,na=na)
}
}
}
rm(datos2)
f<-round(cell/60,digits=10)
ff<-180/f
cc<-ff*2
matriz<-matrix(-9999, nrow=ff, ncol=cc+1)
col<-c(0,seq(from=-180+f, to=180, by=f))
row<-c(seq(from=90-f, to=-90, by=-f))
matriz<-as.data.frame(matriz)
matriz[,1]<-row
matriz<-rbind(col,matriz)
names(matriz)<-NULL
a<-dim(matriz)
options(warn=-1)
matriz1<-matriz
matriz4<-matriz
matriz5<-matriz
matriz6<-matriz
if(format=="A"){
maxLat1<-ceiling(max(x[,3]))
minLat1<-floor(min(x[,3]))
maxLon1<-ceiling(max(x[,2]))
minLon1<-floor(min(x[,2]))
}
else
{
maxLat1<-ceiling(max(x[,2]))
minLat1<-floor(min(x[,2]))
maxLon1<-ceiling(max(x[,1]))
minLon1<-floor(min(x[,1]))
}
rm(x)
r1<-which(abs(row-maxLat1)==min(abs(row-maxLat1)))
r2<-which(abs(row-minLat1)==min(abs(row-minLat1)))
c1<-which(abs(col-minLon1)==min(abs(col-minLon1)))
c2<-which(abs(col-maxLon1)==min(abs(col-maxLon1)))
if(length(c1)==2) c1<-c1[2] else c1<-c1
if(length(c2)==2) c2<-c2[2] else c2<-c2
if(length(r1)==2) c1<-r1[2] else r1<-r1
if(length(r2)==2) r2<-r2[2] else r2<-r2
if(format=="A"){
if(method=="accumulation"){
datosf<-datosac
}
}
else{
if(method=="accumulation"){
datosa<-replace(datosf[,-c(1,2)], datosf[,-c(1,2)]>1,1)
datosf<-cbind(datosf[,c(1,2)],datosa)
}
}
rm(datosac)
ZZ<-matrix(c("","","",""), nrow=2)
begin.time<-Sys.time()
leng1<-length(seq(r1-f, r2+f, by = f))
uio<-1
for (z in seq(r1-f, r2+f, by = f)){
if(uio<=1){
uio<-uio+1
}
else{
if(uio>=2){
end.time<-Sys.time()
end.times <- format(end.time, "%b %d, %Y at %X")
run.time<-difftime(end.time,begin.time,units="secs")
run<-as.numeric(run.time)
run<-run/length(seq(r1-f,z, by=f))
run1<-run*(leng1-length(seq(r1-f,z, by=f)))
if(run1>=3600){
ZZ[2,2]<-"remaining hours...."
}
else{
if(run1<=60) ZZ[2,2]<-"remaining seconds...." else ZZ[2,2]<-"remaining minutes...."
}
if(run1>=3600){
minutes<-run1/3600
}
else{
if(run1<=60) minutes<-run1 else minutes<-run1/60
}
minutes<-round(minutes, digits=1)
ZZ[1,1]<-end.times
ZZ[2,1]<-minutes
write.table(ZZ,"Inf.txt", row.names=FALSE,col.names=FALSE)
}
else{
}
}
for (h in seq(c1-f, c2+f, by = f)){
if(format=="A"){
if(elements==0){
datosx<-temp[(temp[,2]>=col[h-f])&(temp[,2]<col[h])&(temp[,3]>=row[z+f])&(temp[,3]<row[z]),]
dimx<-dim(datosx)
datosL<-datosx[,2:3]
if(dimx[1]==0) datosx<-datosx else datosx<-with(datosx, table(datosx[,5],datosx[,1]))
if(dimx[1]==0) datosx<-datosx else datosx<-datosx[, apply(datosx, 2, sum)!=0]
}
else{
datosx<-datosf[(datosf[,1]>=col[h-f])&(datosf[,1]<col[h])&(datosf[,2]>=row[z+f])&(datosf[,2]<row[z]),]
datosx<-datosx[, apply(datosx, 2, sum)!=0]
datosL<-datosx
}
}
else{
datosx<-datosf[(datosf[,1]>=col[h-f])&(datosf[,1]<col[h])&(datosf[,2]>=row[z+f])&(datosf[,2]<row[z]),]
datosx<-datosx[, apply(datosx, 2, sum)!=0]
datosL<-datosx
}
if(elements==0) datosx<-datosx else datosx<-datosx[,-c(1,2)]
dimy<-dim(datosx)
if(is.null(dimy)){
dimy[2]<-1
dimy[1]<-length(datosx)
Lo1<-subset(datosL[,1], !duplicated(datosL[,1]))
La1<-subset(datosL[,2], !duplicated(datosL[,2]))
Longitude<-mean(Lo1)
Latitude<-mean(La1)
com<-NA
cu3<-NA
cu2<-NA
sp3<-NA
sp2<-NA
slope<-NA
serandom<-NA;seexact<-NA; R2exact<-NA; R2random<-NA
if(estimator==0){
ratio<-dimy[1]/dimy[2]
temp4<-c(Longitude, Latitude, dimy[1],dimy[2],cu3,cu2,sp3,sp2,slope,com, ratio)
values3<-rbind(values3,temp4)
setemp4<-c(Longitude, Latitude, dimy[1],dimy[2],seexact,serandom,R2exact,R2random)
sevalues3<-rbind(sevalues3,setemp4)
}
if(estimator==1){
ratio<-dimy[1]/dimy[2]
temp4<-c(Longitude, Latitude, dimy[1],dimy[2],cu3,sp3,com, ratio)
values3<-rbind(values3,temp4)
setemp4<-c(Longitude, Latitude, dimy[1],dimy[2],seexact,R2exact)
sevalues3<-rbind(sevalues3,setemp4)
}
if(estimator==2){
ratio<-dimy[1]/dimy[2]
temp4<-c(Longitude, Latitude, dimy[1],dimy[2],cu2,sp2,com, ratio)
values3<-rbind(values3,temp4)
setemp4<-c(Longitude, Latitude, dimy[1],dimy[2],serandom,R2random)
sevalues3<-rbind(sevalues3,setemp4)
}
values<-values3[-1,]
sevalues<-sevalues3[-1,]
if(estimator==0){
colnames(values)<-c("Longitude", "Latitude", "Records","Observed.richness", "Richness.exact", "Richness.random", "Slope.exact", "Slope.random", "Mean.slope", "Completeness", "Ratio")
colnames(sevalues)<-c("Longitude", "Latitude", "Records","Observed.richenss", "SE.exact", "SE.random","R2.exact","R2.random")
}
if(estimator==1){
colnames(values)<-c("Longitude", "Latitude", "Records","Observed.richness","Richness", "Slope","Completeness","Ratio")
colnames(sevalues)<-c("Longitude", "Latitude", "Records","Observed.richness","SE","R2")
}
if(estimator==2){
colnames(values)<-c("Longitude", "Latitude", "Records","Observed.richness","Richness", "Slope", "Completeness","Ratio")
colnames(sevalues)<-c("Longitude", "Latitude", "Records","Observed.richness","SE","R2")
}
}
dimy[is.null(dimy)] <- 0
if(dimy[1]==1){
cut<-sum(datosx, na.rm=TRUE)/dimy[2]
}
else{
cut<-dimy[1]/dimy[2]
}
if(dimy[1]==0){
hgh<-1
}
else{
if(dimy[2]==0) {
matriz1[z+2,h]<-dimy[2]
matriz4[z+2,h]<-dimy[1]
Lo1<-subset(datosL[,1], !duplicated(datosL[,1]))
La1<-subset(datosL[,2], !duplicated(datosL[,2]))
Longitude<-mean(Lo1)
Latitude<-mean(La1)
}
else{
if(dimy[2]==1){
matriz1[z+2,h]<-dimy[2]
matriz4[z+2,h]<-dimy[1]
Lo1<-subset(datosL[,1], !duplicated(datosL[,1]))
La1<-subset(datosL[,2], !duplicated(datosL[,2]))
Longitude<-mean(Lo1)
Latitude<-mean(La1)
}
else{
if(dimy[1]==0){
hgh<-1
}
else{
if(dimy[1]==1){
matriz1[z+2,h]<-dimy[2]
matriz4[z+2,h]<-dimy[1]
Lo1<-subset(datosL[,1], !duplicated(datosL[,1]))
La1<-subset(datosL[,2], !duplicated(datosL[,2]))
Longitude<-mean(Lo1)
Latitude<-mean(La1)
}
else{
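# (Editor's note) For cells with enough records, species accumulation curves
# (vegan::specaccum, "exact" and/or "random") are fitted with the selected
# asymptotic model (Clench, Exponential, Saturation or Rational) to estimate
# extrapolated richness, the final slope of the curve and, from these, the
# completeness of the inventory.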
if(method=="accumulation"){
if(cut<cutoff){
if(estimator==0){
Methods<-c(NA,NA)
Methodssp<-c(NA,NA)
sp2<-NA;sp3<-NA
seMethods<-c(NA,NA)
serandom<-NA;seexact<-NA
}
else{
Methods<-c(NA)
Methodssp<-c(NA)
sp2<-NA;sp3<-NA
seMethods<-c(NA)
serandom<-NA;seexact<-NA
}
}
else{
if(estimator==0 | estimator==2){
cu<- vegan::specaccum(datosx, method="random", permutations = 200)
datosc<-data.frame(cu$richness, cu$sites)
ymax<-max(datosc[,1],na.rm=T)
ymin<-min(datosc[,1],na.rm=T)
xmax<-max(datosc[,2],na.rm=T)
xmin<-min(datosc[,2],na.rm=T)
if(curve=="Clench"){
modelo<-try(nls(cu.richness ~ A*cu.sites/(1+B*cu.sites), data=datosc, start=list(A=1, B=0.01)), silent=T)
}
if(curve=="Exponential"){
modelo<-try(nls(cu.richness ~ (A)*(1-exp((-B*cu.sites))), data=datosc, start=list(A=ymax, B=0.01)), silent=T)
}
if(curve=="Saturation"){
modelo<-try(nls(cu.richness~A*(1-exp(-B*(cu.sites-C))), data=datosc, trace=T, start=list(A=ymax, B=0.01, C=0)), silent=TRUE)
}
if(curve=="Rational"){
modelo<-try(nls(cu.richness~(A+B*cu.sites)/(1+C*cu.sites), data=datosc, trace=T, start=list(A=1, B=1, C=0)), silent=TRUE)
}
res<-summary(modelo)
if(res[1]=="1"){
cu2<-NA
serandom<-NA
}
else{
cu2<-res$parameters[1,1]/res$parameters[2,1]
if(curve=="Saturation" | curve=="Exponential"){
cu2<-res$parameters[1,1]
}
if(curve=="Rational"){
cu2<-res$parameters[2,1]/res$parameters[3,1]
}
if(cu2<0){
cu2<-NA
sp2<-NA
serandom<-NA
}
else{
cu2<-cu2
leng<-length(cu$sites)
sp2<-cu$richness[leng]-cu$richness[leng-1]
if(curve=="Clench"){res1<-datosc[,1]-(res$coefficients[1,1]*datosc[,2])/(1+res$coefficients[2,1]*datosc[,2])}
if(curve=="Exponential"){res1<-datosc[,1]-(res$coefficients[1,1])*(1-exp((-res$coefficients[2,1]*datosc[,2])))}
if(curve=="Saturation"){res1<-datosc[,1]-(res$coefficients[1,1]*(1-exp(-res$coefficients[2,1]*(datosc[,2]-res$coefficients[3,1]))))}
if(curve=="Rational"){res1<-datosc[,1]-(res$coefficients[1,1]+res$coefficients[2,1]*datosc[,2])/(1+res$coefficients[3,1]*datosc[,2])}
serandom<-sqrt(sum((res1)^2)/length(res1))
R2random<-1-var(res1, na.rm=T)/var(datosc[,1], na.rm=T)
}
}
}
if(estimator==0 | estimator==1){
cu<- vegan::specaccum(datosx, method="exact")
datosc<-data.frame(cu$richness, cu$sites)
ymax<-max(datosc[,1],na.rm=T)
ymin<-min(datosc[,1],na.rm=T)
xmax<-max(datosc[,2],na.rm=T)
xmin<-min(datosc[,2],na.rm=T)
if(curve=="Clench"){
modelo<-try(nls(cu.richness ~ A*cu.sites/(1+B*cu.sites), data=datosc, start=list(A=1, B=0.01)), silent=T)
}
if(curve=="Exponential"){
modelo<-try(nls(cu.richness ~ (A)*(1-exp((-B*cu.sites))), data=datosc, start=list(A=ymax, B=0.01)), silent=T)
}
if(curve=="Saturation"){
modelo<-try(nls(cu.richness~A*(1-exp(-B*(cu.sites-C))), data=datosc, trace=T, start=list(A=ymax, B=0.01, C=0)), silent=TRUE)
}
if(curve=="Rational"){
modelo<-try(nls(cu.richness~(A+B*cu.sites)/(1+C*cu.sites), data=datosc, trace=T, start=list(A=1, B=1, C=0)), silent=TRUE)
}
res<-summary(modelo)
if(res[1]=="1"){
cu3<-NA
seexact<-NA
}
else{
cu3<-res$parameters[1,1]/res$parameters[2,1]
if(curve=="Saturation" | curve=="Exponential"){
cu3<-res$parameters[1,1]
}
if(curve=="Rational"){
cu3<-res$parameters[2,1]/res$parameters[3,1]
}
if(cu3<0){
cu3<-NA
sp3<-NA
seexact<-NA
}
else{
cu3<-cu3
leng<-length(cu$sites)
sp3<-cu$richness[leng]-cu$richness[leng-1]
if(curve=="Clench"){res1<-datosc[,1]-(res$coefficients[1,1]*datosc[,2])/(1+res$coefficients[2,1]*datosc[,2])}
if(curve=="Exponential"){res1<-datosc[,1]-(res$coefficients[1,1])*(1-exp((-res$coefficients[2,1]*datosc[,2])))}
if(curve=="Saturation"){res1<-datosc[,1]-(res$coefficients[1,1]*(1-exp(-res$coefficients[2,1]*(datosc[,2]-res$coefficients[3,1]))))}
if(curve=="Rational"){res1<-datosc[,1]-(res$coefficients[1,1]+res$coefficients[2,1]*datosc[,2])/(1+res$coefficients[3,1]*datosc[,2])}
seexact<-sqrt(sum((res1)^2)/length(res1))
R2exact<-1-var(res1, na.rm=T)/var(datosc[,1], na.rm=T)
}
}
}
if(estimator==0){
Methods<-c(cu3,cu2)
if(cut<cutoff) seMethods<-c(NA,NA) else seMethods<-c(seexact, serandom)
Methods[Methods < 0] <- NA
seMethods[seMethods < 0] <- NA
if(cut<cutoff) Methodssp<-c(NA,NA) else Methodssp<-c(sp3, sp2)
Methodssp[Methodssp < 0] <- NA
}
if(estimator==1){
Methods<-c(cu3)
if(cut<cutoff) seMethods<-c(NA) else seMethods<-c(seexact)
Methods[Methods < 0] <- NA
seMethods[seMethods < 0] <- NA
if(cut<cutoff) Methodssp<-c(NA) else Methodssp<-c(sp3)
Methodssp[Methodssp < 0] <- NA
}
if(estimator==2){
Methods<-c(cu2)
if(cut<cutoff) seMethods<-c(NA) else seMethods<-c(serandom)
Methods[Methods < 0] <- NA
seMethods[seMethods < 0] <- NA
if(cut<cutoff) Methodssp<-c(NA) else Methodssp<-c(sp2)
Methodssp[Methodssp < 0] <- NA
}
}
}
Methods[Methods < 0] <- NA
if(estimator==0){
pred<-mean(Methods, na.rm=T)
}
else{
pred<-mean(Methods[estimator], na.rm=T)
}
if(method=="accumulation"){
if (estimator==0){
slope<-mean(Methodssp, na.rm=T)
pred<-mean(Methods, na.rm=T)
}
else{
slope<-Methodssp
pred<-Methods
}
}
com<-(dimy[2]*100/pred)
Lo1<-subset(datosL[,1], !duplicated(datosL[,1]))
La1<-subset(datosL[,2], !duplicated(datosL[,2]))
Longitude<-mean(Lo1)
Latitude<-mean(La1)
if(method=="accumulation"){
if(estimator==0){
if(!is.na(slope) & slope>cutoffSlope){
com<-NA
}
if(!is.na(com) & com<cutoffCompleteness){
com<-NA
}
ratio<-dimy[1]/dimy[2]
temp4<-c(Longitude, Latitude, dimy[1],dimy[2],cu3,cu2,sp3,sp2,slope,com, ratio)
cu2<-NA;cu3<-NA
sp2<-NA;sp3<-NA
values3<-rbind(values3,temp4)
setemp4<-c(Longitude, Latitude, dimy[1],dimy[2],seexact,serandom,R2exact,R2random)
serandom<-NA;seexact<-NA; R2exact<-NA; R2random<-NA
sevalues3<-rbind(sevalues3,setemp4)
}
if(estimator==1){
if(!is.na(com) & com<cutoffCompleteness){
com<-NA
}
if(!is.na(slope) & slope>cutoffSlope){
com<-NA
}
ratio<-dimy[1]/dimy[2]
temp4<-c(Longitude, Latitude, dimy[1],dimy[2],cu3,sp3,com, ratio)
cu2<-NA;cu3<-NA
sp2<-NA;sp3<-NA
values3<-rbind(values3,temp4)
setemp4<-c(Longitude, Latitude, dimy[1],dimy[2],seexact,R2exact)
serandom<-NA;seexact<-NA;R2exact<-NA
sevalues3<-rbind(sevalues3,setemp4)
}
if(estimator==2){
if(!is.na(slope) & slope>cutoffSlope){
com<-NA
}
if(!is.na(com) & com<cutoffCompleteness){
com<-NA
}
ratio<-dimy[1]/dimy[2]
temp4<-c(Longitude, Latitude, dimy[1],dimy[2],cu2,sp2,com, ratio)
cu2<-NA;cu3<-NA
sp2<-NA;sp3<-NA
values3<-rbind(values3,temp4)
setemp4<-c(Longitude, Latitude, dimy[1],dimy[2],serandom,R2random)
serandom<-NA;seexact<-NA;R2random<-NA
sevalues3<-rbind(sevalues3,setemp4)
}
}
matriz1[z+2,h]<-dimy[2]
matriz4[z+2,h]<-dimy[1]
if(is.na(pred)) matriz5[z+2,h]<-(-9999) else matriz5[z+2,h]<-com
if(method=="accumulation"){
if(is.na(slope)) matriz6[z+2,h]<-(-9999) else matriz6[z+2,h]<-slope
if(is.na(pred)){
}
else{
if(slope>cutoffSlope){
matriz6[z+2,h]<-(-9999)
matriz5[z+2,h]<-(-9999)
}
}
}
else{
matriz6[z+2,h]<-(-9999)
}
}
}
}
}
}
}
}
if(method=="accumulation"){
values<-values3[-1,]
sevalues<-sevalues3[-1,]
if(estimator==0){
colnames(values)<-c("Longitude", "Latitude", "Records","Observed.richness", "Richness.exact", "Richness.random", "Slope.exact", "Slope.random", "Mean.slope", "Completeness", "Ratio")
colnames(sevalues)<-c("Longitude", "Latitude", "Records","Observed.richenss", "SE.exact", "SE.random","R2.exact","R2.random")
}
if(estimator==1){
colnames(values)<-c("Longitude", "Latitude", "Records","Observed.richness","Richness", "Slope","Completeness","Ratio")
colnames(sevalues)<-c("Longitude", "Latitude", "Records","Observed.richness","SE","R2")
}
if(estimator==2){
colnames(values)<-c("Longitude", "Latitude", "Records","Observed.richness","Richness", "Slope", "Completeness","Ratio")
colnames(sevalues)<-c("Longitude", "Latitude", "Records","Observed.richness","SE","R2")
}
}
else{
}
ZZ[1,1]<-end.times
ZZ[2,1]<-"Saving files...."
ZZ[2,2]<-""
write.table(ZZ,"Inf.txt", row.names=FALSE,col.names=FALSE)
if(save=="RData"){
file1<-paste(file1,".RData", sep="")
file6<-paste(file6,".RData", sep="")
file7<-paste(file7,".RData", sep="")
file8<-paste(file8,".RData", sep="")
file2<-paste(file2,".RData", sep="")
file4<-paste(file4,".RData", sep="")
file9<-paste(file9,".RData", sep="")
save(matriz1, file=file1)
save(matriz4, file=file6)
save(matriz5, file=file7)
if(method=="accumulation"){
save(matriz6,file = file8)
}
save(species, file = file2)
save(values, file = file4)
save(sevalues, file = file9)
}
else{
file1<-paste(file1,".CSV", sep="")
file6<-paste(file6,".CSV", sep="")
file7<-paste(file7,".CSV", sep="")
file8<-paste(file8,".CSV", sep="")
file2<-paste(file2,".CSV", sep="")
file4<-paste(file4,".CSV", sep="")
file9<-paste(file9,".CSV", sep="")
if(dec=="."){
write.csv(x=matriz1, file = file1, row.names=row.names,na=na)
write.csv(x=species, file = file2, row.names=row.names,na=na)
write.csv(x=values, file = file4, row.names=row.names,na=na)
write.csv(x=matriz4, file = file6, row.names=row.names,na=na)
write.csv(x=matriz5, file = file7, row.names=row.names,na=na)
write.csv(x=sevalues, file = file9, row.names=row.names,na=na)
if(method=="accumulation"){
write.csv(x=matriz6, file = file8, row.names=row.names,na=na)
}
}
else{
write.csv2(x=matriz1, file = file1, row.names=row.names,na=na)
write.csv2(x=species, file = file2, row.names=row.names,na=na)
write.csv2(x=values, file = file4, row.names=row.names,na=na)
write.csv2(x=matriz4, file = file6, row.names=row.names,na=na)
write.csv2(x=matriz5, file = file7, row.names=row.names,na=na)
write.csv2(x=sevalues, file = file9, row.names=row.names,na=na)
if(method=="accumulation"){
write.csv2(x=matriz6, file = file8, row.names=row.names,na=na)
}
}
}
if(method=="accumulation") rty<-4 else rty<-3
d<-length(Area)
AA<-Area[1]
if (AA=="World"){
datos1<-adworld[2:5,]
}
else{
datos1<-rbind(adworld1,adworld2)
}
datos1<-na.exclude(datos1)
for (qw in 1:rty){
if(qw==1){
if(save=="RData"){
load(file6)
varscale<-as.matrix(matriz4)
}
else{
if(dec=="."){
varscale<-read.csv(file6, header=F)
}
else{
varscale<-read.csv2(file6, header=F)
}
}
}
if(qw==2){
if(save=="RData"){
load(file1)
varscale<-as.matrix(matriz1)
}
else{
if(dec=="."){
varscale<-read.csv(file1, header=F)
}
else{
varscale<-read.csv2(file1, header=F)
}
}
matrizz1<-varscale
}
if(qw==3){
if(save=="RData"){
load(file7)
varscale<-as.matrix(matriz5)
}
else{
if(dec=="."){
varscale<-read.csv(file7, header=F)
}
else{
varscale<-read.csv2(file7, header=F)
}
}
}
if(qw==4){
if(save=="RData"){
load(file8)
varscale<-as.matrix(matriz6)
}
else{
if(dec=="."){
varscale<-read.csv(file8, header=F)
}
else{
varscale<-read.csv2(file8, header=F)
}
}
}
ZZ[1,1]<-end.times
ZZ[2,1]<-"Printing plot"
if(qw==1) ZZ[2,2]<-main2 else hjjuy<-1
if(qw==2) ZZ[2,2]<-main1 else hjjuy<-1
if(qw==3) ZZ[2,2]<-main3 else hjjuy<-1
if(qw==4) ZZ[2,2]<-main4 else hjjuy<-1
write.table(ZZ,"Inf.txt", row.names=FALSE,col.names=FALSE)
if(any(varscale[-1,-1]!=-9999)==FALSE){
ZZ[1,1]<-end.times
ZZ[2,1]<-"This map is not depicted because it was not possible to estimate this estimator in any of the cells (see the file Estimators)"
ZZ[2,2]<-""
write.table(ZZ,"Inf.txt", row.names=FALSE,col.names=FALSE)
}
else{
if (AA=="World"){
if (missing(minLat)) minLat<--90 else minLat<-minLat
if (missing(maxLat)) maxLat<-90 else maxLat<-maxLat
if (missing(minLon)) minLon<--180 else minLon<-minLon
if (missing(maxLon)) maxLon<-180 else maxLon<-maxLon
if(missing(extent)) extent<-TRUE else extent<-extent
if(extent==TRUE) minLat<-minLat1-f else minLat<-minLat
if(extent==TRUE) maxLat<-maxLat1+f else maxLat<-maxLat
if(extent==TRUE) minLon<-minLon1-f else minLon<-minLon
if(extent==TRUE) maxLon<-maxLon1+f else maxLon<-maxLon
}
else{
if (missing(maxLon)){
if(max(datos1$Lon)<0) maxLon<-(max(datos1$Lon)-max(datos1$Lon)*inc) else maxLon<-(max(datos1$Lon)+max(datos1$Lon)*inc)
}
else {
maxLon<-maxLon
}
if (missing(minLon)){
if(min(datos1$Lon)<0) minLon<-(min(datos1$Lon)+min(datos1$Lon)*inc) else minLon<-(min(datos1$Lon)-min(datos1$Lon)*inc)
}
else {
minLon<-minLon
}
if (missing(maxLat)){
if(max(datos1$Lat)<0) maxLat<-(max(datos1$Lat)-max(datos1$Lat)*inc) else maxLat<-(max(datos1$Lat)+max(datos1$Lat)*inc)
}
else {
maxLat<-maxLat
}
if (missing(minLat)){
if(min(datos1$Lat)<0) minLat<-(min(datos1$Lat)+min(datos1$Lat)*inc) else minLat<-(min(datos1$Lat)-min(datos1$Lat)*inc)
}
else {
minLat<-minLat
}
}
Lon<-as.numeric(varscale[1,-1])
varLon<-as.numeric(varscale[1,-1])
a<-length(Lon)
for (i in 1:a){
if(i==1) varLon[i]<-((-180+Lon[i])/2) else varLon[i]<-((Lon[i-1]+Lon[i])/2)
}
Lat<-as.numeric(varscale[-1,1])
varLat<-as.numeric(varscale[-1,1])
a<-length(Lat)
for (i in 1:a){
if(i==1) varLat[i]<-((90+Lat[i])/2) else varLat[i]<-((Lat[i-1]+Lat[i])/2)
}
varLat<-(-varLat)
firstrow<-varscale[1,]
ajuste<-varscale[varscale[,1]<=maxLat&varscale[,1]>=minLat,]
ifelse(firstrow==ajuste[1,], jj<-1, ajuste<-rbind(firstrow,ajuste))
ajuste<-ajuste[,ajuste[1,]<=maxLon&ajuste[1,]>=minLon]
ajuste<-ajuste[-1,-1]
ajuste<-as.matrix(ajuste)
if(trans[1]==0){
ajuste<-replace(ajuste, ajuste==-9999,NA)
ajuste<-ajuste/trans[2]
ajuste<-replace(ajuste, is.na(ajuste),-9999)
}
else{
ajuste<-replace(ajuste, ajuste==-9999,NA)
ajuste<-ajuste*trans[2]
ajuste<-replace(ajuste, is.na(ajuste),-9999)
}
if(log[1]==0){
ajuste<-ajuste
}
else{
ajuste<-replace(ajuste, ajuste==-9999,NA)
ajuste<-log(ajuste+log[2])
ajuste<-replace(ajuste, is.na(ajuste),-9999)
}
ajuste<- ajuste[nrow(ajuste):1,]
varscale<-varscale[-1,-1]
varscale<-as.matrix(varscale)
if(trans[1]==0){
varscale<-replace(varscale, varscale==-9999,NA)
varscale<-varscale/trans[2]
varscale<-replace(varscale, is.na(varscale),-9999)
}
else{
varscale<-replace(varscale, varscale==-9999,NA)
varscale<-varscale*trans[2]
varscale<-replace(varscale, is.na(varscale),-9999)
}
if(log[1]==0){
varscale<-varscale
}
else{
varscale<-replace(varscale, varscale==-9999,NA)
varscale<-log(varscale+log[2])
varscale<-replace(varscale, is.na(varscale),-9999)
}
varscale<- varscale[nrow(varscale):1,]
varscale<-t(varscale)
if (maxLon>=180) maxLon<-180 else maxLon<-maxLon
if (minLon<=-180) minLon<--180 else minLon<-minLon
if (maxLat>=90) maxLat<-90 else maxLat<-maxLat
if (minLat<=-90) minLat<--90 else minLat<-minLat
if (missing(varscale)) varscale=NULL else varscale=varscale
legend.max=max(ajuste)
if(legend.max<=10){
legend.min=(if(min(ajuste[!ajuste==-9999])==0) min(ajuste[!ajuste==-9999])+(max(ajuste)/(length(colscale)-1)) else min(ajuste[!ajuste==-9999]))
}
else{
legend.min=min(ajuste[!ajuste==-9999])
}
if(legend.min<0) legend.min<-legend.min+legend.min*0.1/100 else legend.min<-legend.min-legend.min*0.1/100
if(legend.max<0) legend.max<-legend.max-legend.max*0.1/100 else legend.max<-legend.max+legend.max*0.1/100
Lati<-(maxLat+minLat)/2
if (pro==TRUE) aspe=(1/cos(Lati*pi/180)) else aspe=1
if (missing(asp)) asp=aspe else asp=asp
x<-0
y<-0
rm(datos1)
if(qw==1) file=jpg2 else hjjuy<-1
if(qw==2) file=jpg1 else hjjuy<-1
if(qw==3) file=jpg3 else hjjuy<-1
if(qw==4) file=jpg4 else hjjuy<-1
if(jpg==TRUE) jpeg(filename = file, width = 8000, height = 4000, units = "px", pointsize = 14, bg = "white", res = 600) else hhjhk<-1
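# (Editor's note) squishplot() shrinks the plotting region so that the requested
# aspect ratio 'asp' is preserved on the current device before the map is drawn.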
squishplot <- function(xlim,ylim,asp=1){
if(length(xlim) < 2) stop('xlim must be a vector of length 2')
if(length(ylim) < 2) stop('ylim must be a vector of length 2')
tmp <- par(c('plt','pin','xaxs','yaxs'))
if( tmp$xaxs == 'i' ){
xlim <- range(xlim)
} else {
tmp.r <- diff(range(xlim))
xlim <- range(xlim) + c(-1,1)*0.04*tmp.r
}
if( tmp$yaxs == 'i' ){
ylim <- range(ylim)
} else {
tmp.r <- diff(range(ylim))
ylim <- range(ylim) + c(-1,1)*0.04*tmp.r
}
tmp2 <- (ylim[2]-ylim[1])/(xlim[2]-xlim[1])
tmp.y <- tmp$pin[1] * tmp2 * asp
if(tmp.y < tmp$pin[2]){
par(pin=c(tmp$pin[1], tmp.y))
par(plt=c(tmp$plt[1:2], par('plt')[3:4]))
} else {
tmp.x <- tmp$pin[2]/tmp2/asp
par(pin=c(tmp.x, tmp$pin[2]))
par(plt=c(par('plt')[1:2], tmp$plt[3:4]))
}
return(invisible(tmp['plt']))
}
if(min(varscale[!varscale==-9999])==0) iniF<-(-0.00001) else iniF<-legend.min
if (maxLon==180 & minLon==-180 & minLat==-90 & maxLat==90){
xl<-185
xr<-195
}
if(qw==1) main=main2 else hjjuy<-1
if(qw==2) main=main1 else hjjuy<-1
if(qw==3) main=main3 else hjjuy<-1
if(qw==4){
main=main4
ndigits=ndigits+2
color7<-rev(colscale[-1])
colscale<-append(colscale[1],color7)
}
par(lwd=lwdP,fg="black",family=family)
tmp<-squishplot(xlim=c(minLon,maxLon), ylim=c(minLat,maxLat), asp=aspe)
legend.freq1=abs((legend.max-iniF)/(length(colscale)-1))
legend.freq=abs((legend.max-iniF)/(breaks-1))
if (missing(legend.pos)){
if((maxLon-minLon)>260 & (maxLon-minLon)/(maxLat-minLat)>2.265) legend.pos="x" else legend.pos="y"
}
if(legend.pos=="x"){
if (missing(cex.main)) cex.main=1.3 else cex.main=cex.main
}
if(legend.pos=="y"){
if (missing(cex.main)) cex.main=1.6 else cex.main=cex.main
}
if (legend.pos=="y") par(oma=c(0,0,0,1)) else par(oma=c(0,0,2,0))
image(varLon, varLat,varscale,xlim=c(minLon,maxLon),ylim=c(minLat,maxLat), axes=F, xaxs="i", yaxs="i", xlab="",ylab="", col=colscale, breaks=c(iniF,seq(iniF,legend.max,by=legend.freq1)))
par(new=T,lwd=lwdP)
plot(x,y,xlim=c(minLon,maxLon),ylim=c(minLat,maxLat),xlab=xlab, main="", axes=TRUE,
ylab = ylab, cex.lab=cex.lab, cex.axis= cex.axis,type="n",bty="l",
font.lab=font.lab, font.axis=font.axis,lab=lab,yaxs="i",xaxs="i",yaxt="n",xaxt="n")
mtext(text=main,side=3, line=0.3, cex=cex.main, font=font.main)
axis(side=1,xlim=c(minLon,maxLon),lwd=lwdP)
axis(side=2,ylim=c(minLat,maxLat),lwd=lwdP)
if (colbg=="
if (legend.pos=="y"){
if (xl==0){
x1<-(maxLon-minLon)*(-0.00106495)+0.747382095+maxLon
x2<-(maxLon-minLon)*(-0.003194851)+2.060146284+maxLon
}
else{
x1<-xl
x2<-xr
}
if(legend.max<=10){
sequ<-(seq(iniF,legend.max,by=legend.freq))
sequ<-round(sequ, digits=ndigits)
}
else{
if(iniF==0){
legend.freq=abs((legend.max-iniF)/(breaks-1))
sequ<-(seq(iniF,legend.max,by=legend.freq))
sequ<-round(sequ, digits=ndigits)
}
else{
sequ<-(seq(iniF,legend.max,by=legend.freq))
sequ<-round(sequ, digits=ndigits)
}
}
plotrix::color.legend(xl=x1, yb=minLat, xr= x2,
yt=maxLat, sequ, gradient="y", align="rb", cex=cex.legend, rect.col=colscale[-1])
}
else{
if (yb==0){
if(!is.null(main)){
y1<-maxLat+(maxLat-minLat)*(0.101851852)-1.333333333
y2<-maxLat+(maxLat-minLat)*(0.157407407)-1.333333333
}
else{
y1<-maxLat+(maxLat-minLat)*(0.027777778)
y2<-maxLat+(maxLat-minLat)*(0.083333333)
}
}
else{
y1<-yb
y2<-yt
}
if(legend.max<=10){
sequ<-(seq(iniF,legend.max,by=legend.freq))
sequ<-round(sequ, digits=ndigits)
}
else{
sequ<-(seq(iniF,legend.max,by=legend.freq))
sequ<-round(sequ, digits=ndigits)
}
plotrix::color.legend(xl=minLon, yb=y1, xr=maxLon, yt=y2, sequ,
gradient="x", align="lt", cex=cex.legend, rect.col=colscale[-1])
}
if (AA=="World") {
polygon(adworld$Lon,adworld$Lat,col=colcon, border=colf)
if(!is.null(exclude)){
polygon(adworld2$Lon,adworld2$Lat,col=colexc, border=colfexc)
}
}
else {
polygon(adworld1$Lon,adworld1$Lat,col=colcon, border=colf)
polygon(adworld2$Lon,adworld2$Lat,col=colexc, border=colfexc)
}
par(tmp)
if(jpg==TRUE) dev.off() else hhjk<-1
}
}
ObservedRichness<-matriz1
Records<-matriz4
Completeness<-matriz5
Slope<-matriz6
if(save=="RData"){
save(ObservedRichness, file=file1)
save(Records, file=file6)
save(Completeness, file=file7)
if(method=="accumulation"){
save(Slope,file = file8)
}
}
ZZ[1,1]<-"END"
ZZ[1,2]<-""
ZZ[2,1]<-""
ZZ[2,2]<-""
write.table(ZZ,"Inf.txt", row.names=FALSE,col.names=FALSE)
rm(datosf)
rm(datosL)
rm(datosx)
rm(matriz)
rm(matriz1)
rm(matriz4)
rm(matriz5)
rm(matriz6)
rm(values)
rm(values1)
rm(values2)
rm(values3)
rm(varscale)
rm(temp)
rm(temp4)
} |
empCreditScoring <- function(scores, classes, p0=0.55, p1=0.1, ROI=0.2644){
roc = .empRocInfo(scores, classes)
alpha <- 1-p0-p1
lambda <- c(0,(roc$pi1*ROI/roc$pi0)*diff(roc$F1)/diff(roc$F0))
lambda <- c(lambda[lambda<1],1)
lambdaii <- head(lambda,n=-1)
lambdaie <- tail(lambda,n=-1)
F0 <- roc$F0[1:length(lambdaii)]
F1 <- roc$F1[1:length(lambdaii)]
EMPC <- sum(alpha*(lambdaie-lambdaii)*(roc$pi0*F0*(lambdaie+lambdaii)/2 - ROI*F1*roc$pi1)) +
(roc$pi0*tail(F0,n=1)-ROI*roc$pi1*tail(F1,n=1))*p1
EMPCfrac <- sum(alpha*(lambdaie-lambdaii)*(roc$pi0*F0+roc$pi1*F1)) +
p1*(roc$pi0*tail(F0,n=1) + roc$pi1*tail(F1,n=1))
list(EMPC=EMPC, EMPCfrac=EMPCfrac)
} |
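# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original code).
# empCreditScoring() computes the Expected Maximum Profit measure for a credit
# scoring model; it relies on the internal helper .empRocInfo(), defined
# elsewhere in its parent package, so that helper is assumed to be available
# before the commented call below is run. The score/class orientation
# (1 = defaulter, higher score = riskier) is an assumption for this example.
set.seed(1)
n <- 1000
classes <- rbinom(n, 1, 0.2)                              # ~20% defaulters
scores  <- rnorm(n, mean = ifelse(classes == 1, 1, 0))    # noisy, informative scores
# empCreditScoring(scores, classes)
# # -> list with $EMPC (expected maximum profit per customer) and
# #    $EMPCfrac (fraction of customers excluded at the optimal cutoff)
# ---------------------------------------------------------------------------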
noeffect.circ.lin <- function (x, y, bw, method = "LL", calib = "chisq", n_boot = 500) {
name1 <- deparse(substitute(x))
name2 <- deparse(substitute(y))
DNAME <- paste(paste(name1, collapse = "\n"), "and",
paste(name2, collapse = "\n"))
if (!is.numeric(x))
stop("argument 'x' must be numeric")
if (!is.numeric(y))
stop("argument 'y' must be numeric")
if (length(x) != length(y))
stop("'x' and 'y' must have the same number of observations")
if (is.circular(x)) {
datacircularp <- circularp(x)
}else{
datacircularp <- list(type = "angles", units = "radians",
template = "none", modulo = "2pi", zero = 0,
rotation = "counter")
}
x <- conversion.circular(x, units = "radians", zero = 0,
rotation = "counter", modulo = "2pi")
nax <- is.na(x)
nay <- is.na(y)
x <- x[!nax & !nay]
y <- y[!nax & !nay]
if ((sum(nax) + sum(nay)) > 0)
warning("Missing values were removed.", "\n")
n <- length(x)
if (n == 0)
stop("No observations (at least after removing missing values)")
if (missing(bw)) {
bw <- bw.reg.circ.lin(x, y, method = method) * 4
} else{
if (is.numeric(bw)){
if (bw<0)
stop("Argument 'bw' must be positive")
}else{
stop("Argument 'bw' must be numeric")
}
}
if (!is.character(calib))
stop("calib must be either ''chisq'' or ''boot'' ")
if (calib!="chisq"&calib!="boot")
stop("calib must be either ''chisq'' or ''boot'' ")
if (!is.character(method))
stop ("method must be either ''LL'' or ''NW'' ")
if (method!="LL"&method!="NW")
stop("method must be either ''LL'' or ''NW'' ")
if (!is.numeric(n_boot))
stop(" 'n_boot' must be numeric")
if (n_boot <= 0)
stop(" 'n_boot' must be > 0")
me <- mean(y)
S <- kernCL(x, x, bw, method = method, tol = 300)
m_hat <- S %*% y
L <- matrix(rep(1/n,n^2),nrow=n)
I <- diag(n)
A <- t(I - S) %*% (I - S)
B <- I - L - A
rss0 <- as.numeric(t(y) %*% (I-L) %*% y)
rss1 <- as.numeric(t(y) %*% A %*% y)
obs <- (rss0 - rss1) / rss1
if (calib == "chisq") {
T <- B - A * obs
k1 <- sum(diag(T))
C <- T %*% T
k2 <- 2 * sum(diag(C))
k3 <- 8 * sum(diag(C %*% T))
aa <- abs(k3 / (4 * k2))
bb <- (8 * k2^3) / k3^2
cc <- k1 - aa * bb
p <- 1 - pchisq(-cc / aa, bb)
}else{
res <- y - me
y_boot <- numeric(n)
stat_boot <- numeric(n_boot)
for (b in 1:n_boot){
res_boot <- sample(res, n, replace=T)
y_boot <- me + res_boot
me_boot <- mean(y_boot)
S_boot <- kernCL(x, x, bw, method = method, tol = 300)
m_hat_boot <- S_boot %*% y_boot
A_boot <- t(I - S_boot) %*% (I - S_boot)
B_boot <- I - L - A_boot
rss0_boot <- as.numeric(t(y_boot) %*% (I - L) %*% y_boot)
rss1_boot <- as.numeric(t(y_boot) %*% A_boot %*% y_boot)
stat_boot[b] <- (rss0_boot-rss1_boot) / rss1_boot
}
p <- sum(ifelse(stat_boot >= obs, 1, 0)) / n_boot
}
meth <- "No-effect test for a circular predictor and a real-valued response"
STATISTIC <- obs
names(STATISTIC) <- "C.obs"
PARAMETER <- bw
names(PARAMETER) <- "bw"
structure(list( statistic = STATISTIC, alternative = "Significant effect",
p.value = p, method = meth, parameter = PARAMETER, data.name = DNAME),
class = "htest")
} |
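# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original code).
# noeffect.circ.lin() tests whether a circular predictor has any effect on a
# real-valued response. It depends on the 'circular' package and on the helpers
# kernCL() and bw.reg.circ.lin() from its parent package, so the calls below are
# shown commented out; only the simulated data are plain base R.
set.seed(2)
n <- 100
theta <- runif(n, 0, 2 * pi)                  # angles, treated as radians
y <- 2 + sin(theta) + rnorm(n, sd = 0.3)      # response depending on theta
# noeffect.circ.lin(theta, y, method = "NW", calib = "chisq")   # chi-squared calibration
# noeffect.circ.lin(theta, y, calib = "boot", n_boot = 200)     # bootstrap calibration
# ---------------------------------------------------------------------------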
Life.lines <-
function( entry.date = NA,
exit.date = NA,
birth.date = NA,
entry.age = NA,
exit.age = NA,
risk.time = NA
)
{
if( conv <- any( inherits( entry.date, "Date" ),
inherits( exit.date, "Date" ),
inherits( birth.date, "Date" ),
inherits( entry.age , "difftime" ),
inherits( exit.age , "difftime" ),
inherits( risk.time, "difftime" ) ) )
{
if( inherits( entry.date, "Date" ) ) entry.date <- as.numeric( entry.date ) / 365.25 + 1970
if( inherits( exit.date, "Date" ) ) exit.date <- as.numeric( exit.date ) / 365.25 + 1970
if( inherits( birth.date, "Date" ) ) birth.date <- as.numeric( birth.date ) / 365.25 + 1970
if( inherits( entry.age , "difftime" ) ) entry.age <- as.numeric( entry.age ) / 365.25
if( inherits( exit.age , "difftime" ) ) exit.age <- as.numeric( exit.age ) / 365.25
if( inherits( risk.time, "difftime" ) ) risk.time <- as.numeric( risk.time ) / 365.25
class( entry.date ) <- "numeric"
class( exit.date ) <- "numeric"
class( birth.date ) <- "numeric"
class( entry.age ) <- "numeric"
class( exit.age ) <- "numeric"
class( risk.time ) <- "numeric"
}
wh <- (1:6)[!is.na( list( entry.date,
entry.age,
exit.date,
exit.age,
birth.date,
risk.time ) )]
LL <- rbind( entry.date,
entry.age,
exit.date,
exit.age,
birth.date,
risk.time )
M <- rbind( c( -1, 1, 0, 0, 1, 0 ),
c( 0, 0, -1, 1, 1, 0 ),
c( 0, 1, 0, -1, 0, 1 ) )
if( qr( M[,-wh[1:3]] )$rank < 3 )
cat( "Insufficient information to display life lines" )
A1 <- M[, wh[1:3]]
A2 <- M[,-wh[1:3]]
x1 <- LL[wh[1:3],]
x2 <- -solve( A2 ) %*% A1 %*% x1
LL[-wh[1:3],] <- x2
LL <- data.frame( t(LL) )
attr( LL, "Date" ) <- conv
if( conv )
{
LL[,c(1,3,5)] <- ( LL[,c(1,3,5)] - 1970 ) * 365.25
LL[,c(2,4,6)] <- LL[,c(2,4,6)] * 365.25
class( LL[,1] ) <-
class( LL[,3] ) <-
class( LL[,5] ) <- "Date"
class( LL[,2] ) <-
class( LL[,4] ) <-
class( LL[,6] ) <- "difftime"
}
LL
} |
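# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original code).
# Any three of the six quantities determine the remaining three. With plain
# numeric calendar years (no Date/difftime objects) the function uses only base
# R, so this call can be run as is.
Life.lines(entry.date = c(1990.5, 1995.0),
           exit.date  = c(2000.5, 2001.0),
           birth.date = c(1960.5, 1970.0))
# -> data frame with entry.date, entry.age, exit.date, exit.age, birth.date and
#    risk.time completed for each subject; e.g. for subject 1: entry.age = 30,
#    exit.age = 40, risk.time = 10.
# ---------------------------------------------------------------------------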
fmodelpcm <-
function(zeta, y, bpar, prior = dnorm, ...) {
if (is.vector(y)) y <- matrix(y, 1, length(y))
m <- nrow(bpar)
r <- ncol(bpar) + 1
prob <- matrix(0, m, r)
storage.mode(y) <- "integer"
storage.mode(bpar) <- "double"
storage.mode(prob) <- "double"
tmp <- .Fortran("fmodelpcm", zeta = as.double(zeta), y = y,
m = as.integer(m), r = as.integer(r), s = as.integer(nrow(y)),
bpar = bpar,
loglik = as.double(0), prob = prob)
return(list(post = tmp$loglik + log(prior(zeta, ...)), prob = tmp$prob))
} |
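# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original code).
# fmodelpcm() evaluates the log-posterior and category probabilities of a
# partial credit model at a latent trait value zeta. It calls the compiled
# Fortran routine "fmodelpcm" from its parent package, so the calls below are
# commented out; the objects only illustrate the assumed input shapes
# (one response pattern per row of y, one column per item, responses 0..r-1).
set.seed(4)
m <- 4                                            # number of items
r <- 3                                            # number of response categories
bpar <- matrix(rnorm(m * (r - 1)), m, r - 1)      # item step parameters, m x (r-1)
y <- matrix(sample(0:(r - 1), m, replace = TRUE), 1, m)   # one response pattern
# fmodelpcm(zeta = 0, y = y, bpar = bpar)                         # N(0,1) prior
# fmodelpcm(zeta = 0, y = y, bpar = bpar, prior = dnorm, sd = 2)  # wider prior via '...'
# ---------------------------------------------------------------------------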
context("loading a backup")
test_that("load_last is working", {
pat <- system.file("extdata",
"Podocarpus_2020-01-10.rda",
package = "taxlist")
use_pat <- gsub("_[0-9]+-[0-9]+-[0-9]+\\.rda$", "", pat)
expect_is({
load_last(use_pat)
Podocarpus
}, "taxlist")
}
) |