code
stringlengths 1
13.8M
|
---|
# Demonstrate how outliers distort a least-squares fit using the star
# cluster data (log light intensity vs. log temperature) from faraway::star.
# Requires: faraway, ggformula (gf_*), dplyr (%>%, filter, mutate).
Stars <- faraway::star
star.plot1 <- gf_point(light ~ temp, data = Stars)
# Keep only hot stars (temp > 3.7) — presumably excluding the cool giant
# outliers; TODO confirm the cutoff against the data.
HotStars <- Stars %>% filter(temp > 3.7)
star.model1 <- lm(light ~ temp, data = Stars)
star.model2 <- lm(light ~ temp, data = HotStars)
# Overlay both fits on the scatterplot: full-data fit (gray dotted) vs.
# hot-stars-only fit (red dashed); label the cool stars (temp < 4.0) with
# their row numbers, nudged right by 0.04 so labels don't sit on the points.
gf_point(light ~ temp, data = Stars) %>%
gf_lm(color = "gray50", linetype = "dotted") %>%
gf_lm(color = "red", linetype = "dashed", data = HotStars) %>%
gf_text(light ~ (temp + 0.04), label = ~ as.character(id),
data = Stars %>% mutate(id = 1:nrow(.)) %>% filter(temp < 4.0)) |
.Random.seed <-
c(403L, 189L, 490957550L, -1217174692L, 1438670993L, -1595435593L,
-610662280L, 1459477894L, -103422359L, -735922324L, -1413603500L,
-1141420077L, -489311481L, -1964054346L, -645709246L, 327042884L,
1323531489L, -72453445L, 1354746922L, 1679288486L, -1845354314L,
271111761L, 2029969097L, 2111552298L, 912758045L, -1752473824L,
-30400337L, 709731555L, 1251824045L, 1391788087L, -824511571L,
-616061544L, -1435258634L, -195963022L, 1201609562L, -1986949015L,
932315961L, -904103660L, 60972810L, -1241266019L, -307560314L,
-50696194L, 145445864L, 630273943L, -577761799L, -171162704L,
-189815997L, 926976159L, 1353082083L, -278715741L, -1232709859L,
399169289L, -736337852L, 1579139708L, 1826149352L, -1780294960L,
-738588781L, 354234898L, 1732116876L, 738568050L, 2080409687L,
133498717L, 2099840733L, -62830851L, 1164496912L, 889281906L,
910853988L, -1315592830L, -1562765319L, 2133689272L, -1611056241L,
1754163178L, -938613377L, -2090189185L, 2070481746L, 518663225L,
-424755412L, 624610880L, 1087215960L, 1753900946L, 1861994741L,
344455468L, 382060915L, 1158762015L, 1668279272L, 1693730970L,
-1779377816L, 1617848016L, -907296250L, 460091981L, 44822549L,
-1736931095L, 132232650L, -1817244008L, -1883135508L, 915483967L,
474322394L, -1868393922L, -834019750L, 1098405428L, 710301994L,
975212936L, 421097643L, 1860625142L, -1453093250L, -607279562L,
1690224885L, -677555511L, -1505739892L, 55430759L, 1088133158L,
-771152529L, -252097648L, 1335911022L, -2122249746L, -1701760917L,
-490497563L, 2121264785L, -1730503473L, -315520443L, -373977026L,
-663449740L, -1196362016L, -1472842080L, -1904684128L, -770868952L,
-827348320L, -884803127L, -373112375L, -2010648669L, 1918487090L,
-1847796400L, 116936266L, 165058818L, 2012598056L, -307156925L,
1772372794L, -2058873552L, 447610119L, 1967396896L, 1722869676L,
627327368L, -1253918630L, -934135821L, -1331082879L, 1531185136L,
1713379495L, 634744795L, 1803656697L, -807398566L, 1253584867L,
-175788510L, -527978100L, 921839264L, 2064912725L, -1971206582L,
-2082051585L, 667995882L, -96760145L, 1951867024L, 744626471L,
1554541349L, -1351611224L, 155165411L, -1528779109L, -1603901292L,
-2071297018L, 1707022795L, 1400799561L, 852100145L, -1244007118L,
-1471872566L, 1941371861L, -1638542199L, 324196132L, -616094088L,
967191664L, 97475157L, 516224644L, -1870067534L, -865811092L,
627529438L, 1901026887L, -747414194L, -1846408795L, -2026166662L,
2110040548L, 1494528248L, 112381043L, -1641518178L, -645999203L,
1705261178L, -1022513250L, 1298620166L, 1399121177L, -572826245L,
-701638023L, 261351408L, 923292180L, -2033073867L, -1130741106L,
949707591L, 1655003169L, -1747637913L, -1027456988L, -1560155292L,
-151120880L, 107156311L, -1132337363L, -2142009475L, -1181735906L,
1952653518L, -2084253414L, 192751857L, -782535039L, 1207639863L,
1445600814L, -2034802863L, -1996903467L, -267225668L, 1911417240L,
-1907355987L, 519119028L, -609606158L, -772047907L, 27253397L,
-1829021370L, -567913569L, 1297088827L, -1826904098L, -236841308L,
-416266581L, -1594096845L, -1700329051L, -736422670L, -753186125L,
1317000694L, 633144668L, 1463847668L, -484433985L, 278580097L,
-825151463L, -842078215L, -1954145699L, -1021962281L, 1520651138L,
-1895996570L, 1439048565L, -997307616L, 1794223796L, 970604100L,
764192112L, -293996214L, 481124458L, -185725263L, -743745516L,
935188249L, -1435920657L, 1698382723L, 1041177325L, -1473403959L,
219676940L, -523184685L, -2044450746L, 229293775L, 1641304956L,
-363628148L, 381429773L, 570845299L, -1250148168L, -1504894321L,
-711160621L, -1824810957L, -2003165392L, -1559784685L, -55852780L,
1236290073L, -18469487L, 1335867783L, 674219597L, 1107181591L,
-322089900L, -2073765921L, 673093785L, 1410895919L, 1152441256L,
-522898346L, 1333387558L, 1640679896L, -1723118665L, 919991743L,
180948598L, -11431479L, 725894232L, -1890912363L, -1768749845L,
441744443L, -1625689866L, -884653537L, -459669645L, 782065939L,
203985515L, -1211439614L, -538397452L, -1462068114L, -1123262037L,
-1706420334L, -272867055L, -704075683L, 753043746L, 1120004295L,
211948991L, -2084424770L, 1929844698L, 1797819102L, 1831586871L,
-1611645402L, 1410941731L, 1072626369L, 1890752498L, -654743435L,
-1506961518L, 1665827435L, 2138598083L, 717686576L, 982165898L,
-738315815L, 1852957499L, -41392941L, -1901326130L, -2020813996L,
-320187343L, 804579273L, 36556357L, 1028244827L, 1324306804L,
-2120659597L, 620032731L, 273359679L, 841443377L, 1918694108L,
1541016543L, -187492256L, -1105954492L, 1440011823L, -1629424023L,
735853749L, -737178000L, 799463297L, -978160022L, -516373956L,
-1792780652L, -337459497L, 1225063829L, -1488639662L, 2133407501L,
929088252L, 1034252797L, 1366776725L, 1303476073L, 424471470L,
1654228818L, -1757594990L, -1654295971L, -544097769L, -1793680820L,
1503449010L, 1995239913L, 1665147474L, 1958398312L, -953629887L,
-1310162551L, -583107641L, -1407700730L, -877072285L, 1503567110L,
-496633413L, 1722567239L, 2041365269L, -238800148L, 23755723L,
-1333777885L, 1922567960L, 951282240L, 77037769L, 1394387630L,
2124887516L, -872286894L, -573185060L, 1365255076L, 1238843987L,
-775720473L, 842910229L, -926974018L, -664487687L, -309590549L,
1643973941L, 236905321L, -2011680573L, -447836929L, 368331116L,
-2133821156L, 1967560198L, 63447006L, -882935093L, -2077723977L,
-401004775L, 1119619688L, 130321740L, -2091761397L, -711225014L,
-310928724L, -1648255889L, -757127249L, -1584183985L, -714046502L,
1428518756L, -1164535043L, -2032831613L, 1270306712L, -253764015L,
1264853485L, -443548444L, -1075841221L, -213037238L, -435803170L,
-327838780L, -178258830L, 1276374881L, -1516283478L, -90024388L,
-1235358932L, -524924672L, -918057212L, 486043020L, 1090595729L,
848934478L, 1102606194L, -356973804L, 581859473L, 1370380831L,
-955909766L, -639449652L, 1518795840L, -1767355527L, -1085875782L,
-1104610720L, -1238142052L, 18602075L, -1041637526L, -747028661L,
-975766137L, -1053691304L, 1345574295L, -1162157782L, 178441501L,
-1953905612L, 968981795L, 1582343163L, -1613657709L, 1179859854L,
754714026L, -1276878683L, 1095792200L, -817145602L, -739874497L,
1946022770L, 901215628L, 824638862L, -141329983L, 161806672L,
-1541516711L, 544683828L, -984664213L, -1044120739L, -1922473841L,
-712999806L, 56034776L, 1744298770L, 1919308355L, -1707866372L,
600419792L, -1092621522L, 480063956L, -1289049772L, 1046058236L,
-1519598440L, 1190189316L, 2128886055L, -1245081056L, 1121772537L,
-1514776021L, -211481091L, 1630922488L, -1752260963L, 167514230L,
-1806485035L, -281586027L, 912784184L, -1523538911L, -54729297L,
-270644441L, -860395540L, 42905577L, 961873681L, -1788891533L,
1268680349L, 1606294965L, -1038563238L, 106179516L, 824889524L,
-993914639L, -880043976L, -1072722257L, 962411414L, -494098780L,
-2031653421L, -1969886864L, 142269176L, -351526076L, -425289609L,
-707793391L, 826041699L, 2022430038L, 1768265120L, 1495859979L,
-354156308L, 1008641256L, -1414038487L, -1919058590L, 496369326L,
2075222322L, -1936676285L, 373873820L, -1877362354L, 1688418813L,
-139539598L, -738379834L, -204620798L, -423497826L, 1580985157L,
625730391L, 1507124026L, -580477066L, -1943312386L, 828477820L,
-1987934113L, 2096546268L, -2072478372L, 198392697L, -565339972L,
1236431061L, 1384455286L, 1691065298L, 450633844L, 64577132L,
-1263319072L, 1266833830L, 1725854435L, -629896894L, -354716585L,
-141119352L, -136475695L, 1999993945L, 285479357L, 1314582388L,
1026274644L, 1151312497L, -939784308L, 1758737729L, 942851703L,
1961339074L, -1699660587L, 1903778489L, 396877115L, 959268031L,
286173964L, 1350047655L, -700382521L, -1784753599L, -1393689082L,
-2139829422L, -757503258L, 1406770951L, 1981915965L, 1320899732L,
2133368553L, -1276251290L, -435003433L, -1558923338L, -448808681L,
-549997498L, -1065049537L, -402418843L, 1627295682L, -1109144703L,
1947597939L, -1707323542L, -1926236146L, -424488164L, -928379317L,
-1303529079L, 2062398465L, -1435401652L, -889491093L, 1916749749L,
-465432964L, 1218038433L, 1101624518L, 1396526615L, -1067949865L,
-584064899L, 1470314383L, 1482455407L, -1241076574L, 328145978L,
-325843679L, 375170633L, -1604101133L, -1230912356L, 351756244L,
-466101047L, 677712883L, -612029992L, -343273266L, -2034661964L
) |
#' Plot a map layer (dispatcher)
#'
#' Validates `type` and `var`, then forwards the caller's own (unevaluated)
#' arguments to the matching `mf_*` implementation function.
#'
#' @param x a spatial object whose column names are checked against `var`.
#' @param var variable name(s) to map; must be column(s) of `x` unless
#'   `type = "base"`.
#' @param type one of "base", "prop", "choro", "typo", "symb", "grad",
#'   "prop_choro", "prop_typo", "symb_choro".
#' @param ... remaining arguments are passed through untouched to the
#'   selected `mf_*` function.
#' @return whatever the dispatched `mf_*` function returns.
mf_map <- function(x, var, type = "base",
                   breaks, nbreaks, pal, alpha = 1,
                   inches, val_max, symbol, col,
                   lwd_max, val_order, pch, cex,
                   border, lwd, bg,
                   col_na, cex_na, pch_na,
                   leg_pos, leg_title, leg_title_cex,
                   leg_val_cex, leg_val_rnd, leg_no_data,
                   leg_frame, add,
                   ...) {
  # Reject unknown map types up front with an explicit error.
  if (!type %in% c(
    "base", "prop", "choro", "typo", "symb", "grad",
    "prop_choro", "prop_typo", "symb_choro"
  )) {
    stop(paste0(
      '\'type\' should be one of "base", "prop", "choro", "typo", ',
      '"symb", "grad", "prop_choro", "prop_typo" or "symb_choro".'
    ),
    call. = FALSE
    )
  }
  if (!missing(var)) {
    if (type == "base") {
      message("Please use the 'type' argument to map variables.")
    } else if (!all(var %in% names(x))) {
      # Every requested variable must be a column of x.
      stop("It is likely that 'var' is not a valid variable name.",
        call. = FALSE
      )
    }
  }
  # Capture the caller's arguments unevaluated, drop `type`, and forward
  # them; evaluation in parent.frame() lets promises (caller-local
  # variables) resolve where the user wrote them.
  argx <- as.list(match.call()[-1])
  argx <- argx[names(argx) != "type"]
  switch(type,
    prop = do.call(what = mf_prop, argx, envir = parent.frame()),
    choro = do.call(what = mf_choro, argx, envir = parent.frame()),
    typo = do.call(what = mf_typo, argx, envir = parent.frame()),
    symb = do.call(what = mf_symb, argx, envir = parent.frame()),
    base = do.call(what = mf_base, argx, envir = parent.frame()),
    grad = do.call(what = mf_grad, argx, envir = parent.frame()),
    prop_choro = do.call(what = mf_prop_choro, argx, envir = parent.frame()),
    prop_typo = do.call(what = mf_prop_typo, argx, envir = parent.frame()),
    symb_choro = do.call(what = mf_symb_choro, argx, envir = parent.frame())
  )
}
# Recursively apply `func` to every node of an XML DOM tree.
# Thin wrapper around the XML package's C routine RS_XML_RecursiveApply;
# the NULL third argument is forwarded as-is to the C entry point.
xmlDOMApply <-
function(dom, func)
{
.Call("RS_XML_RecursiveApply", dom, func, NULL, PACKAGE = "XML")
} |
#' Screen covariates by correlation-test p-value rank
#'
#' Computes a correlation-test p-value for each column of `X` against `Y`
#' and keeps the `rank` columns with the smallest p-values. Constant
#' columns (zero variance) are assigned a p-value of 1 so they are never
#' selected ahead of informative columns.
#'
#' @param Y numeric response vector.
#' @param X numeric matrix of candidate covariates (columns screened).
#' @param family unused here; kept for the screening-function interface.
#' @param method correlation method passed to cor.test ("pearson" etc.).
#' @param rank number of columns to keep.
#' @param ... ignored.
#' @return logical vector, TRUE for the selected columns of X.
screen.corRank <- function(Y, X, family, method = 'pearson', rank = 2, ...) {
  listp <- apply(X, 2, function(x, Y, method) {
    v <- var(x)
    if (is.na(v)) {
      NA_real_  # propagate NA, matching the original ifelse() behavior
    } else if (v <= 0) {
      1         # constant column: no association, worst possible p-value
    } else {
      cor.test(x, y = Y, method = method)$p.value
    }
  }, Y = Y, method = method)
  # NOTE: the parameter `rank` shadows base::rank, but R's call lookup
  # skips non-function bindings, so rank(listp) still finds the function.
  whichVariable <- (rank(listp) <= rank)
  return(whichVariable)
}
# Integration test of git2r merge behavior: fast-forward merges, true
# (non-FF) merges, and conflicting merges, in a throwaway repository.
library(git2r)
source("util/check.R")
sessionInfo()
# Fresh temporary repo with an initial commit on "main".
path <- tempfile(pattern = "git2r-")
dir.create(path)
repo <- init(path, branch = "main")
config(repo, user.name = "Alice", user.email = "[email protected]")
writeLines("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do",
con = file.path(path, "test.txt"))
add(repo, "test.txt")
commit_1 <- commit(repo, "Commit message 1")
# branch1: one commit on top of commit_1.
checkout(repo, "branch1", create = TRUE)
writeLines("Branch 1", file.path(path, "branch-1.txt"))
add(repo, "branch-1.txt")
commit_2 <- commit(repo, "Commit message branch 1")
# branch2: two commits, also rooted at commit_1.
b_2 <- branch_create(commit_1, "branch2")
checkout(b_2)
writeLines("Branch 2", file.path(path, "branch-2.txt"))
add(repo, "branch-2.txt")
commit_3 <- commit(repo, "Commit message branch 2")
writeLines(c("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do",
"eiusmod tempor incididunt ut labore et dolore magna aliqua."),
con = file.path(path, "test.txt"))
add(repo, "test.txt")
commit_4 <- commit(repo, "Second commit message branch 2")
# The merge base of the two branch tips must be the initial commit.
stopifnot(identical(merge_base(commit_2, commit_3), commit_1))
b <- branches(repo)
checkout(b[sapply(b, "[", "name") == "main"][[1]], force = TRUE)
# main has not advanced, so merging branch1 must fast-forward (no merge
# commit: sha() of a fast-forward result is NA).
m_1 <- merge(repo, "branch1")
stopifnot(identical(m_1$fast_forward, TRUE))
stopifnot(identical(m_1$conflicts, FALSE))
stopifnot(identical(sha(m_1), NA_character_))
# Merging branch2 (diverged history) must create a real merge commit.
m_2 <- merge(path, "branch2")
stopifnot(identical(m_2$fast_forward, FALSE))
stopifnot(identical(m_2$conflicts, FALSE))
stopifnot(identical(sha(m_2), sha(commits(repo)[[1]])))
# branch3 and main both edit the same line of test.txt -> conflict.
checkout(repo, "branch3", create = TRUE)
writeLines(c("Lorem ipsum dolor amet sit, consectetur adipisicing elit, sed do",
"eiusmod tempor incididunt ut labore et dolore magna aliqua."),
con = file.path(path, "test.txt"))
add(repo, "test.txt")
commit(repo, "Commit message branch 3")
checkout(repo, "main", force = TRUE)
writeLines(c("Lorem ipsum dolor sit amet, adipisicing consectetur elit, sed do",
"eiusmod tempor incididunt ut labore et dolore magna aliqua."),
con = file.path(path, "test.txt"))
add(repo, "test.txt")
commit(repo, "Some commit message branch 1")
m_3 <- merge(repo, "branch3")
stopifnot(identical(m_3$up_to_date, FALSE))
stopifnot(identical(m_3$fast_forward, FALSE))
stopifnot(identical(m_3$conflicts, TRUE))
stopifnot(identical(sha(m_3), NA_character_))
# After a conflicting merge, test.txt must show up as conflicted.
stopifnot(identical(status(repo),
structure(list(staged = empty_named_list(),
unstaged = list(conflicted = "test.txt"),
untracked = empty_named_list()),
class = "git_status")))
unlink(path, recursive = TRUE) |
#' Parametric bootstrap of Weibull survival probability at time t0
#'
#' Repeatedly simulates right-censored Weibull data with the given
#' parameters, refits a Weibull AFT model (survival::survreg), and records
#' the fitted survival probability S(t0) for each replicate.
#'
#' @param t0 time point at which survival is evaluated.
#' @param B number of bootstrap replicates.
#' @param theta length-2 vector c(shape, scale) of the Weibull model.
#' @param C rate of the exponential censoring distribution.
#' @param N sample size per replicate.
#' @return numeric vector of length B with bootstrapped S(t0) values.
boot_weibull <- function(t0, B = 1000, theta, C, N) {
  # Simulate N right-censored Weibull event times.
  simulWeib <- function(N, shape, scale, rateC) {
    Tlat <- rweibull(N, shape = shape, scale = scale)  # latent event times
    cens <- rexp(n = N, rate = rateC)                  # censoring times
    data.frame(id = 1:N,
               time = pmin(Tlat, cens),
               status = as.numeric(Tlat <= cens))      # 1 = event observed
  }
  # Weibull CDF parameterised as c(shape, scale); hoisted out of the loop
  # (the original redefined it every iteration and called it `F`, which
  # shadows the built-in FALSE alias).
  weib_cdf <- function(theta, x) {
    pweibull(x, shape = theta[1], scale = theta[2])
  }
  boot <- numeric(B)  # preallocate instead of growing in the loop
  for (l in 1:B) {
    data_rb <- simulWeib(N = N, shape = theta[1], scale = theta[2], rateC = C)
    mod1b <- survreg(Surv(time, status) ~ 1, data = data_rb, dist = "weibull")
    # Convert survreg's (scale, intercept) to Weibull (shape, scale).
    theta_rb <- c(1 / mod1b$scale, exp(mod1b$coefficients))
    boot[l] <- 1 - weib_cdf(theta_rb, t0)
  }
  return(boot)
}
# Derivative of the ISOpureS2 loglikelihood with respect to log(kappa).
# Works in log-space (log_kappa) so the multiplication by expww at the end
# is the chain-rule factor d(kappa)/d(log_kappa) = exp(log_kappa).
ISOpureS2.model_optimize.kappa.kappa_deriv_loglikelihood <- function(log_kappa, model) {
	# kappa is bounded below by MIN_KAPPA for numerical stability.
	kappa <- (exp(t(log_kappa)) + model$MIN_KAPPA);
	expww <- exp(log_kappa);
	omegaPP <- model$omega %*% model$PPtranspose;
	# ISOpure.util.repmat tiles kappa across the columns of PPtranspose
	# (external helper from the ISOpure package).
	kappaomegaPP <- omegaPP * ISOpure.util.repmat(t(kappa), 1, ncol(model$PPtranspose));
	D <- nrow(model$log_cc);
	# NOTE(review): G is never used below — candidate for removal.
	G <- ncol(model$log_cc);
	# NOTE(review): grown element-by-element; could be numeric(D) preallocated.
	deriv_loglikelihood <- 0;
	for (dd in 1:D) {
		# Per-sample gradient: digamma(kappa_d) minus the Dirichlet
		# normalisation term plus the data term from log_cc.
		deriv_loglikelihood[dd] <- digamma(kappa[dd]) - (omegaPP[dd,,drop=FALSE]%*%t(digamma(kappaomegaPP[dd,, drop=FALSE]))) +( omegaPP[dd,,drop=FALSE]%*%t(model$log_cc[dd,,drop=FALSE]));
	}
	# Chain rule back to log-space, then negate (optimizer minimises).
	deriv_loglikelihood <- deriv_loglikelihood * expww;
	deriv_loglikelihood <- -deriv_loglikelihood;
	# NOTE(review): as.matrix() silently ignores nrow/ncol; the returned
	# orientation actually follows the shape of log_kappa via expww —
	# confirm callers expect this before "fixing" it to matrix(..., D, 1).
	return(as.matrix(deriv_loglikelihood, nrow=D, ncol=1));
} |
# Regression test for glm/coef/vcov handling of aliased coefficients:
# mpg_c = mpg*(1+am) + 5 is an exact linear combination of the intercept,
# mpg, and am:mpg, so the am1:mpg term is inestimable and must come back NA.
data(mtcars)
mtcar2 <- within(mtcars, {
# Order matters: mpg_c is computed while am is still numeric 0/1,
# then am is converted to a factor for the model.
mpg_c <- mpg * (1+am) + 5
am <- factor(am)
})
fm2 <- glm(disp ~ am * mpg + mpg_c, data = mtcar2)
c2 <- coef(fm2)
V2 <- vcov(fm2)
# jj marks the estimable coefficients; only am1:mpg should be NA.
jj <- !is.na(c2)
# coef/vcov with complete=FALSE must drop exactly the aliased entries,
# and the estimated values must match the reference numbers below.
stopifnot(names(which(!jj)) == "am1:mpg"
, identical(length(c2), 5L), identical(dim(V2), c(5L,5L))
, all.equal(c2[jj], coef(fm2, complete=FALSE))
, all.equal(V2[jj,jj], vcov(fm2, complete=FALSE))
, all.equal(c2[jj], c(`(Intercept)`= 626.0915, am1 = -249.4183,
mpg = -33.74701, mpg_c = 10.97014),
tol = 7e-7)
) |
#' Stack selected data.frame columns into long format
#'
#' Returns a data.frame with a `value` column (selected columns
#' concatenated) and a `variable` factor naming the source column, plus an
#' optional `group` column replicated for each stacked variable.
#'
#' @param data a data.frame with column names.
#' @param select column indices/names to stack; defaults to all columns.
#' @param group optional vector of length nrow(data) of group labels.
#' @return long-format data.frame with columns (group,) value, variable.
melt.long <- function(data, select, group){
  if(missing(select)) select <- seq_len(ncol(data))
  stopifnot(!is.null(colnames(data)))
  vars <- colnames(data)[select]
  # drop = FALSE keeps a one-column selection as a data.frame; without it,
  # a single selected column collapses to a vector, ncol() returns NULL,
  # and do.call(c, <vector>) errors.
  cols <- data[, select, drop = FALSE]
  values <- do.call(c, cols)  # concatenate columns into one vector
  variable <- factor(rep(vars, each = nrow(data)), levels = vars)
  if(!missing(group)){
    stopifnot(length(group) == nrow(data))
    res <- data.frame(group = rep(group, ncol(cols)),
                      value = values,
                      variable = variable)
  }else{
    res <- data.frame(value = values,
                      variable = variable)
  }
  res
}
#' Plot two proportional-triangle layers on a map
#'
#' Draws upward triangles sized by `var1` and downward triangles sized by
#' `var2` at feature centroids, both scaled against the variable with the
#' larger total so the two layers share a common scale, then draws a legend.
#'
#' NOTE(review): the default colours col1/col2 and the polygon border colour
#' were truncated in the extracted source (everything from '#' to end of
#' line was stripped). "#E84923", "#7DC437" and "#FFFFFF" below are
#' reconstructions from the cartography package defaults — TODO confirm
#' against the original file.
#'
#' @param x an sf object (converted to Spatial); alternative to spdf.
#' @param spdf a SpatialPolygonsDataFrame/SpatialPointsDataFrame.
#' @param df attribute data.frame; defaults to spdf@data.
#' @param spdfid,dfid join identifier columns; default to first columns.
#' @param var1,var2 names of the two numeric variables to map.
#' @param k triangle size factor relative to the map area.
#' @param add if FALSE, plot the base map first.
propTrianglesLayer <- function(x, spdf, df, spdfid = NULL, dfid = NULL,
                               var1, col1 = "#E84923",
                               var2, col2 = "#7DC437",
                               k = 0.02,
                               legend.pos = "topright",
                               legend.title.txt = paste(var1, var2, sep = " / "),
                               legend.title.cex = 0.8,
                               legend.var1.txt = var1,
                               legend.var2.txt = var2,
                               legend.values.cex = 0.6,
                               legend.values.rnd = 0,
                               legend.style = "c",
                               legend.frame = FALSE,
                               add = TRUE)
{
  if(!missing(x)){spdf <- methods::as(x, "Spatial")}
  if (missing(df)){df <- spdf@data}
  if (is.null(spdfid)){spdfid <- names(spdf@data)[1]}
  if (is.null(dfid)){dfid <- names(df)[1]}
  # Join the attribute table onto the feature centroids.
  dots <- cbind(spdf@data[, spdfid], as.data.frame(sp::coordinates(spdf)))
  colnames(dots) <- c(spdfid, "x", "y")
  dots <- data.frame(dots, df[match(dots[, spdfid], df[, dfid]), ])
  dots <- dots[, c(spdfid, "x", "y", var1, var2)]
  # Bounding box corners: (x1,y1) = min, (x2,y2) = max.
  x1 <- sp::bbox(spdf)[1]
  y1 <- sp::bbox(spdf)[2]
  x2 <- sp::bbox(spdf)[3]
  y2 <- sp::bbox(spdf)[4]
  # Scale against whichever variable has the larger total, so both triangle
  # families are comparable.
  if (sum(df[, var1], na.rm = TRUE) >= sum(df[, var2], na.rm = TRUE)) {
    var <- var1
  } else {
    var <- var2
  }
  sfdc <- (x2 - x1) * (y2 - y1)        # map area
  sc <- max(abs(dots[, var]), na.rm = TRUE)
  if (add == FALSE) {suppressWarnings(sp::plot(spdf))}
  # Upward triangles for var1, drawn largest-first so small ones stay visible.
  dots$size1 <- sqrt(dots[, var1] * k * sfdc / sc / 2)
  dots$y1 <- dots$y + dots$size1 / 2
  dots <- dots[order(dots[, var1], decreasing = TRUE), ]
  dots$xx1a <- dots$x - dots$size1 / 2
  dots$xx1b <- dots$x
  dots$xx1c <- dots$x + dots$size1 / 2
  dots$yy1a <- dots$y
  dots$yy1b <- dots$y + dots$size1 / 2
  dots$yy1c <- dots$y
  for (i in seq_len(nrow(dots))) {     # seq_len: safe when there are 0 rows
    polygon(c(dots$xx1a[i], dots$xx1b[i], dots$xx1c[i]),
            c(dots$yy1a[i], dots$yy1b[i], dots$yy1c[i]),
            col = col1, border = "#FFFFFF")
  }
  # Downward triangles for var2.
  dots$size2 <- sqrt(dots[, var2] * k * sfdc / sc / 2)
  dots$y1 <- dots$y + dots$size2 / 2
  dots <- dots[order(dots[, var2], decreasing = TRUE), ]
  dots$xx1a <- dots$x - dots$size2 / 2
  dots$xx1b <- dots$x
  dots$xx1c <- dots$x + dots$size2 / 2
  dots$yy1a <- dots$y
  dots$yy1b <- dots$y - dots$size2 / 2
  dots$yy1c <- dots$y
  for (i in seq_len(nrow(dots))) {
    polygon(c(dots$xx1a[i], dots$xx1b[i], dots$xx1c[i]),
            c(dots$yy1a[i], dots$yy1b[i], dots$yy1c[i]),
            col = col2, border = "#FFFFFF")
  }
  # legend.pos == "n" suppresses the legend entirely.
  if (legend.pos != "n") {
    legendPropTriangles(pos = legend.pos, title.txt = legend.title.txt,
                        var.txt = legend.var1.txt,
                        var2.txt = legend.var2.txt,
                        title.cex = legend.title.cex,
                        values.cex = legend.values.cex,
                        var = dots[, var1],
                        var2 = dots[, var2],
                        r = dots$size1,
                        r2 = dots$size2,
                        col = col1,
                        col2 = col2,
                        frame = legend.frame,
                        values.rnd = legend.values.rnd,
                        style = legend.style)
  }
}
#' Predicted cumulative incidence from a competing-risks (crr) fit
#'
#' Evaluates the estimated cumulative incidence function at `time` for a
#' subject with linear predictor `lp`, using the unique failure times
#' (`uftime`) and baseline jumps (`bfitj`) stored in the fit.
#'
#' @param f.crr a crr-style fit with components uftime and bfitj.
#' @param lp linear predictor value for the subject.
#' @param time evaluation time; must lie within range(f.crr$uftime).
#' @return scalar cumulative incidence at `time`.
`pred2.crr` <- function(f.crr, lp, time) {
  if (time > max(f.crr$uftime)) {
    stop("pick a smaller time!")
  }
  if (time < min(f.crr$uftime)) {
    stop("pick a greater time!")
  }
  # Cumulative subdistribution hazard, then CIF = 1 - exp(-H(t)).
  lhat <- cumsum(exp(lp) * f.crr$bfitj)
  ci <- cbind(f.crr$uftime, 1 - exp(-lhat))
  # drop = FALSE keeps the matrix shape when only one event time remains;
  # without it the single-row case collapsed to a vector and the next
  # subscript errored. The 1e-10 fudge absorbs floating-point ties.
  ci <- ci[ci[, 1] <= time + 1e-10, , drop = FALSE]
  # CIF at the last event time <= `time`.
  ci[nrow(ci), -1]
}
# Panel function for pairs(): overlays bivariate kernel-density contours
# (at 1%, 50% and 90% of the density maximum) on the current panel.
# Requires MASS::kde2d to be available in the calling environment.
panelcontour <-
function(x, y, ...)
{
# Save and restore the user coordinate system when the panel finishes.
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
kd <- kde2d(x,y)
kdmax <- max(kd$z)
contour(kd,add=TRUE,drawlabels=FALSE,levels=c(kdmax*0.01,kdmax*0.5,kdmax*0.9),col=c("red","blue","green"),...)
} |
# testthat suite for PopED's model_prediction()/plot_model_prediction().
# The df_* fixtures, design_3, dosing_4 and poped.db come from the sourced
# example scripts below.
context("Model Predictions")
test_that("model_prediction works", {
source("examples_fcn_doc/examples_model_prediction.R")
# Checks on the example data frames: group/ID/covariate counts, presence
# or absence of DV/PRED columns, and dosing record counts.
expect_equal(length(unique(df_2$ID)),32)
expect_null(df_3$DV)
expect_null(df_4$a_i)
expect_equal(length(unique(df_5$Group)),2)
expect_equal(length(unique(df_5$a_i)),2)
expect_equal(length(unique(df_5$ID)),6)
expect_equal(length(unique(df_6$Group)),2)
expect_true(all(is.na(df_6$PRED)))
expect_true(all(c("WT","AGE") %in% names(df_7)))
expect_equal(length(unique(df_8$WT)),2)
expect_equal(length(unique(df_8$AGE)),2)
expect_equal(length(unique(df_9$WT)),2)
expect_equal(length(unique(df_9$AGE)),2)
expect_equal(length(unique(df_9$ID)),6)
expect_equal(length(unique(df_10$WT)),6)
expect_equal(length(unique(df_11$AGE)),6)
expect_equal(length(unique(df_12$AMT)),3)
expect_equal(length(unique(df_13$AMT)),2)
expect_equal(length(unique(df_15$AMT[df_15$ID==1])),3)
# filename= should write predictions to CSV; clean up afterwards.
df_16 <- model_prediction(design=design_3,DV=TRUE,dosing=dosing_4,filename="test.csv")
expect_true("test.csv" %in% list.files())
unlink("test.csv")
# Dosing list of the wrong length for design_3 must raise an error.
dosing_2 <- list(list(AMT=1000,RATE=NA,Time=0.5),list(AMT=3000,RATE=NA,Time=0.5),list(AMT=6000,RATE=NA,Time=0.5))
expect_error(model_prediction(design=design_3,DV=T,dosing=dosing_2))
# One-compartment oral PK parameter-mapping function (single covariate DOSE).
sfg <- function(x,a,bpop,b,bocc){
parameters=c(CL=bpop[1]*exp(b[1]),
V=bpop[2]*exp(b[2]),
KA=bpop[3]*exp(b[3]),
Favail=bpop[4],
DOSE=a[1])
return(parameters)
}
poped.db.2 <- create.poped.database(ff_fun=ff.PK.1.comp.oral.sd.CL,
fg_fun=sfg,
fError_fun=feps.add.prop,
bpop=c(CL=0.15, V=8, KA=1.0, Favail=1),
notfixed_bpop=c(1,1,1,0),
d=c(CL=0.07, V=0.02, KA=0.6),
sigma=c(prop=0.01,add=1),
groupsize=32,
xt=c( 0.5,1,2,6,24,36,72,120),
minxt=0,
maxxt=120,
a=70)
plot_model_prediction(poped.db.2,PI=T,DV=T)
# Prediction intervals should add PI_l/PI_u columns.
df_20 <- model_prediction(poped.db.2,PI=TRUE)
expect_true(all(c("PI_l","PI_u") %in% names(df_20)))
# Same model, but with two covariates (DOSE and TAU).
sfg.3 <- function(x,a,bpop,b,bocc){
parameters=c(CL=bpop[1]*exp(b[1]),
V=bpop[2]*exp(b[2]),
KA=bpop[3]*exp(b[3]),
Favail=bpop[4],
DOSE=a[1],
TAU=a[2])
return(parameters)
}
poped.db.3 <- create.poped.database(ff_fun=ff.PK.1.comp.oral.sd.CL,
fg_fun=sfg.3,
fError_fun=feps.add.prop,
bpop=c(CL=0.15, V=8, KA=1.0, Favail=1),
notfixed_bpop=c(1,1,1,0),
d=c(CL=0.07, V=0.02, KA=0.6),
sigma=c(prop=0.01,add=1),
groupsize=32,
xt=c( 0.5,1,2,6,24,36,72,120),
minxt=0,
maxxt=120,
a=c(DOSE=70,TAU=200))
plot_model_prediction(poped.db.3,PI=T,DV=T)
})
test_that("plot_model_prediction works", {
source("examples_fcn_doc/examples_plot_model_prediction.R")
# Smoke test: plotting the example database should produce printable output.
expect_output(str(plot_model_prediction(poped.db)))
}) |
# Partition the fit of a Tucker3 model over the three modes.
# Computes the percentage of sum-of-squares explained for every level of
# modes A, B and C, and (depending on renormmode) the contribution of each
# component combination. Relies on permnew() and SUM() from the enclosing
# package (ThreeWay-style helpers); Xprep is the matricised data, AS/BT/CU
# the component matrices, K the matricised core.
T3fitpartitioning <-
function(Xprep,n,m,p,AS,BT,CU,K,renormmode,laba,labb,labc){
narg=nargs()
# Default level labels a1..an, b1..bm, c1..cp when not supplied.
if (narg<10){
laba=paste("a",1:n,sep="")
labb=paste("b",1:m,sep="")
labc=paste("c",1:p,sep="")
}
Xprep=as.matrix(Xprep)
r1=ncol(AS)
r2=ncol(BT)
r3=ncol(CU)
# Residuals of the Tucker3 reconstruction; fit% per A-mode level.
Res=Xprep-(AS%*%K%*%kronecker(t(CU),t(BT)))
fitA=100-colSums(t(Res)^2)/colSums(t(Xprep)^2)*100
# Cyclically permute the matricisation to get per-level fit for B, then C.
Res=permnew(Res,n,m,p)
Xprep=permnew(Xprep,n,m,p)
fitB=100-colSums(t(Res)^2)/colSums(t(Xprep)^2)*100
Res=permnew(Res,m,p,n)
Xprep=permnew(Xprep,m,p,n)
fitC=100-colSums(t(Res)^2)/colSums(t(Xprep)^2)*100
# Permute back to the original orientation.
Xprep=permnew(Xprep,p,n,m)
Res=permnew(Res,p,n,m)
BCcontr=NULL
ACcontr=NULL
ABcontr=NULL
# renormmode selects which pair of modes the contributions are reported
# for; only the matching block is computed.
if (renormmode==1){
F=AS%*%K
BCcontr=colSums(F^2)/SUM(Xprep)$ssq*100
strCB=noquote(vector(mode="character",length=r2*r3))
i=1
for (k in 1:r3){
for (j in 1:r2){
strCB[i]=noquote(paste(" B",as.character(j),"xC",as.character(k),sep=""))
i=i+1
}
}
BCcontr=cbind(BCcontr)
rownames(BCcontr)=strCB
}
if (renormmode==2){
F=BT%*%permnew(K,r1,r2,r3)
ACcontr=colSums(F^2)/SUM(Xprep)$ssq*100
strAC=noquote(vector(mode="character",length=r1*r3))
i=1
for (k in 1:r1){
for (j in 1:r3){
strAC[i]=noquote(paste(" C",as.character(j),"xA",as.character(k),sep=""))
i=i+1
}
}
ACcontr=cbind(ACcontr)
rownames(ACcontr)=strAC
}
if (renormmode==3){
F=CU%*%permnew(permnew(K,r1,r2,r3),r2,r3,r1)
ABcontr=colSums(F^2)/SUM(Xprep)$ssq*100
strBA=noquote(vector(mode="character",length=r1*r2))
i=1
for (k in 1:r2){
for (j in 1:r1){
strBA[i]=noquote(paste(" A",as.character(j),"xB",as.character(k),sep=""))
i=i+1
}
}
ABcontr=cbind(ABcontr)
rownames(ABcontr)=strBA
}
# Return per-level fits as labelled one-column matrices.
fitA=cbind(fitA)
fitB=cbind(fitB)
fitC=cbind(fitC)
rownames(fitA)=laba
rownames(fitB)=labb
rownames(fitC)=labc
out=list()
out$fitA=fitA
out$fitB=fitB
out$fitC=fitC
out$ABcontr=ABcontr
out$ACcontr=ACcontr
out$BCcontr=BCcontr
return(out)
} |
# Launch the SubVis Shiny application bundled with the SubVis package.
# Locates the installed "SubVisApp" directory and runs it in normal
# display mode (blocks until the app is closed).
startSubVis <- function() {
  app_dir <- system.file("SubVisApp", package = "SubVis")
  shiny::runApp(app_dir, display.mode = "normal")
}
#' Convert a pplace object to a taxonomy matrix
#'
#' Builds a matrix (one row per query sequence, one column per requested
#' rank) from the multiclass table of a pplace object, optionally keeping
#' only the most likely classification per sequence and translating tax
#' ids to tax names.
#'
#' @param pplace an object of class "pplace" with a $multiclass data.frame
#'   (columns name, tax_id and, for type="best", likelihood).
#' @param taxonomy data.frame with tax_id, tax_name and rank columns.
#' @param rank character vector of rank column names to extract.
#' @param type "all" keeps every classification; "best" keeps the highest
#'   likelihood row per sequence name.
#' @param tax_name if TRUE, replace tax ids by tax names in the output.
#' @param run_id optional run id passed to sub_pplace() to subset first.
#' @return character matrix of taxonomy assignments, rownames = sequence
#'   names; empty cells become "Unclassified".
pplace_to_taxonomy <-
function(pplace,taxonomy,rank=c("phylum","class","order","family","genus","species"),type="all",tax_name=TRUE,run_id=NULL){
  if(!inherits(pplace, "pplace")){
    stop("ERROR: the input is not an object of class pplace")
  }
  if(sum(colnames(taxonomy)%in%rank)==0){
    stop("ERROR: none of the rank provided is available in the taxonomy")
  }
  if(!is.null(run_id)){
    pplace <- sub_pplace(pplace,run_id=run_id)
  }
  out <- pplace$multiclass[,c("name","tax_id")]
  if(type=="best"){
    # Keep the single most likely classification per sequence. The
    # likelihood column lives in multiclass (it was dropped from `out`
    # above, so it must be read from the original table), and row
    # subsetting needs the trailing comma.
    out <- out[order(pplace$multiclass$likelihood, decreasing=TRUE), ]
    out <- out[match(unique(out[,1]),out[,1]),]
  }
  out <- out[order(out[,1]),]
  # Heuristic sanity check: most tax ids should resolve in the taxonomy.
  if(sum(is.na(match(out$tax_id,taxonomy$tax_id)))/length(out$tax_id)>0.5) warning("the taxonomy doesn't seems to match the pplace object")
  out2 <- as.matrix(taxonomy[match(out$tax_id,taxonomy$tax_id),colnames(taxonomy)%in%rank])
  rownames(out2) <- out[,1]
  if(tax_name){
    # Replace every distinct tax id appearing in the matrix by its name.
    out3 <- out2
    tax_id <- unique(as.vector(out3))
    tax_id <- tax_id[tax_id!=""]
    for(i in 1:length(tax_id)){
      if(!is.na(tax_id[i])){
        out2[out3==tax_id[i]] <- taxonomy$tax_name[taxonomy$tax_id==tax_id[i]]
      }
    }
  }
  out2[!is.na(out2) & out2==""] <- "Unclassified"
  return(out2)
}
# Workhorse behind predict() for "mixture" objects (online aggregation of
# expert forecasts). Depending on the arguments it either
#   * sequentially re-fits the model on (newexperts, newY) with the
#     algorithm named in object$model (Ridge, MLpol, OGD, BOA, MLewa,
#     MLprod, EWA, FS, FTRL) when online = TRUE, or
#   * applies the current coefficients to all new instants when
#     online = FALSE.
#
# Args:
#   object     fitted "mixture" object (coefficients, training state,
#              loss settings, model name, series dimension d, ...).
#   newexperts new expert forecasts, coerced to a T x N matrix.
#   newY       new observations (NULL for pure forecasting).
#   awake      optional activation matrix in [0,1], same dim as newexperts.
#   online     update the weights sequentially (requires newY)?
#   type       what to return: "model", "response", "weights" or "all".
#   use_cpp    forward to the underlying algorithms (C++ implementation).
#   quiet      suppress progress output.
#
# Returns: per `type`: the updated model, the predictions, the weight
#   matrix, or a list of all three.
predictReal <- function(object, newexperts = NULL, newY = NULL, awake = NULL,
                        online = TRUE, type = c("model", "response", "weights", "all"),
                        use_cpp = getOption("opera_use_cpp", default = FALSE), quiet = FALSE, ...) {
  type <- match.arg(type)
  # --- Infer T (number of new instants) and N (number of experts) ---
  # NOTE: T is used as an ordinary variable here (it shadows the base
  # alias for TRUE inside this function).
  if (!is.null(newY)) {
    T <- length(newY)
    if (is.null(object$names.experts)) {
      object$names.experts <- colnames(newexperts)
    }
    newexperts <- matrix(newexperts, nrow = T)
    N <- ncol(newexperts)
  } else if (object$coefficients[1] != "Uniform") {
    # no observations but a trained model: dimensions come from the model
    N <- length(object$coefficients)
    newexperts <- matrix(newexperts, ncol = N)
    T <- nrow(newexperts)
  } else {
    # untrained model and no observations: nothing useful can be fitted
    warning("You should provide observations to train non trivial model")
    N <- ncol(newexperts)
    T <- nrow(newexperts)
    if (is.null(newexperts)) {
      result <- switch(type, model = object, response = NULL, weights = NULL,
                       all = list(model = object, response = NULL, weights = NULL))
      return(result)
    }
  }
  # --- Activation matrix: default to all-awake; missing expert forecasts
  # are treated as asleep experts with a 0 forecast ---
  if (!is.null(awake)) {
    awake <- matrix(awake, nrow = T)
    if (!identical(dim(awake), dim(newexperts))) {
      stop("Bad dimensions: awake and newexperts should have same dimensions")
    }
  } else {
    awake <- matrix(1, nrow = T, ncol = N)
  }
  idx.na <- which(is.na(newexperts))
  awake[idx.na] <- 0
  newexperts[idx.na] <- 0
  # --- Compatibility checks between model type and supplied state ---
  if (is.null(object$training) && (object$coefficients[1] != "Uniform") && (object$model ==
    "MLpol")) {
    stop(paste(object$model, "cannot handle non-uniform prior weight vector"))
  }
  # expand the "Uniform" placeholder into an explicit 1/N weight vector
  init = FALSE
  if (object$coefficients[1] == "Uniform") {
    object$coefficients <- rep(1/N, N)
    init = TRUE
  }
  if (length(object$coefficients) != N) {
    stop("Bad number of experts: (length(object$coefficients) != nrow(newexperts))")
  }
  if (!is.null(awake) && !identical(awake, matrix(1, nrow = T, ncol = N)) && (object$model ==
    "Ridge" || object$model == "OGD")) {
    stop(paste("Sleeping or missing values not allowed for", object$model, "model."))
  }
  if (is.null(newexperts)) {
    if (!is.null(newY)) {
      stop("Expert advice should be provided if newY is non null")
    }
    # nothing new to process: return the stored state
    result <- switch(type, model = object, response = object$prediction, weights = object$weights,
                     all = list(model = object, response = object$prediction, weights = object$weights))
    return(result)
  }
  if (!is.null(newexperts) && is.null(newY) && online) {
    stop("newY cannot be null to perform online prediction. Provide newY or set online = FALSE")
  }
  # --- Batch mode: apply the current coefficients to every new instant,
  # renormalising by the total awake mass at each instant ---
  if (!online) {
    w <- matrix(object$coefficients, ncol = 1)
    pond <- c(awake %*% w)
    newpred <- c(((newexperts * awake) %*% w)/pond)
    # keep one weight row per prediction step (d rows per instant)
    newweights <- (t(t(awake) * c(w)) / pond)[seq(1,T,by=object$d),]
  }
  # --- Online mode: dispatch to the algorithm stored in object$model ---
  if (!is.null(newY)) {
    if (!is.null(object$parameters$averaged) && object$parameters$averaged && !is.null(object$training)) {
      # averaged mode stores the raw (non-averaged) weights separately
      object$coefficients <- object$training$next.weights
    }
    if (object$model == "Ridge") {
      # lambda is either calibrated on a grid or fixed by the user
      if (is.null(object$parameters$lambda) || !is.null(object$parameters$grid.lambda)) {
        newobject <- ridgeCalib(y = newY, experts = newexperts, w0 = object$coefficients,
                                gamma = object$parameters$gamma, grid.lambda = object$parameters$grid.lambda,
                                training = object$training, use_cpp = use_cpp, quiet = quiet)
        newobject$parameters$lambda <- c(object$parameters$lambda, newobject$parameters$lambda)
      } else {
        newobject <- ridge(y = newY, experts = newexperts, lambda = object$parameters$lambda,
                           w0 = object$coefficients, training = object$training, use_cpp = use_cpp, quiet = quiet)
      }
      newobject$loss.gradient = FALSE
    }
    if (object$model == "MLpol") {
      newobject <- MLpol(y = newY, experts = newexperts, awake = awake, loss.type = object$loss.type,
                         loss.gradient = object$loss.gradient, training = object$training, use_cpp = use_cpp, quiet = quiet)
      # accumulate the learning-rate history across successive calls
      newobject$parameters <- list(eta = rbind(object$parameters$eta, newobject$parameters$eta))
    }
    if (object$model == "OGD") {
      if (is.null(object$parameters$alpha)) {object$parameters$alpha = 0.5}
      if (is.null(object$parameters$simplex)) {object$parameters$simplex = TRUE}
      newobject <- OGD(y = newY, experts = newexperts, loss.type = object$loss.type,
                       training = object$training, alpha = object$parameters$alpha, simplex = object$parameters$simplex,
                       w0 = object$coefficients, quiet = quiet)
    }
    if ((object$model == "BOA") || (object$model == "MLewa") || (object$model ==
      "MLprod")) {
      # these three share the same calling convention; resolve the
      # function from its name (MLewa does not take use_cpp)
      algo <- eval(parse(text = object$model))
      if (object$model != "MLewa") {
        newobject <- algo(y = newY, experts = newexperts, awake = awake, loss.type = object$loss.type,
                          loss.gradient = object$loss.gradient, w0 = object$coefficients, training = object$training,
                          use_cpp = use_cpp, quiet = quiet)
      } else {
        newobject <- algo(y = newY, experts = newexperts, awake = awake, loss.type = object$loss.type,
                          loss.gradient = object$loss.gradient, w0 = object$coefficients, training = object$training,
                          quiet = quiet)
      }
      newobject$parameters <- list(eta = rbind(object$parameters$eta, newobject$parameters$eta))
    }
    if (object$model == "EWA") {
      # eta is either calibrated on a grid or fixed by the user
      if (is.null(object$parameters$eta) || !is.null(object$parameters$grid.eta)) {
        newobject <- ewaCalib(y = newY, experts = newexperts, awake = awake,
                              loss.type = object$loss.type, loss.gradient = object$loss.gradient,
                              w0 = object$coefficients, gamma = object$parameters$gamma, grid.eta = sort(object$parameters$grid.eta),
                              training = object$training, use_cpp = use_cpp, quiet = quiet)
        newobject$parameters$eta <- c(object$parameters$eta, newobject$parameters$eta)
      } else {
        newobject <- ewa(y = newY, experts = newexperts, eta = object$parameters$eta,
                         awake = awake, loss.type = object$loss.type, loss.gradient = object$loss.gradient,
                         w0 = object$coefficients, training = object$training, use_cpp = use_cpp, quiet = quiet)
      }
    }
    if (object$model == "FS") {
      # fixed-share needs both eta and alpha; calibrate if either is free
      if (is.null(object$parameters$eta) || is.null(object$parameters$alpha) ||
        !is.null(object$parameters$grid.eta) || !is.null(object$parameters$grid.alpha)) {
        if (is.null(object$parameters$grid.alpha)) {
          object$parameters$grid.alpha <- 10^(-4:-1)
        }
        newobject <- fixedshareCalib(y = newY, experts = newexperts, awake = awake,
                                     loss.type = object$loss.type, loss.gradient = object$loss.gradient,
                                     w0 = object$coefficients, gamma = object$parameters$gamma, grid.eta = object$parameters$grid.eta,
                                     grid.alpha = object$parameters$grid.alpha, training = object$training, quiet = quiet)
        newobject$parameters$eta <- c(object$parameters$eta, newobject$parameters$eta)
        newobject$parameters$alpha <- c(object$parameters$alpha, newobject$parameters$alpha)
      } else {
        newobject <- fixedshare(y = newY, experts = newexperts, eta = object$parameters$eta,
                                alpha = object$parameters$alpha, awake = awake, loss.type = object$loss.type,
                                loss.gradient = object$loss.gradient, w0 = object$coefficients,
                                training = object$training, quiet = quiet)
      }
    }
    if (object$model == "FTRL") {
      # "default" FTRL = no user-supplied regularisation/constraints yet
      if (is.null(object$training) && ! any(c("fun_reg", "constr_ineq", "constr_eq") %in% names(object$parameters))) {
        default <- TRUE
      } else {
        default <- FALSE
      }
      if (init) {
        # let FTRL pick its own starting point rather than uniform weights
        object$coefficients = NULL
      }
      newobject <- FTRL("y" = newY, "experts" = newexperts,
                        "eta" = object$parameters$eta,
                        "fun_reg" = object$parameters$fun_reg, "fun_reg_grad" = object$parameters$fun_reg_grad,
                        "constr_eq" = object$parameters$constr_eq, "constr_eq_jac" = object$parameters$constr_eq_jac,
                        "constr_ineq" = object$parameters$constr_ineq, "constr_ineq_jac" = object$parameters$constr_ineq_jac,
                        "max_iter" = object$parameters$max_iter,
                        "obj_tol" = object$parameters$obj_tol,
                        "loss.type" = object$loss.type, "loss.gradient" = object$loss.gradient,
                        "w0" = object$coefficients,
                        "training" = object$training,
                        "default" = default, "quiet" = quiet)
    }
    # --- Bookkeeping: append the new data to the stored history ---
    newobject$Y <- rbind(object$Y, matrix(newY, ncol = object$d))
    newobject$experts <- rbind(object$experts, newexperts)
    newobject$names.experts <- object$names.experts
    if (is.null(newobject$names.experts)) {
      # fall back to column names, then to X1..XN placeholders
      if (!is.null(colnames(newexperts))) {
        newobject$names.experts <- colnames(newexperts)
      } else {
        if (!is.null(names(newexperts))) {
          newobject$names.experts <- colnames(newexperts)
        } else {
          newobject$names.experts <- paste("X",1:N,sep="")
        }
      }
    }
    newobject$awake <- rbind(object$awake, awake)
    colnames(newobject$experts) <- newobject$names.experts
    colnames(newobject$weights) <- newobject$names.experts
    colnames(newobject$awake) <- newobject$names.experts
    if (is.null(object$parameters$averaged)) {
      newobject$parameters$averaged = FALSE
    } else {
      newobject$parameters$averaged = object$parameters$averaged
    }
    # keep one weight row per instant (the series has dimension d)
    newobject$weights = newobject$weights[seq(1,T,by=object$d),]
    if (newobject$parameters$averaged) {
      # averaged (Cesaro) weights: running mean over all instants so far
      if (object$T == 0) {
        newweights.avg <- apply(newobject$weights, 2, cumsum) / (1:(T/object$d))
      } else {
        newweights.avg <- (object$training$sumweights + apply(newobject$weights, 2, cumsum)) / (object$T + 1:(T/object$d))
      }
      newobject$training$sumweights <- (object$T + T/object$d) * newweights.avg[T/object$d,] + newobject$coefficients
      newobject$training$next.weights <- newobject$coefficients
      newobject$coefficients <- newobject$training$sumweights / (object$T + T/object$d + 1)
    }
    if (online) {
      if (newobject$parameters$averaged) {
        newweights <- newweights.avg
        newpred <- rowSums(newweights.avg * newexperts)
      } else {
        newweights <- newobject$weights
        newpred <- newobject$prediction
      }
    }
    newobject$prediction <- rbind(object$prediction, matrix(newpred, ncol = object$d))
    newobject$weights <- rbind(object$weights, newweights)
    rownames(newobject$weights) <- NULL
    newobject$loss <- mean(loss(x = c(newobject$prediction), y = c(newobject$Y), loss.type = newobject$loss.type))
    newobject$T <- object$T + T/object$d
    newobject$d <- object$d
  } else {
    # no new observations: the model itself is unchanged
    newobject <- object
  }
  class(newobject) <- "mixture"
  result <- switch(type, model = newobject, response = matrix(newpred, ncol = object$d), weights = newweights,
                   all = list(model = newobject, response = newpred, weights = newweights))
  return(result)
}
# Stochastic dynamic programming for reservoir water-supply operation.
#
# Derives a release policy minimising expected squared (or loss_exp-power)
# supply deficits by backward recursion over discretised storage, release
# and inflow classes, then simulates the policy over the inflow record.
#
# Args:
#   Q            seasonal inflow time series (frequency 4 or 12).
#   capacity     reservoir capacity (same volume units as Q).
#   target       constant release target.
#   surface_area reservoir surface area (km^2) at capacity.
#   max_depth    maximum depth (m); if missing, a geometric shape is assumed.
#   evap         evaporation: constant, seasonal profile, or full series.
#   S_disc, R_disc, Q_disc  discretisation of storage, release and inflow.
#   loss_exp     exponent of the deficit penalty function.
#   S_initial    initial storage as a fraction of capacity.
#   plot         plot simulated releases, storage and spill?
#   tol          policy-convergence criterion (fraction of unchanged actions).
#   Markov       treat inflows as a first-order Markov chain (vs i.i.d.)?
#   rep_rrv      also compute reliability/resilience/vulnerability metrics?
# Returns: list with the release policy, Bellman function, simulated
#   storage/release/evaporation/level/spill series, optional RRV metrics,
#   the inflow discretisation and the total penalty.
sdp_supply <- function (Q, capacity, target, surface_area, max_depth, evap,
                        S_disc = 1000, R_disc = 10,
                        Q_disc = c(0.0, 0.2375, 0.4750, 0.7125, 0.95, 1.0),
                        loss_exp = 2, S_initial = 1,
                        plot = TRUE, tol = 0.99,
                        Markov = FALSE, rep_rrv = FALSE){
  # --- Input validation and normalisation of Q and evap ---
  frq <- frequency(Q)
  if (is.ts(Q)==FALSE) stop("Q must be seasonal time series object with frequency of 12 or 4")
  if (frq != 12 && frq != 4) stop("Q must have frequency of 4 or 12")
  if (missing(evap)) {
    evap <- ts(rep(0, length(Q)), start = start(Q), frequency = frq)
  }
  if(length(evap) == 1) {
    # scalar evaporation: replicate over the whole record
    evap <- ts(rep(evap, length(Q)), start = start(Q), frequency = frq)
  }
  if (length(evap) != length(Q) && length(evap) != frq){
    stop("Evaporation must be either a time series of length Q, a vector of length frequency(Q), or a single numeric constant")
  }
  # trim partial first/last years so the record is whole seasons only
  if (start(Q)[2] != 1){
    message("NOTE: First incomplete year of time series removed")
    Q <- window(Q, start = c(start(Q)[1] + 1, 1), frequency = frq)
  }
  if(end(Q)[2] != frq){
    message("NOTE: Final incomplete year of time series removed")
    Q <- window(Q, end = c(end(Q)[1] - 1, frq), frequency = frq)
  }
  if (length(evap) == frq){
    # seasonal profile: tile it across all years
    evap <- ts(rep(evap, length(Q) / frq), start = start(Q), frequency = frq)
  } else {
    if(is.ts(evap)==FALSE) stop("Evaporation must be either a time series of length Q or a vector of length frequency(Q) for a seasonal evaporation profile")
    evap <- window(evap, start = start(Q), end = end(Q), frequency = frq)
  }
  if (missing(surface_area)) {
    surface_area <- 0
  }
  # mean evaporation depth per season
  evap_seas <- as.vector(tapply(evap, cycle(evap), FUN = mean))
  # --- Discretise the state/action/inflow spaces ---
  if (Markov == FALSE){
    # i.i.d. inflows: one representative (median) inflow per class & season
    Q_month_mat <- matrix(Q, byrow = TRUE, ncol = frq)
    Q.probs <- diff(Q_disc)
    Q_class_med <- apply(Q_month_mat, 2, quantile, type = 8,
                         probs = Q_disc[-1] - (Q.probs / 2))
    S_states <- seq(from = 0, to = capacity, by = capacity / S_disc)
    R_disc_x <- seq(from = 0, to = target, by = target / R_disc)
    Shell.array <- array(0,dim=c(length(S_states),length(R_disc_x),length(Q.probs)))
    Cost_to_go <- vector("numeric",length=length(S_states))
    Results_mat <- matrix(0,nrow=length(S_states),ncol=frq)
    R_policy <- matrix(0,nrow=length(S_states),ncol=frq)
    Bellman <- R_policy
    R_policy_test <- R_policy
  } else if (Markov == TRUE){
    # Markov inflows: additionally estimate season-to-season transition
    # probabilities between inflow classes
    Q_month_mat <- matrix(Q, byrow = TRUE, ncol = frq)
    n_Qcl <- length(Q_disc) - 1
    Q.probs <- diff(Q_disc)
    Q_class_med <- apply(Q_month_mat, 2, quantile, type = 8,
                         probs = Q_disc[-1] - (Q.probs / 2))
    S_states <- seq(from = 0, to = capacity, by = capacity / S_disc)
    R_disc_x <- seq(from = 0, to = target, by = target / R_disc)
    Shell.array <- array(0, dim = c(length(S_states), length(R_disc_x),
                                    length(Q.probs)))
    # classify every historical inflow into its quantile class
    Q_class.mat <- matrix(nrow=length(Q_month_mat[,1]),ncol=frq)
    for (m in 1:frq){
      Q_disc_x <- gtools::quantcut(Q_month_mat[,m], Q_disc)
      Q_class.mat[,m] <- as.numeric(as.vector(factor(Q_disc_x,
                                                     labels = c(1:n_Qcl))))
    }
    # empirical transition matrix per season (wrapping year-end to year-start)
    Q_trans_probs <- array(0, c(length(Q_disc) - 1, length(Q_disc) - 1, frq))
    for (m in 1 : frq){
      for (cl in 1 : n_Qcl){
        if (m == frq){
          Tr.count <- table(factor(Q_class.mat[which(Q_class.mat[1:(length(Q_month_mat[,1]) - 1),
                                                                 frq] == cl) + 1, 1], 1:n_Qcl))
        }else{
          Tr.count <- table(factor(Q_class.mat[which(Q_class.mat[,m] == cl),
                                               m + 1], 1:n_Qcl))
        }
        Tr.freq <- Tr.count / sum(Tr.count)
        Q_trans_probs[cl,,m] <- Tr.freq
      }}
    Cost_to_go <- matrix(0, nrow = (length(S_states)), ncol = n_Qcl)
    R_policy <- array(0,dim = c(length(S_states), n_Qcl, frq))
    Bellman <- R_policy
    R_policy_test <- R_policy
  }
  # --- Storage-depth-area relationships ---
  # NOTE: `c` is used as a numeric shape coefficient here (it shadows
  # base::c inside the helper closures only where called as a value).
  if (missing(max_depth)){
    # assume a self-similar (square-pyramid-like) reservoir geometry
    c <- sqrt(2) / 3 * (surface_area * 10 ^ 6) ^ (3/2) / (capacity * 10 ^ 6)
    GetLevel <- function(c, V){
      y <- (6 * V / (c ^ 2)) ^ (1 / 3)
      return(y)
    }
    GetArea <- function(c, V){
      Ay <- (((3 * c * V) / (sqrt(2))) ^ (2 / 3))
      return(Ay)
    }
  } else {
    # shape fitted from capacity, max depth and surface area
    c <- 2 * capacity / (max_depth * surface_area)
    GetLevel <- function(c, V){
      y <- max_depth * (V / (capacity * 10 ^ 6)) ^ (c / 2)
      return(y)
    }
    GetArea <- function(c, V){
      Ay <- ((2 * (capacity * 10 ^ 6)) / (c * max_depth * (V / (capacity * 10 ^ 6)) ^ (c / 2))) * ((V / (capacity * 10 ^ 6)) ^ (c / 2)) ^ (2 / c)
      Ay[which(is.nan(Ay) == TRUE)] <- 0
      return(Ay)
    }
  }
  # Evaporated volume for one step, iterating on the mid-step surface area
  # until convergence (max 20 iterations)
  GetEvap <- function(s, q, r, ev){
    e <- GetArea(c, V = s * 10 ^ 6) * ev / 10 ^ 6
    n <- 0
    repeat{
      n <- n + 1
      s_plus_1 <- max(min(s + q - r - e, capacity), 0)
      e_x <- GetArea(c, V = ((s + s_plus_1) / 2) * 10 ^ 6) * ev / 10 ^ 6
      if (abs(e_x - e) < 0.001 || n > 20){
        break
      } else {
        e <- e_x
      }
    }
    return(e)
  }
  S_area_rel <- GetArea(c, V = S_states * 10 ^ 6)
  # --- Backward recursion until the policy stabilises (fraction of
  # unchanged actions exceeds tol) ---
  message(paste0("policy converging... (>", tol,")"))
  if (Markov == FALSE){
    repeat{
      for (t in frq:1){
        # available water = storage + inflow - evaporation, per class
        R.cstr <- sweep(Shell.array, 3, Q_class_med[,t], "+") +
          sweep(Shell.array, 1, S_states, "+") -
          sweep(Shell.array, 1, evap_seas[t] * S_area_rel / 10 ^ 6, "+")
        R.star <- aperm(apply(Shell.array, c(1, 3), "+", R_disc_x), c(2, 1, 3))
        # infeasible releases (exceeding available water) are masked
        R.star[,2:(R_disc + 1),][which(R.star[,2:(R_disc + 1),] > R.cstr[,2 : (R_disc + 1),])] <- NaN
        Deficit.arr <- (R.star - target) / target
        Cost_arr <- ( (abs(Deficit.arr)) ^ loss_exp)
        S.t_plus_1 <- R.cstr - R.star
        S.t_plus_1[which(S.t_plus_1 < 0)] <- 0
        Implied_S_state <- round(1 + (S.t_plus_1 / capacity)
                                 * (length(S_states) - 1))
        Implied_S_state[which(Implied_S_state > length(S_states))] <- length(S_states)
        Cost_to_go.arr <- array(Cost_to_go[Implied_S_state],
                                dim = c(length(S_states), length(R_disc_x) , length(Q.probs)))
        Min_cost_arr <- Cost_arr + Cost_to_go.arr
        # expectation over the (independent) inflow classes
        Min_cost_arr_weighted <- sweep(Min_cost_arr, 3, Q.probs, "*")
        Min_cost_expected <- apply(Min_cost_arr_weighted, c(1, 2), sum)
        Bellman[,t] <- Cost_to_go
        Cost_to_go <- apply(Min_cost_expected, 1, min, na.rm = TRUE)
        Results_mat[,t] <- Cost_to_go
        R_policy[,t] <- apply(Min_cost_expected, 1, which.min)
      }
      message(sum(R_policy == R_policy_test) / (frq * length(S_states)))
      if (sum(R_policy == R_policy_test) / (frq * length(S_states)) > tol){
        break
      }
      R_policy_test <- R_policy
    }
  } else if (Markov == TRUE){
    repeat{
      for (t in frq:1){
        R.cstr <- sweep(Shell.array, 3, Q_class_med[,t], "+") +
          sweep(Shell.array, 1, S_states, "+") -
          sweep(Shell.array, 1, evap_seas[t] * S_area_rel / 10 ^ 6, "+")
        R.star <- aperm(apply(Shell.array, c(1, 3), "+", R_disc_x), c(2, 1, 3))
        R.star[,2:(R_disc + 1),][which(R.star[,2:(R_disc + 1),] > R.cstr[,2 : (R_disc + 1),])] <- NaN
        Deficit.arr <- (R.star - target) / target
        Cost_arr <- ( (abs(Deficit.arr)) ^ loss_exp)
        S.t_plus_1 <- R.cstr - R.star
        S.t_plus_1[which(S.t_plus_1 < 0)] <- 0
        Implied_S_state <- round(1 + (S.t_plus_1 / capacity)
                                 * (length(S_states) - 1))
        Implied_S_state[which(Implied_S_state > length(S_states))] <- length(S_states)
        # expectation conditioned on the current inflow class via the
        # seasonal transition matrix
        Cost_to_go.arr <- array(Cost_to_go,
                                dim = c(length(S_states), n_Qcl, n_Qcl))
        Expectation <- apply(sweep(Cost_to_go.arr, c(2,3),
                                   t(Q_trans_probs[,,t]), "*"), c(1,3), sum)
        Exp.arr <- Shell.array
        for (Qt in 1:n_Qcl){
          Exp.arr[,,Qt] <- matrix(Expectation[,Qt][Implied_S_state[,,Qt]],
                                  ncol = length(R_disc_x))
        }
        R_policy[,,t] <- apply( (Cost_arr + Exp.arr), c(1,3), which.min)
        Cost_to_go <- apply( (Cost_arr + Exp.arr), c(1,3), min, na.rm = TRUE)
        Bellman[,,t] <- Cost_to_go
      }
      message(sum(R_policy == R_policy_test) / (frq * length(S_states) * n_Qcl))
      if (sum(R_policy == R_policy_test) / (frq * length(S_states) * n_Qcl) > tol){
        break
      }
      R_policy_test <- R_policy
    }
  }
  # --- Simulate the derived policy over the historical inflow record ---
  S <- vector("numeric",length(Q) + 1); S[1] <- S_initial * capacity
  R_rec <- vector("numeric",length(Q))
  E <- vector("numeric", length(Q))
  y <- vector("numeric", length(Q))
  Spill <- vector("numeric", length(Q))
  for (yr in 1:nrow(Q_month_mat)) {
    for (month in 1:frq) {
      t_index <- (frq * (yr - 1)) + month
      S_state <- which.min(abs(S_states - S[t_index]))
      Qx <- Q_month_mat[yr,month]
      if (Markov == FALSE){
        R <- R_disc_x[R_policy[S_state,month]]
      } else if (Markov == TRUE){
        # condition on the nearest inflow class of the current inflow
        Q_class <- which.min(abs(as.vector(Q_class_med[,month] - Qx)))
        R <- R_disc_x[R_policy[S_state,Q_class,month]]
      }
      R_rec[t_index] <- R
      E[t_index] <- GetEvap(s = S[t_index], q = Qx, r = R, ev = evap[t_index])
      y[t_index] <- GetLevel(c, S[t_index] * 10 ^ 6)
      # mass balance with spill above capacity and curtailment below empty
      if ( (S[t_index] - R + Qx - E[t_index]) > capacity) {
        S[t_index + 1] <- capacity
        Spill[t_index] <- S[t_index] - R + Qx - capacity - E[t_index]
      }else{
        if ( (S[t_index] - R + Qx - E[t_index]) < 0) {
          S[t_index + 1] <- 0
          R_rec[t_index] <- max(0, S[t_index] + Qx - E[t_index])
        }else{
          S[t_index + 1] <- S[t_index] - R + Qx - E[t_index]
        }
      }
    }
  }
  # --- Package outputs (policy expressed as fraction of target) ---
  R_policy <- (R_policy - 1) / R_disc
  S <- ts(S[1:(length(S) - 1)],start = start(Q),frequency = frq)
  R_rec <- ts(R_rec, start = start(Q), frequency = frq)
  E <- ts(E, start = start(Q), frequency = frequency(Q))
  y <- ts(y, start = start(Q), frequency = frequency(Q))
  Spill <- ts(Spill, start = start(Q), frequency = frq)
  total_penalty <- sum( ( (target - R_rec) / target) ^ loss_exp)
  if(plot) {
    plot(R_rec, ylab = "Controlled release", ylim = c(0, target))
    plot(S, ylab = "Storage", ylim = c(0, capacity))
    plot(Spill, ylab = "Uncontrolled spill")
  }
  if (rep_rrv == TRUE){
    # --- Reliability, resilience and vulnerability metrics ---
    deficit <- ts(round(1 - (R_rec / target),5), start = start(Q), frequency = frequency(Q))
    rel_ann <- sum(aggregate(deficit, FUN = mean) == 0) /
      length(aggregate(deficit, FUN = mean))
    rel_time <- sum(deficit == 0) / length(deficit)
    rel_vol <- sum(R_rec) / (target * length(deficit))
    fail.periods <- which(deficit > 0)
    if (length(fail.periods) == 0) {
      resilience <- NA
      vulnerability <- NA
    } else {
      if (length(fail.periods) == 1) {
        resilience <- 1
        vulnerability <- max(deficit)
      } else {
        # resilience = probability of recovering right after a failure
        resilience <- (sum(diff(which(deficit > 0)) > 1) + 1) / (length(which(deficit > 0)))
        # group consecutive failure periods into events
        fail.refs <- vector("numeric", length = length(fail.periods))
        fail.refs[1] <- 1
        for (j in 2:length(fail.periods)) {
          if (fail.periods[j] > (fail.periods[j - 1] + 1)) {
            fail.refs[j] <- fail.refs[j - 1] + 1
          } else {
            fail.refs[j] <- fail.refs[j - 1]
          }
        }
        n.events <- max(fail.refs)
        event.starts <- by(fail.periods, fail.refs, FUN = min)
        event.ends <- by(fail.periods, fail.refs, FUN = max)
        max.deficits <- vector("numeric", length = n.events)
        for (k in 1:n.events) {
          max.deficits[k] <- max(deficit[event.starts[k]:event.ends[k]])
        }
        # vulnerability = mean of the worst deficit in each failure event
        vulnerability <- mean(max.deficits)
      }
    }
    results <- list(R_policy, Bellman, S, R_rec, E, y, Spill, rel_ann, rel_time,
                    rel_vol, resilience, vulnerability, Q_disc, total_penalty)
    names(results) <- c("release_policy", "Bellman", "storage", "releases",
                        "evap_loss", "water_level", "spill", "annual_reliability",
                        "time_based_reliability", "volumetric_reliability",
                        "resilience", "vulnerability", "flow_disc", "total_penalty")
  } else {
    results <- list(R_policy, Bellman, S, R_rec, E, y, Spill, Q_disc, total_penalty)
    names(results) <- c("release_policy", "Bellman", "storage",
                        "releases", "evap_loss", "water_level",
                        "spill", "flow_disc", "total_penalty")
  }
  return(results)
}
# Basic conversions, infinities, and a large random round-trip through
# cardinal() and back.
test_that("uncardinal", {
  # word -> number, scalar and vectorised
  expect_equal(uncardinal("zero"), 0)
  expect_equal(uncardinal("one"), 1)
  expect_equal(uncardinal(c("one", "two", "three")), c(1, 2, 3))
  # signed infinities
  expect_equal(uncardinal("infinity"), Inf)
  expect_equal(uncardinal("negative infinity"), -Inf)
  # round-trip: cardinal() then uncardinal() must recover the input for
  # small integers and large random magnitudes of either sign
  signs <- sample(c(1, -1), 1e4, replace = TRUE)
  magnitudes <- 10 ^ runif(1e4, 5, 15.95) %/% 1
  x <- c(-1000:1e4, signs * magnitudes)
  expect_equal(uncardinal(as.character(cardinal(x))), x)
})
# uncardinal() must also accept "nombre" objects produced by
# cardinal()/ordinal(), not just plain character input.
test_that("uncardinal with class nombre", {
  expect_equal(uncardinal(cardinal(0)), 0)
  expect_equal(uncardinal(ordinal(25)), 25)
})
# Non-integer cardinals and unrecognised words should warn (the second
# warning truncates the offender list with "and one more").
test_that("uncardinal warning", {
  expect_warning(uncardinal("one and a half"), "integer cardinals")
  expect_warning(uncardinal(letters[1:6]), "and one more")
})
# Attach an educational-attainment dimension to a synthetic
# age/gender/marital-status table.
#
# agm_dat: list whose first element is the age-gender-marital data frame.
# edu_vec: named vector of education counts; names start with "m"/"f" for
#          male/female and embed an age-bracket tag matched by edu_lapply.
# Returns a list of two elements: the expanded data frame (via
# factor_return) and the levels of its age factor.
synth_data_edu <- function(agm_dat, edu_vec) {
  base_df <- agm_dat[[1]]
  # lookup table mapping detailed age groups onto education age brackets
  # (NA for groups below the education tabulation range)
  age_ht <- data.frame(
    age_gen= c("under15", "15_17", "18_24", "25_29", "30_34","35_39", "40_44",
               "45_49", "50_54", "55_59", "60_64", "65_69","70_74", "75_79", "80_84", "85up"),
    edu= c(NA, NA, "18_24", rep("25_34",2), rep("35_44", 2), rep("45_64", 4), rep("65up",5)),
    stringsAsFactors = FALSE)
  # split by gender, then each gender by age group
  by_gender <- split(base_df, base_df$gender)
  by_gender[[1]] <- split(by_gender[[1]], by_gender[[1]]$age)
  by_gender[[2]] <- split(by_gender[[2]], by_gender[[2]]$age)
  # gender-specific education counts, selected by name prefix
  edu_m <- edu_vec[which(substr(names(edu_vec), 1,1) == "m")]
  edu_f <- edu_vec[which(substr(names(edu_vec), 1,1) == "f")]
  edu_levels <- c("lt_hs", "some_hs", "hs_grad", "some_col", "assoc_dec", "ba_deg", "grad_deg")
  # expand each age slice with attainment proportions for its gender
  by_gender[[1]] <- do.call("rbind",
    lapply(by_gender[[1]], edu_lapply, ht= age_ht, edu_v= edu_m, levels= edu_levels))
  by_gender[[2]] <- do.call("rbind",
    lapply(by_gender[[2]], edu_lapply, ht= age_ht, edu_v= edu_f, levels= edu_levels))
  combined <- do.call("rbind", by_gender)
  combined <- factor_return(combined, prob_name= "p")
  list(combined, levels(combined$age))
}
# Expand one age/gender slice of a synthetic table with an
# education-attainment column.
#
# l:      data frame for a single age group (one gender).
# ht:     lookup table mapping age groups to education age brackets.
# edu_v:  named education counts for this gender.
# levels: ordered vector of attainment levels.
edu_lapply <- function(l, ht, edu_v, levels) {
  age_grp <- l$age[1]
  # degenerate slice with no age information: emit a zero-probability row
  if (is.na(age_grp)) {
    return(data.frame(age= "under15", gender= "Male",
                      marital_status= "never_mar", edu_attain= "lt_hs", p= 0))
  }
  # children: less than high school by construction
  if (age_grp == "under15") {
    return(data.frame(age=l$age, gender= l$gender,
                      marital_status= l$marital_status, edu_attain= "some_hs", p= l$p))
  }
  # high-school age: some high school by construction
  if (age_grp == "15_17") {
    return(data.frame(age=l$age, gender= l$gender,
                      marital_status= l$marital_status, edu_attain= "some_hs", p= l$p))
  }
  # adults: spread each row across attainment levels in proportion to the
  # education counts of the matching age bracket
  bracket <- ht[,2][which(age_grp == ht[,1])]
  shares <- edu_v[which(grepl(bracket, names(edu_v)))]
  if (sum(shares) > 0) {
    shares <- (shares / sum(shares))
  }
  st <- data.frame(pct= shares, levels= factor(levels, levels= levels))
  st <- base::split(st, 1:nrow(st))
  copies <- replicate(length(levels), l, simplify = FALSE)
  do.call("rbind", mapply(add_synth_attr_level, dat= copies, prob_name= "p",
                          attr_name= "edu_attain", attr= st,
                          SIMPLIFY = FALSE))
}
# Test suite for popEpi::aggre(): aggregation of split Lexis data into
# person-years and event counts. These tests lean heavily on data.table
# reference semantics (setDT, forceLexisDT, :=), so object identity and
# statement order matter throughout.
context("aggre")
# aggre() must not modify its input data.table by reference.
test_that("aggre leaves original data untouched", {
  x <- sire[1:100,]
  BL <- list(fot= seq(0,20,1/12), age= c(0:100, Inf), per= c(1960:2014))
  x <- lexpand(x, birth = bi_date, entry = dg_date, exit = ex_date,
               status = status %in% 1:2, breaks=BL)
  # shuffle rows and drop keys so any reordering by aggre() would show up
  set.seed(1L)
  x <- x[sample(x = .N, size = .N, replace = FALSE)]
  setkeyv(x, NULL)
  setDT(x)
  forceLexisDT(x, breaks = BL, allScales = c("fot", "per", "age"), key = FALSE)
  xor <- copy(x)
  ag1 <- aggre(x, by = list(gender = factor(sex, 1, "f"), sex, surv.int = fot, per, agegr = age))
  expect_identical(x, xor)
})
# by = NULL collapses everything to a single total row.
test_that("aggre works with by = NULL", {
  sr <- popEpi::sire[dg_date < ex_date,][1:1000,]
  BL <- list(fot= seq(0,20,1), age= c(0:100, Inf), per= c(1960:2014))
  x <- lexpand(sr, birth = bi_date, entry = dg_date, exit = ex_date,
               status = status %in% 1:2, breaks=BL)
  ag1 <- aggre(x, by = NULL)
  # reference values: total pyrs, subjects, and transition counts
  expect_equal(as.numeric(ag1), c(9539.1903286174274, 1000, 373, 627))
})
# aggre() and lexpand(aggre = ...) must agree with a manual data.table
# aggregation of the split data.
test_that("aggre and lexpand produce the same results", {
  sr <- popEpi::sire[dg_date < ex_date,][1:1000,]
  BL <- list(fot= seq(0,20,1/12), age= c(0:100, Inf), per= c(1960:2014))
  x <- lexpand(sr, birth = bi_date, entry = dg_date, exit = ex_date,
               status = status %in% 1:2, breaks=BL)
  if (!is.data.table(x)) setDF2DT(x)
  # the aggregation expression and the resulting key columns
  e <- quote(list(gender = factor(sex, 1, "f"), sex, surv.int = fot, per, agegr = age))
  v <- c("gender", "sex", "sex", "surv.int", "per", "agegr")
  forceLexisDT(x, breaks = BL, allScales = c("fot", "per", "age"))
  x2 <- aggre(x, by = e, verbose = FALSE)
  x3 <- aggre(x, by = e, type = "full", verbose = FALSE)
  x4 <- lexpand(sr, birth = bi_date, entry = dg_date, exit = ex_date,
                status = status %in% 1:2, aggre.type = "non-empty",
                breaks=BL, aggre = list(gender = factor(sex, 1, "f"), sex, surv.int = fot, per, agegr = age))
  x5 <- lexpand(sr, birth = bi_date, entry = dg_date, exit = ex_date,
                status = status %in% 1:2, aggre.type = "cartesian",
                breaks=BL, aggre = list(gender = factor(sex, 1, "f"), sex, surv.int = fot, per, agegr = age))
  # manual reference aggregation: cut the time scales to interval starts
  # and sum person-years / events per cell
  x[, fot := popEpi:::cutLow(fot, BL$fot)]
  x[, age := popEpi:::cutLow(age, BL$age)]
  x[, per := popEpi:::cutLow(per, BL$per)]
  x <- x[, list(pyrs = sum(lex.dur), obs = sum(lex.Xst)), keyby = e]
  x <- x[pyrs > 0 & !is.na(pyrs)]
  if (!is.data.table(x2)) setDF2DT(x2)
  if (!is.data.table(x3)) setDF2DT(x3)
  if (!is.data.table(x4)) setDF2DT(x4)
  if (!is.data.table(x5)) setDF2DT(x5)
  setkeyv(x, v)
  setkeyv(x2, v)
  setkeyv(x3, v)
  setkeyv(x4, v)
  setkeyv(x5, v)
  expect_equal(x2$pyrs, x$pyrs, tolerance = 1e-05)
  expect_equal(x2$from0to1, x$obs, tolerance = 1e-05)
  expect_equal(sum(x2$pyrs), sum(x3$pyrs), tolerance = 1e-05)
  expect_equal(sum(x2$from0to1), sum(x3$from0to1), tolerance = 1e-05)
  expect_equal(sum(x2$pyrs), sum(x4$pyrs), tolerance = 1e-05)
  expect_equal(sum(x2$from0to1), sum(x4$from0to1), tolerance = 1e-05)
  expect_equal(x3$pyrs, x5$pyrs, tolerance = 1e-05)
  expect_equal(x3$from0to0, x5$from0to0, tolerance = 1e-05)
  expect_equal(sum(x3$from0to1), sum(x5$from0to1), tolerance = 1e-05)
  expect_equal(x2$pyrs, x4$pyrs, tolerance = 1e-05)
  expect_equal(x2$from0to0, x4$from0to0, tolerance = 1e-05)
  expect_equal(sum(x2$from0to1), sum(x4$from0to1), tolerance = 1e-05)
})
# All supported forms of the `by` argument (list, character vector, bare
# expression, bare symbol) must yield identical results, both for plain
# data.frame Lexis objects and forced data.table ones.
test_that("aggre()'s by argument works flexibly", {
  library(Epi)
  BL <- list(fot = 0:5, per = c(1995,2015))
  for (cond in c(FALSE, TRUE)) {
    x <- Lexis(data = sire[dg_date < ex_date,][1:500, ], entry = list(fot = 0, age = dg_age, per = get.yrs(dg_date)),
               exit = list(per = get.yrs(ex_date)), exit.status = status,
               entry.status = 0)
    x <- splitMulti(x, breaks = BL)
    setDF(x)
    setattr(x, "class", c("Lexis", "data.frame"))
    x$agegr <- cut(x$dg_age, 2)
    if (cond) {
      # second pass: exercise the data.table code path
      forceLexisDT(x, breaks = BL, allScales = c("fot", "per", "age"))
      alloc.col(x)
    }
    a <- aggre(x, by = list(agegr = cut(dg_age, 2), sex, fot, per = per), type = "unique")
    b <- aggre(x, by = c("agegr", "sex", "fot", "per"), type = "unique")
    expect_equal(a, b)
    a <- aggre(x, by = cut(dg_age, 2), type = "unique")
    # align auto-generated name/metadata before comparing
    setnames(a, "cut", "agegr")
    attr(a, "aggre.meta")$by <- "agegr"
    b <- aggre(x, by = c("agegr"), type = "unique")
    c <- aggre(x, by = list(agegr = cut(dg_age, 2)), type = "unique")
    d<- aggre(x, by = agegr, type = "unique")
    expect_equal(a, b)
    expect_equal(b, c)
    expect_equal(c, d)
  }
})
# aggre(subset = ...) must be equivalent to subsetting the data first.
test_that("subset argument works properly", {
  x <- sire[dg_date < ex_date, ][1:1000,]
  BL <- list(fot= seq(0,20,1/12), age= c(0:100, Inf), per= c(1960:2014))
  x <- lexpand(x, birth = bi_date, entry = dg_date, exit = ex_date,
               status = status %in% 1:2, breaks=BL)
  x2 <- x[x$dg_age <= 55L, ]
  setDT(x)
  setDT(x2)
  forceLexisDT(x, breaks = BL, allScales = c("fot", "per", "age"), key = FALSE)
  forceLexisDT(x2, breaks = BL, allScales = c("fot", "per", "age"), key = FALSE)
  ag <- quote(list(gender = factor(sex, 1, "f"), sex, surv.int = fot, per, agegr = age))
  ag1 <- aggre(x, by = ag, subset = dg_age <= 55L)
  ag2 <- aggre(x2, by = ag)
  ag3 <- aggre(x, by = ag, type = "full", subset = dg_age <= 55L)
  ag4 <- aggre(x2, by = ag, type = "full")
  expect_identical(ag1, ag2)
  expect_identical(ag3, ag4)
})
# at.risk counts: within a stratum, the drop in at.risk from one interval
# to the next must equal the number of subjects leaving (events of any
# kind), and at.risk must equal the number of interval-start entries.
test_that("at.risk column works as intended", {
  popEpi:::skip_on_cran_and_ci()
  x <- sire[dg_date < ex_date, ][1:1000,]
  BL <- list(fot= seq(0,20,1/12), age= c(0:100, Inf), per= c(1960:2014))
  x <- Lexis(data = x,
             entry = list(fot = 0, age = dg_age, per = get.yrs(dg_date)),
             exit = list(per = get.yrs(ex_date)), exit.status = status,
             entry.status = 0)
  x <- splitMulti(x, breaks = BL, drop = TRUE)
  ag <- aggre(x, by = list(sex, fot))
  setkey(ag, sex, fot)
  ag[, ndiff := at.risk - c(at.risk[-1], NA), by = list(sex)]
  ag[!is.na(ndiff), events := from0to0 + from0to1 + from0to2]
  expect_equal(ag$ndiff, ag$events)
  # independent reconstruction from the split data itself
  x[, evented := detectEvents(x, breaks = attr(x, "breaks"), by = "lex.id") != 0L]
  x[, normalEntry := fot %in% BL$fot]
  x[, cutFot := cutLow(fot, BL$fot)]
  byDT <- CJ(sex = 1,
             cutFot = BL$fot[-length(BL$fot)])
  n.start <- x[byDT, .(sum(normalEntry & !duplicated(lex.id)),
                       sum(evented)), by = .EACHI,
               on = names(byDT)]
  n.start[is.na(ag$ndiff), V2 := NA]
  expect_equal(ag$at.risk, n.start$V1)
  expect_equal(ag$ndiff, n.start$V2)
})
# Same at.risk reconstruction with an additional calendar-period scale.
test_that("at.risk column works as intended, Vol. 2", {
  popEpi:::skip_on_cran_and_ci()
  data(sire)
  BL <- list(fot=seq(0, 5, by = 1/12),
             per = c(2008,2013))
  x <- Lexis(data = sire[dg_date < ex_date,],
             entry = list(fot = 0, age = dg_age, per = get.yrs(dg_date)),
             exit = list(per = get.yrs(ex_date)), exit.status = status,
             entry.status = 0)
  x <- splitMulti(x, breaks = BL, drop = TRUE)
  a <- aggre(x, by = list(sex, per, fot))
  setkey(a, sex, per, fot)
  a[, ndiff := at.risk - c(at.risk[-1], NA), by = list(sex, per)]
  a[!is.na(ndiff), events := from0to0 + from0to1 + from0to2]
  x[, normalEntry := fot %in% BL$fot]
  x[, cutPer := cutLow(per, BL$per)]
  x[, cutFot := cutLow(fot, BL$fot)]
  byDT <- CJ(sex = 1, cutPer = BL$per[-length(BL$per)],
             cutFot = BL$fot[-length(BL$fot)])
  n.start <- x[byDT, sum(normalEntry & !duplicated(lex.id)), by = .EACHI,
               on = names(byDT)]
  expect_equal(a$at.risk, n.start$V1)
})
#' Compute a multiplicative bound on the joint impact of several biases.
#'
#' @param biases Object of class "bias" or "multi_bias" describing which
#'   biases (confounding, selection, misclassification) are assumed present.
#' @param RRAUc,RRUcY,RRUsYA1,RRSUsA1,RRUsYA0,RRSUsA0,RRAUscS,RRUscYS,RRAYy,ORYAa,RRYAa,RRAYyS,ORYAaS,RRYAaS,RRAUsS,RRUsYS
#'   Sensitivity parameters; only those required by the chosen biases need
#'   to be supplied (run summary(biases) to see which).
#' @return Single numeric bound: the product of the bounding factors for
#'   confounding, misclassification and the two selection strata.
multi_bound <- function(biases, RRAUc = NULL, RRUcY = NULL, RRUsYA1 = NULL,
                        RRSUsA1 = NULL, RRUsYA0 = NULL, RRSUsA0 = NULL,
                        RRAUscS = NULL, RRUscYS = NULL, RRAYy = NULL, ORYAa = NULL,
                        RRYAa = NULL, RRAYyS = NULL, ORYAaS = NULL, RRYAaS = NULL,
                        RRAUsS = NULL, RRUsYS = NULL) {
  if (!inherits(biases, c("bias", "multi_bias"))) {
    stop('Argument "biases" must be of class "bias" or "multi_bias"')
  }
  # Promote a single bias to a multi_bias container. inherits() is used
  # instead of `class(biases) == "bias"`, which errors when the class
  # attribute has length > 1 (conditions of length > 1 error in R >= 4.2).
  if (!inherits(biases, "multi_bias")) biases <- multi_bias(biases)
  # does any selection bias involve an unmeasured variable U_s?
  SU <- any(unlist(lapply(biases, attr, which = "SU")))
  # collect the sensitivity parameters actually supplied by the caller
  params <- mget(formalArgs(multi_bound)[-1])
  params[vapply(params, is.null, logical(1))] <- NULL
  necc_params <- attr(biases, "parameters")
  if (!all(necc_params$argument %in% names(params))) {
    missing_params <- setdiff(necc_params$argument, names(params))
    err_mess <- paste0(
      "You are missing parameters necessary to calculate a bound,",
      " or they have missing or incorrect names.",
      " You need to supply the following additional arguments: ",
      paste0(paste0(missing_params, collapse = " = , "), " ="),
      ". Run summary(biases) for more information on the arguments."
    )
    stop(err_mess)
  }
  if (length(names(params)) > length(necc_params$argument)) {
    extra_params <- setdiff(names(params), necc_params$argument)
    warning(paste0(
      "You seem to have supplied unnecessary parameters (",
      paste0(extra_params, collapse = ", "), "). ",
      "Check to make sure you have chosen the appropriate biases. ",
      "These are the parameters that are being used: ",
      paste0(necc_params$argument, collapse = ", "), "."
    ))
  }
  # align supplied values with the required-parameter table
  necc_params$vals <- NA
  for (i in seq_len(nrow(necc_params))) {
    necc_params$vals[i] <- unlist(params[necc_params$argument[i]])
  }
  # split the values by which bias they bound
  conf_vals <- necc_params$vals[grep("confounding", necc_params$bias)]
  miscl_vals <- necc_params$vals[grep("misclassification", necc_params$bias)]
  sel1_vals <- necc_params$vals[necc_params$bias == "selection" &
                                  grepl("1", necc_params$latex)]
  sel0_vals <- necc_params$vals[necc_params$bias == "selection" &
                                  grepl("0", necc_params$latex)]
  # with an unmeasured selection variable a single value is thresholded
  # and duplicated; otherwise a lone value is paired with the neutral 1
  if (SU && length(sel1_vals) == 1) sel1_vals <- c(threshold(sel1_vals), threshold(sel1_vals))
  if (SU && length(sel0_vals) == 1) sel0_vals <- c(threshold(sel0_vals), threshold(sel0_vals))
  if (length(sel1_vals) == 1) sel1_vals <- c(sel1_vals, 1)
  if (length(sel0_vals) == 1) sel0_vals <- c(sel0_vals, 1)
  # each bias contributes a bounding factor; absent biases contribute 1
  conf_prod <- if (length(conf_vals) > 1) bf_func(conf_vals[1], conf_vals[2]) else 1
  miscl_prod <- if (length(miscl_vals) > 0) miscl_vals else 1
  sel1_prod <- if (length(sel1_vals) > 1) bf_func(sel1_vals[1], sel1_vals[2]) else 1
  sel0_prod <- if (length(sel0_vals) > 1) bf_func(sel0_vals[1], sel0_vals[2]) else 1
  return(conf_prod * miscl_prod * sel1_prod * sel0_prod)
}
context("Testing score based approach")
# Smoke test: a linear-regression imputation followed by score-based
# combination must run end-to-end without raising an error.
test_that("Runs a simple example without errors", {
  # expect_error(expr, NA) asserts that expr completes with NO error
  expect_error({
    set.seed(1234)
    n <- 100
    x <- rnorm(n)
    y <- x + rnorm(n)
    # make the first half of the outcomes missing
    y[1:50] <- NA
    dat <- data.frame(x, y)
    imputations <- normUniImp(dat, y ~ x, M = 10, pd = FALSE)
    # analysis function: y ~ x regression returning coefficients plus
    # the residual variance
    fit_y_on_x <- function(inputData) {
      mod <- lm(y ~ x, data = inputData)
      list(est = c(mod$coef, sigma(mod)^2))
    }
    # score of the normal linear model evaluated at parm = (b0, b1, s2)
    score_fun <- function(inputData, parm) {
      beta0 <- parm[1]
      beta1 <- parm[2]
      sigmasq <- parm[3]
      res <- inputData$y - beta0 - beta1 * inputData$x
      cbind(res / sigmasq, (res * inputData$x) / sigmasq,
            res^2 / (2 * sigmasq^2) - 1 / (2 * sigmasq))
    }
    scoreBased(imputations, analysisFun = fit_y_on_x, scoreFun = score_fun)
  }, NA)
})
# Print method for "svars" objects.
#
# Prints the estimated structural impact matrix stored in `x$B`.
#
# Args:
#   x:   an object carrying the estimated B matrix in `x$B`.
#   ...: ignored; present for S3 print-method compatibility.
#
# Returns `x` invisibly — the standard print-method contract — instead of
# the (previous) leaked return value of `print(x$B)`.
print.svars <- function(x, ...){
  cat("\nEstimated B Matrix (unique decomposition of the covariance matrix): \n")
  print(x$B)
  invisible(x)
}
# Biplot of a "fairness_pca" object (parity-loss metrics).
#
# `x` is a prcomp-like object with `sdev`, `x` (scores), `rotation`
# (loadings), `pc_1_2` (variance explained by PC1/PC2) and `label`.
# `scale` follows the stats::biplot convention: scores are divided and
# loadings multiplied by lambda^scale.
plot.fairness_pca <- function(x, scale = 0.5, ...) {
  # ggrepel is an optional (Suggests) dependency; fail clearly if absent.
  if (!requireNamespace("ggrepel", quietly = TRUE)) {
    stop("Package \"ggrepel\" needed for this function to work. Please install it.",
      call. = FALSE
    )
  }
  # Biplot scaling factors from the first two singular values.
  lam <- x$sdev[1:2]
  n <- nrow(x$x)
  lam <- lam * sqrt(n)
  if (scale != 0) lam <- lam^scale else lam <- 1
  pca_df <- t(t(x$x[, 1:2]) / lam)         # scaled observation scores
  rotation <- t(t(x$rotation[, 1:2]) * lam) # scaled metric loadings
  pc_1_2 <- x$pc_1_2
  pca_data <- as.data.frame(pca_df)
  pca_data$labels <- x$label
  pca_feature <- as.data.frame(rotation)
  pca_feature$labels <- rownames(rotation)
  lab_x <- paste("PC1: explained", pc_1_2[1] * 100, "% of variance")
  lab_y <- paste("PC2: explained", pc_1_2[2] * 100, "% of variance")
  n <- nrow(rotation)
  # Silence R CMD check notes about NSE column names used in aes().
  PC1 <- PC2 <- NULL
  ggplot() +
    # NOTE(review): the horizontal guide is "white" while the vertical one
    # is "lightgrey" — possibly intentional against theme_drwhy's
    # background, but worth confirming.
    geom_hline(yintercept = 0, color = "white", linetype = "dashed") +
    geom_vline(xintercept = 0, color = "lightgrey", linetype = "dashed") +
    # Red arrows: one loading vector per parity-loss metric.
    geom_segment(
      data = pca_feature,
      aes(
        x = rep(0, n),
        y = rep(0, n),
        xend = PC1,
        yend = PC2
      ),
      color = "red",
      alpha = 0.5,
      arrow = arrow(length = unit(0.2, "cm"))
    ) +
    ggrepel::geom_text_repel(
      data = pca_feature,
      aes(
        x = PC1,
        y = PC2,
        label = labels
      ),
      color = "red", alpha = 0.5, size = 4
    ) +
    ggrepel::geom_text_repel(
      data = pca_data,
      aes(PC1, PC2, label = labels),
      size = 5,
      color = "black"
    ) +
    geom_point(data = pca_data, aes(PC1, PC2)) +
    DALEX::theme_drwhy() +
    theme(legend.position = "none") +
    xlab(lab_x) +
    ylab(lab_y) +
    ggtitle("Fairness PCA plot", subtitle = "created with parity loss metrics")
}
# Tests for as_period(): period conversion on tbl_time objects (FB: one
# symbol, 2013-2016 daily; FANG: four symbols).
context("as_period testing")
data(FB)
test_time <- FB
test_tbl_time <- as_tbl_time(test_time, date)
data(FANG)
test_tbl_time_g <- as_tbl_time(FANG, date) %>%
  group_by(symbol)
test_that("Converting to more granular throws error", {
  expect_error(as_period(test_tbl_time, "hourly"))
})
test_that("Can convert to monthly", {
  test_period <- as_period(test_tbl_time, "monthly")
  expect_equal(nrow(test_period), 48L)
  expect_equal(ncol(test_period), 8L)
  expect_equal(test_period$date[2], as.Date("2013-02-01"))
})
test_that("Can convert to monthly - end", {
  test_period <- as_period(test_tbl_time, "monthly", side = "end")
  expect_equal(nrow(test_period), 48L)
  expect_equal(ncol(test_period), 8L)
  expect_equal(test_period$date[2], as.Date("2013-02-28"))
})
test_that("Can convert to yearly", {
  test_period <- as_period(test_tbl_time, "yearly")
  expect_equal(nrow(test_period), 4L)
  expect_equal(ncol(test_period), 8L)
  expect_equal(test_period$date[2], as.Date("2014-01-02"))
})
test_that("Can convert to yearly - end", {
  test_period <- as_period(test_tbl_time, "yearly", side = "end")
  expect_equal(nrow(test_period), 4L)
  expect_equal(ncol(test_period), 8L)
  expect_equal(test_period$date[2], as.Date("2014-12-31"))
})
test_that("Include endpoints with side = 'start' includes last point", {
  start <- as_period(test_tbl_time, "yearly", include_endpoints = TRUE)
  expect_equal(
    object = start$date[length(start$date)],
    expected = as.Date("2016-12-30"))
})
# BUG FIX: this description previously duplicated the one above
# ("side = 'start' ... last point") although the test exercises
# side = "end" and asserts on the FIRST point.
test_that("Include endpoints with side = 'end' includes first point", {
  end <- as_period(test_tbl_time, "yearly",
                   side = "end", include_endpoints = TRUE)
  expect_equal(
    object = end$date[1],
    expected = as.Date("2013-01-02"))
})
test_that("Error with non tbl_time object", {
  expect_error(as_period(test_time, "yearly"),
               "Object is not of class `tbl_time`.")
})
test_that("Groups are respected", {
  test_period <- as_period(test_tbl_time_g, "yearly")
  expect_equal(nrow(test_period), 16L)
  expect_equal(ncol(test_period), 8L)
})
# Auto-generated regression test: is.array() must return TRUE for a 1-d
# "array" built by attaching .Dim/.Dimnames attributes to a list.
# (The eval(parse(...)) round-trip is the test generator's serialization
# format, not a pattern to imitate.)
expected <- eval(parse(text="TRUE"));
test(id=0, code={
  argv <- eval(parse(text="list(structure(list(a = 1), .Dim = 1L, .Dimnames = list(\"a\")))"));
  do.call(`is.array`, argv);
}, o=expected);
# A minimal namespace: an R6 wrapper around an environment supporting
# definition, lookup and wholesale import of bindings. Note set()/get()
# call substitute() on their argument, so they capture the UNevaluated
# symbol written by the caller, not its value.
namespace <- R6::R6Class(
  "Namespace",
  public = list(
    # Create the backing environment with the given parent scope.
    initialize = function(parent_envir) {
      private$envir <- new.env(parent = parent_envir)
    },
    # Bind `value` under the (unevaluated) name `symbol`; returns the
    # captured symbol.
    set = function(symbol, value) {
      x <- substitute(symbol)
      assign(as.character(x), value, envir = private$envir)
      x
    },
    # Look up the (unevaluated) name `symbol`; get0() yields NULL when the
    # name is unbound instead of erroring.
    get = function(symbol) {
      name <- as.character(substitute(symbol))
      get0(name, envir = private$envir)
    },
    # Expose the backing environment.
    get_envir = function() {
      private$envir
    },
    # Copy every binding from another Namespace into this one
    # (existing bindings of the same name are overwritten).
    import = function(ns) {
      env <- ns$get_envir()
      for (val in ls(env)) {
        assign(val, get0(val, env), envir = self$get_envir())
      }
    }
  ),
  private = list(
    # Backing environment holding this namespace's bindings.
    envir = NULL
  )
)
# Registry of Namespace objects plus the notion of a "current" namespace.
# Namespaces are stored by name in a private environment.
namespace_manager <- R6::R6Class(
  "NamespaceManager",
  public = list(
    # `parent_envir` becomes the parent scope of every namespace created.
    initialize = function(parent_envir) {
      private$envir <- new.env()
      private$parent_envir <- parent_envir
    },
    # Create (or reuse) the namespace named by the string `symbol`, make it
    # current, and return it. New namespaces automatically import
    # "llr.core" — which is therefore assumed to exist already.
    create = function(symbol) {
      if (exists(symbol, envir = private$envir)) {
        private$current_envir <- self$get(symbol)
        return(private$current_envir)
      }
      ns <- namespace$new(private$parent_envir)
      assign(symbol, ns, envir = private$envir)
      ns$import(self$get("llr.core"))
      private$current_envir <- ns
      ns
    },
    # Import the namespace `name` (captured unevaluated) into the current
    # namespace.
    use = function(name) {
      name <- substitute(name)
      ns <- self$get_current_ns()
      ns$import(self$get(as.character(name)))
    },
    # The namespace most recently made current by create().
    get_current_ns = function() {
      private$current_envir
    },
    # Look up a namespace by its string name; NULL if absent.
    get = function(symbol) {
      get0(symbol, envir = private$envir)
    },
    # Resolve `symbol_name` inside namespace `ns_name`; both arguments are
    # captured unevaluated. The expr/!! splice (presumably rlang — confirm
    # it is attached) forwards the unevaluated symbol into ns$get().
    val_by_ns = function(ns_name, symbol_name) {
      ns <- self$get(as.character(substitute(ns_name)))
      stopifnot(!is.null(ns))
      eval(expr(ns$get(!!substitute(symbol_name))))
    }
  ),
  private = list(
    envir = NULL,          # name -> Namespace registry
    parent_envir = NULL,   # parent scope for all created namespaces
    current_envir = NULL   # namespace returned by get_current_ns()
  )
)
# Draw a diamond-shaped, optionally shadowed box centred at `mid` with a
# text label on top. Forwards styling to shadowbox() and textplain().
textdiamond <- function (mid, radx, rady=NULL, lwd=1, shadow.size=0.01,
                         adj=c(0.5, 0.5),lab="", box.col="white", lcol="black",
                         shadow.col="grey", angle=0, ...) {
  # Default vertical radius: correct the horizontal radius for the device
  # aspect ratio, stretched by the number of label lines.
  if (is.null(rady)) {
    plot_inches <- par("pin")
    rady <- radx * plot_inches[1] / plot_inches[2] * length(lab)
  }
  # Box first, then the label over it.
  shadowbox("diamond", mid = mid, radx = radx, rady = rady,
            shadow.size = shadow.size, shadow.col = shadow.col,
            box.col = box.col, lcol = lcol, lwd = lwd, angle = angle)
  textplain(mid, rady, lab, adj, ...)
}
# Exercise BayesMfp with the different model priors ("sparse"/"dependent")
# on small simulated data sets, and check that each stored model's log
# prior agrees with getLogPrior().
library(bfp)
set.seed(19)
# Small toy covariates and a heavy-tailed response.
x1 <- rnorm(n=15)
x2 <- rbinom(n=15, size=20, prob=0.5)
x3 <- rexp(n=15)
y <- rt(n=15, df=2)
test <- BayesMfp(y ~ bfp (x2, max = 4) + uc (x1 + x3), nModels = 100,
                 method="exhaustive")
test <- BayesMfp(y ~ bfp(x2, max=1) + uc (x3), nModels = 100,
                 method="exhaustive",
                 priorSpecs=list(a=4, modelPrior="dependent"))
summary(test)
# Under the dependent prior the model-class probabilities below should sum
# within each class of models (null / bfp-only / with uc term).
logPriors <- as.data.frame(test)$logPrior
sum(exp(logPriors[c(1)]))
sum(exp(logPriors[c(2, 4, 12:18)]))
sum(exp(logPriors[c(3, 5:11)]))
sum(exp(logPriors[3:4])) / sum(exp(logPriors[3:18]))
test <- BayesMfp(y ~ bfp (x2, max = 4) + uc (x1 + x3), nModels = 100,
                 method="exhaustive", priorSpecs=list(a=4, modelPrior="sparse"))
test <- BayesMfp(y ~ bfp (x2, max = 4) + uc (x1 + x3), nModels = 100,
                 method="exhaustive", priorSpecs=list(a=4, modelPrior="dependent"))
# Consistency check: stored log prior must equal the recomputed one.
for(index in seq_along(test))
{
    stopifnot(all.equal(getLogPrior(test[index]),
                        test[[index]]$logP))
}
summary(test)
# ---- second scenario: known true model with FP transforms -------------
beta0 <- 1
alpha1 <- 1
alpha2 <- 3
delta1 <- 1
sigma <- 2
n <- 15
k <- 2L
set.seed (123)
x <- matrix (runif (n * k, 1, 4), nrow = n, ncol = k)
w <- matrix (rbinom (n * 1, size = 1, prob = 0.5), nrow = n, ncol = 1)
# True predictor: x1^2, sqrt(x2) and a binary uc term.
x1tr <- alpha1 * x[,1]^2
x2tr <- alpha2 * (x[,2])^(1/2)
w1tr <- delta1 * w[,1]
predictorTerms <-
    x1tr +
    x2tr +
    w1tr
trueModel <- list (powers = list (x1 = 2, x2 = 0.5),
                   ucTerms = as.integer (1)
                   )
covariateData <- data.frame (x1 = x[,1],
                             x2 = x[,2],
                             w = w)
covariateData$y <- predictorTerms + rnorm (n, 0, sigma)
covariateData
# Exhaustive search under the dependent model prior.
dependent <- BayesMfp (y ~ bfp (x1, max=1) + bfp(x2, max=1),
                       data = covariateData,
                       priorSpecs =
                       list (a = 3.5,
                             modelPrior="dependent"),
                       method = "exhaustive",
                       nModels = 10000)
attr(dependent, "logNormConst")
depSum <- as.data.frame(dependent)
depSum
# Prior probabilities over all enumerated models should sum to 1.
sum(exp(depSum$logPrior))
for(index in seq_along(dependent))
{
    stopifnot(all.equal(getLogPrior(dependent[index]),
                        dependent[[index]]$logP))
}
# Stochastic model search under the sparse prior.
set.seed(93)
dependent2 <- BayesMfp(y ~ bfp (x1, max=1) + bfp(x2, max=1),
                       data = covariateData,
                       priorSpecs =
                       list (a = 3.5,
                             modelPrior="sparse"),
                       method = "sampling",
                       nModels = 10000L,
                       chainlength=1000000L)
depSum2 <- as.data.frame(dependent2)
depSum2
# Input-validation and sanity tests for the lg package's locally Gaussian
# density estimators: dlg_bivariate, dlg_marginal(+wrapper), dlg and clg.
library(lg)
context("Density estimation functions")
# ---- fixtures: valid and deliberately malformed inputs ----------------
test_matrix_2col <- matrix(c(1, 2, 1, 2), ncol = 2)
test_matrix_3col <- matrix(c(1, 2, 1, 2, 1, 2), ncol = 3)
test_df_2col <- data.frame(x1 = c(1, 2), x2 = c(1, 2))
test_df_3col <- data.frame(x1 = c(1, 2), x2 = c(1, 2), x3 = c(1, 2))
test_string <- "teststring"
test_bw_ok <- c(1, 1)
test_bw_length <- c(1, 1, 1)
test_bw_nonnumeric <- c("a", "b")
test_bw_nonvector <- function() {}
test_estmethod1 <- "1par"
test_estmethod2 <- "5par"
test_estmethod_wr <- "4par"   # not an accepted estimation method
test_that("dlg_bivariate gives proper errors", {
  expect_error(dlg_bivariate(test_string,
                             eval_points = test_matrix_2col,
                             bw = test_bw_ok,
                             est_method = test_estmethod1),
               "The data must be a matrix or a data frame")
  expect_error(dlg_bivariate(test_matrix_2col,
                             eval_points = test_string,
                             bw = test_bw_ok,
                             est_method = test_estmethod1),
               "The grid must be a matrix or a data frame")
  expect_error(dlg_bivariate(test_matrix_3col,
                             eval_points = test_matrix_2col,
                             bw = test_bw_ok,
                             est_method = test_estmethod1),
               "The data can only have 2 variables")
  expect_error(dlg_bivariate(test_df_3col,
                             eval_points = test_df_2col,
                             bw = test_bw_ok,
                             est_method = test_estmethod1),
               "The data can only have 2 variables")
  expect_error(dlg_bivariate(test_matrix_2col,
                             eval_points = test_matrix_3col,
                             bw = test_bw_ok,
                             est_method = test_estmethod1),
               "The grid can only have 2 variables")
  expect_error(dlg_bivariate(test_df_2col,
                             eval_points = test_df_3col,
                             bw = test_bw_ok,
                             est_method = test_estmethod1),
               "The grid can only have 2 variables")
  expect_error(dlg_bivariate(test_df_2col,
                             eval_points = test_df_3col,
                             bw = test_bw_ok,
                             est_method = test_estmethod1),
               "The grid can only have 2 variables")
  expect_error(dlg_bivariate(test_df_2col,
                             eval_points = test_df_2col,
                             bw = test_bw_nonvector,
                             est_method = test_estmethod1),
               "bw must be a vector")
  expect_error(dlg_bivariate(test_df_2col,
                             eval_points = test_df_2col,
                             bw = test_bw_length,
                             est_method = test_estmethod1),
               "bw must have length 2")
  expect_error(dlg_bivariate(test_df_2col,
                             eval_points = test_df_2col,
                             bw = test_bw_nonnumeric,
                             est_method = test_estmethod1),
               "bw must be numeric")
  expect_error(dlg_bivariate(test_df_2col,
                             eval_points = test_df_2col,
                             bw = test_bw_ok,
                             est_method = test_estmethod_wr),
               "Estimation method must be either '1par', '5par', '5par_marginals_fixed' or 'trivariate'")
})
# The auto-generated evaluation grid must not depend on the method.
test_that("dlg_bivariate returns the same grid for the two estimation methods", {
  expect_equal(dlg_bivariate(x = test_matrix_2col, grid_size = 15, est_method = "1par")$eval_points,
               dlg_bivariate(x = test_matrix_2col, grid_size = 15, est_method = "5par")$eval_points)
})
# ---- marginal estimator: returned structure ---------------------------
x <- rnorm(100); eval_points <- -5:5; bw <- .5
est <- dlg_marginal(x = x, bw = bw, eval_points = eval_points)
test_that("dlg_marginal returns values", {
  expect_equal(x, est$x)
  expect_equal(eval_points, est$eval_points)
  expect_equal(bw, est$bw)
  # One (mu, sigma) parameter pair per evaluation point.
  expect_equal(dim(est$par_est), c(length(eval_points), 2))
})
data_matrix <- cbind(rnorm(100), rnorm(100))
eval_matrix <- cbind(c(1,2,3), c(2,3,4))
bw_vector<- c(1,2)
result <- dlg_marginal_wrapper(data_matrix, eval_matrix, bw_vector)
test_that("dlg_marginal_wrapper returns values", {
  expect_equal(is.list(result), TRUE)
  expect_equal(length(result), ncol(data_matrix))
})
# ---- joint and conditional estimators: dimension checks ---------------
set.seed(1)
n <- 100
x <- mvtnorm::rmvt(n, df = 10, sigma = diag(3))
grid <- cbind(c(1,2,3), c(1,2,3), c(1,2,3))
lg_object <- lg_main(x)
density_estimate <- dlg(lg_object, grid = grid)
lg_object_wrong <- grid
grid_wrong_dimension <- cbind(grid, c(1,2,3))
test_that("The main density estimator produces correct errors", {
  expect_error(dlg(lg_object, grid_wrong_dimension), "The grid can only have 3 variables")
})
condition <- c(1, 0)
grid <- matrix(seq(-5, 5, length.out = 100), ncol = 1)
grid_wrong <- matrix(seq(-5, 5, length.out = 100), ncol = 2)
condition_wrong <- c(1)
test_that("The conditional density estimator produces correct errors", {
  expect_error(clg(lg_object, grid = grid_wrong, condition = condition), "The grid can only have 1 variables")
  expect_error(clg(lg_object, grid = grid, condition = condition_wrong), "The grid can only have 2 variables")
})
# Factor (ability) scores for the generalized partial credit model.
#
# NOTE(review): this function is NOT self-contained — it reads `object`,
# `p`, `nx`, `res`, `prior`, `B` and `robust.se` from its calling/enclosing
# environment (a common pattern for package-internal dispatchers). Confirm
# it is only ever invoked from the intended fscores() wrapper.
#
# method: "EB" (empirical Bayes / posterior mode via BFGS),
#         "EAP" (posterior mean by Gauss-Hermite quadrature),
#         "MI"  (multiple imputation: redraw item parameters B times).
fscores.gp <-
function (betas, X, method) {
    # Negative log posterior (if `prior`) or log likelihood of latent z
    # for one response pattern y; minimized by optim below.
    logf.z <- function (z, y, betas) {
        log.prs <- crf.GPCM(betas, z, IRT.param = object$IRT.param, log = TRUE)
        log.pxz <- numeric(p)
        for (i in 1:p) {
            log.pxz[i] <- if (!is.na(y[i])) log.prs[[i]][y[i]] else 0
        }
        if (prior)
            - (sum(log.pxz, na.rm = TRUE) + dnorm(z, log = TRUE))
        else
            - sum(log.pxz, na.rm = TRUE)
    }
    # One-pattern score: BFGS mode plus inverse Hessian as variance.
    # NOTE(review): `opt$hes` relies on $-partial-matching of `opt$hessian`.
    fscore <- function (logf.z, y, betas, start) {
        opt <- optim(start, fn = logf.z, method = "BFGS", hessian = TRUE, y = y, betas = betas)
        hc <- c(1/opt$hes)
        list(mu = opt$par, hes = hc)
    }
    # Gauss-Hermite nodes and weights from the fitted object.
    Z <- object$GH$Z
    GHw <- object$GH$GHw
    log.crf <- crf.GPCM(betas, Z, object$IRT.param, log = TRUE)
    # log P(x | z) accumulated over items at each quadrature node.
    log.p.xz <- matrix(0, nrow(X), length(Z))
    for (j in 1:p) {
        log.pr <- log.crf[[j]]
        xj <- X[, j]
        na.ind <- is.na(xj)
        log.pr <- log.pr[xj, , drop = FALSE]
        if (any(na.ind))
            log.pr[na.ind, ] <- 0      # missing responses contribute nothing
        log.p.xz <- log.p.xz + log.pr
    }
    p.xz <- exp(log.p.xz)
    p.x <- c(p.xz %*% GHw)             # marginal P(x)
    p.zx <- p.xz / p.x                 # posterior weights of z given x
    if (method == "EB") {
        # EAP values start the mode search for each response pattern.
        z.st <- c(p.zx %*% (Z * GHw))
        scores.ML <- hes.ML <- numeric(nx)
        for (i in 1:nx) {
            out <- fscore(logf.z = logf.z, y = X[i, ], betas = betas, start = z.st[i])
            scores.ML[i] <- out$mu
            hes.ML[i] <- out$hes
        }
        res$z1 <- scores.ML
        res$se.z1 <- sqrt(hes.ML)
    }
    if (method == "EAP") {
        # Posterior mean and SD by quadrature.
        res$z1 <- c(p.zx %*% (Z * GHw))
        res$se.z1 <- sqrt(c(p.zx %*% (Z * Z * GHw)) - res$z1^2)
    }
    if (method == "MI") {
        constraint <- object$constraint
        p <- length(betas)
        ncatg <- sapply(betas, length)
        # Flatten betas into the parameterization matching vcov(object),
        # dropping discrimination terms fixed by the constraint.
        vec.betas <- if (constraint == "gpcm") {
            unlist(betas, use.names = FALSE)
        } else if (constraint == "1PL") {
            betas[seq(1, p - 1)] <- lapply(betas[seq(1, p - 1)], function (x) x[-length(x)])
            unlist(betas, use.names = FALSE)
        } else {
            betas <- lapply(betas, function (x) x[-length(x)])
            unlist(betas, use.names = FALSE)
        }
        Var.betas <- vcov(object, robust.se = robust.se)
        z.st <- c(p.zx %*% (Z * GHw))
        scores.B <- hes.B <- array(0, dim = c(nx, B))
        # Redraw item parameters B times and re-score every pattern.
        for (b in 1:B) {
            new.betas <- mvrnorm(1, vec.betas, Var.betas)
            new.betas <- betas.gpcm(new.betas, p, ncatg, constraint)
            for (i in 1:nx) {
                out <- fscore(logf.z = logf.z, y = X[i, ], betas = new.betas, start = z.st[i])
                scores.B[i, b] <- out$mu
                hes.B[i, b] <- out$hes
            }
        }
        # Rubin's rules: within- plus between-imputation variance.
        scores.av <- rowMeans(scores.B)
        hes.av <- rowMeans(hes.B)
        SV <- array(0, dim = c(nx, B))
        for (b in 1:B) {
            for (i in 1:nx) {
                sc.dif <- scores.B[i, b] - scores.av[i]
                SV[i, b] <- outer(sc.dif, sc.dif)   # scalar case: squared deviation
            }
        }
        SV <- rowSums(SV) / (B - 1)
        hes.av <- hes.av + (1 + 1/B) * SV
        res$z1 <- scores.av
        res$se.z1 <- sqrt(hes.av)
        attr(res, "zvalues.MI") <- scores.B
        attr(res, "var.zvalues.MI") <- hes.B
    }
    res
}
# Check Kummer's relation for the matrix-argument 3F2 hypergeometric
# function at the identity matrix, for real and complex parameters.
# The left side needs a much larger truncation (m=100) to converge to the
# directly computed right side (m=15).
context("3F2")
test_that("Kummer relation", {
  a <- c(1, 2, 3)
  b <- c(9, 10)
  c <- sum(b)-sum(a)
  p <- 4
  o1 <-
    mvgamma(b[2], p)*mvgamma(c, p)/mvgamma(b[2]-a[3], p)/mvgamma(c+a[3], p) *
    hypergeomPFQ(m=100, c(b[1]-a[1], b[1]-a[2], a[3]), c(b[1], c+a[3]), diag(p))
  o2 <- hypergeomPFQ(m=15, a, b, diag(p))
  expect_equal(o1, o2, tolerance = 1e-3)
  # Same identity with complex parameters and a smaller matrix dimension.
  a <- c(1, 2, 3i)
  b <- c(9i, 10)
  c <- sum(b)-sum(a)
  p <- 3
  o1 <-
    mvgamma(b[2], p)*mvgamma(c, p)/mvgamma(b[2]-a[3], p)/mvgamma(c+a[3], p) *
    hypergeomPFQ(m=100, c(b[1]-a[1], b[1]-a[2], a[3]), c(b[1], c+a[3]), diag(p))
  o2 <- hypergeomPFQ(m=15, a, b, diag(p))
  expect_equal(o1, o2, tolerance = 1e-5)
})
# Deprecated alias for sim_mlc(); kept so old user code keeps working.
# Emits a deprecation warning, then forwards all arguments unchanged.
sim_multilocus_coal <- function(locus_tree,
                                effective_pop_size,
                                generation_time = 1,
                                mutation_rate = 1e-6,
                                num_reps) {
  warning("please use sim_mlc() instead of sim_multilocus_coal()", call. = FALSE)
  sim_mlc(locus_tree = locus_tree,
          effective_pop_size = effective_pop_size,
          generation_time = generation_time,
          mutation_rate = mutation_rate,
          num_reps = num_reps)
}
# Simulate gene trees under the multilocus coalescent on a locus tree.
#
# Args:
#   locus_tree:         a "phylo" object, with duplication nodes labelled
#                       "D<letter>" (see get_loci()).
#   effective_pop_size: strictly positive effective population size.
#   generation_time:    strictly positive generation time (default 1).
#   mutation_rate:      per-generation mutation rate (default 1e-6).
#   num_reps:           number of gene trees to simulate (>= 1).
#
# Returns an object of class "mlc_genetrees": a list with the gene trees
# simulated on the collapsed parent tree and, per duplication subtree,
# the gene trees simulated within that clade.
sim_mlc <- function(locus_tree,
                    effective_pop_size,
                    generation_time = 1,
                    mutation_rate = 1e-6,
                    num_reps) {
  if (effective_pop_size <= 0) {
    stop("'effective_pop_size' must be a strictly positive number")
  }
  if (generation_time <= 0) {
    stop("'generation_time' must be a strictly positive number")
  }
  if (num_reps < 1) {
    # BUG FIX: message previously named 'effective_pop_size' and read "at leat".
    stop("'num_reps' must be at least 1")
  }
  if (!inherits(locus_tree, "phylo")) {
    # inherits() instead of class(x) != "...": robust to multi-class objects.
    # Also fixed the unbalanced quote in the message.
    stop("'locus_tree' must be an object of class 'phylo'")
  }
  ipp <- 1  # individuals sampled per population
  locus_trees_by_dup <- get_loci(locus_tree)
  # No duplications: a single locus, simulate directly on the locus tree.
  if (inherits(locus_trees_by_dup, "phylo")) {
    message("This is a locus tree with only one loci")
    big_tree <- treeducken::sim_msc(locus_tree,
                                    ne = effective_pop_size,
                                    mutation_rate = mutation_rate,
                                    generation_time = generation_time,
                                    num_sampled_individuals = ipp,
                                    rescale = TRUE,
                                    num_genes = num_reps)
    gt_list <- list("parent_tree" = big_tree[[1]]$gene.trees, "child_trees" = NULL)
    class(gt_list) <- "mlc_genetrees"
    return(gt_list)
  }
  # BUG FIX: was list(length = n), which makes a 1-element list named
  # "length"; vector("list", n) preallocates n empty slots.
  mlc_df <- vector("list", length(locus_trees_by_dup))
  little_trees <- vector("list", length(locus_trees_by_dup))
  # Simulate each duplication clade (most recent first), collapsing it in
  # the remaining subtrees after it has been handled.
  for (i in seq_len(length(locus_trees_by_dup) - 1)) {
    if (is.null(locus_trees_by_dup[[i]]$root.edge)) {
      locus_trees_by_dup[[i]]$root.edge <- 0.0
    }
    mlc_df[[i]] <- treeducken::sim_msc(
      locus_trees_by_dup[[i]],
      ne = effective_pop_size,
      mutation_rate = mutation_rate,
      generation_time = generation_time,
      num_sampled_individuals = ipp,
      rescale = TRUE,
      num_genes = num_reps)
    little_trees[[i]] <- mlc_df[[i]][[1]]$gene.trees
    # BUG FIX: collapse the clade just simulated — the old code passed the
    # whole subtree list as `locus_to_collapse`, whose $tip.label is NULL,
    # so nothing was ever collapsed.
    locus_trees_by_dup <- collapse_clade(
      locus_trees_by_dup,
      locus_trees_by_dup[[i]])
  }
  # Finally simulate on the fully collapsed parent tree.
  locus_trees_by_dup[[length(locus_trees_by_dup)]]$root.edge <- locus_tree$root.edge
  big_tree <- treeducken::sim_msc(
    locus_trees_by_dup[[length(locus_trees_by_dup)]],
    ne = effective_pop_size,
    mutation_rate = mutation_rate,
    generation_time = generation_time,
    num_sampled_individuals = ipp,
    num_genes = num_reps)
  gt_list <- list("parent_tree" = big_tree[[1]]$gene.trees, "child_trees" = little_trees)
  class(gt_list) <- "mlc_genetrees"
  gt_list
}
# Split a locus tree into one subtree per duplication node.
#
# Duplication nodes are internal nodes whose label matches "D[A-Z]".
# Returns the input tree unchanged when it has no such labels (a single
# locus); otherwise a list of the duplication subtrees, most recent first.
get_loci <- function(locus_tree) {
  if (!inherits(locus_tree, "phylo")) {
    # inherits() instead of class(x) != "..."; fixed unbalanced quote too.
    stop("'locus_tree' must be an object of class 'phylo'")
  }
  # No duplication labels at all -> single-locus tree.
  if (!(any(grep("D[A-Z]", locus_tree$node.label)))) {
    return(locus_tree)
  }
  loc_trees_subtree_all <- ape::subtrees(locus_tree)
  # Flatten one level so each subtree's "node.label" component is visible,
  # then keep only subtrees rooted at a labelled (duplication) node.
  ul_trees <- unlist(loc_trees_subtree_all, recursive = FALSE)
  node_labels <- ul_trees[which(names(ul_trees) == "node.label")]
  indices_of_dup <- which(node_labels$node.label != "")
  rev(loc_trees_subtree_all[indices_of_dup])
}
# Deprecated alias for collapse_clade(); warns, then forwards unchanged.
collapse_locus_subtree <- function(list_of_subtrees,
                                   locus_to_collapse) {
  warning("please use collapse_clade() instead of collapse_locus_subtree()", call. = FALSE)
  collapse_clade(list_of_subtrees = list_of_subtrees,
                 locus_to_collapse = locus_to_collapse)
}
# Collapse a simulated locus within a list of subtrees: every tip of
# `locus_to_collapse` except its first is pruned from the remaining
# subtrees, so the clade is represented by a single tip afterwards.
#
# Subtrees consisting ONLY of removable tips are left untouched (pruning
# them would destroy the tree); subtrees containing a mix are pruned.
collapse_clade <- function(list_of_subtrees,
                           locus_to_collapse) {
  tip_labels_subtrees <- get_tipnames(list_of_subtrees)
  # All tips of the collapsed locus except the representative first tip.
  tips_to_remove <- locus_to_collapse$tip.label[-1]
  # Per subtree: logical mask of which tips are scheduled for removal.
  removal_mask <- lapply(tip_labels_subtrees,
                         function(x) x %in% tips_to_remove)
  tree_indices_to_keep <- which(unlist(lapply(removal_mask, all)),
                                useNames = FALSE)
  # BUG FIX: useNames = FALSE belongs to which(), not to any() — it was
  # previously forwarded through lapply as an extra argument to any().
  tree_indices_to_drop <- which(unlist(lapply(removal_mask, any)),
                                useNames = FALSE)
  # BUG FIX: remove overlapping indices by VALUE; the old code used them
  # as negative POSITIONS into tree_indices_to_drop.
  tree_indices_to_drop <- setdiff(tree_indices_to_drop, tree_indices_to_keep)
  trees_to_prune <- list_of_subtrees[tree_indices_to_drop]
  pruned_trees <- lapply(trees_to_prune,
                         function(x) ape::drop.tip(x,
                                                   tip = tips_to_remove,
                                                   rooted = TRUE))
  list_of_subtrees[tree_indices_to_drop] <- pruned_trees
  list_of_subtrees
}
# Deprecated alias for get_tipnames(); warns, then forwards unchanged.
get_tip_labels_tree_list <- function(multi_tree) {
  warning("please use get_tipnames() instead of get_tip_labels_tree_list()", call. = FALSE)
  get_tipnames(multi_tree = multi_tree)
}
# Extract the tip-label vectors from a list of trees.
# Flattening one level turns each tree's components into top-level list
# elements; every element named "tip.label" is then collected.
get_tipnames <- function(multi_tree) {
  flat <- unlist(multi_tree, recursive = FALSE)
  flat[which(names(flat) == "tip.label")]
}
# Deprecated alias for get_parent_gts().
retrieve_parent_genetrees <- function(gene_tree_list) {
  # BUG FIX: the warning previously told users to stop calling a function
  # that does not exist ("retrieve_parent_gts()") instead of this one.
  warning("please use get_parent_gts() instead of retrieve_parent_genetrees()", call. = FALSE)
  get_parent_gts(gene_tree_list)
}
# Gene trees simulated on the collapsed parent tree of a sim_mlc() result.
#
# Args:
#   gene_tree_list: an object of class "mlc_genetrees".
# Returns the "parent_tree" component; errors for any other input.
get_parent_gts <- function(gene_tree_list) {
  # inherits() instead of class(x) != "...": robust to multi-class objects.
  if (!inherits(gene_tree_list, "mlc_genetrees"))
    stop("list is not in the correct format, did you not use `sim_mlc`?")
  gene_tree_list$parent_tree
}
# Deprecated alias for get_child_gts().
retrieve_child_genetrees <- function(gene_tree_list) {
  # BUG FIX: the old version had a truncated warning ("instead of ") and
  # never forwarded to get_child_gts(), so callers got no result.
  warning("please use get_child_gts() instead of retrieve_child_genetrees()", call. = FALSE)
  get_child_gts(gene_tree_list)
}
# Gene trees simulated within each duplication clade of a sim_mlc() result.
#
# Args:
#   gene_tree_list: an object of class "mlc_genetrees".
# Returns the "child_trees" component (may be NULL for single-locus runs).
get_child_gts <- function(gene_tree_list) {
  # inherits() instead of class(x) != "...": robust to multi-class objects.
  if (!inherits(gene_tree_list, "mlc_genetrees"))
    stop("list is not in the correct format, did you not use `sim_mlc`?")
  gene_tree_list$child_trees
}
# Trace plot of per-generation MCMC acceptance rates, with dashed guides
# at 0.2 and 0.7; the panel is tinted red when the median rate falls
# outside that band (a tuning warning).
plot_acceptance_rate <-
function(accepted.moves,proposed.moves,param.name=deparse(substitute(accepted.moves))){
  # Title: the text before the first "_" of the deparsed argument name.
  param.name <- strsplit(param.name,split="_")[[1]][1]
  # Restrict to generations where at least one move was proposed.
  x <- seq(1,length(which(proposed.moves!=0)))
  acceptance.rate <- accepted.moves[x]/proposed.moves[x]
  plot(acceptance.rate,
       pch=20,
       col=adjustcolor(1,alpha.f=0.7),
       xlab="MCMC sampled generations",
       ylab="acceptance rate",
       main=paste(param.name,"acceptance rate",sep=" "),
       ylim=c(0,1))
  # Rule-of-thumb acceptance band.
  abline(h=c(0.2,0.7),col="green",lty="dashed",lwd=2)
  if(median(acceptance.rate) > 0.7 || median(acceptance.rate) < 0.2){
    # Translucent red overlay covering the whole panel.
    # NOTE(review): the y padding also scales with length(acceptance.rate)
    # although the y axis is fixed to [0, 1]; the polygon still covers the
    # panel, but the padding factor looks unintended — confirm.
    polygon(x=c(0-0.04*length(acceptance.rate),
                0-0.04*length(acceptance.rate),
                length(acceptance.rate)+0.04*length(acceptance.rate),
                length(acceptance.rate)+0.04*length(acceptance.rate)),
            y=c(0-0.04*length(acceptance.rate),
                1+0.04*length(acceptance.rate),
                1+0.04*length(acceptance.rate),
                0-0.04*length(acceptance.rate)),
            col=adjustcolor("red",alpha.f=0.2))
  }
}
# Stop unless x is a numeric matrix or numeric vector.
# Uses short-circuiting ||, the idiomatic scalar operators inside `if`
# (the old code mixed | into a scalar condition).
assert_numeric_mat_or_vec <- function(x) {
  name <- as.character(substitute(x))
  if(is.null(x) || !is.numeric(x) || !(is.matrix(x) || is.vector(x)))
    stop(paste0('"', name, '"', ' must be a numeric matrix or vector'))
}
# Stop unless x is vector-shaped (not a true matrix) with a length among
# the values given in `...`. The caller's `...` expressions are deparsed
# for the error message. Scalar conditions now use || / && (were | / &).
assert_vec_length <- function(x, ...) {
  name <- as.character(substitute(x))
  lens <- unlist(list(...))
  lnames <- as.character(substitute(list(...)))[-1]
  lnames <- paste(lnames, collapse=' or ')
  if(!(length(x) %in% lens) || (NCOL(x) > 1 && NROW(x) > 1))
    stop(paste0('"', name, '"', ' must be a vector with length ', lnames))
}
# Stop unless x is a logical vector with a length among the values in
# `...`. Scalar conditions now use || / && (were | / &).
assert_logical_vec_length <- function(x, ...) {
  name <- as.character(substitute(x))
  lens <- unlist(list(...))
  lnames <- as.character(substitute(list(...)))[-1]
  lnames <- paste(lnames, collapse=' or ')
  if(!(length(x) %in% lens) || !is.logical(x) || (NCOL(x) > 1 && NROW(x) > 1))
    stop(paste0('"', name, '"', ' must be a logical vector with length ', lnames))
}
# Stop unless x is a character vector with a length among the values in
# `...`. Scalar conditions now use || / && (were | / &).
assert_character_vec_length <- function(x, ...) {
  name <- as.character(substitute(x))
  lens <- unlist(list(...))
  lnames <- as.character(substitute(list(...)))[-1]
  lnames <- paste(lnames, collapse=' or ')
  if(!(length(x) %in% lens) || !is.character(x) || (NCOL(x) > 1 && NROW(x) > 1))
    stop(paste0('"', name, '"', ' must be a character vector with length ', lnames))
}
# Stop unless x is a numeric vector with a length among the values in
# `...`. Scalar conditions now use || / && (were | / &).
assert_numeric_vec_length <- function(x, ...) {
  name <- as.character(substitute(x))
  lens <- unlist(list(...))
  lnames <- as.character(substitute(list(...)))[-1]
  lnames <- paste(lnames, collapse=' or ')
  if(!(length(x) %in% lens) || !is.numeric(x) || (NCOL(x) > 1 && NROW(x) > 1))
    stop(paste0('"', name, '"', ' must be a numeric vector with length ', lnames))
}
# Stop unless every element of x is one of vals. %in% never returns NA,
# so NA elements are rejected. Scalar condition now uses || (was |).
assert_all_in_set <- function(x, vals) {
  name <- as.character(substitute(x))
  vnames <- paste(vals, collapse=", ")
  if(is.null(x) || !all(x %in% vals))
    stop(paste0('all "', name, '" values must be in: ', vnames))
}
# Stop if x is NULL, contains NA, or any value lies outside the OPEN
# interval (min, max). anyNA() is a scalar test, so it is lifted out of
# the elementwise any() where the old code recycled it; scalar
# short-circuiting || replaces |.
assert_all_in_open_interval <- function(x, min, max) {
  name <- as.character(substitute(x))
  if(is.null(x) || anyNA(x) || any(x <= min | x >= max))
    stop(paste0('all "', name, '" values must be greater than ', min, ' and lower than ', max))
}
# Stop if x is NULL, contains NA, or any value lies outside the CLOSED
# interval [min, max]. Same anyNA/|| cleanup as the open-interval variant.
assert_all_in_closed_interval <- function(x, min, max) {
  name <- as.character(substitute(x))
  if(is.null(x) || anyNA(x) || any(x < min | x > max))
    stop(paste0('all "', name, '" values must be between: ', min, ' and ', max))
}
# Stop unless x and y have the same number of rows.
assert_equal_nrow <- function(x, y) {
  if(nrow(x) != nrow(y)) {
    # Deparse argument expressions only when building the error message.
    namex <- as.character(substitute(x))
    namey <- as.character(substitute(y))
    msg <- paste0('"', namex, '" and "', namey,
                  '" must have the same number of rows')
    stop(msg)
  }
}
# Stop unless x and y have the same number of columns.
assert_equal_ncol <- function(x, y) {
  if(ncol(x) != ncol(y)) {
    # Deparse argument expressions only when building the error message.
    namex <- as.character(substitute(x))
    namey <- as.character(substitute(y))
    msg <- paste0('"', namex, '" and "', namey,
                  '" must have the same number of columns')
    stop(msg)
  }
}
# Stop if x is NULL or has more than mlevels distinct non-NA values.
assert_max_number_of_levels <- function(x, mlevels) {
  name <- as.character(substitute(x))
  # NAs do not count as a level.
  n_levels <- length(stats::na.omit(unique(x)))
  if(is.null(x) || n_levels > mlevels)
    stop(paste0('"', name, '"', ' must have no more than ', mlevels, ' unique elements'))
}
# hBayesDM model declaration: 4-armed bandit task with separate reward and
# punishment learning rates plus a lapse parameter (xi).
# Each parameter entry is c(lower bound, plausible value, upper bound).
bandit4arm_2par_lapse <- hBayesDM_model(
  task_name = "bandit4arm",
  model_name = "2par_lapse",
  model_type = "",
  data_columns = c("subjID", "choice", "gain", "loss"),
  parameters = list(
    "Arew" = c(0, 0.1, 1),   # reward learning rate
    "Apun" = c(0, 0.1, 1),   # punishment learning rate
    "xi" = c(0, 0.1, 1)      # lapse (random-choice) probability
  ),
  regressors = NULL,
  postpreds = c("y_pred"),
  preprocess_func = bandit4arm_preprocess_func)
# (roxygen placeholder for the Facet documentation topic)
NULL
# Abstract base class for facetting specifications. Concrete facets
# (grid/wrap/null) override the "Not implemented" methods; the rest are
# shared infrastructure.
Facet <- ggproto("Facet", NULL,
  shrink = FALSE,
  params = list(),
  # Map data to a layout data frame (PANEL/ROW/COL/SCALE_X/SCALE_Y).
  compute_layout = function(data, params) {
    abort("Not implemented")
  },
  # Assign each row of each layer's data to a PANEL.
  map_data = function(data, layout, params) {
    abort("Not implemented")
  },
  # One cloned scale per distinct SCALE_X / SCALE_Y in the layout.
  init_scales = function(layout, x_scale = NULL, y_scale = NULL, params) {
    scales <- list()
    if (!is.null(x_scale)) {
      scales$x <- lapply(seq_len(max(layout$SCALE_X)), function(i) x_scale$clone())
    }
    if (!is.null(y_scale)) {
      scales$y <- lapply(seq_len(max(layout$SCALE_Y)), function(i) y_scale$clone())
    }
    scales
  },
  # Train each panel's scales on the layer data mapped to that panel.
  train_scales = function(x_scales, y_scales, layout, data, params) {
    for (layer_data in data) {
      match_id <- match(layer_data$PANEL, layout$PANEL)
      if (!is.null(x_scales)) {
        x_vars <- intersect(x_scales[[1]]$aesthetics, names(layer_data))
        SCALE_X <- layout$SCALE_X[match_id]
        scale_apply(layer_data, x_vars, "train", SCALE_X, x_scales)
      }
      if (!is.null(y_scales)) {
        y_vars <- intersect(y_scales[[1]]$aesthetics, names(layer_data))
        SCALE_Y <- layout$SCALE_Y[match_id]
        scale_apply(layer_data, y_vars, "train", SCALE_Y, y_scales)
      }
    }
  },
  # Background / foreground decorations per panel; default is empty grobs.
  draw_back = function(data, layout, x_scales, y_scales, theme, params) {
    rep(list(zeroGrob()), length(unique(layout$PANEL)))
  },
  draw_front = function(data, layout, x_scales, y_scales, theme, params) {
    rep(list(zeroGrob()), length(unique(layout$PANEL)))
  },
  draw_panels = function(panels, layout, x_scales, y_scales, ranges, coord, data, theme, params) {
    abort("Not implemented")
  },
  # Attach axis titles on all four sides of the assembled panel gtable.
  draw_labels = function(panels, layout, x_scales, y_scales, ranges, coord, data, theme, labels, params) {
    panel_dim <-  find_panel(panels)
    xlab_height_top <- grobHeight(labels$x[[1]])
    panels <- gtable_add_rows(panels, xlab_height_top, pos = 0)
    panels <- gtable_add_grob(panels, labels$x[[1]], name = "xlab-t",
      l = panel_dim$l, r = panel_dim$r, t = 1, clip = "off")
    xlab_height_bottom <- grobHeight(labels$x[[2]])
    panels <- gtable_add_rows(panels, xlab_height_bottom, pos = -1)
    panels <- gtable_add_grob(panels, labels$x[[2]], name = "xlab-b",
      l = panel_dim$l, r = panel_dim$r, t = -1, clip = "off")
    # Re-locate the panel area after adding rows shifted the indices.
    panel_dim <-  find_panel(panels)
    ylab_width_left  <- grobWidth(labels$y[[1]])
    panels <- gtable_add_cols(panels, ylab_width_left, pos = 0)
    panels <- gtable_add_grob(panels, labels$y[[1]], name = "ylab-l",
      l = 1, b = panel_dim$b, t = panel_dim$t, clip = "off")
    ylab_width_right <- grobWidth(labels$y[[2]])
    panels <- gtable_add_cols(panels, ylab_width_right, pos = -1)
    panels <- gtable_add_grob(panels, labels$y[[2]], name = "ylab-r",
      l = -1, b = panel_dim$b, t = panel_dim$t, clip = "off")
    panels
  },
  # Record all column names seen across layers for missing-facet handling.
  setup_params = function(data, params) {
    params$.possible_columns <- unique(unlist(lapply(data, names)))
    params
  },
  setup_data = function(data, params) {
    data
  },
  finish_data = function(data, layout, x_scales, y_scales, params) {
    data
  },
  # Names of the faceting variables; none for the base class.
  vars = function() {
    character(0)
  }
)
# Quote faceting variables as quosures (tidy-eval entry point).
vars <- function(...) {
  quos(...)
}
# Is x a facet specification object?
is.facet <- function(x) inherits(x, "Facet")
# Sentinel PANEL value for data not assigned to any panel.
NO_PANEL <- -1L
# All unique combinations of the values in df's columns, in the original
# column order. expand.grid varies the FIRST factor fastest, hence the
# double rev() to get the conventional ordering. ulevels() supplies the
# factor-aware unique values.
unique_combs <- function(df) {
  if (length(df) == 0) return()
  unique_values <- lapply(df, ulevels)
  rev(expand.grid(rev(unique_values), stringsAsFactors = FALSE,
    KEEP.OUT.ATTRS = TRUE))
}
# Cartesian product of the rows of two data frames; a NULL or zero-row
# input acts as the identity. Row names are discarded.
df.grid <- function(a, b) {
  if (is.null(a) || nrow(a) == 0) return(b)
  if (is.null(b) || nrow(b) == 0) return(a)
  # Every pairing of a row index from `a` with one from `b`.
  combos <- expand.grid(
    i_a = seq_len(nrow(a)),
    i_b = seq_len(nrow(b))
  )
  crossed <- cbind(
    a[combos$i_a, , drop = FALSE],
    b[combos$i_b, , drop = FALSE]
  )
  unrowname(crossed)
}
# Normalize any user-supplied facet specification (quosures from vars(),
# a string, a formula, or a list thereof) into a list of named quosures.
as_facets_list <- function(x) {
  x <- validate_facets(x)
  # vars(...) input: one auto-named quosure list.
  if (is_quosures(x)) {
    x <- quos_auto_name(x)
    return(list(x))
  }
  # A single string is parsed into an expression first.
  if (is_string(x)) {
    x <- parse_expr(x)
  }
  # Formulas keep their rows ~ cols structure.
  if (is_formula(x)) {
    return(f_as_facets_list(x))
  }
  # Anything else is coerced to a list of expressions ...
  if (!is_bare_list(x)) {
    x <- as_quoted(x)
  }
  # ... and each element converted to quosures.
  if (is.list(x)) {
    x <- lapply(x, as_facets)
  }
  x
}
# Reject objects that cannot be facet specifications, with targeted
# messages for the two common mistakes; return x unchanged otherwise.
validate_facets <- function(x) {
  # aes() output ends up here when users forget vars().
  if (inherits(x, "uneval"))
    abort("Please use `vars()` to supply facet variables")
  # A whole ggplot object usually means %>% was used instead of +.
  if (inherits(x, "ggplot"))
    abort(
      "Please use `vars()` to supply facet variables\nDid you use %>% instead of +?"
    )
  x
}
# Flatten a (possibly nested) list of facet quosures and drop the entries
# that are NULL or missing.
compact_facets <- function(x) {
  x <- flatten_if(x, is_list)
  null_or_missing <- vapply(x, function(x) quo_is_null(x) || quo_is_missing(x), logical(1))
  new_quosures(x[!null_or_missing])
}
# Coerce x into a list of unevaluated expressions:
# NULL -> empty list; character -> parsed expressions (multiple strings
# joined by ";"); formula -> flattened terms; anything else -> list(x).
as_quoted <- function(x) {
  if (is.null(x)) {
    return(list())
  }
  if (is.character(x)) {
    if (length(x) > 1) {
      x <- paste(x, collapse = "; ")
    }
    return(parse_exprs(x))
  }
  if (is_formula(x)) {
    return(simplify(x))
  }
  list(x)
}
# Recursively flatten a formula/expression into a list of facet terms:
# a one-sided `~rhs` unwraps to its RHS; `a + b`, `a * b` and nested `~`
# split into their operands; `a - b` keeps a negation on the right
# operand; anything else is a leaf.
simplify <- function(x) {
  # One-sided formula: strip the ~ and recurse into its body.
  if (length(x) == 2 && is_symbol(x[[1]], "~")) {
    return(simplify(x[[2]]))
  }
  # Atoms and unary calls are leaves.
  if (length(x) < 3) {
    return(list(x))
  }
  op <- x[[1]]; a <- x[[2]]; b <- x[[3]]
  if (is_symbol(op, c("+", "*", "~"))) {
    c(simplify(a), simplify(b))
  } else if (is_symbol(op, "-")) {
    # Keep the minus attached to the right-hand term.
    c(simplify(a), expr(-!!simplify(b)))
  } else {
    list(x)
  }
}
# Split a facetting formula into list(rows, cols) of named quosures.
# A one-sided formula (length 2) has no LHS, so rows come out empty and
# the whole formula supplies the columns.
f_as_facets_list <- function(f) {
  one_sided <- length(f) == 2
  rows <- f_as_facets(if (one_sided) NULL else f[-3])
  cols <- f_as_facets(if (one_sided) f else f[-2])
  list(rows, cols)
}
# Convert a single facet specification into named quosures: formulas go
# through f_as_facets(); existing quosure lists pass through; everything
# else is quoted and wrapped, scoped to the global environment.
as_facets <- function(x) {
  if (is_facets(x)) {
    return(x)
  }
  if (is_formula(x)) {
    f_as_facets(x)
  } else {
    vars <- as_quoted(x)
    as_quosures(vars, globalenv(), named = TRUE)
  }
}
# Convert one side of a facetting formula to named quosures scoped to the
# formula's environment, dropping the `.` placeholder.
# NOTE(review): as.quoted() appears to be the plyr function — confirm it
# is imported; rlang has no function of that name.
f_as_facets <- function(f) {
  if (is.null(f)) {
    return(as_quosures(list()))
  }
  # Fall back to the global environment for environment-less formulas.
  env <- f_env(f) %||% globalenv()
  vars <- as.quoted(f)
  vars <- discard_dots(vars)
  as_quosures(vars, env, named = TRUE)
}
# Drop every element that is the bare symbol `.` (the formula placeholder).
discard_dots <- function(x) {
  is_dot <- vapply(x, identical, logical(1), as.name("."))
  x[!is_dot]
}
# A facet spec is a non-empty list whose elements are all quosures.
is_facets <- function(x) {
  is.list(x) &&
    length(x) > 0 &&
    all(vapply(x, is_quosure, logical(1)))
}
# Evaluate every facet quosure against `data`, dropping the ones that do
# not resolve, and return the results as a data frame.
eval_facets <- function(facets, data, possible_columns = NULL) {
  vars <- compact(lapply(facets, eval_facet, data, possible_columns = possible_columns))
  new_data_frame(tibble::as_tibble(vars))
}
# Evaluate one facet quosure against a layer's data.
#
# A bare column symbol is looked up directly (NULL when absent). Compound
# expressions are evaluated in a data mask where every column that exists
# in SOME layer but not this one is bound to an active binding that
# raises a classed condition — caught below so the layer simply yields
# NULL instead of an "object not found" error.
eval_facet <- function(facet, data, possible_columns = NULL) {
  # Fast path: the facet is a single column name.
  if (quo_is_symbol(facet)) {
    facet <- as.character(quo_get_expr(facet))
    if (facet %in% names(data)) {
      out <- data[[facet]]
    } else {
      out <- NULL
    }
    return(out)
  }
  env <- new_environment(data)
  missing_columns <- setdiff(possible_columns, names(data))
  # Touching a missing column triggers this classed error.
  undefined_error <- function(e) abort("", class = "ggplot2_missing_facet_var")
  bindings <- rep_named(missing_columns, list(undefined_error))
  env_bind_active(env, !!!bindings)
  mask <- new_data_mask(env)
  mask$.data <- as_data_pronoun(mask)
  tryCatch(
    eval_tidy(facet, mask),
    ggplot2_missing_facet_var = function(e) NULL
  )
}
# Single-panel layout used when there is no faceting.
layout_null <- function() {
  spec <- list(PANEL = factor(1), ROW = 1, COL = 1, SCALE_X = 1, SCALE_Y = 1)
  new_data_frame(spec)
}
# Validate that a layout data frame carries the mandatory columns.
check_layout <- function(x) {
  required <- c("PANEL", "SCALE_X", "SCALE_Y")
  if (all(required %in% names(x))) {
    return()
  }
  abort("Facet layout has bad format. It must contain columns 'PANEL', 'SCALE_X', and 'SCALE_Y'")
}
# Tallest grob height in centimetres, as a bare number (value_only = TRUE)
# or as a grid unit object.
max_height <- function(grobs, value_only = FALSE) {
  tallest <- max(unlist(lapply(grobs, height_cm)))
  if (value_only) tallest else unit(tallest, "cm")
}
# Widest grob width in centimetres, as a bare number (value_only = TRUE)
# or as a grid unit object.
max_width <- function(grobs, value_only = FALSE) {
  widest <- max(unlist(lapply(grobs, width_cm)))
  if (value_only) widest else unit(widest, "cm")
}
# Bounding box (t/r/b/l) of the panel area in a gtable, as a one-row data
# frame. .subset2 is a fast, dispatch-free `[[`.
find_panel <- function(table) {
  layout <- table$layout
  panels <- layout[grepl("^panel", layout$name), , drop = FALSE]
  new_data_frame(list(
    t = min(.subset2(panels, "t")),
    r = max(.subset2(panels, "r")),
    b = max(.subset2(panels, "b")),
    l = min(.subset2(panels, "l"))
  ), n = 1)
}
# Unique (l, r) column extents of all "panel" grobs in a gtable layout.
# Consistency fix: use `<-` for top-level assignment like the sibling
# panel_rows() and the rest of the file (was `=`).
panel_cols <- function(table) {
  panels <- table$layout[grepl("^panel", table$layout$name), , drop = FALSE]
  unique(panels[, c('l', 'r')])
}
panel_rows <- function(table) {
  # Unique top/bottom gtable row positions occupied by panels.
  layout <- table$layout
  is_panel <- grepl("^panel", layout$name)
  panels <- layout[is_panel, , drop = FALSE]
  unique(panels[, c("t", "b")])
}
combine_vars <- function(data, env = emptyenv(), vars = NULL, drop = TRUE) {
  # Build the data frame of unique faceting-variable combinations across the
  # plot data and all layer data. `data` is a list of data frames (plot data
  # first, then one per layer); `vars` is a list of facet quosures.
  possible_columns <- unique(unlist(lapply(data, names)))
  if (length(vars) == 0) return(new_data_frame())
  # Evaluate the facet spec against each data frame; a layer may supply only
  # a subset of the faceting variables.
  values <- compact(lapply(data, eval_facets, facets = vars, possible_columns = possible_columns))
  # At least one data frame must provide every faceting variable, otherwise
  # the layout cannot be determined.
  has_all <- unlist(lapply(values, length)) == length(vars)
  if (!any(has_all)) {
    missing <- lapply(values, function(x) setdiff(names(vars), names(x)))
    missing_txt <- vapply(missing, var_list, character(1))
    name <- c("Plot", paste0("Layer ", seq_len(length(data) - 1)))
    abort(glue(
      "At least one layer must contain all faceting variables: {var_list(names(vars))}.\n",
      glue_collapse(glue("* {name} is missing {missing_txt}"), "\n", last = "\n")
    ))
  }
  # Base layout: unique combinations from the complete sources; when
  # drop = FALSE, expand to the full cross product of levels.
  base <- unique(rbind_dfs(values[has_all]))
  if (!drop) {
    base <- unique_combs(base)
  }
  # Fold in partial sources: cross their values with the variables they are
  # missing so every observed partial combination is represented.
  for (value in values[!has_all]) {
    if (empty(value)) next;
    old <- base[setdiff(names(base), names(value))]
    new <- unique(value[intersect(names(base), names(value))])
    if (drop) {
      new <- unique_combs(new)
    }
    base <- unique(rbind(base, df.grid(old, new)))
  }
  if (empty(base)) {
    abort("Faceting variables must have at least one value")
  }
  base
}
render_axes <- function(x = NULL, y = NULL, coord, theme, transpose = FALSE) {
  # Render horizontal (x) and vertical (y) axis grobs via the coord.
  # With transpose = TRUE the result is reorganised from per-axis lists of
  # side pairs into per-side lists of grobs.
  axes <- list()
  if (!is.null(x)) {
    axes$x <- lapply(x, coord$render_axis_h, theme)
  }
  if (!is.null(y)) {
    axes$y <- lapply(y, coord$render_axis_v, theme)
  }
  if (transpose) {
    pick <- function(axis_list, side) lapply(axis_list, `[[`, side)
    axes <- list(
      x = list(
        top = pick(axes$x, "top"),
        bottom = pick(axes$x, "bottom")
      ),
      y = list(
        left = pick(axes$y, "left"),
        right = pick(axes$y, "right")
      )
    )
  }
  axes
}
render_strips <- function(x = NULL, y = NULL, labeller, theme) {
  # Build horizontal (x) and vertical (y) facet strip grobs.
  horizontal <- build_strip(x, labeller, theme, TRUE)
  vertical <- build_strip(y, labeller, theme, FALSE)
  list(x = horizontal, y = vertical)
}
# Project the expected abundance of a focal species one step forward under a
# Lotka-Volterra model with pairwise interaction coefficients and global
# covariate effects on both lambda and the alphas.
#
# lambda:      named scalar, intrinsic growth rate of the focal species.
# alpha_intra: named scalar, intraspecific interaction coefficient.
# alpha_inter: named vector, interspecific interaction coefficients.
# lambda_cov:  vector of covariate effects on lambda (one per covariate).
# alpha_cov:   list of covariate effects on the alphas (one per covariate).
# abundance:   named vector of current abundances for all species.
# covariates:  matrix/data frame of covariate values.
#
# Fixes vs. original: `<-` instead of `=` for assignment, seq_len()/seq_along()
# instead of 1:n, and removal of the dead `expected_abund <- NA_real_` and
# unused `numsp`.
LV_project_alpha_pairwise_lambdacov_global_alphacov_global <- function(lambda,
                                                                       alpha_intra,
                                                                       alpha_inter,
                                                                       lambda_cov,
                                                                       alpha_cov,
                                                                       abundance,
                                                                       covariates){
  spnames <- names(abundance)
  # Align interaction coefficients with the abundance vector's species order.
  alpha <- c(alpha_intra, alpha_inter)
  alpha <- alpha[spnames]
  focal.cov.matrix <- as.matrix(covariates)
  # Covariate modulation of lambda: 1 + sum_z lambda_cov[z] * cov[, z].
  num <- 1
  for (z in seq_len(ncol(focal.cov.matrix))) {
    num <- num + lambda_cov[z] * focal.cov.matrix[, z]
  }
  # Covariate modulation of the alphas.
  cov_term <- 0
  for (v in seq_len(ncol(focal.cov.matrix))) {
    cov_term <- cov_term + alpha_cov[[v]] * focal.cov.matrix[, v]
  }
  # Density-dependent term summed over all interacting species.
  term <- 0
  for (z in seq_along(abundance)) {
    term <- term - abundance[z] * (alpha[z] + cov_term[[z]])
  }
  expected_abund <- (lambda * (num) + term) * abundance[names(lambda)]
  expected_abund
}
# Random intercepts added per rater, per stimulus, and per rater:stimulus
# cell must be constant within their grouping unit across the obs replicates.
test_that("x", {
  data <- expand.grid(
    rater = 1:3,
    stimulus = 1:2,
    obs = 1:4
  )
  datar <- data %>%
    add_ranef("rater", r_i = 1) %>%
    add_ranef("stimulus", s_i = 10) %>%
    add_ranef(c("rater", "stimulus"), rs_i = 100)
  r <- datar$r_i
  s <- datar$s_i
  rs <- datar$rs_i
  # 3 rater effects repeat across the 2*4 stimulus/obs combinations, etc.
  expect_equal(rep(r[1:3], 8), r)
  expect_equal(rep(s[c(1, 4)], each = 3, times = 4), s)
  expect_equal(rep(rs[1:6], 4), rs)
})
# Correlated random effects: per-rater x/y pairs must repeat across stimuli
# and recover (approximately) the requested SDs and correlation at n = 5000.
test_that("x and y", {
  set.seed(1)
  nrater <- 5000
  x_sd <- sample(1:10, 1)
  y_sd <- sample(1:10, 1)
  r_xy <- 0.5
  data <- expand.grid(
    rater = 1:nrater,
    stimulus = 1:2
  )
  datar <- add_ranef(data, "rater", x = x_sd, y = y_sd, .cors = r_xy)
  x <- datar$x[1:nrater]
  y <- datar$y[1:nrater]
  # Same rater -> same effect in both stimulus blocks.
  expect_equal(x, datar$x[(nrater+1):(2*nrater)])
  expect_true(mean(x) %>% abs() < .1)
  expect_true(mean(y) %>% abs() < .1)
  expect_equal(sd(x), x_sd, tol = 0.05)
  expect_equal(sd(y), y_sd, tol = 0.05)
  expect_equal(cor(x, y), r_xy, tol = .05)
})
# add_random(): crossed and nested random factors get sequential labels, and
# nesting in different parents produces different label sequences.
# Fix: stray trailing-dot literal `A = 2.` (same double value as 2) cleaned up.
test_that("add_random", {
  data1 <- add_random(school = 3)
  expect_equal(data1$school, paste0("s", 1:3))
  # Two classes nested in each school -> c1..c6, schools repeated pairwise.
  data2 <- add_random(data1, class = 2, .nested_in = "school")
  expect_equal(data2$class, paste0("c", 1:6))
  expect_equal(data2$school, rep(data1$school, each = 2))
  # Variable counts of students per class.
  n <- c(20, 24, 23, 21, 25, 24)
  data3 <- add_random(data2, student = n, .nested_in = "class")
  expect_equal(nrow(data3), sum(n))
  # Crossed factor multiplies the row count.
  data4 <- add_random(data3, question = 10)
  expect_equal(nrow(data4), sum(n)*10)
  expect_equal(data4$student, rep(data3$student, each = 10))
  data5 <- sim_design(within = 2, n = 3, long = TRUE, plot = FALSE)
  data6 <- add_random(data5, Q = 2)
  expect_equal(data6$Q, rep(c("Q1", "Q2"), 6))
  # Nesting the same factor in different parents yields different orderings.
  data <- add_random(A = 2, B = 2)
  nested_in_A <- add_random(data, C = 2, .nested_in = "A")
  nested_in_B <- add_random(data, C = 2, .nested_in = "B")
  expect_false(all(nested_in_A$C == nested_in_B$C))
})
# add_between(): between-unit factors allocated by subject or by item, with
# optional shuffling and exact or probabilistic level proportions (.prob).
test_that("add_between", {
  base <- add_random(subj = 4, item = 2)
  # Allocation by subject: level constant within each subject's item rows.
  data <- add_between(base, "subj", cond = c("A", "B"))
  cond <- rep(LETTERS[1:2], each = 2, times = 2) %>% factor()
  expect_equal(data$cond, cond)
  # Allocation by item: level alternates across items within subject.
  data <- add_between(base, "item", cond = c("A", "B"))
  cond <- rep(LETTERS[1:2], 4) %>% factor()
  expect_equal(data$cond, cond)
  # Two crossed between-subject factors.
  data <- add_between(base, "subj",
                      cond = c("A", "B"),
                      time = c("morning", "evening"))
  cond <- rep(LETTERS[1:2], each = 4) %>% factor()
  time <- rep(c("morning", "evening"), each = 2, times = 2) %>%
    factor(levels = c("morning", "evening"))
  expect_equal(data$cond, cond)
  expect_equal(data$time, time)
  # .shuffle randomises assignment but keeps level counts balanced.
  set.seed(100)
  base <- add_random(subj = 100, item = 2)
  data <- add_between(base, "subj", time = c("morning", "evening"))
  data_shuffle <- add_between(base, "subj", time = c("morning", "evening"),
                              .shuffle = TRUE)
  time <- rep(c("morning", "evening"), each = 2, times = 50) %>%
    factor(levels = c("morning", "evening"))
  expect_equal(data$time, time)
  expect_false(all(data_shuffle$time == time))
  expect_equal(sum(data_shuffle$time == "morning"), 100)
  # Fractional .prob: proportions hold on average over replications.
  set.seed(100)
  mean_prob <- replicate(100, {
    data_prob <- add_between(base, "subj", time = c("morning", "evening"),
                             .prob = c(.4, .6))
    mean(data_prob$time == "morning")
  }) %>% mean()
  expect_equal(mean_prob, .4, tol = .005)
  # Integer .prob: counts are exact.
  for (n in c(0, 10, 20, 30, 100)) {
    data_prob <- add_between(base, "subj", time = c("morning", "evening"),
                             .prob = c(n, 100-n))
    expect_equal(sum(data_prob$time == "morning"), n*2)
  }
  # Joint .prob over the crossed cells of two factors.
  prob <- c(10, 20, 30, 40)
  data_prob2 <- add_between(base, "subj",
                            cond = c("A", "B"),
                            time = c("morning", "evening"),
                            .prob = prob)
  n <- dplyr::count(data_prob2, cond, time)$n
  expect_equal(n, prob*2)
  # Independent per-factor .prob lists warn but still allocate exact counts.
  expect_warning({data_prob3 <-
    add_between(base, "subj",
                cond = c("A", "B"),
                time = c("morning", "evening"),
                .prob = list(cond = c(10, 90),
                             time = c(90, 10)))})
  cond <- rep(c("A", "B"), c(10*2, 90*2)) %>% factor()
  time <- rep(c("morning", "evening"), c(90*2, 10*2)) %>%
    factor(c("morning", "evening"))
  expect_equal(data_prob3$cond, cond)
  expect_equal(data_prob3$time, time)
  # Fractional per-factor .prob: marginals and joint behave independently.
  set.seed(100)
  means <- replicate(100, {
    data_prob4 <- add_between(base, "subj",
                              cond = c("A", "B"),
                              time = c("morning", "evening"),
                              .prob = list(cond = c(.3, .7),
                                           time = c(.3, .7)))
    list(
      cond = mean(data_prob4$cond == "A"),
      time = mean(data_prob4$time == "morning"),
      joint = mean(data_prob4$cond == "A" &
                     data_prob4$time == "morning")
    )
  })
  cond <- means["cond", ] %>% unlist() %>% mean()
  time <- means["time", ] %>% unlist() %>% mean()
  joint <- means["joint", ] %>% unlist() %>% mean()
  expect_equal(cond, .3, tol = 0.01)
  expect_equal(time, .3, tol = 0.01)
  expect_equal(joint, .3*.3, tol = 0.01)
})
# add_within(): within-unit factors cross every existing row, so the level
# sequence cycles regardless of which unit they are declared within.
test_that("add_within", {
  base <- add_random(subj = 4, item = 2)
  data <- add_within(base, "subj", cond = c("A", "B"))
  cond <- rep(LETTERS[1:2], 4*2) %>% factor()
  expect_equal(data$cond, cond)
  data <- add_within(base, "item", cond = c("A", "B"))
  cond <- rep(LETTERS[1:2], 4*2) %>% factor()
  expect_equal(data$cond, cond)
  # Two crossed within factors: cond varies slower than time.
  data <- add_within(base, "subj",
                     cond = c("A", "B"),
                     time = c("morning", "evening"))
  cond <- rep(LETTERS[1:2], each = 2, times = 8) %>% factor()
  time <- rep(c("morning", "evening"), 16) %>%
    factor(levels = c("morning", "evening"))
  expect_equal(data$cond, cond)
  expect_equal(data$time, time)
})
fra <-
  function(){
    # Interactive rpanel demo: computes the forward rate implied by two spot
    # rates (months t1 < t2, rates r1, r2), with either continuous
    # compounding or simple "loan period" compounding.
    my.draw <- function(panel) {
      r1 <- as.numeric(panel$r1)
      r2 <- as.numeric(panel$r2)
      quote <- paste(panel$t1, "x", panel$t2)
      t1 <- as.numeric(panel$t1)
      t2 <- as.numeric(panel$t2)
      # BUG FIX: the comparison used "continuous" (lower case) while the
      # radiogroup supplies "Continuous", so this branch was unreachable and
      # the loan-period formula was always applied.
      if (panel$frequency == "Continuous") {
        # Continuous compounding: months -> years, then annualise.
        t2 <- t2 / 12
        t1 <- t1 / 12
        f12 <- (r2 * t2 - r1 * t1) / (t2 - t1)
        f12 <- round(12 / (t2 - t1) * (exp(f12 * (t2 - t1) / 12) - 1), 4)
      } else {
        # Loan-period compounding: rates scaled to the forward interval.
        r2 <- r2 * (t2 - t1) / 12
        r1 <- r1 * (t2 - t1) / 12
        t2new <- t2 / (t2 - t1)
        t1new <- t1 / (t2 - t1)
        f12 <- round(12 / (t2 - t1) * (((1 + r2)^t2new) / ((1 + r1)^t1new) - 1), 4)
      }
      # Blank plotting canvas used purely to display the computed rate.
      plot(1:30, 1:30, type = "n", xlab = "", ylab = "",
           axes = FALSE, frame = TRUE)
      text(15, 15, paste(quote, " Fwd Rate = ", f12, sep = ""), cex = 1.2)
      panel
    }
    my.redraw <- function(panel) {
      rp.tkrreplot(panel, my.tkrplot)
      panel
    }
    my.panel <- rp.control(title = "Forward Rate")
    rp.textentry(panel = my.panel, variable = t1, labels = "Months1:", action = my.redraw, initval = 3)
    rp.textentry(panel = my.panel, variable = r1, labels = "Rate1:  ", action = my.redraw, initval = 0.09)
    rp.textentry(panel = my.panel, variable = t2, labels = "Months2:", action = my.redraw, initval = 6)
    rp.textentry(panel = my.panel, variable = r2, labels = "Rate2:  ", action = my.redraw, initval = 0.12)
    rp.radiogroup(panel = my.panel, variable = frequency,
                  vals = c("Continuous", "Loan period"),
                  action = my.redraw, title = "Frequency of spot rates")
    rp.tkrplot(panel = my.panel, pos = "bottom", name = my.tkrplot, plotfun = my.draw)
  }
locate.outliers <- function(resid, pars, cval = 3.5,
  types = c("AO", "LS", "TC"), delta = 0.7)
{
  # Single detection pass: compute t-statistics for each candidate outlier
  # type at every time point and keep those exceeding the threshold `cval`.
  # Robust residual scale: 1.483 * MAD (median absolute deviation about the
  # median), the usual consistency-corrected estimator.
  sigma <- 1.483 * quantile(abs(resid - quantile(resid, probs = 0.5, na.rm = TRUE)),
    probs = 0.5, na.rm = TRUE)
  tmp <- outliers.tstatistics(pars = pars, resid = resid,
    types = types, sigma = sigma, delta = delta)
  # Indices (row = time, col = type) where |t| exceeds the threshold.
  ind <- which(abs(tmp[,,"tstat",drop=FALSE]) > cval, arr.ind = TRUE)
  mo <- data.frame(
    factor(gsub("^(.*)tstats$", "\\1", dimnames(tmp)[[2]][ind[,2]]),
      levels = c("IO", "AO", "LS", "TC", "SLS")), ind[,1],
    tmp[,,"coefhat",drop=FALSE][ind], tmp[,,"tstat",drop=FALSE][ind])
  colnames(mo) <- c("type", "ind", "coefhat", "tstat")
  if (nrow(ind) == 1)
    rownames(mo) <- NULL
  # When several types are flagged at the same time point, keep only the one
  # with the largest |t|.
  ref <- unique(mo[,"ind"][duplicated(mo[,"ind"])])
  for (i in ref)
  {
    ind <- which(mo[,"ind"] == i)
    moind <- mo[ind,]
    mo <- mo[-ind[-which.max(abs(moind[,"tstat"]))],]
  }
  mo
}
locate.outliers.iloop <- function(resid, pars, cval = 3.5,
  types = c("AO", "LS", "TC"), maxit = 4, delta = 0.7,
  logfile = NULL)
{
  # Inner loop of the Chen-Liu procedure: repeatedly locate outliers in the
  # residuals, remove their estimated effects, and search again until no new
  # outliers are found or `maxit` iterations are reached.
  if(!is.ts(resid))
    stop(paste(sQuote("resid"), "must be a", sQuote("ts"), "object"))
  n <- length(resid)
  s <- frequency(resid)
  moall <- data.matrix(numeric(0))
  iter <- 0
  while (iter < maxit)
  {
    mo <- locate.outliers(resid = resid, pars = pars, cval = cval,
      types = types, delta = delta)
    if (!is.null(logfile))
    {
      msg <- paste("\niloop, iteration:", iter, "\n")
      cat(msg, file = logfile, append = TRUE)
      capture.output(mo, file = logfile, append = TRUE)
    }
    cond <- nrow(mo) > 0
    if (cond)
    {
      # Collapse runs of consecutive outliers of the same type to one point.
      rmid <- c(
        find.consecutive.outliers(mo, "IO"),
        find.consecutive.outliers(mo, "AO"),
        find.consecutive.outliers(mo, "LS"),
        find.consecutive.outliers(mo, "TC"),
        find.consecutive.outliers(mo, "SLS"))
      if (length(rmid) > 0)
      {
        mo <- mo[-rmid,]
      }
    }
    # Drop time points already flagged in a previous iteration.
    if (cond && iter > 0)
    {
      id.dups <- na.omit(match(moall[,"ind"], mo[,"ind"]))
      if (length(id.dups) > 0)
      {
        mo <- mo[-id.dups,]
        cond <- nrow(mo) > 0
      }
    }
    if (!cond)
      break
    moall <- rbind(moall, mo)
    # Subtract the fitted outlier effects from the residuals before the next
    # search pass.
    oxreg <- outliers.regressors(pars = pars, mo = mo, n = n, weights = TRUE,
      delta = delta, freq = s)
    resid <- resid - rowSums(oxreg)
    iter <- iter + 1
  }
  if (iter == maxit)
    warning(paste("stopped when", sQuote("maxit.iloop"), "was reached"))
  moall
}
locate.outliers.oloop <- function(y, fit, types = c("AO", "LS", "TC"),
  cval = NULL, maxit.iloop = 4, maxit.oloop = 4, delta = 0.7, logfile = NULL)
{
  # Outer loop of the Chen-Liu procedure: after each inner-loop pass, remove
  # the detected outlier effects from the observed series, refit the ARIMA
  # model, and repeat until no further outliers are found.
  n <- length(y)
  s <- frequency(y)
  # Default critical value grows with the sample size (3 for n <= 50 up to
  # 4 for n >= 450, linear in between).
  if (is.null(cval))
  {
    if (n <= 50) {
      cval <- 3
    } else
    if (n >= 450) {
      cval <- 4
    } else
      cval <- round(3 + 0.0025 * (n - 50), 2)
  }
  tsmethod <- tail(as.character(fit$call[[1]]), 1)
  moall <- data.frame(matrix(nrow = 0, ncol=4,
    dimnames = list(NULL, c("type", "ind", "coefhat", "tstat"))))
  iter <- 0
  # Indices of the initial residuals that may be distorted by differencing
  # (d + D * s leading values).
  if (inherits(fit, "Arima")) {
    tmp <- fit$arma[6] + fit$arma[5] * fit$arma[7]
    id0resid <- if (tmp > 1) seq.int(tmp) else c(1, 2)
  } else
    stop("unexpected type of fitted model")
  while (iter < maxit.oloop)
  {
    pars <- switch(tsmethod,
      "auto.arima" = , "arima" = coefs2poly(fit))
    resid <- residuals(fit)
    # Fill missing residuals with the mean so the t-statistics are defined.
    idna <- which(is.na(resid))
    if (length(idna))
      resid[idna] <- mean(resid, na.rm=TRUE)
    # Zero out extreme start-up residuals, which are artefacts of the
    # initialisation rather than genuine outliers.
    if (any(abs(na.omit(resid[id0resid])) > 3.5 * sd(resid[-id0resid], na.rm = TRUE)))
    {
      resid[id0resid] <- 0
      warning(paste("the first", tail(id0resid, 1), "residuals were set to zero"))
    }
    mo <- locate.outliers.iloop(resid = resid, pars = pars, cval = cval,
      types = types, maxit = maxit.iloop, delta = delta,
      logfile = logfile)
    if (nrow(mo) > 0)
    {
      # Collapse consecutive same-type outliers to a single time point.
      rmid <- c(
        find.consecutive.outliers(mo, "IO"),
        find.consecutive.outliers(mo, "AO"),
        find.consecutive.outliers(mo, "LS"),
        find.consecutive.outliers(mo, "TC"),
        find.consecutive.outliers(mo, "SLS"))
      if (length(rmid) > 0)
      {
        mo <- mo[-rmid,]
      }
    }
    # Discard time points already collected in earlier outer iterations.
    if (nrow(mo) > 0 && iter > 0)
    {
      id.dups <- na.omit(match(moall[,"ind"], mo[,"ind"]))
      if (length(id.dups) > 0)
        mo <- mo[-id.dups,]
    }
    if (!is.null(logfile))
    {
      msg <- paste("\noloop, iteration:", iter, "\n")
      cat(msg, file = logfile, append = TRUE)
      capture.output(mo, file = logfile, append = TRUE)
    }
    if (nrow(mo) == 0)
      break
    moall <- rbind(moall, mo)
    # Remove the estimated outlier effects from the series and refit the
    # model with the same orders (auto.arima) or the same call (arima).
    oeff <- outliers.effects(mo = mo, n = n, weights = TRUE,
      delta = delta, pars = pars, freq = s)
    y <- y - rowSums(oeff)
    switch(tsmethod,
      "auto.arima" = fit <- arima(y, order = fit$arma[c(1,6,2)],
        seasonal = list(order = fit$arma[c(3,7,4)])),
      "arima" = {
        fitcall <- fit$call
        fitcall$x <- y
        fit <- eval(fitcall)
      }
    )
    if (!is.null(logfile))
    {
      msg <- paste("\nmodel chosen and fitted for the adjusted series:\n")
      cat(msg, file = logfile, append = TRUE)
      capture.output(fit, file = logfile, append = TRUE)
    }
    iter <- iter + 1
  }
  if (iter == maxit.oloop)
    warning(sprintf("stopped when \'maxit.oloop = %d\' was reached", maxit.oloop))
  if (any(duplicated(moall[,"ind"])))
  {
    stop("unexpected duplicates since they are handled within the loop above")
  }
  list(fit = list(coefs = coef(fit), pars = pars,
    resid = resid, n = n), outliers = moall, iter = iter)
}
# NOTE(review): bare list literal returned as this file's value; presumably
# consumed by tooling that reads a declaration of expected fields — confirm.
list(warnings = "string")
# Demo script: build lagged training data from the seatbelts dataset and plot
# two validation-window layouts (fixed length vs. explicit start/stop).
data("data_seatbelts", package = "forecastML")
horizons <- c(1, 6, 12)
lookback <- 1:15
data_train <- create_lagged_df(data_seatbelts, type = "train", outcome_col = 1,
                               lookback = lookback, horizon = horizons)
# Contiguous validation windows of 12 observations each.
windows <- create_windows(data_train, window_length = 12)
plot(windows, data_train)
# Explicitly positioned validation windows.
windows <- create_windows(data_train, window_start = c(20, 80), window_stop = c(30, 100))
plot(windows, data_train)
# Shared fixtures for the CPU vclMatrix class tests: force the CPU backend,
# then build integer/double matrices and vectors reused across tests.
library(gpuR)
context("CPU vclMatrix classes")
options(gpuR.default.device.type = "cpu")
set.seed(123)
A <- matrix(seq.int(100), nrow=5)     # integer matrix
D <- matrix(rnorm(100), nrow=5)       # double matrix
v <- rnorm(100)                       # double vector
vi <- seq.int(100)                    # integer vector
# Matrix-based constructors: the element type of the input (or an explicit
# type= argument) selects the ivclMatrix/fvclMatrix/dvclMatrix class.
test_that("CPU vclMatrix integer class initializer" ,{
  has_cpu_skip()
  vclA <- vclMatrix(A)
  expect_is(vclA, "ivclMatrix")
  expect_equivalent(vclA[], A,
                    info="vcl integer matrix elements not equivalent")
  expect_equal(dim(vclA), dim(A))
  expect_equal(ncol(vclA), ncol(A))
  expect_equal(nrow(vclA), nrow(A))
  expect_equal(typeof(vclA), "integer")
})
test_that("CPU vclMatrix float class initializer" ,{
  has_cpu_skip()
  vclD <- vclMatrix(D, type="float")
  expect_is(vclD, "fvclMatrix")
  # Single precision: compare with float-level tolerance.
  expect_equal(vclD[], D, tolerance=1e-07,
               info="vcl float matrix elements not equivalent")
  expect_equal(dim(vclD), dim(D))
  expect_equal(ncol(vclD), ncol(D))
  expect_equal(nrow(vclD), nrow(D))
  expect_equal(typeof(vclD), "float")
})
test_that("CPU vclMatrix double class initializer" ,{
  has_cpu_skip()
  vclD <- vclMatrix(D)
  expect_is(vclD, "dvclMatrix")
  expect_equal(vclD[], D, tolerance=.Machine$double.eps ^ 0.5,
               info="vcl double matrix elements not equivalent")
  expect_equal(dim(vclD), dim(D))
  expect_equal(ncol(vclD), ncol(D))
  expect_equal(nrow(vclD), nrow(D))
  expect_equal(typeof(vclD), "double")
})
# Vector-based constructors: a vector plus nrow/ncol reshapes into a matrix;
# incompatible element types must error.
test_that("CPU vclMatrix integer vector initializers", {
  has_cpu_skip()
  vi <- seq.int(10)
  Ai <- matrix(vi, nrow=2)
  err <- c(TRUE, FALSE)
  err2 <- c("hello", FALSE, 6)
  vclAi <- vclMatrix(vi, nrow=2, ncol=5)
  expect_is(vclAi, "ivclMatrix")
  expect_equivalent(vclAi[], Ai)
  expect_equal(dim(Ai), dim(vclAi))
  # Logical and mixed/character input is rejected regardless of type=.
  expect_error(vclMatrix(err, nrow=1, ncol=2, type="double"))
  expect_error(vclMatrix(err, nrow=1, ncol=2))
  expect_error(vclMatrix(err2, nrow=1, ncol=3, type="double"))
})
test_that("CPU vclMatrix float vector initializers", {
  has_cpu_skip()
  v <- rnorm(10)
  A <- matrix(v, nrow=5)
  vclA <- vclMatrix(v, nrow=5, ncol=2, type="float")
  expect_equal(vclA[], A, tolerance=1e-07)
  expect_equal(dim(A), dim(vclA))
  expect_is(vclA, "fvclMatrix")
})
test_that("CPU vclMatrix double vector initializers", {
  has_cpu_skip()
  v <- rnorm(10)
  A <- matrix(v, nrow=5)
  vclA <- vclMatrix(v, nrow=5, ncol=2, type="double")
  expect_equal(vclA[], A, tolerance=.Machine$double.eps^0.5)
  expect_equal(dim(A), dim(vclA))
  expect_is(vclA, "dvclMatrix")
})
# Scalar-based constructors: a single value is recycled to fill the matrix.
test_that("CPU vclMatrix integer scalar initializers", {
  has_cpu_skip()
  vi <- 4L
  Ai <- matrix(vi, nrow=2, ncol=7)
  ivclA <- vclMatrix(vi, nrow=2, ncol=7, type="integer")
  # `v` is the file-level double vector: doubles cannot back an integer matrix.
  expect_error(vclMatrix(v, nrow=5, ncol=5, type="integer"))
  expect_is(ivclA, "ivclMatrix")
  expect_equivalent(ivclA[], Ai,
                    "scalar integer elements not equivalent")
  expect_equivalent(dim(Ai), dim(ivclA),
                    "scalar integer dimensions not equivalent")
})
test_that("CPU vclMatrix float scalar initializers", {
  has_cpu_skip()
  v <- 3
  A <- matrix(v, nrow=5, ncol=5)
  vclA <- vclMatrix(v, nrow=5, ncol=5, type="float")
  expect_equal(vclA[], A, tolerance=1e-07,
               info = "scalar double elements not equivalent")
  expect_equivalent(dim(A), dim(vclA),
                    info = "scalar double dimensions not equivalent")
  expect_is(vclA, "fvclMatrix")
})
test_that("CPU vclMatrix double scalar initializers", {
  has_cpu_skip()
  v <- 3
  A <- matrix(v, nrow=5, ncol=5)
  vclA <- vclMatrix(v, nrow=5, ncol=5, type="double")
  expect_equal(vclA[], A, tolerance=.Machine$double.eps^0.5,
               info = "scalar double elements not equivalent")
  expect_equivalent(dim(A), dim(vclA),
                    info = "scalar double dimensions not equivalent")
  expect_is(vclA, "dvclMatrix")
})
# Restore the default device type changed at the top of this file.
options(gpuR.default.device.type = "gpu")
# chk_range(): numeric bounds are inclusive and NA passes the check.
test_that("chk_range", {
  expect_true(chk_range(1, 1, 10))
  expect_true(chk_range(10, 1, 10))
  expect_true(chk_range(NA, 1, 10))
  expect_false(chk_range(0, 1, 10))
  expect_false(chk_range(11, 1, 10))
})
# Character inputs: leading zeros and the empty string are accepted; values
# outside the range or non-numeric text are rejected.
test_that("chk_range handles character format correctly", {
  expect_true(chk_range("1", 1, 10))
  expect_true(chk_range("10", 1, 10))
  expect_true(chk_range("01", 1, 10))
  expect_true(chk_range("010", 1, 10))
  expect_true(chk_range("", 1, 10))
  expect_false(chk_range("0", 1, 10))
  expect_false(chk_range("00", 1, 10))
  expect_false(chk_range("011", 1, 10))
  expect_false(chk_range("a", 1, 10))
})
# Shared fixtures for the max_run tests: data with and without NAs, plus
# randomised window sizes (k), lags, and a strictly increasing index (idx).
set.seed(11)
x1 <- sample(c(1, 2, 3), 100, replace = TRUE)
x2 <- sample(c(NA, 1, 2, 3), 100, replace = TRUE)
k <- sample(1:100, 100, replace = TRUE)
lag <- sample(-15:15, 100, replace = TRUE)
idx <- cumsum(sample(c(1, 2, 3, 4), 100, replace = TRUE))
max2 <- function(x, na_rm = TRUE) {
  # Reference maximum: returns NA for all-NA input instead of the -Inf (plus
  # warning) that base max() would produce.
  if (all(is.na(x))) {
    NA
  } else {
    max(x, na.rm = na_rm)
  }
}
# max_run() vs the generic runner(f = max2): default call and pure-lag
# variants, including lags larger than the series (result coerced to numeric).
expect_identical(
  max_run(x2),
  runner(x2, f = max2)
)
expect_identical(
  max_run(x2, na_pad = TRUE),
  runner(x2, f = max2, na_pad = TRUE)
)
expect_identical(
  max_run(x2, na_rm = FALSE),
  runner(x2, function(x) max2(x, na_rm = FALSE))
)
expect_equal(
  max_run(x2, lag = 3),
  runner(x2, lag = 3, f = max2))
expect_equal(
  max_run(x2, lag = 3, na_pad = TRUE),
  runner(x2, lag = 3, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, lag = -3),
  runner(x2, lag = -3, f = max2))
expect_equal(
  max_run(x2, lag = -3, na_pad = TRUE),
  runner(x2, lag = -3, f = max2, na_pad = TRUE))
# |lag| >= length(x2): every window is empty, so compare as numeric.
expect_identical(
  max_run(x2, lag = 100),
  as.numeric(runner(x2, lag = 100, f = max2))
)
expect_identical(
  max_run(x2, lag = 100, na_pad = TRUE),
  as.numeric(runner(x2, lag = 100, f = max2, na_pad = TRUE))
)
expect_identical(
  max_run(x2, lag = -100),
  as.numeric(runner(x2, lag = -100, f = max2))
)
expect_identical(
  max_run(x2, lag = -100, na_pad = TRUE),
  as.numeric(runner(x2, lag = -100, f = max2, na_pad = TRUE))
)
# Fixed and vectorised window sizes (k), combined with positive and negative
# lags, for max_run() vs runner(f = max2).
expect_equal(
  max_run(x2, k = 3),
  runner(x2, k = 3, f = max2))
expect_equal(
  max_run(x2, k = 3, na_pad = TRUE),
  runner(x2, k = 3, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 1),
  runner(x2, k = 1, f = max2))
expect_equal(
  max_run(x2, k = 1, na_pad = TRUE),
  runner(x2, k = 1, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 99),
  runner(x2, k = 99, f = max2))
expect_equal(
  max_run(x2, k = 99, na_pad = TRUE),
  runner(x2, k = 99, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 100),
  runner(x2, k = 100, f = max2))
expect_equal(
  max_run(x2, k = 100, na_pad = TRUE),
  runner(x2, k = 100, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 5, lag = 3),
  runner(x2, k = 5, lag = 3, f = max2))
expect_equal(
  max_run(x2, k = 5, lag = 3, na_pad = TRUE),
  runner(x2, k = 5, lag = 3, f = max2, na_pad = TRUE)
)
expect_equal(
  max_run(x2, k = 5, lag = 3, na_rm = FALSE),
  runner(x2, k = 5, lag = 3, f = max2, na_rm = FALSE)
)
expect_equal(
  max_run(x2, k = 5, lag = 3, na_pad = TRUE, na_rm = FALSE),
  runner(x2, k = 5, lag = 3, f = max2, na_pad = TRUE, na_rm = FALSE)
)
expect_equal(
  max_run(x2, k = 5, lag = -3),
  runner(x2, k = 5, lag = -3, f = max2))
expect_equal(
  max_run(x2, k = 5, lag = -3, na_pad = TRUE),
  runner(x2, k = 5, lag = -3, f = max2, na_pad = TRUE))
# NOTE(review): this comparison uses base `max` instead of the max2/na_rm
# pattern used everywhere else; equivalent for windows with at least one
# non-NA value, but max() of an all-NA window warns — confirm intent.
expect_equal(
  max_run(x2, k = 5, lag = -3, na_rm = FALSE),
  runner(x2, k = 5, lag = -3, f = max))
expect_equal(
  max_run(x2, k = 5, lag = -3, na_pad = TRUE, na_rm = FALSE),
  runner(x2, k = 5, lag = -3, f = max2, na_pad = TRUE, na_rm = FALSE)
)
expect_equal(
  max_run(x2, k = 5, lag = -7),
  runner(x2, k = 5, lag = -7, f = max2))
expect_equal(
  max_run(x2, k = 5, lag = -7, na_pad = TRUE),
  runner(x2, k = 5, lag = -7, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 1, lag = -1),
  runner(x2, k = 1, lag = -1, f = max2))
expect_equal(
  max_run(x2, k = 1, lag = -1, na_pad = TRUE),
  runner(x2, k = 1, lag = -1, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 1, lag = 1),
  runner(x2, k = 1, lag = 1, f = max2))
expect_equal(
  max_run(x2, k = 1, lag = 1, na_pad = TRUE),
  runner(x2, k = 1, lag = 1, f = max2, na_pad = TRUE))
# Vectorised k and/or lag.
expect_equal(
  max_run(x2, k = k, lag = 1),
  runner(x2, k = k, lag = 1, f = max2))
expect_equal(
  max_run(x2, k = k, lag = 1, na_pad = TRUE),
  runner(x2, k = k, lag = 1, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 3, lag = lag),
  runner(x2, k = 3, lag = lag, f = max2))
expect_equal(
  max_run(x2, k = 3, lag = lag, na_pad = TRUE),
  runner(x2, k = 3, lag = lag, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = k, lag = lag),
  runner(x2, k = k, lag = lag, f = max2))
expect_equal(
  max_run(x2, k = k, lag = lag, na_pad = TRUE),
  runner(x2, k = k, lag = lag, f = max2, na_pad = TRUE))
# Index-based (date-like) windows: k and lag measured in idx units rather
# than element counts, for max_run() vs runner(f = max2).
expect_equal(
  max_run(x2, lag = 3, idx = idx, na_pad = FALSE),
  runner(x2, lag = 3, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, lag = 3, idx = idx, na_pad = TRUE),
  runner(x2, lag = 3, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, lag = -3, idx = idx, na_pad = FALSE),
  runner(x2, lag = -3, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, lag = -3, idx = idx, na_pad = TRUE),
  runner(x2, lag = -3, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 3, idx = idx, na_pad = FALSE),
  runner(x2, k = 3, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, k = 3, idx = idx, na_pad = TRUE),
  runner(x2, k = 3, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, lag = -1, idx = idx, na_pad = FALSE),
  runner(x2, lag = -1, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, lag = -1, idx = idx, na_pad = TRUE),
  runner(x2, lag = -1, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, lag = 100, idx = idx, na_pad = FALSE),
  runner(x2, lag = 100, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, lag = 100, idx = idx, na_pad = TRUE),
  runner(x2, lag = 100, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, lag = -100, idx = idx, na_pad = FALSE),
  runner(x2, lag = -100, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, lag = -100, idx = idx, na_pad = TRUE),
  runner(x2, lag = -100, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, lag = lag, idx = idx, na_pad = FALSE),
  runner(x2, lag = lag, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, lag = lag, idx = idx, na_pad = TRUE),
  runner(x2, lag = lag, idx = idx, f = max2, na_pad = TRUE))
# Combined k and lag over an index.
expect_equal(
  max_run(x2, k = 3, lag = 4, idx = idx, na_pad = FALSE),
  runner(x2, k = 3, lag = 4, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, k = 3, lag = 4, idx = idx, na_pad = TRUE),
  runner(x2, k = 3, lag = 4, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 3, lag = -4, idx = idx, na_pad = FALSE),
  runner(x2, k = 3, lag = -4, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, k = 3, lag = -4, idx = idx, na_pad = TRUE),
  runner(x2, k = 3, lag = -4, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = k, lag = -4, idx = idx, na_pad = FALSE),
  runner(x2, k = k, lag = -4, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, k = k, lag = -4, idx = idx, na_pad = TRUE),
  runner(x2, k = k, lag = -4, idx = idx, f = max2, na_pad = TRUE))
expect_equal(
  max_run(x2, k = 4, lag = lag, idx = idx, na_pad = FALSE),
  runner(x2, k = 4, lag = lag, idx = idx, f = max2, na_pad = FALSE))
expect_equal(
  max_run(x2, k = 4, lag = lag, idx = idx, na_pad = TRUE),
  runner(x2, k = 4, lag = lag, idx = idx, f = max2, na_pad = TRUE))
context("test-check_augment_no_data")
# Stub augment() implementations exercising check_augment_no_data():
# one that errors with the sanctioned message.
fail_gracefully <- function(model, data = NULL, newdata = NULL) {
  stop("Must specify either `data` or `newdata` argument.", call. = FALSE)
}
# One that errors with a non-standard message.
fail_uninformatively <- function(model, data = NULL, newdata = NULL) {
  stop("A bad error message.", call. = FALSE)
}
# Ones returning tibbles with missing rows, missing columns, or dropped
# rownames relative to the passed data.
missing_rows <- function(model, data = NULL, newdata = NULL) {
  as_tibble(head(iris))
}
missing_cols <- function(model, data = NULL, newdata = NULL) {
  as_tibble(iris)[, 1:4]
}
missing_rownames <- function(model, data = NULL, newdata = NULL) {
  as_tibble(iris, rownames = NULL)
}
# And a fully conforming implementation.
correct <- function(model, data = NULL, newdata = NULL) {
  as_tibble(iris)
}
# strict = TRUE: a correct augment passes silently, failing with the
# sanctioned message passes silently, and any other error message is flagged.
test_that("strict = TRUE", {
  expect_silent(
    check_augment_no_data(
      aug = correct,
      model = NULL,
      passed_data = iris,
      strict = TRUE
    )
  )
  expect_silent(
    check_augment_no_data(
      aug = fail_gracefully,
      model = NULL,
      passed_data = iris,
      strict = TRUE
    )
  )
  expect_error(
    check_augment_no_data(
      aug = fail_uninformatively,
      model = NULL,
      passed_data = iris,
      strict = TRUE
    ),
    paste0(
      "Augment failed but did not give an informative error message.\n",
      "Please use the following error message:\n",
      "  Must specify either `data` or `newdata` argument."
    )
  )
})
# adjacency(): the computed neighbour lists for the checkerboard fixture must
# match the stored reference (order-insensitive, hence the sort).
test_that('adjacency works', {
  actual <- adjacency(checkerboard, epsg = FALSE)
  expect_equal(lapply(actual, sort), lapply(checkerboard_adj, sort))
})
# check_contiguity(): a fully connected 8x8 checkerboard is one group with a
# single connected component for all 64 cells.
test_that('check_contiguity works', {
  expected <- structure(list(
    group = c(
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L
    ),
    group_number = c(
      1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L
    ),
    component = c(
      1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
      1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L
    )
  ),
  row.names = c(NA, -64L), class = c('tbl_df', 'tbl', 'data.frame')
  )
  actual <- check_contiguity(checkerboard_adj)
  expect_equal(actual, expected)
})
# Name of the packaged sample dataset; presumably the target of a preceding
# roxygen @docType data block outside this chunk — confirm.
"rvn_tidyhydat_sample"
# Inverse Direct Linear Transformation: project 3D coordinates into the 2D
# pixel coordinates of a camera view described by 11 DLT coefficients.
#
# cal.coeff: numeric vector of 11 DLT calibration coefficients.
# coor.3d:   n x 3 matrix of 3D points (a bare length-3 vector is accepted
#            and treated as a single point).
# Returns an n x 2 matrix of image coordinates.
#
# Fixes: the shared DLT denominator was computed twice (once per output
# coordinate); `=` was used for assignment; NA was not explicitly numeric.
dltInverse <- function(cal.coeff, coor.3d){
  if(!is.matrix(coor.3d)){coor.3d <- matrix(coor.3d, 1, 3)}
  # Common projective denominator: c9*x + c10*y + c11*z + 1.
  denom <- coor.3d[, 1]*cal.coeff[9] + coor.3d[, 2]*cal.coeff[10] +
    coor.3d[, 3]*cal.coeff[11] + 1
  m <- matrix(NA_real_, nrow(coor.3d), 2)
  m[, 1] <- (coor.3d[, 1]*cal.coeff[1] + coor.3d[, 2]*cal.coeff[2] +
    coor.3d[, 3]*cal.coeff[3] + cal.coeff[4]) / denom
  m[, 2] <- (coor.3d[, 1]*cal.coeff[5] + coor.3d[, 2]*cal.coeff[6] +
    coor.3d[, 3]*cal.coeff[7] + cal.coeff[8]) / denom
  m
}
kiderafactor <- function(pssm_name, v = NULL) {
  # Compute 170 Kidera-factor features per residue from a PSI-BLAST PSSM file:
  # the 10 Kidera factors for each of the 17 positions in a sliding window
  # over a sigmoid-transformed, 7-row-smoothed PSSM.
  #
  # pssm_name: path to a PSSM file (two header lines, then the score table).
  # v:         optional vector of residue indices to return; NULL returns all.
  # Returns a data frame (rows = residues, 170 columns), rounded to 3 digits.
  #
  # BUG FIX: the window-sliding loop contained `i==d` — a comparison whose
  # result was discarded — so every subsequent window reused the stale row
  # index left over from the first window. It is now the assignment `i <- d`.

  # ---- Parse the PSSM table ----
  x <- read.delim(pssm_name, skip = 2, sep = "", header = FALSE)
  # Drop the header row, the position column, and the trailing 22 columns.
  x <- x[-1, -c(1, 23:44)]
  # Remove the trailing statistics block (starts at the "Lambda" line), if any.
  d <- which(x == "Lambda")
  if (length(d) != 0) {
    x <- x[-c(d:dim(x)[1]), ]
  }
  x <- x[, -1]   # drop the amino-acid letter column
  colnames(x) <- NULL
  rownames(x) <- NULL
  x <- as.matrix(x)
  mode(x) <- "integer"

  # ---- Sigmoid transform and 7-row smoothing (3 zero rows padded each side) ----
  m <- 1 / (1 + exp(-x))
  L <- dim(m)[1]
  smoothed_PSSM <- matrix(0, L, 20)
  m <- rbind(matrix(0, 3, 20), m, matrix(0, 3, 20))
  for (i in seq_len(L)) {
    E <- data.frame(m[i, ], m[i + 1, ], m[i + 2, ], m[i + 3, ],
                    m[i + 4, ], m[i + 5, ], m[i + 6, ])
    smoothed_PSSM[i, ] <- rowSums(E) / 7
  }

  # ---- Kidera factor table: 10 factors for the 20 amino acids ----
  v1 <- c(-1.56,0.22,1.14,0.58,0.12,-0.47,-1.45,1.46,-0.41,-0.73,-1.04,-0.34,-1.40,-0.21,2.06,0.81,0.26,0.30,1.38,-0.74)
  v2 <- c(-1.67,1.27,-0.07,-0.22,-0.89,0.24,0.19,-1.96,0.52,-0.16,0.00,0.82,0.18,0.98,-0.33,-1.08,-0.70,2.10,1.48,-0.71)
  v3 <- c(-0.97,1.37,-0.12,-1.58,0.45,0.07,-1.61,-0.23,-0.28,1.79,-0.24,-0.23,-0.42,-0.36,-1.15,0.16,1.21,-0.72,0.80,2.04)
  v4 <- c(-0.27,1.87,0.81,0.81,-1.05,1.10,1.17,-0.16,0.28,-0.77,-1.10,1.70,-0.73,-1.43,-0.75,0.42,0.63,-1.57,-0.56,-0.40)
  v5 <- c(-0.93,-1.70,0.18,-0.92,-0.71,1.10,-1.31,0.10,1.61,-0.54,-0.55,1.54,2.00,0.22,0.88,-0.21,-0.10,-1.16,-0.00,0.50)
  v6 <- c(-0.78,0.46,0.37,0.15,2.41,0.59,0.40,-0.11,1.01,0.03,-2.05,-1.62,1.52,-0.81,-0.45,-0.43,0.21,0.57,-0.68,-0.81)
  v7 <- c(-0.20,0.92,-0.09,-1.52,1.52,0.84,0.04,1.32,-1.85,-0.83,0.96,1.15,0.26,0.67,0.30,-1.89,0.24,-0.48,-0.31,-1.07)
  v8 <- c(-0.08,-0.39,1.23,0.47,-0.69,-0.71,0.38,2.36,0.47,0.51,-0.76,-0.08,0.11,1.10,-2.30,-1.15,-1.15,-0.40,1.03,0.06)
  v9 <- c(0.21,0.23,1.10,0.76,1.13,-0.03,-0.35,-1.66,1.13,0.66,0.45,-0.48,-1.27,1.71,0.74,-0.97,-0.56,-2.30,-0.05,-0.46)
  v10 <- c(-0.48,0.93,-1.73,0.70,1.10,-2.33,-0.12,0.46,1.63,-1.78,0.93,0.60,0.27,-0.44,-0.28,-0.23,0.19,-0.60,0.53,0.65)
  kidera_table <- as.matrix(data.frame(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10))
  colnames(kidera_table) <- NULL

  # ---- 17-row sliding window over the smoothed PSSM (8 zero rows padding) ----
  condenced_pssm <- rbind(matrix(0, 8, 20), smoothed_PSSM, matrix(0, 8, 20))
  x <- matrix(0, L, 170)
  w1 <- c()
  w2 <- c()
  d <- 9   # padded-matrix row index of the first residue's window centre
  a <- 1   # output row being filled
  s <- 0
  # First window: rows 1..17 of the padded matrix; 10 factor scores per row.
  for (i in (d - 8):(d + 8)) {
    for (p in 1:10) {
      for (j in 1:20) {
        s <- s + condenced_pssm[i, j] * kidera_table[j, p]
      }
      w2[p] <- s
      s <- 0
    }
    w1 <- c(w1, w2)
    w2 <- c()
  }
  x[a, ] <- w1
  d <- i + 1
  a <- a + 1
  # Subsequent windows: drop the 10 features of the row leaving the window
  # and append those of row `d` entering it.
  while (d <= (L + 16)) {
    i <- d   # was `i == d` (no-op comparison) — see BUG FIX above
    s <- 0
    for (p in 1:10) {
      for (j in 1:20) {
        s <- s + condenced_pssm[i, j] * kidera_table[j, p]
      }
      w2[p] <- s
      s <- 0
    }
    w1 <- c(w1[11:170], w2)
    x[a, ] <- w1
    a <- a + 1
    w2 <- c()
    d <- d + 1
  }

  # ---- Select requested rows, label, and round ----
  if (length(v) != 0) {
    y <- as.data.frame(x[v, ])
    rownames(y) <- v
  } else {
    y <- as.data.frame(x)
    rownames(y) <- 1:L
  }
  colnames(y) <- 1:170
  return(round(y, digits = 3))
}
# Forest-weighted Lp distance between each new observation and the training
# set along a single feature, using the forest's prediction weight matrix.
#
# object:  a fitted forestry random forest (S4).
# newdata: observations to score (coerced to data frame).
# feature: name of the feature along which to measure distance.
# p:       exponent of the Lp norm.
# Returns a numeric vector, one distance per row of `newdata`.
#
# Fixes: use methods::is() instead of comparing class(object) to a string
# (honours S4 inheritance and avoids vector-class comparison); record whether
# the feature is a factor *before* replacing it with integer codes — the
# original re-checked afterwards, so the [0, 1] clamp was unreachable; use
# rowSums() instead of apply(.., 1, sum).
compute_lp <- function(object, newdata, feature, p){
  if (!is(object, "forestry")) {
    stop("The object submitted is not a forestry random forest")
  }
  newdata <- as.data.frame(newdata)
  train_set <- slot(object, "processed_dta")$processed_x
  if (!(feature %in% colnames(train_set))) {
    stop("The submitted feature is not in the set of possible features")
  }
  # Rows = newdata observations, columns = training observations.
  y_weights <- predict(object = object,
                       newdata = newdata,
                       weightMatrix = TRUE)$weightMatrix
  feature_is_factor <- is.factor(newdata[1, feature])
  # NOTE(review): the matrix(..., byrow = TRUE/FALSE) construction below is
  # kept exactly as in the original; verify the intended pairwise orientation
  # against the weight-matrix layout.
  if (feature_is_factor) {
    # Categorical feature: map factor levels to the forest's internal integer
    # codes, then use a 0/1 mismatch matrix as the per-pair distance.
    mapping <- slot(object, "categoricalFeatureMapping")
    factor_vals <- mapping[[1]][2][[1]]
    map <- function(x) {
      which(factor_vals == x)[1]
    }
    newdata[, feature] <- unlist(lapply(newdata[, feature], map))
    diff_mat <- matrix(newdata[, feature],
                       nrow = nrow(newdata),
                       ncol = nrow(train_set),
                       byrow = TRUE) !=
      matrix(train_set[, feature],
             nrow = nrow(newdata),
             ncol = nrow(train_set),
             byrow = FALSE)
    diff_mat[diff_mat] <- 1
  } else {
    # Numeric feature: signed pairwise differences.
    diff_mat <- matrix(newdata[, feature],
                       nrow = nrow(newdata),
                       ncol = nrow(train_set),
                       byrow = TRUE) -
      matrix(train_set[, feature],
             nrow = nrow(newdata),
             ncol = nrow(train_set),
             byrow = FALSE)
  }
  diff_mat <- abs(diff_mat) ^ p
  # Weighted Lp norm per new observation.
  distances <- rowSums(y_weights * diff_mat) ^ (1 / p)
  if (feature_is_factor) {
    # Mismatch distances are weighted averages of 0/1 values; clamp numeric
    # noise so the result stays in [0, 1].
    distances[distances < 0] <- 0
    distances[distances > 1] <- 1
  }
  return(distances)
}
# For each UniProt accession in `m`, fetch its record via local_uniprotfun()
# (which writes the flat-file text to "x.txt") and extract every "AltName:"
# value; returns a list with "NONE" for records without alternative names.
#
# Fixes: the single crammed line is expanded into readable form, and the
# misleading condition `length(tempz != 0)` (which only worked because a
# positive length is truthy) is replaced by an explicit length test.
altnamesfun <- function(m) {
  lapply(m, function(x) {
    local_uniprotfun(x)
    record <- readLines("x.txt")
    # Lines of the record containing an "AltName:" field.
    match_pos <- regexpr("AltName:", record, fixed = TRUE)
    alt_idx <- which(match_pos != -1)
    if (length(alt_idx) == 0) {
      return("NONE")
    }
    alt_names <- NULL
    for (line in record[alt_idx]) {
      # Field value sits between "=" and the terminating ";".
      rhs <- unlist(strsplit(line, "=", fixed = TRUE))[2]
      alt_names <- c(alt_names, unlist(strsplit(rhs, ";", fixed = TRUE))[1])
    }
    alt_names
  })
}
get_current_for_group <- function(city_ids, ...) {
  # Fetch current weather for several cities in a single "group" API call.
  # `city_ids` are collapsed into the comma-separated `id` query parameter.
  # Returns the parsed response tagged with class "owmr_group".
  fetch <- owmr_wrap_get("group")
  response <- fetch(id = paste(city_ids, collapse = ","))
  owmr_class(owmr_parse(response), "owmr_group")
}
# Convenience wrapper: look up the `cnt` cities nearest to a coordinate.
# All arguments (and any extras in `...`) are forwarded unchanged to
# find_city().
find_cities_by_geo_point <- function(lat, lon, cnt = 3, ...) {
  find_city(lat = lat, lon = lon, cnt = cnt, ...)
}
# Demo script: draw PBS Mapping figure 3.
# require() returns FALSE (instead of erroring) when the package is missing,
# so it is chained with || stop() to fail loudly.
require(PBSmapping) || stop("PBS Mapping library not available");
local({
  .PBSfig03()  # internal PBSmapping plotting routine for figure 3
});
# testthat suite for group_category(): collapsing rare factor/character
# categories (below a frequency/measure threshold) into an "OTHER" bucket,
# optionally updating the input data.table in place.
context("group category")
data("diamonds", package = "ggplot2")
# Without update = TRUE, a summary frame of the retained categories is
# returned; its "cnt" totals exclude the collapsed rare categories.
test_that("test non-data.table objects without update", {
  expect_is(group_category(iris, "Species", 0.2), "data.frame")
  expect_lt(sum(group_category(iris, "Species", 0.2)[["cnt"]]), nrow(iris))
  expect_lt(sum(group_category(iris, "Species", 0.2, "Sepal.Length")[["cnt"]]), sum(iris$Sepal.Length))
})
test_that("test data.table objects without update", {
  dt <- data.table(iris)
  expect_is(group_category(dt, "Species", 0.2), "data.table")
  expect_lt(sum(group_category(dt, "Species", 0.2)[["cnt"]]), nrow(dt))
  expect_lt(sum(group_category(dt, "Species", 0.2, "Sepal.Length")[["cnt"]]), sum(dt$Sepal.Length))
})
# With update = TRUE on a data.table, rare levels are replaced in place by
# the "OTHER" (or custom) category name.
test_that("test update without measure", {
  dt <- data.table(diamonds)
  unique_cut_old <- levels(dt$cut)
  group_category(dt, "cut", 0.2, update = TRUE)
  unique_cut_new <- unique(dt$cut)
  expect_gte(length(unique_cut_old), length(unique_cut_new))
  expect_true("OTHER" %in% unique_cut_new)
})
test_that("test update with measure", {
  dt <- data.table(diamonds)
  unique_cut_old <- levels(dt$cut)
  group_category(dt, "cut", 0.2, measure = "price", update = TRUE)
  unique_cut_new <- unique(dt$cut)
  expect_gte(length(unique_cut_old), length(unique_cut_new))
  expect_true("OTHER" %in% unique_cut_new)
  # Dominant levels by total price must survive the collapse.
  expect_true("Ideal" %in% unique_cut_new)
  expect_true("Premium" %in% unique_cut_new)
})
test_that("test update with different name", {
  dt <- data.table(diamonds)
  group_category(dt, "cut", 0.2, update = TRUE, category_name = "New Name")
  expect_true("New Name" %in% unique(dt$cut))
})
# Excluded values must be kept verbatim even when below the threshold.
test_that("test excluding columns", {
  dt <- data.table("a" = c(rep("c1", 25), rep("c2", 10), "c3", "c4"))
  group_category(dt, "a", 0.8, update = TRUE, exclude = c("c3", "c4"))
  expect_identical(unique(dt$a), c("OTHER", "c3", "c4"))
})
# update = TRUE on a plain data.frame must return (not modify in place) an
# object of the same class and shape.
test_that("test non-data.table objects with update", {
  expect_is(group_category(iris, "Species", 0.2, update = TRUE), "data.frame")
  expect_equal(class(group_category(diamonds, "cut", 0.2, update = TRUE)), class(diamonds))
  expect_equal(dim(group_category(iris, "Species", 0.2, update = TRUE)), dim(iris))
})
# testthat suite for seqR::count_kmers() -- part 1: input validation
# (empty/ill-typed parameters must raise descriptive errors) plus basic
# gapped-k-mer counting. Helper expect_matrices_equal() comes from utils.R.
library(testthat)
source("utils.R")
test_that("null alphabet throws an error", {
  expect_error(seqR::count_kmers(sequences=list(c("a", "a", "a")),
                                 kmer_alphabet=c()),
               "alphabet param is empty")
})
test_that("null sequences throws an error", {
  expect_error(seqR::count_kmers(sequences=c(),
                                 kmer_alphabet=c("a"),
                                 k=1),
               "sequences param is empty")
})
# The alphabet's element type must match the sequences' element type.
test_that("alphabet has incompatible element (integer) type with sequences' elements (string list)", {
  expect_error(seqR::count_kmers(sequences=list(c("a", "b")),
                                 kmer_alphabet=c(1,2),
                                 k=1),
               "alphabet should contain strings")
})
test_that("alphabet has incompatible element (numeric) type with sequences' elements (string)", {
  expect_error(seqR::count_kmers(sequences=list(c("aa", "bb")),
                                 kmer_alphabet=c(1.2, 2.2),
                                 k=1),
               "alphabet should contain strings")
})
test_that("alphabet has incompatible element (numeric) type with sequences from vector input (string)", {
  expect_error(seqR::count_kmers(sequences=c("aaaaaaa"),
                                 kmer_alphabet=c(1.1, 2.2),
                                 k=1),
               "alphabet should contain strings")
})
test_that("alphabet has incompatible element (integer) type with sequences from vector input (string)", {
  expect_error(seqR::count_kmers(sequences=c("aaaaaa"),
                                 kmer_alphabet=c(1,2),
                                 k=1),
               "alphabet should contain strings")
})
test_that("k = 0 generate an error", {
  expect_error(seqR::count_kmers(sequences=c("AAAAA"),
                                 kmer_alphabet=c("A"),
                                 k=0),
               "k should be a positive integer")
})
# kmer_gaps must be an integer vector of length at most k - 1.
test_that("non integer gaps vector generates an error", {
  expect_error(seqR::count_kmers(sequences=c("AAAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 kmer_gaps=c("A")),
               "gaps should be an integer vector")
})
test_that("kmer gaps length larger than k-1 generates an error", {
  expect_error(seqR::count_kmers(sequences=c("AAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 kmer_gaps=c(1,2)),
               "the length of kmer_gaps vector should be at most k-1")
})
# batch_size must be a single positive integer.
test_that("provided batch size param is a negative integer", {
  expect_error(seqR::count_kmers(sequences=c("AAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 batch_size = -2),
               "batch size field must be a positive integer number")
})
test_that("provided batch size param is a positive non integer", {
  expect_error(seqR::count_kmers(sequences=c("AAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 batch_size = 2.2),
               "batch size field must be a positive integer number")
})
test_that("provided batch size param is zero", {
  expect_error(seqR::count_kmers(sequences=c("AAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 batch_size = 0),
               "batch size field must be a positive integer number")
})
test_that("provided batch size param is a string", {
  expect_error(seqR::count_kmers(sequences=c("AAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 batch_size = "aaaa"),
               "batch size field must be a positive integer number")
})
test_that("provided batch size param is an integer vector", {
  expect_error(seqR::count_kmers(sequences=c("AAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 batch_size = c(1,2,3)),
               "batch size field must be a positive integer number")
})
test_that("provided batch size param is NULL", {
  expect_error(seqR::count_kmers(sequences=c("AAAA"),
                                 kmer_alphabet=c("A"),
                                 k=1,
                                 batch_size = NULL),
               "batch size field must be a positive integer number")
})
test_that("batch_size = 1 generates message", {
  expect_message(seqR::count_kmers(sequences=c("AAAAA"),
                                   batch_size=1))
})
# Counting behaviour: gapped 2-mers (column names encode the gap pattern,
# e.g. "A.A_1" = A, gap of 1, A).
test_that("test list input sequences for gapped k-mers", {
  sq <- c("AAAA", "AAACA")
  expected_res <- matrix(c(
    2, 0,
    2, 1), byrow=TRUE, nrow=2)
  colnames(expected_res) <- c("A.A_1", "A.C_1")
  res <- seqR::count_kmers(sequences=sq,
                           k=2,
                           kmer_alphabet=c("A", "C"),
                           positional=FALSE,
                           kmer_gaps=c(1))
  expect_equal(expected_res, as.matrix(res))
})
# k longer than every sequence: result is a 0-column matrix, not an error.
test_that("test the case when there is 0 found k-mers", {
  sq <-c("AAAAAA", "AAACA")
  expected_res <- matrix(nrow=2, ncol=0)
  res <- seqR::count_kmers(sequences=sq,
                           k=100,
                           kmer_alphabet=c("A"),
                           positional=FALSE,
                           kmer_gaps=c(1))
  expect_equal(expected_res, as.matrix(res))
})
# testthat suite for seqR::count_kmers() -- part 2: batching must not change
# results, zero-count rows, output class, positional k-mers and the
# "all" alphabet shortcut.
# Shared fixture: the same 4 sequences counted as contiguous 3-mers; the
# expected result is identical regardless of how many batches are used.
run_batch_test <- function(batch_size) {
  sq <- c("AAAAA", "AA", "AAAAAAAB", "BBB")
  expected_res <- matrix(c(
    3, 0, 0,
    0, 0, 0,
    5, 1, 0,
    0, 0, 1
  ), nrow=4, byrow=TRUE)
  colnames(expected_res) <- c("A.A.A_0.0", "A.A.B_0.0", "B.B.B_0.0")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet=c("A", "B"),
                           k=3,
                           positional=FALSE,
                           with_kmer_counts=TRUE,
                           batch_size = batch_size)
  expect_equal(expected_res, as.matrix(res))
}
test_that("test list input sequences that are processed in ONE batch iteration", {
  skip_on_cran()
  run_batch_test(batch_size=3)
})
test_that("test list input sequences that are processed in TWO batch iterations", {
  skip_on_cran()
  run_batch_test(batch_size=2)
})
test_that("test list input sequences that are processed in THREE batch iterations", {
  skip_on_cran()
  run_batch_test(batch_size=1)
})
# Sequences that contribute no k-mers must still appear as all-zero rows,
# wherever they occur in the input.
test_that("the last input sequence does not contain any specified k-mer", {
  sq <- c("aaaaacbb", "aa")
  expected_res <- matrix(c(
    1, 1, 1, 1,
    0, 0, 0, 0
  ), nrow=2, byrow=TRUE)
  colnames(expected_res) <- c("a.a.c.b.b_0.0.0.0", "a.a.a.c.b_0.0.0.0", "a.a.a.a.a_0.0.0.0", "a.a.a.a.c_0.0.0.0")
  res <- seqR::count_kmers(sequences=sq,
                           kmer_alphabet=letters,
                           k=5,
                           positional=FALSE,
                           with_kmer_counts = FALSE)
  expect_matrices_equal(as.matrix(res), expected_res)
})
test_that("more than one last input sequences do not contain any specified k-mer", {
  sq <- c("aaaaacbb", "aa", "bb", "aaa")
  expected_res <- matrix(c(
    1, 1, 1, 1,
    0, 0, 0, 0,
    0, 0, 0, 0,
    0, 0, 0, 0
  ), nrow=4, byrow=TRUE)
  colnames(expected_res) <- c("a.a.c.b.b_0.0.0.0", "a.a.a.c.b_0.0.0.0", "a.a.a.a.a_0.0.0.0", "a.a.a.a.c_0.0.0.0")
  res <- seqR::count_kmers(sequences=sq,
                           kmer_alphabet=letters,
                           k=5,
                           positional=FALSE,
                           with_kmer_counts = FALSE)
  expect_matrices_equal(as.matrix(res), expected_res)
})
test_that("some input sequences do not contain any specified k-mer", {
  sq <- c("aa", "aaaaacbb", "bb", "aaa")
  expected_res <- matrix(c(
    0, 0, 0, 0,
    1, 1, 1, 1,
    0, 0, 0, 0,
    0, 0, 0, 0
  ), nrow=4, byrow=TRUE)
  colnames(expected_res) <- c("a.a.c.b.b_0.0.0.0", "a.a.a.c.b_0.0.0.0", "a.a.a.a.a_0.0.0.0", "a.a.a.a.c_0.0.0.0")
  res <- seqR::count_kmers(sequences=sq,
                           kmer_alphabet=letters,
                           k=5,
                           positional=FALSE,
                           with_kmer_counts = FALSE)
  expect_matrices_equal(as.matrix(res), expected_res)
})
# The result is a Matrix-package sparse matrix.
test_that("expect dgCMatrix as an output", {
  sq <- c("AAAAA", "AA", "AAAAAAAB", "BBB")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet=c("A", "B"),
                           k=3,
                           positional=FALSE,
                           with_kmer_counts=TRUE)
  expect_is(res, "dgCMatrix")
})
# Positional k-mers with hash_dim = 1: column names encode the 1-based
# position prefix (e.g. "102_A" = A at position 102). The sequences are sized
# around the hashing prime to exercise modular-arithmetic edge cases.
test_that("count positional 1-mers for one-dimensional hash P_a, 1_b (P is hashing prime)", {
  P <- 101
  sq <- c(paste0("B", strrep("C", P - 1), "A"))
  expected_res <- matrix(c(1, 1), nrow=1, byrow = TRUE)
  colnames(expected_res) <- c("102_A", "1_B")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = c("A", "B"),
                           k = 1,
                           positional = TRUE,
                           hash_dim = 1)
  expect_matrices_equal(as.matrix(res), expected_res)
})
test_that("count positional gapped 2-mers (gap == 1) for one-dimensional hash 1_B.C_1, 10202_A.A_1", {
  sq <- c(paste0("BCA", strrep("C", 10198), "ACA"))
  expected_res <- matrix(c(1, 1), nrow=1, byrow = TRUE)
  colnames(expected_res) <- c("1_B.A_1", "10202_A.A_1")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = c("A", "B"),
                           k = 2,
                           kmer_gaps = c(1),
                           positional = TRUE,
                           hash_dim = 1)
  expect_matrices_equal(as.matrix(res), expected_res)
})
# with_kmer_names = FALSE suppresses column names entirely.
test_that("count 3-mers without k-mer names, one sequence", {
  sq <- c("AAAAAAAAA")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = LETTERS,
                           k = 3,
                           with_kmer_names = FALSE)
  expect_true(is.null(dimnames(res)[[2]]))
  expect_true(as.matrix(res) == as.matrix(c(7)))
})
test_that("count 3-mers without k-mer names, multiple sequences", {
  sq <- c("AAAAAAAAA", "ACADSDSA", "AAABBB")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = LETTERS,
                           k = 3,
                           with_kmer_names = FALSE)
  expect_true(is.null(dimnames(res)[[2]]))
})
# kmer_alphabet = "all" means "use every symbol seen in the input"; results
# must be the same whether the input is a string vector or a character list,
# and regardless of batch size.
test_that("(string vector) count 2-mers with alphabet = all", {
  sq <- c("XXXX", "XAXA", "ABC")
  expected_res <- matrix(c(
    3, 0, 0, 0, 0,
    0, 2, 1, 0, 0,
    0, 0, 0, 1, 1
  ), byrow=TRUE, nrow=3)
  colnames(expected_res) <- c("X.X_0", "X.A_0", "A.X_0", "A.B_0", "B.C_0")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = "all",
                           k = 2)
  expect_matrices_equal(as.matrix(res), expected_res)
})
test_that("(string vector) count 2-mers with alphabet = all, batch size = 1", {
  sq <- c("XXXX", "XAXA", "ABC")
  expected_res <- matrix(c(
    3, 0, 0, 0, 0,
    0, 2, 1, 0, 0,
    0, 0, 0, 1, 1
  ), byrow=TRUE, nrow=3)
  colnames(expected_res) <- c("X.X_0", "X.A_0", "A.X_0", "A.B_0", "B.C_0")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = "all",
                           batch_size = 1,
                           k = 2)
  expect_matrices_equal(as.matrix(res), expected_res)
})
test_that("(string list) count 2-mers with alphabet = all", {
  sq <- list(c("X", "X", "X", "X"),
             c("X", "A", "X", "A"),
             c("A", "B", "C"))
  expected_res <- matrix(c(
    3, 0, 0, 0, 0,
    0, 2, 1, 0, 0,
    0, 0, 0, 1, 1
  ), byrow=TRUE, nrow=3)
  colnames(expected_res) <- c("X.X_0", "X.A_0", "A.X_0", "A.B_0", "B.C_0")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = "all",
                           k = 2)
  expect_matrices_equal(as.matrix(res), expected_res)
})
test_that("(string list) count 2-mers with alphabet = all, batch_size = 1", {
  sq <- list(c("X", "X", "X", "X"),
             c("X", "A", "X", "A"),
             c("A", "B", "C"))
  expected_res <- matrix(c(
    3, 0, 0, 0, 0,
    0, 2, 1, 0, 0,
    0, 0, 0, 1, 1
  ), byrow=TRUE, nrow=3)
  colnames(expected_res) <- c("X.X_0", "X.A_0", "A.X_0", "A.B_0", "B.C_0")
  res <- seqR::count_kmers(sequences = sq,
                           kmer_alphabet = "all",
                           batch_size = 1,
                           k = 2)
  expect_matrices_equal(as.matrix(res), expected_res)
})
mkl.cgb2 <- function(k, shape1, scale, shape2, shape3, pl0, decomp="r"){
  # Moments of order k of the component distributions of a compound GB2.
  # pl0 gives the mixture probabilities; decomp selects the decomposition
  # ("r" = right tail via shape3, "l" = left tail via shape2).
  # Returns the vector of component moments (GB2 moment times a gamma-based
  # adjustment factor per component).
  if (decomp == "r") {
    sh <- shape3
    a0 <- shape1
  }
  if (decomp == "l") {
    sh <- shape2
    a0 <- -shape1
  }
  Egb2 <- moment.gb2(k, shape1, scale, shape2, shape3)
  # Component boundaries on the gamma scale: quantiles of cumsum(pl0),
  # with 0 prepended as the lower bound of the first component.
  upper <- qgamma(cumsum(pl0), sh)
  lower <- c(0, upper[-length(pl0)])
  shk <- sh - k/a0
  # Ratio of the component masses under the k-shifted vs. original gamma.
  adjust <- (pgamma(upper, shk) - pgamma(lower, shk)) /
    (pgamma(upper, sh) - pgamma(lower, sh))
  Egb2 * adjust
}
moment.cgb2 <- function(k, shape1, scale, shape2, shape3, pl0, pl, decomp="r"){
  # k-th moment of a compound GB2 distribution: the pl-weighted sum of the
  # component moments from mkl.cgb2(). The moment exists only for
  # -shape1*shape2 < k < shape1*shape3; otherwise NA is returned.
  pk <- shape2 + k/shape1
  qk <- shape3 - k/shape1
  # warning() instead of print(): suppressible, goes to stderr, and does not
  # pollute captured output (the NA return value is unchanged).
  if (qk < 0) {
    warning("moment does not exist: k >= aq", call. = FALSE)
    return(NA)
  }
  if (pk < 0) {
    warning("moment does not exist: k <= -ap", call. = FALSE)
    return(NA)
  }
  Ek <- mkl.cgb2(k, shape1, scale, shape2, shape3, pl0, decomp)
  sum(pl * Ek)
}
# Incomplete k-th moment of a compound GB2 at x: the ratio of the
# pl-weighted partial moments (up to x) over the full k-th moment.
# Returns NA (after a message) when the moment does not exist
# (k outside (-shape1*shape2, shape1*shape3)).
incompl.cgb2 <- function(x, k, shape1, scale, shape2, shape3, pl0, pl, decomp="r"){
  pk <- shape2+ k/shape1
  qk <- shape3- k/shape1
  if (qk <0) {print("moment does not exist: k >= aq", quote=FALSE);return(NA)}
  if (pk <0) {print("moment does not exist: k <= -ap", quote=FALSE);return(NA)}
  # Select the decomposition: "r" works on the right tail (shape3), "l" on the
  # left tail (shape2, with the sign of shape1 flipped).
  if (decomp=="r")
    {sh <- shape3
     a0 <- shape1}
  if (decomp=="l")
    {sh <- shape2
     a0 <- -shape1}
  shk <- sh -k/a0
  # Re-derive the component probabilities under the k-shifted gamma law.
  u2 <- qgamma(cumsum(pl0),sh)
  ppl0 <- pgamma(u2,shk)
  ppl0 <- c(ppl0[1],diff(ppl0))
  # Fk: component CDF values at x of the k-shifted compound GB2 (pl.cgb2 is
  # defined elsewhere in the package); Ek: full component moments.
  Fk <- pl.cgb2(x,shape1,scale,pk,qk,ppl0,decomp)
  Ek <- mkl.cgb2(k,shape1,scale,shape2,shape3,pl0,decomp)
  Mk <- Ek*Fk
  num <- sum(pl*Mk)
  denom <- sum(pl*Ek)
  return(num/denom)
}
# Network-dependent smoke test: tweet_embed() should return the embed HTML
# for a (user, status id) pair as a character value.
test_that("tweet_embed() returns a string", {
  out <- tweet_embed("kearneymw", "1087047171306856451")
  expect_type(out, "character")
})
# Disabled (if(FALSE)) interactive demo for ROptEst: computes and plots
# optimally robust influence curves (MBRE = minimax bias, OMSE = optimal MSE)
# for a contaminated normal location-scale model, under the default,
# information-standardized (InfoNorm) and self-standardized (SelfNorm) norms.
# Each optIC() line also prints the standardization matrix, centering and
# clipping of the resulting IC.
if(FALSE){
require(ROptEst)
options("newDevice"=TRUE)
# Model: N(-2, 3) with a contamination neighborhood of radius 15.
N0 <- NormLocationScaleFamily(mean=-2, sd=3)
N0.Rob1 <- InfRobModel(center = N0, neighbor = ContNeighborhood(radius = 15));
N0.IC2.MBRE <- optIC(model = N0.Rob1, risk = asBias(), tol = 1e-10);print(stand(N0.IC2.MBRE));print(cent(N0.IC2.MBRE));print(stand(N0.IC2.MBRE)/max(stand(N0.IC2.MBRE)));print(cent(N0.IC2.MBRE)/max(stand(N0.IC2.MBRE)));print(clip(N0.IC2.MBRE))
plot(N0.IC2.MBRE)
N0.IC2.OMSE <- optIC(model = N0.Rob1, risk = asMSE(), tol = 1e-10);print(stand(N0.IC2.OMSE)/max(stand(N0.IC2.OMSE)));print(cent(N0.IC2.OMSE)/max(stand(N0.IC2.OMSE)));print(clip(N0.IC2.OMSE));print(stand(N0.IC2.OMSE)/max(stand(N0.IC2.OMSE)));print(cent(N0.IC2.OMSE)/max(stand(N0.IC2.OMSE)));print(clip(N0.IC2.OMSE))
plot(N0.IC2.OMSE)
# Same pair of risks under the information-standardized norm.
N0.IC2.MBRE.i <- optIC(model = N0.Rob1, risk = asBias(normtype=InfoNorm()), tol = 1e-10);print(stand(N0.IC2.MBRE.i));print(cent(N0.IC2.MBRE.i));print(stand(N0.IC2.MBRE.i)/max(stand(N0.IC2.MBRE.i)));print(cent(N0.IC2.MBRE.i)/max(stand(N0.IC2.MBRE.i)));print(clip(N0.IC2.MBRE.i));print(stand(N0.IC2.MBRE.i)/max(stand(N0.IC2.MBRE.i)));print(cent(N0.IC2.MBRE.i)/max(stand(N0.IC2.MBRE.i)));print(clip(N0.IC2.MBRE.i))
plot(N0.IC2.MBRE.i)
N0.IC2.OMSE.i <- optIC(model = N0.Rob1, risk = asMSE(normtype=InfoNorm()), tol = 1e-10);print(stand(N0.IC2.OMSE.i)/max(stand(N0.IC2.OMSE.i)));print(cent(N0.IC2.OMSE.i)/max(stand(N0.IC2.OMSE.i)));print(clip(N0.IC2.OMSE.i));print(stand(N0.IC2.OMSE.i)/max(stand(N0.IC2.OMSE.i)));print(cent(N0.IC2.OMSE.i)/max(stand(N0.IC2.OMSE.i)));print(clip(N0.IC2.OMSE.i))
plot(N0.IC2.OMSE.i)
# Same pair of risks under the self-standardized norm.
N0.IC2.MBRE.s <- optIC(model = N0.Rob1, risk = asBias(normtype=SelfNorm()), tol = 1e-10);print(stand(N0.IC2.MBRE.s));print(cent(N0.IC2.MBRE.s));print(stand(N0.IC2.MBRE.s)/max(stand(N0.IC2.MBRE.s)));print(cent(N0.IC2.MBRE.s)/max(stand(N0.IC2.MBRE.s)));print(clip(N0.IC2.MBRE.s));print(stand(N0.IC2.MBRE.s)/max(stand(N0.IC2.MBRE.s)));print(cent(N0.IC2.MBRE.s)/max(stand(N0.IC2.MBRE.s)));print(clip(N0.IC2.MBRE.s))
plot(N0.IC2.MBRE.s)
N0.IC2.OMSE.s <- optIC(model = N0.Rob1, risk = asMSE(normtype=SelfNorm()), tol = 1e-10);print(stand(N0.IC2.OMSE.s)/max(stand(N0.IC2.OMSE.s)));print(cent(N0.IC2.OMSE.s)/max(stand(N0.IC2.OMSE.s)));print(clip(N0.IC2.OMSE.s));print(stand(N0.IC2.OMSE.s)/max(stand(N0.IC2.OMSE.s)));print(cent(N0.IC2.OMSE.s)/max(stand(N0.IC2.OMSE.s)));print(clip(N0.IC2.OMSE.s))
plot(N0.IC2.OMSE.s)
}
# S3 generic: simultaneous (nonparametric, rank-based) confidence sets for
# the columns of a matrix of (MCMC) samples. Methods dispatch on class(x).
`SCSnp` <-
function(x, ...){UseMethod("SCSnp")}
`SCSnp.default` <-
function(x, conf.level=0.95, alternative="two.sided", ...)
{
  # Rank-based simultaneous confidence sets for the columns of a sample
  # matrix `x` (rows = simulations, columns = parameters).
  #
  # conf.level  : simultaneous coverage probability.
  # alternative : "two.sided", "less" (upper bounds) or "greater" (lower
  #               bounds); may be abbreviated.
  #
  # Returns a list of class "SCSnp" with the interval matrix (conf.int),
  # the column-wise medians (estimate), and the inputs used.
  alternative <- match.arg(alternative, choices=c("two.sided","less","greater"))
  dat <- x
  n.sim <- nrow(dat)
  # Order statistic used for calibration.
  k <- round(conf.level * n.sim, 0)
  rank.dat <- apply(dat, 2, rank)
  if (alternative == "two.sided") {
    # Per-simulation extremal ranks across columns, combined two-sided.
    w <- pmax(apply(rank.dat, 1, max), n.sim + 1 - apply(rank.dat, 1, min))
    tstar <- round(sort(w)[k], 0)
    SCS <- t(apply(dat, 2, function(column) {
      ordered <- sort(column)
      cbind(ordered[n.sim + 1 - tstar], ordered[tstar])
    }))
  } else if (alternative == "less") {
    w <- apply(rank.dat, 1, max)
    tstar <- round(sort(w)[k], 0)
    SCS <- t(apply(dat, 2, function(column) {
      cbind(-Inf, sort(column)[tstar])
    }))
  } else {
    w <- n.sim + 1 - apply(rank.dat, 1, min)
    tstar <- round(sort(w)[k], 0)
    SCS <- t(apply(dat, 2, function(column) {
      cbind(sort(column)[n.sim + 1 - tstar], Inf)
    }))
  }
  estimate <- apply(dat, 2, median)
  colnames(SCS) <- c("lower","upper")
  out <- list(
    conf.int=SCS,
    estimate=estimate,
    x=x,
    k=k,
    N=n.sim,
    conf.level=conf.level,
    alternative=alternative)
  class(out) <- "SCSnp"
  return(out)
}
`SCSnp.CCRatio` <-
function(x,...)
{
  # Method for "CCRatio" objects: run the default method on the stored
  # MCMC chains, then put the original object back into the result.
  out <- SCSnp.default(x = x$chains, ...)
  out$x <- x
  return(out)
}
`SCSnp.CCDiff` <-
function(x,...)
{
  # Method for "CCDiff" objects: run the default method on the stored
  # MCMC chains, then put the original object back into the result.
  out <- SCSnp.default(x = x$chains, ...)
  out$x <- x
  return(out)
}
`SCSnp.bugs` <-
function(x, conf.level=0.95, alternative="two.sided", whichp=NULL, ...)
{
  # Method for (open)bugs fits: build the simulation matrix and delegate to
  # SCSnp.default().
  #
  # whichp: optional character vector of parameter names from x$sims.list;
  # when NULL, the full x$sims.matrix is used.
  args <- list(...)
  sl <- x$sims.list
  if (is.null(whichp))
  {
    mat <- x$sims.matrix
  }
  else{
    namsl <- names(sl)
    # all(): the original `!whichp %in% namsl` produced a vector condition,
    # which errors in if() when whichp has length > 1.
    if (!all(whichp %in% namsl))
    {stop("whichp could not be found in the parameter list of the openbugs object")}
    if (length(whichp) == 1)
    {
      mat <- sl[[whichp]]
    }
    if (length(whichp) > 1)
    {
      # Bug fix: seeding with matrix(nrow = x$n.sims) left a leading all-NA
      # column in the cbind() result; start from NULL instead.
      mat <- NULL
      for (i in seq(along.with = whichp))
      {
        mat <- cbind(mat, x$sims.list[[whichp[i]]])
      }
    }
  }
  args$x <- mat
  args$conf.level <- conf.level
  args$alternative <- alternative
  out <- do.call("SCSnp.default", args)
  return(out)
}
nset <- function(x, n, first = TRUE, warn = TRUE){
  # For each distinct value of `x`, return the positions (indices into `x`)
  # of its first `n` occurrences (last `n` when first = FALSE), one row per
  # distinct value, with rows ordered by first appearance in `x`.
  # Groups with fewer than `n` occurrences are padded with NA.
  #
  # x     : atomic vector of group labels.
  # n     : number of occurrences to keep per group.
  # first : take the first (TRUE) or last (FALSE) occurrences.
  # warn  : warn when NA padding occurs.
  if (isTRUE(first)) {
    nx <- x
    sx <- seq_along(x)
  } else {
    # Reverse both so "last n" becomes "first n" of the reversed data.
    nx <- rev(x)
    sx <- rev(seq_along(x))
  }
  lr <- split(sx, nx)
  group.sizes <- vapply(lr, length, integer(1))
  # Scalar short-circuit check (the original mixed a vector `&` with a
  # scalar isTRUE(), which worked only via recycling).
  if (isTRUE(warn) && any(group.sizes < n)) warning("NA values generated")
  lf <- lapply(lr, function(idx) idx[seq_len(n)])
  res <- do.call(rbind, lf)
  # drop = FALSE: with a single group the one-row matrix previously
  # collapsed to a vector here, making the row-reorder below error out.
  if (!isTRUE(first)) res <- res[, rev(seq_len(n)), drop = FALSE]
  # Restore the order of first appearance (split() sorts group names).
  res <- res[order(order(unique(x))), ]
  return(res)
}
like.post <-
function(id=NA,reblog_key=NA,token=NA,consumer_key=NA,consumer_secret=NA){
  # Like a Tumblr post through the v2 API (POST /v2/user/like).
  #
  # id              : numeric post id (required).
  # reblog_key      : the post's reblog key (required).
  # token           : an httr OAuth Token1.0 object.
  # consumer_key    : application consumer key (string).
  # consumer_secret : application consumer secret (string).
  #
  # Returns the parsed JSON API response.
  if (!inherits(token, "Token1.0"))
    # inherits() also accepts subclasses and multi-class token objects,
    # unlike the old class(token)[1] comparison.
    stop("token must be a Token1.0 type")
  if (!is.character(consumer_key))
    stop("consumer_key must be a string")
  if (!is.character(consumer_secret))
    stop("consumer_secret must be a string")
  if (is.na(id)) {
    stop("id is a required field")
  } else {
    if (!is.numeric(id))
      stop("id must be a numeric type")
  }
  if (is.na(reblog_key)) {
    stop("reblog_key is a required field")
  } else {
    if (!is.character(reblog_key))
      stop("reblog_key must be a string type")
  }
  url <- "https://api.tumblr.com/v2/user/like"
  bodyParams <- list(id=as.character(id), reblog_key=reblog_key)
  connection <- "POST"
  res <- fromJSON(http.connection(url,token,bodyParams,consumer_key,consumer_secret,connection))
  return(res)
}
FindHomogeneousWindows <- function(inputted.data, names.of.columns.to.look.at){
  # TRUE iff every listed column of `inputted.data` holds exactly one
  # distinct value (only levels that actually occur are counted).
  # Iterating the names directly (instead of 1:length(...)) also handles an
  # empty column list, which previously crashed; an early return avoids
  # scanning remaining columns once a mixed one is found.
  for (column.name in names.of.columns.to.look.at) {
    distinct.values <- levels(as.factor(inputted.data[, column.name]))
    if (length(distinct.values) > 1) {
      return(FALSE)
    }
  }
  return(TRUE)
}
GetHomogeneousWindows <- function(inputted.data, window.ID.col.name, observation.vals.to.compare){
  # Split `inputted.data` by window ID and keep only the windows that are
  # homogeneous in every column listed in `observation.vals.to.compare`.
  # Returns a (possibly empty) unnamed list of data.frames; lapply/Filter
  # also handle data with zero window IDs, which previously crashed via
  # 1:length(...).
  unique.window.IDs <- levels(as.factor(inputted.data[, window.ID.col.name]))
  all.windows <- lapply(unique.window.IDs, function(window.ID) {
    subset(inputted.data, inputted.data[, window.ID.col.name] == window.ID)
  })
  Filter(function(w) FindHomogeneousWindows(w, observation.vals.to.compare),
         all.windows)
}
GetSubsetOfWindows <- function(list.of.windows, name.of.column.to.look.at.in.window, value.to.match.to){
  # Keep the windows (data.frames) whose first-row value of the given column
  # equals `value.to.match.to`. Iterating the list elements directly also
  # handles an empty input list, which previously crashed via 1:length(...).
  selected.windows <- list()
  for (single.window in list.of.windows) {
    if (single.window[1, name.of.column.to.look.at.in.window] == value.to.match.to) {
      selected.windows[[length(selected.windows) + 1]] <- single.window
    }
  }
  return(selected.windows)
}
CountWindows <- function(list.of.windows, level1.column.name, level2.column.name,
                         level1.categories, level2.categories){
  # Cross-tabulate windows by the first-row values of two columns.
  # Returns a length(level1.categories) x length(level2.categories) count
  # matrix with the category vectors as dimnames. Windows whose value is not
  # listed in the corresponding categories are silently ignored (the which()
  # lookups below yield zero-length indices). Iterating the list directly
  # also handles an empty window list, which previously crashed.
  matrix.of.counts <- matrix(0, length(level1.categories), length(level2.categories))
  rownames(matrix.of.counts) <- level1.categories
  colnames(matrix.of.counts) <- level2.categories
  for (single.window in list.of.windows) {
    row.index <- which(level1.categories == single.window[1, level1.column.name])
    col.index <- which(level2.categories == single.window[1, level2.column.name])
    matrix.of.counts[row.index, col.index] <- matrix.of.counts[row.index, col.index] + 1
  }
  return(matrix.of.counts)
}
GetSubsetOfWindowsTwoLevels <- function(list.of.windows, level1.column.name, level2.column.name,
                                        level1.categories, level2.categories){
  # Two-stage filter: first keep windows whose level-1 column matches any of
  # `level1.categories` (in the given order), then keep those whose level-2
  # column matches any of `level2.categories`. The result order follows
  # level-1 category order, then level-2 category order. Iterating the
  # category containers directly also handles empty category sets, which
  # previously crashed via 1:length(...).
  windows.level1 <- list()
  for (category in level1.categories) {
    windows.level1 <- c(windows.level1,
                        GetSubsetOfWindows(list.of.windows, level1.column.name, category))
  }
  selected.windows <- list()
  for (category in level2.categories) {
    selected.windows <- c(selected.windows,
                          GetSubsetOfWindows(windows.level1, level2.column.name, category))
  }
  return(selected.windows)
}
test_symmetry <- function(x, tol = .Machine$double.eps){
  # Classify a square matrix as "symmetric" (x == t(x)), "skewed"
  # (x == -t(x)) or "none", comparing the strictly lower triangle against
  # the transpose within tolerance `tol`.
  if (!is.matrix(x))
    stop("x should be a matrix")
  lower <- lower.tri(x)
  below <- x[lower]
  mirrored <- t(x)[lower]
  if (all(abs(below - mirrored) < tol)) {
    "symmetric"
  } else if (all(abs(below + mirrored) < tol)) {
    "skewed"
  } else {
    "none"
  }
}
heat_colours <- function (n, alpha, rev = FALSE) {
  # Build an n-colour heat palette: hue starts at red (1) then runs from 0 to
  # 0.3, saturation fixed at 0.69, value fading from 1 to 0.8. `alpha` is
  # forwarded to grDevices::hsv() (may be left missing). Set rev = TRUE to
  # reverse the palette.
  hues <- c(1, seq(from = 0, to = 0.3, length.out = n - 1))
  saturation <- rep(0.69, length(hues))
  values <- seq(from = 1, to = 0.8, length.out = n)
  palette <- grDevices::hsv(h = hues, s = saturation, v = values, alpha = alpha)
  if (rev) rev(palette) else palette
}
# US-spelling alias.
heat_colors <- heat_colours
getSubMarts <- function(submart = "ensembl") {
    # Query the Ensembl BioMart registry for one of the Ensembl divisions and
    # return the visible marts as a tibble with columns `mart` and `version`.
    #
    # submart: one of "ensembl", "plants", "fungi", "protists", "metazoa".
    #
    # A single host lookup table replaces five copy-pasted if-blocks.
    hosts <- c(
        ensembl  = "http://www.ensembl.org:80/biomart/martservice?",
        plants   = "http://plants.ensembl.org:80/biomart/martservice?",
        fungi    = "http://fungi.ensembl.org:80/biomart/martservice?",
        protists = "http://protists.ensembl.org:80/biomart/martservice?",
        metazoa  = "http://metazoa.ensembl.org:80/biomart/martservice?"
    )
    if (!is.element(submart, names(hosts)))
        stop(
            "Please select a submart that is supported by ensembl:
        submart = 'ensembl', submart = 'plants', submart = 'fungi',
        submart = 'protists', or submart = 'metazoa'",
            call. = FALSE
        )
    biomartPage <- httr::handle(
        paste0(hosts[[submart]],
               "type=registry&requestid=biomart", collapse = "")
    )
    xmlContentMarts <- httr::GET(handle = biomartPage)
    # Abort on any non-2xx HTTP status.
    httr::stop_for_status(xmlContentMarts)
    doc <- suppressMessages(XML::xmlTreeParse(
        xmlContentMarts,
        useInternalNodes = TRUE,
        encoding = "UTF-8"
    ))
    rootNode <- XML::xmlRoot(doc)
    # One registry entry per mart; pull the attributes of interest.
    databases <- as.data.frame(XML::xmlSApply(rootNode, function(x)
        XML::xmlGetAttr(x, "name")))
    displayNames <- as.data.frame(XML::xmlSApply(rootNode, function(x)
        XML::xmlGetAttr(x, "displayName")))
    visible <- as.data.frame(XML::xmlSApply(rootNode, function(x)
        XML::xmlGetAttr(x, "visible")))
    dbBioMart <- tibble::tibble(
        mart = as.character(databases[ , 1]),
        version = as.character(displayNames[ , 1]),
        visible = as.character(visible[ , 1])
    )
    # Silence R CMD check notes about NSE column names.
    mart <- version <- NULL
    dbBioMart <- dplyr::select(dplyr::filter(dbBioMart, visible != "0"),
                               mart, version)
    return(dbBioMart)
}
# Predict from an epsilon-SVR solution path at one or more epsilon values.
# For epsilons between the fitted grid points, coefficients are linearly
# interpolated (approx()); outside the fitted range the boundary solutions
# are used. Returns a "predict.epspath" list with fitted values fx and the
# (interpolated) theta/theta0.
predict.epspath <- function(object, newx, svr.eps = 1,...){
  kernel.function <- object$kernel.function
  param.kernel <- object$param.kernel
  if(missing(newx)){
    newx <- object$x   # default: predict at the training points
  }
  K <- kernel.function(newx, object$x, param.kernel)
  lambda <- object$lambda
  otheta <- object$theta
  otheta0 <- object$theta0
  osvr.eps <- object$svr.eps   # fitted epsilon grid (decreasing along the path)
  minl <- min(osvr.eps)
  maxl <- max(osvr.eps)
  if(length(object$svr.eps) == 1){
    # Degenerate path: replicate the single solution for every requested eps.
    warning('This model has only one epsilon value; prediction values at this epsilon are returned')
    nlam <- length(svr.eps)
    svr.eps <- rep(object$svr.eps, nlam)
    theta <- otheta[rep(1, nlam), ,drop=FALSE]
    theta0 <- rep(otheta0, nlam)
  }else if(length(svr.eps) == 1){
    if(svr.eps >= maxl){
      theta <- object$theta[,1]       # largest fitted eps = first path column
      theta0 <- object$theta0[1]
    }else if(svr.eps <= minl){
      theta <- object$theta[,ncol(object$theta)]
      theta0 <- object$theta0[ncol(object$theta)]
      warning("The epsilon value is too small to predict values.")
    }else{
      # Interpolate each coefficient across the fitted epsilon grid.
      theta <- rep(1, times = nrow(otheta))
      for(i in 1:nrow(otheta)){
        theta[i] <- approx(osvr.eps, otheta[i,], svr.eps)$y
      }
      theta0 <- approx(osvr.eps, otheta0, svr.eps)$y
    }
    fx <- (K%*%(theta) + theta0) / lambda
  }else if(length(svr.eps) > 1){
    theta <- otheta[,1:length(svr.eps)]
    theta0 <- otheta0[1:length(svr.eps)]
    for(i in 1:length(svr.eps)){
      # NOTE(review): assigning to `i` inside a for loop does NOT affect the
      # iteration in R -- the increments below only change which column the
      # interpolation further down writes to within this iteration. Looks
      # intentional (skip interpolation for boundary cases) but the exact
      # intent should be confirmed against the package sources.
      if(svr.eps[i] >= maxl){
        theta[,i] <- object$theta[,1]
        i <- i+1
      }else if(svr.eps[i] <= minl){
        theta[,i] <- rep(0, nrow(otheta))
        i <- i+1
      }
      if(i > length(svr.eps))break
      for(j in 1:nrow(otheta)){
        theta[j,i] <- approx(osvr.eps, otheta[j,], svr.eps[i])$y
      }
      theta0[i] <- approx(osvr.eps, otheta0, svr.eps[i])$y
    }
    if(nrow(newx) > 1){
      # Broadcast theta0/lambda across prediction rows, then restore scalars.
      theta0 <- matrix(rep(theta0,nrow(newx)), ncol = length(svr.eps), byrow =T)
      lambda <- matrix(rep(lambda,2*nrow(newx)), ncol = length(svr.eps), byrow = T)
      fx <- (K%*%(theta) + theta0) / lambda
      theta0 <- theta0[1,]
      lambda <- lambda[1,1]
    }else{
      fx <- (K%*%(theta) + theta0) / lambda
    }
  }
  obj <- list(fx = fx, theta = theta, theta0 = theta0, svr.eps = svr.eps, lambda = lambda)
  class(obj) <- "predict.epspath"
  obj
}
# Interactive test script: build a 5x5 named matrix `b` and a "TestMatrix"
# wrapper `a` around it to exercise a custom `[` method (defined below)
# against base matrix subsetting semantics.
b <- matrix(data = rnorm(25), nrow = 5, ncol = 5)
dimnames(b) <- list(letters[1:5], LETTERS[1:5])
a <- structure(b, class = "TestMatrix")
dim(a)
dimnames(a)
transform_ind <- function(k, lim) {
  # Normalise a subscript `k` against an axis of length `lim`: logical and
  # numeric subscripts are expanded to positive integer positions.
  # Character subscripts and out-of-bounds/NA results are rejected.
  if (is.character(k))
    stop("Character subsetting is not allowed.")
  positions <- seq_len(lim)[k]
  if (anyNA(positions))
    stop("Subsetting with missing values is not allowed.")
  positions
}
# Quick interactive checks of transform_ind(): logical recycling and a plain
# integer range.
transform_ind(c(TRUE, FALSE), 5)
transform_ind(1:2, 5)
# Factory for a `[` method: given low-level accessors for vector-style
# (x[k]) and matrix-style (x[i, j]) extraction, return a function with the
# standard `[.class`(x, i, j, drop) signature that dispatches between them.
extract <- function(extract_vector, extract_matrix) {
  # Local copy of transform_ind with an extra branch: a missing subscript
  # selects the whole axis.
  transform_ind <- function(k, lim) {
    if (missing(k))
      return(seq_len(lim))
    if (is.character(k))
      stop("Character subsetting is not allowed.")
    res <- seq_len(lim)[k]
    if (any(is.na(res)))
      stop("Subsetting with missing values is not allowed.")
    res
  }
  function(x, i, j, drop = TRUE) {
    n <- nrow(x)
    m <- ncol(x)
    # nargs() counts the arguments of THIS call (minus an explicit `drop`):
    # 2 means single-subscript form x[k] or x[], 3 means x[i, j].
    # This mirrors base `[` dispatch and is deliberately subtle.
    nargs <- (nargs() - !missing(drop))
    if (nargs == 2) {
      print("k")
      if (missing(i)) {
        # x[] -- fall through to the full (i, j) branch below.
        nargs <- 3
      } else {
        if (is.logical(i))
          stop("Logical vector subsetting is not allowed")
        if (!isTRUE(all(i > 0)))
          stop("Only positive vector subsetting is allowed")
        if (is.matrix(i))
          # Two-column index matrix: convert (row, col) pairs to linear
          # (column-major) positions.
          i <- (transform_ind(i[, 2], m) - 1L) * n + transform_ind(i[, 1], n)
        if (any(i > (n * m)))
          stop("Subscript out of bounds.")
        return(extract_vector(x, i))
      }
    }
    if (nargs == 3) {
      print("(i, j)")
      res <- extract_matrix(x, transform_ind(i, n), transform_ind(j, m))
      # Mimic base drop semantics for 1-row/1-column results.
      return(`if`(drop, drop(res), res))
    }
  }
}
# Wire the C++ accessors (getVec/getMat come from the sourced file) into the
# TestMatrix `[` method, then compare its behaviour against base matrix
# subsetting `b[...]` case by case (several calls below are EXPECTED to
# error: character/logical/negative/NA subscripts are rejected by design).
Rcpp::sourceCpp('tmp-tests/test-accessor2.cpp')
`[.TestMatrix` <- extract(getVec, getMat)
b
a[]
a[1]
a[1:5]
a[, 1]
a[1, ]
a[1, 1]
a[1:2, 1]
a[1:2, 1:2]
a[cbind(1:2, c(1, NA))]
b[cbind(1:2, c(1, NA))]
a[TRUE, ]
a[c(TRUE, FALSE, FALSE, TRUE, TRUE), ]
a[, "A"]
a[, 1, drop = FALSE]
a[-1]
a[-1, ]
a[25]
a[28]
a[-(1:5), ]
a[-6, ]
b[-6, ]
b[0]
b[-25]
b[c(NA, 2, 3, NA)]
# Reference behaviour from the crochet package's index converter.
crochet:::convertIndex(b, c(NA, 2, 3, NA), "k")
crochet:::convertIndex(b, 26, "k")
# Scan an OHLC series for "bear power" stretches: closes whose RSI is below
# the `down` threshold. Runs shorter than `day` bars are discarded, gaps
# inside accepted runs are interpolated, and the result is returned as a
# series aligned with `h`.
#
# h    : OHLC object (quantmod-style; Cl()/RSI() are used).
# down : RSI threshold below which a bar counts as "bear".
# day  : minimum run length to keep.
#
# NOTE(review): the passes below share a hand-rolled loop skeleton in which
# `g` is the current row, `f` trails at g+1, and `d1/d2/...` accumulators are
# written but never read. The behaviour is preserved verbatim; the intent of
# several index expressions should be confirmed before refactoring.
bearpower<-function(h,down,day){
  df<-RSI(Cl(h))
  da<-Cl(h)
  dc<-Cl(h)
  d_num<-matrix(c(1:length(dc)), nrow = nrow(dc), ncol = 1)
  colnames(d_num) <- c("num")
  datas<-1
  nrows <-nrow(dc)
  d1<-0
  x<-0
  g<-1
  m<-1
  n<-1
  f<-2
  # Pass 1: keep the close where RSI < down, zero it elsewhere (NA RSI -> 0).
  while(x < nrows ){
    for(i in dc[g,]){
      d1[n]<-i
      n<-n+1
      if(!is.na(df[g,])){
        if(df[g,]<down){
          dc[g,]<-da[g,]
        }else{
          dc[g,]<-0
        }
      }else{
        dc[g,]<-0
      }
    }
    g<-g+1
    x<-x+1
    m<-m+1
    if(f==NROW(dc)){
      break
    }else{
      f<-f+1
    }
  }
  da2<-data.frame(dc,df,d_num)
  dc2<-matrix(c(dc), nrow = nrow(dc), ncol = 1)
  d_closes<-matrix(c(dc), nrow = nrow(dc), ncol = 1)
  d_status<-matrix(c(0), nrow = nrow(dc), ncol = 1)
  colnames(d_status) <- c("status")
  d_buy<-matrix(c(0), nrow = nrow(dc), ncol = 1)
  datas<-1
  nrows <-nrow(dc)
  dstatus<-0
  x<-0
  g<-1
  m<-1
  n<-1
  f<-2
  # Pass 2: mark run boundaries. "X" = entry/exit edge of a bear run (with a
  # buy tag), "X1" = isolated bear bar surrounded by zeros.
  while(x < nrows ){
    for(i in dc[g,]){
      dstatus[n]<-i
      n<-n+1
      if(g>1){
        if(!is.na(df[g,])){
          if(df[g]<down){
            if((g+2)<NROW(dc)){
              if(dc[g-1]==0 && dc[g+1]!=0 && dc[g+2]!=0 && dc[g+3]!=0){
                d_status[g]<-"X"
                d_buy[g]<-'buy1'
              }
              if(dc[g+1]==0 && dc[g-1]!=0 && dc[g-2]!=0 && dc[g-3]!=0){
                d_status[g]<-"X"
                d_buy[g]<-'buy'
              }
              if(dc[(g-1),]==0 && dc[f,]==0){
                d_status[g]<-"X1"
              }
            }
          }
        }
      }
    }
    g<-g+1
    x<-x+1
    m<-m+1
    if(f==NROW(dc)){
      break
    }else{
      f<-f+1
    }
  }
  dstatus<-data.frame(dc,df,d_status,dc2,d_num)
  datas<-1
  nrows <-nrow(dc)
  d2<-0
  x<-0
  g<-1
  m<-1
  n<-1
  f<-2
  # Pass 3: build dc2 -- zero out isolated bars, keep edge values, and mark
  # interior bear bars as NA (to be interpolated at the end).
  while(x < nrows ){
    for(i in dc[g,]){
      d2[n]<-i
      n<-n+1
      if(g>1){
        if(dc[(g-1),]==0 && dc[f,]==0){
          dc2[g,]<-0
        }else{
          if(!is.na(d_status[g])){
            if(dc[(g-1),]==0 && d_status[g]=="X" || dc[f,]==0 && d_status[g]=="X"){
              dc2[g,]<- dc[g,]
            }
          }
        }
      }
      if(dc[g,]!=0 && is.na(d_status[g])){
        dc2[g,]<- NA
      }
    }
    g<-g+1
    x<-x+1
    m<-m+1
    if(f==NROW(dc)){
      break
    }else{
      f<-f+1
    }
  }
  da3<-data.frame(dc,df,dc2,d_status,d_num)
  datas<-1
  nrows <-nrow(dc)
  d3<-0
  x<-0
  g<-1
  m<-1
  n<-1
  f<-2
  bea1<-0
  bea2<-0
  wength<-0
  # Pass 4: drop runs shorter than `day` bars (reset both dc2 and d_status
  # across the [bea1, bea2] span).
  while(x < nrows ){
    for(i in dc[g,]){
      d3[n]<-i
      n<-n+1
      if(g>1){
        if(!is.na(d_status[g])){
          if(d_closes[(g-1),]==0 && d_status[g]==0){
            bea1<-g
          }
          if(d_closes[f,]==0 && d_status[g]==0){
            bea2<-g
          }
          wength<-length((bea1+1):(bea2-1))
          if(wength<day){
            dc2[bea1:bea2]<-0
            d_status[bea1:bea2]<-0
          }
        }
      }
    }
    g<-g+1
    x<-x+1
    m<-m+1
    if(f==NROW(dc)){
      break
    }else{
      f<-f+1
    }
  }
  da4<-data.frame(dc,df,dc2,d_status,d_num)
  datas<-1
  nrows <-nrow(dc)
  d4<-0
  x<-0
  g<-1
  m<-1
  n<-1
  f<-2
  st1<-0
  st2<-0
  wength<-0
  # Pass 5: between consecutive "X" edges, blank the interior to NA so only
  # the edges anchor the interpolation below.
  while(x < nrows ){
    for(i in dc[g,]){
      d4[n]<-i
      n<-n+1
      if(g>1){
        if(!is.na(d_status[g])){
          if(d_closes[(g-1),]==0 && d_status[g]=='X'){
            st1<-g
          }
          if(d_closes[f,]==0 && d_status[g]=='X'){
            st2<-g
          }
          wength<-length((st1+1):(st2-1))
          if(st1!=0 && st2!=0){
            dc2[(st1+1):(st2-1)]<-NA
            st1<-0
            st2<-0
          }
        }
      }
    }
    g<-g+1
    x<-x+1
    m<-m+1
    if(f==NROW(dc)){
      break
    }else{
      f<-f+1
    }
  }
  da5<-data.frame(dc,df,dc2,d_status,d_num)
  bear<-d_status
  nn <- NROW(dc)
  # Linearly interpolate across the NA interiors, blank the zeros, and
  # re-attach the xts/zoo index of the input series.
  dc2 <- approx( dc2, xout=1:nn )$y
  dc2<-ifelse( dc2==0, NA, dc2 )
  cd<-reclass( dc2, h[1:length(dc2)] )
  return(cd)
}
eliminateNA <- function(dat){
  # Split `dat` row-wise into complete and incomplete cases.
  #
  # dat: data.frame (or matrix) possibly containing NAs.
  #
  # Returns list(complete = rows without NA, incomplete = rows with >= 1 NA).
  # The original also built an as.numeric()-coerced copy of every column
  # into a temporary matrix that was never used; that dead code (which could
  # emit spurious coercion warnings) has been removed.
  keep <- complete.cases(dat)
  res <- list(complete = dat[keep, ], incomplete = dat[!keep, ])
  return(res)
}
write.biom <- function (biom, file, format="json") {
  # Write a BIOM object to disk as tab-separated text, JSON (BIOM 1.0) or
  # HDF5 (BIOM 2.1). `format` may be abbreviated (partial match); unknown
  # values raise an error. Refuses to overwrite an existing file.
  if (!is(biom, "BIOM"))
    stop(simpleError("Invalid BIOM object."))
  if (file.exists(file))
    stop(simpleError(sprintf("Output file already exists: '%s'", file)))
  # Normalise info fields to a single non-empty string. The length check
  # comes before is.na()/nchar() so those scalar tests never see a
  # zero/multi-length value (which errors in modern R).
  # Bug fix: the length branch for 'type' previously assigned to $id.
  if (is.null(biom$info$type))          biom$info$type <- "OTU table"
  if (length(biom$info$type) != 1)      biom$info$type <- "OTU table"
  if (is.na(biom$info$type))            biom$info$type <- "OTU table"
  if (nchar(biom$info$type) == 0)       biom$info$type <- "OTU table"
  if (is.null(biom$info$id))            biom$info$id <- "NA"
  if (length(biom$info$id) != 1)        biom$info$id <- "NA"
  if (is.na(biom$info$id))              biom$info$id <- "NA"
  if (nchar(biom$info$id) == 0)         biom$info$id <- "NA"
  opts <- c("tab", "json", "hdf5")
  format <- tolower(head(format, 1))
  # pmatch: allow unambiguous abbreviations; nomatch maps to the stop() arm.
  format <- c(opts, format)[pmatch(format, opts, nomatch=4)]
  switch (format,
    "hdf5" = write.biom.2.1(biom, file),
    "json" = write.biom.1.0(biom, file),
    "tab"  = write.biom.tsv(biom, file),
    stop(simpleError(sprintf("unknown output format: '%s'", format)))
  )
}
# Write a BIOM object as a tab-separated table: header row, one row per
# taxon with its counts, plus an optional trailing "Taxonomy" column.
# NOTE(review): the first matrix() literal below appears to contain a
# truncated string constant (`data = c("` with no visible closing quote on
# the same line), so the open string may swallow the following lines; the
# body is preserved byte-for-byte and no comments are inserted inside it --
# verify against the package's canonical sources before editing.
write.biom.tsv <- function (biom, file) {
  mtx <- {
    rbind(
      matrix(
        data = c("
        nrow = 1
      ),
      cbind(
        matrix(
          data = rownames(biom$counts),
          ncol = 1
        ),
        matrix(
          data = as.character(as.matrix(biom$counts)),
          nrow = nrow(biom$counts)
        )
      )
    )
  }
  if (!is.null(biom$taxonomy)) {
    if (ncol(biom$taxonomy) > 0) {
      mtx <- {
        cbind(
          mtx,
          matrix(
            data = c("Taxonomy", apply(biom$taxonomy, 1L, paste, collapse="; ")),
            ncol = 1
          )
        )
      }
    }
  }
  write.table(mtx, file, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
  return (invisible(NULL))
}
# Serialise a BIOM object to the JSON-based BIOM 1.0 format (sparse matrix
# encoding) and write it to `file`. Invisibly returns NULL.
write.biom.1.0 <- function (biom, file) {
  # rjson cannot serialise factors -- convert metadata factors to character.
  for (i in names(biom$metadata))
    if (is(biom$metadata[[i]], "factor"))
      biom$metadata[[i]] <- as.character(biom$metadata[[i]])
  json <- rjson::toJSON(list(
    id                  = biom$info$id,
    type                = biom$info$type,
    format              = "1.0.0",
    format_url          = "http://biom-format.org",
    generated_by        = paste("rbiom", utils::packageDescription('rbiom')$Version),
    date                = strftime(Sys.time(), "%Y-%m-%dT%H:%M:%SZ", tz="UTC"),
    matrix_type         = "sparse",
    # "int" only when every stored count is a whole number.
    matrix_element_type = ifelse(sum(biom$counts$v %% 1) == 0, "int", "float"),
    shape               = c(biom$counts$nrow, biom$counts$ncol),
    comment             = biom$info$comment,
    phylogeny           = ifelse(is.null(biom$phylogeny), "", rbiom::write.tree(biom$phylogeny)),
    # One row entry per taxon: id plus taxonomy (and sequence when present).
    rows                = lapply(1:biom$counts$nrow, function (i) {
                            TaxaID   <- biom$counts$dimnames[[1]][i]
                            Metadata <- list(taxonomy=unname(biom$taxonomy[i,]))
                            if (is(biom[['sequences']], "character"))
                              Metadata[['sequence']] <- biom$sequences[[TaxaID]]
                            list(id=TaxaID, metadata=Metadata)
                          }),
    # One column entry per sample: id plus all metadata fields for that row.
    columns             = lapply(1:biom$counts$ncol, function (j) list(
                            id = biom$counts$dimnames[[2]][j],
                            metadata = {
                              md_fields <- names(biom$metadata)
                              if (length(md_fields) > 0) {
                                sapply(md_fields, function (k) biom$metadata[j,k])
                              } else {
                                NULL
                              }
                            }
                          )),
    # Sparse triplets, converted to the 0-based indices BIOM expects.
    data                = lapply(seq_along(biom$counts$v), function (k)
                            c(biom$counts$i[k] - 1, biom$counts$j[k] - 1, biom$counts$v[k])
                          )
  ))
  res <- try(writeChar(json, file, eos=NULL), silent = TRUE)
  if (is(res, "try-error"))
    stop(simpleError(sprintf("Can't save to '%s': %s", file, res)))
  return (invisible(NULL))
}
## Write `biom` to `file` in BIOM 2.1 (HDF5) format via the Bioconductor
## 'rhdf5' package. The count matrix is stored twice, as the spec requires:
## row-major (CSR-like, under /observation/matrix) and column-major
## (CSC-like, under /sample/matrix).
write.biom.2.1 <- function (biom, file) {
  ## rhdf5 is an optional (Bioconductor) dependency; fail with install help.
  if (!requireNamespace("rhdf5", quietly = TRUE)) {
    stop(simpleError(paste0(
      "\n",
      "Error: rbiom requires the R package 'rhdf5' to be installed\n",
      "in order to read and write HDF5 formatted BIOM files.\n\n",
      "Please run the following commands to install 'rhdf5':\n",
      "   install.packages('BiocManager')\n",
      "   BiocManager::install('rhdf5')\n\n" )))
  }
  ## Create the file and the group layout mandated by the BIOM 2.1 spec.
  res <- try(rhdf5::h5createFile(file), silent = TRUE)
  if (!identical(res, TRUE))
    stop(simpleError(sprintf("Can't create file '%s': %s", file, as.character(res))))
  invisible(rhdf5::h5createGroup(file, '/observation'))
  invisible(rhdf5::h5createGroup(file, '/observation/matrix'))
  invisible(rhdf5::h5createGroup(file, '/observation/metadata'))
  invisible(rhdf5::h5createGroup(file, '/observation/group-metadata'))
  invisible(rhdf5::h5createGroup(file, '/sample'))
  invisible(rhdf5::h5createGroup(file, '/sample/matrix'))
  invisible(rhdf5::h5createGroup(file, '/sample/metadata'))
  invisible(rhdf5::h5createGroup(file, '/sample/group-metadata'))
  h5 <- try(rhdf5::H5Fopen(file), silent = TRUE)
  if (!is(h5, "H5IdComponent"))
    stop(simpleError(sprintf("Can't open file '%s': %s", file, as.character(h5))))
  ## Top-level attributes required by the format.
  ## `%||%` (null-default operator) is assumed to be defined elsewhere in
  ## this package — TODO confirm.
  rhdf5::h5writeAttribute(as.character(biom$info$id      %||% ""),  h5, 'id')
  rhdf5::h5writeAttribute(as.character(biom$info$type    %||% ""),  h5, 'type')
  rhdf5::h5writeAttribute(as.character(biom$info$comment %||% ""),  h5, 'comment')
  rhdf5::h5writeAttribute("http://biom-format.org", h5, 'format-url')
  rhdf5::h5writeAttribute(as.integer(c(2,1,0)), h5, 'format-version', 3)
  rhdf5::h5writeAttribute(paste("rbiom", utils::packageDescription('rbiom')$Version), h5, 'generated-by')
  rhdf5::h5writeAttribute(strftime(Sys.time(), "%Y-%m-%dT%H:%M:%SZ", tz="UTC"), h5, 'creation-date')
  rhdf5::h5writeAttribute(as.integer(c(biom$counts$nrow, biom$counts$ncol)), h5, 'shape', 2)
  rhdf5::h5writeAttribute(as.integer(length(biom$counts$v)), h5, 'nnz')
  ## Sparse triplets as a 3-column matrix of (row-1, col-1, value):
  ## 0-based indices per the spec.
  x <- matrix(c(biom$counts$i - 1, biom$counts$j - 1, biom$counts$v), byrow=FALSE, ncol=3)
  ## Row-major ordering for the /observation (CSR-like) copy; the
  ## cumsum-over-table expression builds the indptr array (one entry per
  ## row boundary, length nrow+1 including the leading 0 from level 0).
  x <- x[order(x[,1]),,drop=FALSE]
  rhdf5::h5writeDataset(as.character(biom$counts$dimnames[[1]]), h5, 'observation/ids')
  rhdf5::h5writeDataset(as.numeric(x[,3]), h5, 'observation/matrix/data')
  rhdf5::h5writeDataset(as.integer(x[,2]), h5, 'observation/matrix/indices')
  rhdf5::h5writeDataset(as.integer(cumsum(unname(table(factor(x[,1]+1, 0:biom$counts$nrow))))), h5, 'observation/matrix/indptr')
  ## Column-major ordering for the /sample (CSC-like) copy.
  x <- x[order(x[,2]),,drop=FALSE]
  rhdf5::h5writeDataset(as.character(biom$counts$dimnames[[2]]), h5, 'sample/ids')
  rhdf5::h5writeDataset(as.numeric(x[,3]), h5, 'sample/matrix/data')
  rhdf5::h5writeDataset(as.integer(x[,1]), h5, 'sample/matrix/indices')
  rhdf5::h5writeDataset(as.integer(cumsum(unname(table(factor(x[,2]+1, 0:biom$counts$ncol))))), h5, 'sample/matrix/indptr')
  ## Per-sample metadata: one dataset per field, aligned to sample order.
  if (is(biom[['metadata']], "data.frame")) {
    plyr::l_ply(setdiff(names(biom$metadata), 'SampleID'), function (field) {
      h5path <- sprintf("/sample/metadata/%s", field)
      values <- biom$metadata[biom$counts$dimnames[[2]], field]
      ## Store whole numbers as integer; other non-logical types as character.
      if (is.numeric(values)) {
        if (all(values %% 1 == 0, na.rm=TRUE))
          values <- as.integer(values)
      } else if (!is.logical(values)) {
        values <- as.character(values)
      }
      rhdf5::h5writeDataset(values, h5, h5path)
    })
  }
  ## Taxonomy: transposed, aligned to observation order, names stripped.
  if (is(biom[['taxonomy']], "matrix")) {
    h5path <- 'observation/metadata/taxonomy'
    x <- t(biom$taxonomy[biom$counts$dimnames[[1]],,drop=FALSE])
    dimnames(x) <- list(NULL, NULL)
    rhdf5::h5writeDataset(x, h5, h5path)
  }
  ## Sequences, aligned to observation order.
  if (is(biom[['sequences']], "character")) {
    h5path <- 'observation/metadata/sequences'
    x <- unname(biom$sequences[biom$counts$dimnames[[1]]])
    rhdf5::h5writeDataset(x, h5, h5path)
  }
  ## Phylogeny stored as a Newick string; rhdf5's `&` operator navigates
  ## into the group so the 'data_type' attribute lands on the dataset.
  if (is(biom[['phylogeny']], "phylo")) {
    h5path <- '/observation/group-metadata/phylogeny'
    x <- rbiom::write.tree(biom[['phylogeny']])
    rhdf5::h5writeDataset(x, h5, h5path)
    h5path <- h5&'observation'&'group-metadata'&'phylogeny'
    rhdf5::h5writeAttribute("newick", h5path, 'data_type')
  }
  rhdf5::H5Fflush(h5)
  rhdf5::H5Fclose(h5)
  return (invisible(NULL))
}
library(testthat)

## Regression test: fitting a gamma renewal count model through the
## flexsurv-style `anc` argument must give the same coefficients as the
## two-part formula API.

## Main-model and ancillary (shape) formulas.
formula_full <- children ~ german + years_school + voc_train + university + religion +
  year_birth + rural + age_marriage
formula_shape <- children ~ german + years_school + voc_train

## Poisson GLMs supply starting values for the renewal-count fit.
glm_scale <- glm(formula_full, family = poisson(), data = fertility)
glm_shape <- glm(formula_shape, family = poisson(), data = fertility)
coef_scale <- renewalCoef(glm_scale, target = "gamma")
coef_shape <- renewalCoef(glm_shape, target = "gamma")

## Scale coefficients from the main model, shape coefficients from the
## ancillary model.
start_values <- c(coef_scale[!grepl("shape", names(coef_scale))],
                  coef_shape[grepl("shape", names(coef_shape))])

print("............ flexsurv API ............")
fit_anc <- renewalCount(formula = formula_full, data = fertility, dist = "gamma",
                        computeHessian = FALSE, anc = list(shape = formula_shape),
                        standardise = FALSE,
                        control = renewal.control(trace = 0, start = start_values)
                        )

print("............ FORMULA API ............")
formula_both <- children ~ german + years_school + voc_train + university + religion +
  year_birth + rural + age_marriage | german + years_school + voc_train
fit_formula <- renewalCount(formula = formula_both, data = fertility, dist = "gamma",
                            computeHessian = FALSE, standardise = FALSE,
                            control = renewal.control(trace = 0, start = start_values)
                            )

expect_equal(coef(fit_formula), coef(fit_anc))
## Subset a list, keeping only the elements whose names match `pattern`.
##
## .data:   a (named) list.
## pattern: regular expression matched against names(.data).
## ...:     forwarded to grep() (e.g. ignore.case, fixed, perl).
## Returns the matching elements, in their original order.
list.match <- function(.data, pattern, ...) {
  hits <- grep(pattern, names(.data), ...)
  .data[hits]
}
## Interactive variants of the ggplot2 colour/fill scales.
## Each wrapper forwards its arguments to scale_interactive() (defined
## elsewhere in this package) together with the corresponding ggplot2 scale
## constructor. Every British-spelling "colour" function gets an
## American-spelling "color" alias bound to the same function object.
scale_colour_continuous_interactive <- function(...)
scale_interactive(scale_colour_continuous, ...)
scale_color_continuous_interactive <- scale_colour_continuous_interactive
scale_fill_continuous_interactive <- function(...)
scale_interactive(scale_fill_continuous, ...)
scale_colour_grey_interactive <- function(...)
scale_interactive(scale_colour_grey, ...)
scale_color_grey_interactive <- scale_colour_grey_interactive
scale_fill_grey_interactive <- function(...)
scale_interactive(scale_fill_grey, ...)
scale_colour_hue_interactive <- function(...)
scale_interactive(scale_colour_hue, ...)
scale_color_hue_interactive <- scale_colour_hue_interactive
scale_fill_hue_interactive <- function(...)
scale_interactive(scale_fill_hue, ...)
scale_colour_binned_interactive <- function(...)
scale_interactive(scale_colour_binned, ...)
scale_color_binned_interactive <- scale_colour_binned_interactive
scale_fill_binned_interactive <- function(...)
scale_interactive(scale_fill_binned, ...)
scale_colour_discrete_interactive <- function(...)
scale_interactive(scale_colour_discrete, ...)
scale_color_discrete_interactive <- scale_colour_discrete_interactive
scale_fill_discrete_interactive <- function(...)
scale_interactive(scale_fill_discrete, ...)
scale_colour_date_interactive <- function(...)
scale_interactive(scale_colour_date, ...)
scale_color_date_interactive <- scale_colour_date_interactive
scale_fill_date_interactive <- function(...)
scale_interactive(scale_fill_date, ...)
scale_colour_datetime_interactive <- function(...)
scale_interactive(scale_colour_datetime, ...)
scale_color_datetime_interactive <- scale_colour_datetime_interactive
scale_fill_datetime_interactive <- function(...)
scale_interactive(scale_fill_datetime, ...)
binary_rr <- function(dt, num_col, reference, digits) {
  ## Risk-ratio table for a binary exposure (column 1 of `dt`) against a
  ## categorical outcome (column 2), comparing the first outcome level
  ## against each of the others, plus an (N-1)/N-adjusted chi-square p-value.
  ##
  ## dt:        two-column data frame: exposure, outcome.
  ## num_col:   number of outcome levels to compare (loop runs 2:num_col).
  ## reference: exposure level treated as the reference group.
  ## digits:    decimal places for the formatted estimates.
  ## Returns a one-row data frame: "Risk Ratio" label, one "lvl1 vs. lvlK"
  ## column per comparison, and a Total column holding the p-value.
  dt <- dt[complete.cases(dt), ]
  dt[, 1] <- as.factor(dt[, 1])
  dt[, 2] <- as.factor(dt[, 2])
  if (reference != levels(dt[, 1])[1]) {
    dt[, 1] <- relevel(dt[, 1], ref = reference)
  }
  rnd <- paste0("%4.", digits, "f (%4.", digits, "f, %4.", digits, "f)")
  ## Risk ratio with Wald 95% CI for a 2x2 table.
  ## Fix: the original body read A/B/C/D from the enclosing environment and
  ## ignored its own arguments (it only worked by accident); it now uses the
  ## values it is called with.
  RR_cal <- function(a, b, c, d) {
    pt_est <- (a / (a + b)) / (c / (c + d))
    logSE <- sqrt(1/a + 1/c + (1/(a + b)) + (1/(c + d)))
    c(pt_est, exp(log(pt_est) - 1.96 * logSE), exp(log(pt_est) + 1.96 * logSE))
  }
  fmt <- function(dm) sprintf(rnd, dm[1], dm[2], dm[3])
  RR <- list()
  name <- character(num_col - 1)
  for (i in 2:num_col) {
    ## 2x2 cell counts: reference vs non-reference exposure, first outcome
    ## level vs outcome level i.
    A <- sum(dt[, 1] == reference & dt[, 2] == levels(dt[, 2])[1])
    B <- sum(dt[, 1] == reference & dt[, 2] == levels(dt[, 2])[i])
    C <- sum(dt[, 1] != reference & dt[, 2] == levels(dt[, 2])[1])
    D <- sum(dt[, 1] != reference & dt[, 2] == levels(dt[, 2])[i])
    RR[[i - 1]] <- fmt(RR_cal(A, B, C, D))
    name[i - 1] <- paste0(levels(dt[, 2])[1], " vs. ", levels(dt[, 2])[i])
  }
  rnd2 <- paste0("%.", digits, "f")
  ## Chi-square test with the (N-1)/N small-sample adjustment.
  chisq <- suppressWarnings(chisq.test(table(dt)))
  N <- nrow(dt)
  stat <- ((N - 1) / N) * chisq$statistic
  pval <- sprintf(rnd2, 1 - pchisq(stat, df = chisq$parameter))
  pval <- ifelse(as.numeric(pval) < .0005, "< 0.001", pval)
  ## Assemble the result row (base-R equivalent of the former dplyr chain).
  out <- as.data.frame(RR)
  out <- setNames(out, name)
  out$Total <- paste("p = ", pval)
  out <- as.data.frame(cbind(Test = "Risk Ratio", out))
  out
}
## Create an interactive bar chart HTML widget (amCharts 4, via reactR).
##
## data/data2:     data frame(s); `data2` (same rows and value columns)
##                 enables value dragging back to a reference state.
## category:       name of the column holding the category labels.
## values:         character vector of value-column names (one bar series each).
## valueNames:     named list/vector of display names for the value columns.
## valueFormatter: amCharts number-format string. Fix: the default string had
##                 been truncated to `"` (its "#" eaten by comment-stripping),
##                 leaving the file unparseable; restored to "#.", the
##                 rAmCharts4 default.
## Remaining arguments configure title, theme, tooltips, column/bullet
## styles, axes, legend, cursor, export and sizing; each is validated and
## normalized below. Returns an htmlwidget of class "amChart4".
amBarChart <- function(
  data,
  data2 = NULL,
  category,
  values,
  valueNames = NULL,
  showValues = TRUE,
  hline = NULL,
  yLimits = NULL,
  expandY = 5,
  valueFormatter = "#.",
  chartTitle = NULL,
  theme = NULL,
  draggable = FALSE,
  tooltip = NULL,
  columnStyle = NULL,
  threeD = FALSE,
  bullets = NULL,
  alwaysShowBullets = FALSE,
  backgroundColor = NULL,
  cellWidth = NULL,
  columnWidth = NULL,
  xAxis = NULL,
  yAxis = NULL,
  scrollbarX = FALSE,
  scrollbarY = FALSE,
  legend = NULL,
  caption = NULL,
  image = NULL,
  button = NULL,
  cursor = FALSE,
  width = NULL,
  height = NULL,
  export = FALSE,
  chartId = NULL,
  elementId = NULL
) {
  if(!all(values %in% names(data))){
    stop("Invalid `values` argument.", call. = TRUE)
  }
  ## Normalize `valueNames` to a named list keyed by the value columns.
  if(is.null(valueNames)){
    valueNames <- setNames(as.list(values), values)
  }else if(is.list(valueNames) || is.character(valueNames)){
    if(is.null(names(valueNames)) && length(valueNames) == length(values)){
      warning(sprintf(
        "The `valueNames` %s you provided is unnamed - setting automatic names",
        ifelse(is.list(valueNames), "list", "vector")
      ))
      valueNames <- setNames(as.list(valueNames), values)
    }else if(!all(values %in% names(valueNames))){
      stop(
        paste0(
          "Invalid `valueNames` argument. ",
          "It must be a named list associating a name to every column ",
          "given in the `values` argument."
        ),
        call. = TRUE
      )
    }
  }else{
    stop(
      paste0(
        "Invalid `valueNames` argument. ",
        "It must be a named list giving a name for every column ",
        "given in the `values` argument."
      ),
      call. = TRUE
    )
  }
  if(!is.null(data2) &&
     (!is.data.frame(data2) ||
      nrow(data2) != nrow(data) ||
      !all(values %in% names(data2)))){
    stop("Invalid `data2` argument.", call. = TRUE)
  }
  ## Default y-limits: pretty range over all value columns, padded by
  ## `expandY` percent on each side.
  if(is.null(yLimits)){
    yLimits <- range(pretty(do.call(c, data[values])))
    pad <- diff(yLimits) * expandY/100
    yLimits <- yLimits + c(-pad, pad)
  }
  ## Title/caption: accept plain strings or amText objects.
  if(is.character(chartTitle)){
    chartTitle <- list(
      text = amText(
        text = chartTitle, color = NULL, fontSize = 22,
        fontWeight = "bold", fontFamily = "Tahoma"
      ),
      align = "left"
    )
  }else if("text" %in% class(chartTitle)){
    chartTitle <- list(text = chartTitle, align = "left")
  }
  if(is.character(caption)){
    caption <- list(text = amText(caption), align = "right")
  }else if("text" %in% class(caption)){
    caption <- list(text = caption, align = "right")
  }
  ## Normalize `draggable` to a named list of logicals, one per value column.
  if(is.atomic(draggable)){
    if(length(draggable) != 1L || !is.logical(draggable)){
      stop(
        paste0(
          "Invalid `draggable` argument. ",
          "It must be a named list defining `TRUE` or `FALSE` ",
          "for every column given in the `values` argument, ",
          "or just `TRUE` or `FALSE`."
        ),
        call. = TRUE
      )
    }
    draggable <- setNames(rep(list(draggable), length(values)), values)
  }else if(is.list(draggable)){
    if(!all(values %in% names(draggable)) ||
       !all(draggable %in% c(FALSE,TRUE))){
      stop(
        paste0(
          "Invalid `draggable` list. ",
          "It must be a named list defining `TRUE` or `FALSE` ",
          "for every column given in the `values` argument, ",
          "or just `TRUE` or `FALSE`."
        ),
        call. = TRUE
      )
    }
  }else{
    stop(
      paste0(
        "Invalid `draggable` argument. ",
        "It must be a named list defining `TRUE` or `FALSE` ",
        "for every column given in the `values` argument, ",
        "or just `TRUE` or `FALSE`."
      ),
      call. = TRUE
    )
  }
  ## Normalize `tooltip` (unless disabled) to a named list of amTooltip
  ## objects, filling in a default text template with the value formatter.
  if(!isFALSE(tooltip)){
    tooltipText <- sprintf(
      "[bold]{name}:\n{valueY.value.formatNumber('%s')}[/]",
      valueFormatter
    )
    if(is.null(tooltip)){
      tooltip <-
        setNames(
          rep(list(
            amTooltip(
              text = tooltipText,
              auto = FALSE
            )
          ), length(values)),
          values
        )
    }else if("tooltip" %in% class(tooltip)){
      if(tooltip[["text"]] == "_missing")
        tooltip[["text"]] <- tooltipText
      tooltip <- setNames(rep(list(tooltip), length(values)), values)
    }else if(is.list(tooltip)){
      if(any(!values %in% names(tooltip))){
        stop("Invalid `tooltip` list.", call. = TRUE)
      }
      tooltip <- lapply(tooltip, function(settings){
        if(settings[["text"]] == "_missing")
          settings[["text"]] <- tooltipText
        return(settings)
      })
    }else if(is.character(tooltip)){
      tooltip <-
        setNames(
          rep(list(amTooltip(text = tooltip, auto = FALSE)), length(values)),
          values
        )
    }else{
      stop("Invalid `tooltip` argument.", call. = TRUE)
    }
  }
  ## Normalize column styles and bullets to named lists, one per series.
  if(is.null(columnStyle)){
    columnStyle <- setNames(rep(list(amColumn()), length(values)), values)
  }else if("column" %in% class(columnStyle)){
    columnStyle <- setNames(rep(list(columnStyle), length(values)), values)
  }else if(is.list(columnStyle)){
    if(any(!values %in% names(columnStyle))){
      stop("Invalid `columnStyle` list.", call. = TRUE)
    }
  }else{
    stop("Invalid `columnStyle` argument.", call. = TRUE)
  }
  if(is.null(bullets)){
    bullets <- setNames(rep(list(amCircle()), length(values)), values)
  }else if("bullet" %in% class(bullets)){
    bullets <- setNames(rep(list(bullets), length(values)), values)
  }else if(is.list(bullets)){
    if(any(!values %in% names(bullets))){
      stop("Invalid `bullets` list.", call. = TRUE)
    }
  }else{
    stop("Invalid `bullets` argument.", call. = TRUE)
  }
  ## Clamp cell and column widths (percent) to their accepted ranges.
  if(is.null(cellWidth)){
    cellWidth <- 90
  }else{
    cellWidth <- max(50, min(cellWidth, 100))
  }
  if(is.null(columnWidth)){
    columnWidth <- ifelse(length(values) == 1L, 100, 90)
  }else{
    columnWidth <- max(10, min(columnWidth, 100))
  }
  ## Normalize the x-axis: accept NULL (defaults), a title string, or a list
  ## with `title` and `labels` components.
  if(is.null(xAxis)){
    xAxis <- list(
      title = amText(
        text = category,
        fontSize = 20,
        color = NULL,
        fontWeight = "bold"
      ),
      labels = amAxisLabels(
        color = NULL,
        fontSize = 18,
        rotation = 0
      )
    )
  }else if(is.character(xAxis)){
    xAxis <- list(
      title = amText(
        text = xAxis,
        fontSize = 20,
        color = NULL,
        fontWeight = "bold"
      ),
      labels = amAxisLabels(
        color = NULL,
        fontSize = 18,
        rotation = 0
      )
    )
  }
  if(is.character(xAxis[["title"]])){
    xAxis[["title"]] <- amText(
      text = xAxis[["title"]],
      fontSize = 20,
      color = NULL,
      fontWeight = "bold"
    )
  }
  if(is.null(xAxis[["labels"]])){
    xAxis[["labels"]] <- amAxisLabels(
      color = NULL,
      fontSize = 18,
      rotation = 0
    )
  }
  ## Normalize the y-axis the same way, including grid lines and the value
  ## formatter for the labels.
  if(is.null(yAxis)){
    yAxis <- list(
      title = if(length(values) == 1L) {
        amText(
          text = values,
          fontSize = 20,
          color = NULL,
          fontWeight = "bold"
        )
      },
      labels = amAxisLabels(
        color = NULL,
        fontSize = 18,
        rotation = 0,
        formatter = valueFormatter
      ),
      gridLines = amLine(opacity = 0.2, width = 1)
    )
  }else if(is.character(yAxis)){
    yAxis <- list(
      title = amText(
        text = yAxis,
        fontSize = 20,
        color = NULL,
        fontWeight = "bold"
      ),
      labels = amAxisLabels(
        color = NULL,
        fontSize = 18,
        rotation = 0,
        formatter = valueFormatter
      ),
      gridLines = amLine(opacity = 0.2, width = 1)
    )
  }
  if(is.character(yAxis[["title"]])){
    yAxis[["title"]] <- amText(
      text = yAxis[["title"]],
      fontSize = 20,
      color = NULL,
      fontWeight = "bold"
    )
  }
  if(is.null(yAxis[["labels"]])){
    yAxis[["labels"]] <- amAxisLabels(
      color = NULL,
      fontSize = 18,
      rotation = 0,
      formatter = valueFormatter
    )
  }
  ## Legend: shown by default only for multiple series.
  if(is.null(legend)){
    legend <- length(values) > 1L
  }
  if(isTRUE(legend)){
    legend <- amLegend(
      position = "bottom",
      itemsWidth = 20,
      itemsHeight = 20
    )
  }
  ## Image: accept an amImage or a list wrapping one under `image`.
  if(!(is.null(image) || isFALSE(image))){
    if(!is.list(image)){
      if(!"image" %in% class(image)){
        stop("Invalid `image` argument.", call. = TRUE)
      }else{
        image <- list(image = image)
      }
    }else{
      if(!"image" %in% names(image) || !"image" %in% class(image[["image"]])){
        stop("Invalid `image` argument.", call. = TRUE)
      }
    }
  }
  ## Default "Reset" button appears only when `data2` is supplied.
  if(is.null(button)){
    button <- if(!is.null(data2))
      amButton(
        label = "Reset"
      )
  }else if(is.character(button)){
    button <- amButton(
      label = button
    )
  }
  ## Cursor: wrap a bare tooltip, and translate the `modifier` JS snippet
  ## into a y-renderer callback.
  if("tooltip" %in% class(cursor)){
    cursor <- list(tooltip = cursor)
  }else if(is.list(cursor)){
    if("modifier" %in% names(cursor)){
      cursor[["renderer"]] <- list(y = htmlwidgets::JS(
        "function(text){",
        cursor[["modifier"]],
        "return text;",
        "}"
      ))
      cursor[["modifier"]] <- NULL
    }
    if("extraTooltipPrecision" %in% names(cursor)){
      cursor[["extraTooltipPrecision"]] <-
        list(y = cursor[["extraTooltipPrecision"]])
    }
  }
  ## Sizing: default width 100%; default height 16:9 of a fixed width,
  ## else 400px.
  if(is.null(width)){
    width <- "100%"
  }else{
    width <- shiny::validateCssUnit(width)
  }
  height <- shiny::validateCssUnit(height)
  if(is.null(height)){
    if(grepl("^\\d", width) && !grepl("%$", width)){
      height <- sprintf("calc(%s * 9 / 16)", width)
    }else{
      height <- "400px"
    }
  }
  if(is.null(chartId)){
    chartId <- paste0("barchart-", randomString(15))
  }
  if(!is.null(hline)){
    if(any(!is.element(c("value", "line"), names(hline)))){
      stop(
        "Invalid `hline` argument."
      )
    }
  }
  ## Bundle everything into the React component payload.
  component <- reactR::component(
    "AmBarChart",
    list(
      data = data,
      data2 = data2,
      category = category,
      values = as.list(values),
      valueNames = as.list(valueNames),
      showValues = showValues,
      minValue = yLimits[1L],
      maxValue = yLimits[2L],
      hline = hline,
      valueFormatter = valueFormatter,
      chartTitle = chartTitle,
      theme = theme,
      draggable = draggable,
      tooltip = tooltip,
      columnStyle = columnStyle,
      threeD = threeD,
      bullets = bullets,
      alwaysShowBullets = alwaysShowBullets,
      backgroundColor = validateColor(backgroundColor),
      cellWidth = cellWidth,
      columnWidth = columnWidth,
      xAxis = xAxis,
      yAxis = yAxis,
      scrollbarX = scrollbarX,
      scrollbarY = scrollbarY,
      legend = legend,
      caption = caption,
      image = image,
      button = button,
      cursor = cursor,
      width = width,
      height = height,
      export = export,
      chartId = chartId,
      shinyId = elementId
    )
  )
  htmlwidgets::createWidget(
    name = 'amChart4',
    reactR::reactMarkup(component),
    width = "auto",
    height = "auto",
    package = 'rAmCharts4',
    elementId = elementId
  )
}
## Interactive (rpanel-based) fitting of the soilwater5 water-retention
## model. Sliders/buttons set the starting values of the five free
## parameters (theta_R, alpha, n, b0, b1, b2; theta_S is fixed by the user),
## redrawing the candidate curve over the data; the "NLS estimates" button
## runs nls() from the current slider values and overlays the fitted curve.
##
## theta:   observed soil water contents.
## x:       matric potentials (same length as theta).
## theta_S: saturated water content (fixed, must be >= 0).
## Used for its side effects (plots and printed nls summary / Rsq).
fitsoilwater5 <-
function (theta, x, theta_S, xlab = NULL, ylab = NULL, ...)
{
    ## rpanel is an optional dependency; fail early if unavailable.
    if (!requireNamespace("rpanel", quietly = TRUE))
        stop("package rpanel is required")
    if (!inherits(c(theta, x), c("numeric", "integer")))
        stop("non-numeric arguments!")
    if (length(theta) != length(x))
        stop("incompatible dimensions!")
    stopifnot(theta_S >= 0)
    dat <- data.frame(theta, x)
    if (is.null(ylab))
        ylab = "Soil water content"
    if (is.null(xlab))
        xlab = "Matric potential"
    ## Base scatter plot, redrawn before every curve update.
    f.graph <- function() {
        plot(theta ~ x, data = dat, las = 1, xlab = xlab, ylab = ylab,
            main = "Soil Water Retention Curve", ...)
    }
    f.graph()
    ## Placeholders so the rpanel controls below have variables to bind to.
    theta_R <- alpha <- n <- b0 <- b1 <- b2 <- NULL
    ## Panel callback: redraw the data and the candidate curve for the
    ## current slider values (note m is tied to n via m = 1 - 1/n).
    f.panel <- function(pan) {
        f.graph()
        with(pan, curve(soilwater5(x, theta_R, theta_S = theta_S, alpha,
            n, m = 1 - 1/n, b0, b1, b2), add = TRUE, col = "red"))
        return(pan)
    }
    ## "NLS estimates" callback: fit by nls() starting at the slider values;
    ## on convergence, overlay the fitted curve and print summary and Rsq.
    f.fit <- function(pan) {
        start <- with(pan, pan[c("theta_R", "alpha",
            "n", "b0", "b1", "b2")])
        fit <- try(with(pan, nls(theta ~ soilwater5(x, theta_R,
            theta_S = theta_S, alpha, n, m = 1 - 1/n, b0, b1, b2),
            data = dat, start = start)))
        if (inherits(fit, "try-error")) {
            rpanel::rp.messagebox("No convergence... try other initial values.",
                title = "Warning!")
        }
        else {
            f.graph()
            est <- coef(fit)
            with(dat, lines(x, soilwater5(x, theta_R = est[1],
                theta_S = theta_S, alpha = est[2], n = est[3], b0 = est[4],
                b1 = est[5], b2 = est[6]), col = "blue"))
            print(summary(fit))
            print(Rsq(fit))
        }
        return(pan)
    }
    ## One slider plus fine-step double-button per parameter.
    panel <- rpanel::rp.control("Interactive fit")
    rpanel::rp.slider(panel, variable = theta_R, from = 0, to = max(theta)*1.5,
        resolution = 0.01, initval = 0.2, title = "theta_R",
        action = f.panel)
    rpanel::rp.doublebutton(panel, variable = theta_R, step = 0.01, title = "",
        action = f.panel, showvalue = TRUE, foreground = "blue")
    rpanel::rp.slider(panel, variable = alpha, from = 0, to = 2, resolution = 0.01,
        initval = 0.05, title = "alpha", action = f.panel)
    rpanel::rp.doublebutton(panel, variable = alpha, step = 0.01, title = "",
        action = f.panel, showvalue = TRUE, foreground = "blue")
    rpanel::rp.slider(panel, variable = n, from = 0, to = 30, resolution = 0.01,
        initval = 10, title = "n", action = f.panel)
    rpanel::rp.doublebutton(panel, variable = n, step = 0.01, title = "",
        action = f.panel, showvalue = TRUE, foreground = "blue")
    rpanel::rp.slider(panel, variable = b0, from = -2, to = 2, resolution = 0.01,
        initval = 0.1, title = "b0", action = f.panel)
    rpanel::rp.doublebutton(panel, variable = b0, step = 0.01, title = "",
        action = f.panel, showvalue = TRUE, foreground = "blue")
    rpanel::rp.slider(panel, variable = b1, from = -0.5, to = 0.5, resolution = 1e-04,
        initval = -0.017, title = "b1", action = f.panel)
    rpanel::rp.doublebutton(panel, variable = b1, step = 1e-04, title = "",
        action = f.panel, showvalue = TRUE, foreground = "blue")
    rpanel::rp.slider(panel, variable = b2, from = -1, to = 1, resolution = 1e-05,
        initval = 1e-04, title = "b2", action = f.panel)
    rpanel::rp.doublebutton(panel, variable = b2, step = 1e-05, title = "",
        action = f.panel, showvalue = TRUE, foreground = "blue")
    rpanel::rp.button(panel, title = "NLS estimates", action = f.fit,
        foreground = "white", background = "navy")
    rpanel::rp.button(panel, title = "__________________ Quit __________________",
        action = function(pan) return(pan), quitbutton = TRUE,
        foreground = "red")
}
## Solve an optimization problem `op` with the interior-point solver from
## the 'cccp' package. Only minimization is supported for quadratic and
## rational objectives.
##
## op:    problem description (objective op$f, linear/quadratic constraints,
##        bounds, variable ids in op$id).
## X:     optional starting point (computed internally for rational
##        objectives when NULL).
## opt:   list of solver options; missing entries get defaults below.
## quiet: suppress solver chatter when TRUE.
## Returns list(x = named solution vector (NA on failure), solver = "cccp",
## status = cccp status string).
##
## Fix: every `class(x) == "..."` comparison was replaced by inherits();
## class() can return a vector, which makes `==` produce a multi-element
## logical and error inside if() on modern R.
"call_cccp" <- function(op, X=NULL, opt, quiet=FALSE){
  if(inherits(op$f, "ratioFun") && op$max){
    stop("Rational functions can only be minimized with solver='cccp'.\n")
  }
  if(inherits(op$f, "quadFun") && op$max){
    stop("Quadratic functions can only be minimized with solver='cccp'.\n")
  }
  ## Fill in defaults for any option the caller did not set.
  if(!("abstol" %in% names(opt))){ opt$abstol <- (1e-06)*(10^length(op$qc)) }
  if(!("feastol" %in% names(opt))){ opt$feastol <- 1e-05 }
  if(!("trace" %in% names(opt))){ opt$trace <- !quiet }
  if(!("stepadj" %in% names(opt))){
    ## Rational objectives use a more conservative step adjustment.
    opt$stepadj <- if(inherits(op$f, "ratioFun")) 0.40 else 0.90
  }
  if(!("maxiters" %in% names(opt))){ opt$maxiters <- 100L }
  if(!("reltol" %in% names(opt))){ opt$reltol <- 1e-06 }
  if(!("beta" %in% names(opt))){ opt$beta <- 0.5 }
  storage.mode(opt$maxiters) <- "integer"
  ## Keep only the options understood by cccp::ctrl().
  params.opt <- c("maxiters", "abstol", "reltol", "feastol", "stepadj", "beta", "trace")
  opt <- opt[intersect(names(opt), params.opt)]
  optctrl <- do.call(cccp::ctrl, opt)
  if(!quiet){
    cat("\n")
    cat("Using solver 'cccp' with parameters: \n")
    print(data.frame(Value=as.character(rapply(opt,c)),row.names=names(rapply(opt,c))))
    cat("\n")
  }
  ## Rational objectives need a feasible starting point; adjust equality
  ## right-hand sides so X satisfies them exactly.
  if(is.null(X) && inherits(op$f, "ratioFun")){
    X <- getX(op)
    eq <- op$lc$dir=="=="
    if(any(eq)){
      op$lc$val[eq] <- op$lc$A[eq,,drop=FALSE] %*% X
    }
  }
  ## Convert bounds to linear constraints, split equalities from
  ## inequalities, and express quadratic constraints as second-order cones.
  if(length(op$lb$val)>0 || length(op$ub$val)>0){
    op <- bounds2lc(op)
  }
  op <- splitlc(op)
  if(length(op$qc)>0){
    op <- qc2socc(op, quiet=quiet)
  }
  if(inherits(op$f, "ratioFun")){
    op <- f2fun(op)
  }
  if(inherits(op$f, "linFun")){
    P <- NULL
    q <- op$f$a
  }
  if(inherits(op$f, "quadFun")){
    P <- 2*op$f$Q
    q <- op$f$a
  }
  ## Cone constraint list: nonnegative orthant for the linear inequalities,
  ## then one second-order cone per converted quadratic constraint.
  cList <- NULL
  soccNumber <- length(op$socc)
  ineqNumber <- !is.null(op$inlc)
  if(soccNumber+ineqNumber>0){
    cList <- vector("list", soccNumber+ineqNumber)
    if(ineqNumber>0){
      cList[[1]] <- nnoc(G=op$inlc$A, h=op$inlc$val)
    }
    for(i in seq_along(op$socc)){
      cList[[ineqNumber+i]] <- do.call(socc, op$socc[[i]][c("F", "g", "d", "f")])
    }
  }
  if(inherits(op$f, "Fun")){
    suppressWarnings(res <- cccp(f0=op$f$f0, g0=op$f$g0, h0=op$f$h0, x0=X, A=op$eqlc$A, b=op$eqlc$val, cList=cList, optctrl=optctrl))
  }else{
    suppressWarnings(res <- cccp(P=P, q=c(q), A=op$eqlc$A, b=op$eqlc$val, cList=cList, optctrl=optctrl))
  }
  ## An empty solution vector signals failure: return NAs of proper length.
  if(length(getx(res))==0){
    x <- rep(NA, length(op$id))
  }else{
    x <- c(getx(res))
  }
  x <- setNames(x, op$id)
  res <- list(x=x, solver="cccp", status=res$status)
  res
}
## Generic: resource specialisation metric (how specialised resources are in
## the activities they perform). Methods exist for eventlog and
## grouped_eventlog objects.
resource_specialisation <- function(eventlog, level, append, ...) {
  UseMethod("resource_specialisation")
}
## American-spelling alias: dispatches to the same "resource_specialisation"
## S3 methods as the British-spelling generic.
resource_specialization <- function(eventlog, level, append, ...) {
  UseMethod("resource_specialisation")
}
## eventlog method: dispatch to the level-specific helper and optionally
## sort activity/resource output by descending absolute frequency.
## Fix: the reassignable shortcuts F/T were replaced by FALSE/TRUE (same
## values, so the signature stays backward-compatible).
resource_specialisation.eventlog <- function(eventlog,
                                             level = c("log","case","activity","resource"),
                                             append = FALSE,
                                             append_column = NULL,
                                             sort = TRUE,
                                             ...) {
  level <- match.arg(level)
  level <- deprecated_level(level, ...)
  absolute <- NULL  # silences R CMD check NOTE for the NSE column reference
  ## Default column to append depends on the aggregation level.
  if(is.null(append_column)) {
    append_column <- case_when(level == "case" ~ "median",
                               level == "resource" ~ "absolute",
                               level == "activity" ~ "absolute",
                               TRUE ~ "NA")
  }
  FUN <- switch(level,
                log = resource_specialisation_log,
                case = resource_specialisation_case,
                activity = resource_specialisation_activity,
                resource = resource_specialisation_resource)
  output <- FUN(eventlog = eventlog)
  if(sort && level %in% c("activity","resource")) {
    output %>%
      arrange(-absolute) -> output
  }
  return_metric(eventlog, output, level, append, append_column, "resource_specialisation", ifelse(level == "case", 10, 2))
}
## grouped_eventlog method: apply the level-specific helper per group, then
## optionally sort activity/resource output by descending absolute frequency.
## Fix: the reassignable shortcuts F/T were replaced by FALSE/TRUE (same
## values, so the signature stays backward-compatible).
resource_specialisation.grouped_eventlog <- function(eventlog,
                                                     level = c("log","case","activity","resource"),
                                                     append = FALSE,
                                                     append_column = NULL,
                                                     sort = TRUE,
                                                     ...) {
  absolute <- NULL  # silences R CMD check NOTE for the NSE column reference
  level <- match.arg(level)
  level <- deprecated_level(level, ...)
  ## Default column to append depends on the aggregation level.
  if(is.null(append_column)) {
    append_column <- case_when(level == "case" ~ "median",
                               level == "resource" ~ "absolute",
                               level == "activity" ~ "absolute",
                               TRUE ~ "NA")
  }
  FUN <- switch(level,
                log = resource_specialisation_log,
                case = resource_specialisation_case,
                activity = resource_specialisation_activity,
                resource = resource_specialisation_resource)
  ## Log-level metrics need the raw per-group results; other levels use the
  ## standard grouped aggregation.
  if(!(level %in% c("log"))) {
    grouped_metric(eventlog, FUN) -> output
  }
  else {
    grouped_metric_raw_log(eventlog, FUN) -> output
  }
  if(sort && level %in% c("activity","resource")) {
    output %>%
      arrange(-absolute) -> output
  }
  ## NOTE(review): the last argument is 8 here but 10 in the ungrouped
  ## eventlog method for level == "case" — confirm this difference is
  ## intentional.
  return_metric(eventlog, output, level, append, append_column, "resource_specialisation", ifelse(level == "case", 8, 2))
}
## Build the BayesX term specification for a second-order random walk
## ("rw2") smooth by delegating to the shared shrinkage/random-walk
## constructor.
bayesx.construct.rw2.smooth.spec <- function(object, dir, prg, data) {
  construct.shrw(object, dir, prg, data, "rw2")
}
## Plot cumulative percent-cover estimates per species against the number of
## point intercepts processed, for vegetation point-intercept data.
## Expects veg.PI to have columns herbarium_determination, hits_unique,
## in_canopy_sky and site_unique. Point intercepts are ordered along a fixed
## transect walking sequence; cover is re-estimated after every 10 points.
## Used for its plotting side effect; returns nothing useful.
cumulative_cover <- function(veg.PI) {
	plot.hits.temp <- veg.PI[, c("herbarium_determination", "hits_unique", "in_canopy_sky")]
	## Fixed transect walking order used to sequence the point intercepts.
	transect.order <- c("N1-S1", "S1-N1", "S2-N2", "N2-S2", "N3-S3", "S3-N3", "S4-N4", "N4-S4", "N5-S5", "S5-N5", "W1-E1", "E1-W1", "E2-W2", "W2-E2", "W3-E3", "E3-W3", "E4-W4", "W4-E4", "W5-E5", "E5-W5")
	## Natural-sort the hits, then reorder them by transect walking order
	## (the transect id is the part of hits_unique before the first space).
	plot.hits.temp <- plot.hits.temp[gtools::mixedorder(plot.hits.temp$hits_unique),]
	trans.ord.vec <- plot.hits.temp$hits_unique[order(match(gsub(" .*$", "", plot.hits.temp$hits_unique), transect.order))]
	plot.hits.temp <- plot.hits.temp[match(trans.ord.vec, plot.hits.temp$hits_unique),]
	uniq.hits.plot.i <- length(unique(plot.hits.temp$hits_unique))
	x.ca <- list()
	if(length(na.omit(unique(plot.hits.temp$herbarium_determination))) > 0) {
		spp.num <- 0
		## One cumulative-cover series per determined species.
		for(x in na.omit(unique(plot.hits.temp$herbarium_determination))) {
			spp.num <- spp.num + 1
			x.ca[[spp.num]] <- rep(NA, floor(uniq.hits.plot.i/10))
			n <- 0
			## Re-estimate cover after each block of 10 point intercepts:
			## %cover = (non-canopy-sky hits of the species in the first k
			## points) / k * 100.
			for(k in 10*c(1:(uniq.hits.plot.i/10))) {
				n <- n+1
				CA.temp <- subset(plot.hits.temp, hits_unique %in% unique(plot.hits.temp$hits_unique)[1:k])
				CA.temp.x <- subset(CA.temp, herbarium_determination==x)
				CA.temp.x <- CA.temp.x[which(as.character(CA.temp.x$in_canopy_sky) == "FALSE"),]
				## `count(..., vars=)` is presumably plyr::count — TODO
				## confirm the intended namespace.
				temp.cover.score <- as.numeric((count(CA.temp.x, vars="herbarium_determination")$freq)/k*100)
				if(length(temp.cover.score) != 0) {x.ca[[spp.num]][n] <-temp.cover.score}
			}
		}
		max.cover.plot.i <- max(na.omit(unlist(x.ca)))
		## Empty canvas, then one randomly coloured line per species.
		plot(1, ylim=c(0, 100), xlim=c(0, 1100), type="n", xlab="Number of point intercepts", ylab="Estimated %CA", main=unique(veg.PI$site_unique), las=1, bty="l", cex.main=1.2)
		spp.col <- sample(colors(), length(x.ca))
		zzz <- 0
		for(j in x.ca) {
			zzz <- zzz + 1
			points(10*c(1:(uniq.hits.plot.i/10))[1:length(j)], j, type="l", col=spp.col[zzz])
		}
		## Legend: the five species with the highest final cover estimate.
		legend.data <- data.frame(cover = sapply(x.ca, FUN = function(x) {tail(x, n=1)}), species = na.omit(unique(plot.hits.temp$herbarium_determination)), colour = spp.col)
		legend.data <- na.omit(legend.data[order(legend.data[,"cover"], decreasing=TRUE),][1:5,])
		legend("topright", legend=legend.data$species, lty=rep(1, length(legend.data$species)), col=as.character(legend.data$colour), cex=0.8, bty='n', lwd=rep(3, length(legend.data$species)))
	}
}
## Augmented Knowledge Gradient (AKG) infill criterion for model-based
## optimization with a DiceKriging "km" surrogate. Returns -AKG so that
## MINIMIZING the criterion picks the point with the largest expected gain.
## The algorithm follows the AKG.R computation (presumably adapted from
## DiceOptim — TODO confirm provenance).
infillCritAKG = function(points, model, control, par.set, design) {
  ## Flip the sign of predictions when the outer problem maximizes, so the
  ## computation below can always assume minimization.
  maximize.mult = ifelse(control$minimize, 1, -1)
  x=points
  model=model$learner.model
  type="SK"
  ## Noise variance of a prospective new observation: the kriging nugget
  ## (0 when the model has none).
  new.noise.var = model@covariance@nugget
  if(length(new.noise.var)==0) new.noise.var=0
  newdata.num <- as.numeric(x)
  newdata <- data.frame(t(newdata.num))
  colnames(newdata) = colnames(model@X)
  tau2.new <- new.noise.var
  ## Kriging prediction at the candidate point x.
  predx <- predict.km(model, newdata = newdata, type = type,
    checkNames = FALSE)
  mk.x <- maximize.mult*predx$mean
  sk.x <- predx$sd
  c.x <- predx$c
  V.x <- predx$Tinv.c
  T <- model@T
  z <- model@z
  U <- model@M
  F.x <- model.matrix([email protected], data = newdata)
  ## Degenerate case: (near-)zero predictive sd or process variance means a
  ## new observation at x carries no information — AKG is 0.
  if (sk.x < sqrt(model@covariance@sd2)/1e+06 || model@covariance@sd2 <
    1e-20) {
    AKG <- 0
    tuuinv <- mk.X <- V.X <- mu.x <- cn <- sQ <- Isort <- Iremove <- A1 <- at <- bt <- ct <- NULL
  }
  else {
    ## Predictions at the design points (posterior means over the data).
    predX <- predict.km(model, newdata = model@X, type = type,
      checkNames = FALSE)
    mk.X <-maximize.mult*predX$mean
    V.X <- predX$Tinv.c
    F.X <- model@F
    m_min <- min(c(mk.X, mk.x))
    ## Trend-uncertainty correction term; only needed for universal kriging.
    if (type == "UK") {
      tuuinv <- solve(t(U) %*% U)
      mu.x <- (F.X - t(V.X) %*% U) %*% tuuinv %*% t(F.x -
        t(V.x) %*% U)
    }
    else {
      tuuinv <- mu.x <- 0
    }
    ## Posterior covariances between the candidate and all design points
    ## (plus the candidate itself), normalized into slopes B.
    cn <- c.x - t(V.X) %*% V.x + mu.x
    cn <- c(cn, sk.x^2)
    A <- c(mk.X, mk.x)
    B <- cn/sqrt(tau2.new + sk.x^2)
    sQ <- B[length(B)]
    A <- -A
    nobs <- model@n
    ## Sort the lines y = a + b*Z by slope (ties broken by intercept), then
    ## drop duplicates so the upper envelope is well defined.
    Isort <- order(x = B, y = A)
    b <- B[Isort]
    a <- A[Isort]
    Iremove <- numeric()
    for (i in 1:(nobs)) {
      if (b[i + 1] == b[i]) {
        Iremove <- c(Iremove, i)
      }
    }
    if (length(Iremove) > 0) {
      b <- b[-Iremove]
      a <- a[-Iremove]
    }
    nobs <- length(a) - 1
    ## Build the piecewise-linear upper envelope of the lines: A1 holds the
    ## indices of lines on the envelope, C the breakpoints (with sentinels
    ## at +/- 1e36).
    C <- rep(0, nobs + 2)
    C[1] <- -1e+36
    C[length(C)] <- 1e+36
    A1 <- 0
    for (k in 2:(nobs + 1)) {
      nondom <- 1
      if (k == nobs + 1) {
        nondom <- 1
      }
      else if ((a[k + 1] >= a[k]) && (b[k] == b[k + 1])) {
        nondom <- 0
      }
      if (nondom == 1) {
        ## Add line k to the envelope, popping lines whose breakpoint
        ## ordering they invalidate (capped at 1000 iterations).
        loopdone <- 0
        count <- 0
        while (loopdone == 0 && count < 1000) {
          count <- count + 1
          u <- A1[length(A1)] + 1
          C[u + 1] <- (a[u] - a[k])/(b[k] - b[u])
          if ((length(A1) > 1) && (C[u + 1] <= C[A1[length(A1) -
            1] + 2])) {
            A1 <- A1[-length(A1)]
          }
          else {
            A1 <- c(A1, k - 1)
            loopdone <- 1
          }
        }
      }
    }
    ## Expected maximum of the envelope under a standard normal Z, computed
    ## segment by segment from the Gaussian cdf/pdf.
    at <- a[A1 + 1]
    bt <- b[A1 + 1]
    ct <- C[c(1, A1 + 2)]
    maxNew <- 0
    for (k in 1:length(at)) {
      maxNew <- maxNew + at[k] * (pnorm(ct[k + 1]) - pnorm(ct[k])) +
        bt[k] * (dnorm(ct[k]) - dnorm(ct[k + 1]))
    }
    AKG <- maxNew - (-m_min)
  }
  ## Negated so the infill optimizer (a minimizer) maximizes AKG.
  return(-AKG)
}
## Scatter-plot matrix of the data stored in the object's @cor.data slot.
## When `rm` is TRUE the columns named in `var.rm` are dropped first.
## Panels: smoothed scatter (lower), panel.r2 (upper), histograms (diagonal);
## panel.r2 and panel.hist are expected to be defined elsewhere.
cor.show <- function(r, rm=FALSE, var.rm)
{
  if (rm == FALSE) {
    vals <- [email protected]
  } else {
    trimmed <- (data.frame([email protected])) %>% dplyr::select(-one_of(var.rm))
    vals <- data.matrix(trimmed, rownames.force = NA)
  }
  col.count <- length(colnames(vals))
  pairs(vals[, 1:col.count],
        lower.panel = panel.smooth,
        upper.panel = panel.r2,
        diag.panel = panel.hist)
}
## Vignette setup: collapse chunk output and prefix results with knitr's
## conventional "#>" marker. Fix: the comment string had been truncated to
## `"` (its "#" eaten by comment-stripping); restored to the standard "#>".
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
library(goodpractice)

## Run every good-practice check on the demo package bundled with the
## goodpractice package.
pkg_path <- system.file("bad1", package = "goodpractice")
g <- gp(pkg_path)
g

## Discover and run only the DESCRIPTION-URL checks.
grep("url", all_checks(), value = TRUE)
g_url <- gp(pkg_path, checks = "description_url")
g_url
checks(g_url)

## Inspect which checks failed and the first few raw results.
failed_checks(g)
results(g)[1:5,]
# First-Passage-Time Location (FPTL) function of a diffusion process
# through a boundary (fptdApprox-style).
#
# dp  - object of class "diffproc"; dp$tpdF is the character expression of
#       the transition probability distribution function.
# t0  - initial time instant (numeric scalar).
# T   - final time instant (numeric scalar), must exceed t0.
# x0  - initial value of the process (numeric scalar).
# S   - boundary: numeric constant or character mathematical expression in
#       t (may also reference t0, x0 and names supplied through env).
# env - optional list of numeric/character substitutions applied to S and
#       dp$tpdF before evaluation.
# n   - number of grid points (integer, default 4000).
#
# Returns an object of class "fptl": a list with components x (time grid,
# starting at t0) and y (FPTL values, starting at 0), with attributes
# "Call", "dp" and possibly "vars".
FPTL <-
function (dp, t0, T, x0, S, env = NULL, n = 4000)
{
    # ---- argument validation ----
    if (!is.diffproc(dp))
        stop(paste(sQuote("dp"), "argument is not of class ", shQuote("diffproc"), ".", sep = ""))
    if (!is.numeric(t0))
        stop(paste(sQuote("t0"), "argument is not of numeric type."))
    if (length(t0) > 1)
        stop(paste(sQuote("t0"), "argument is not of length 1."))
    if (!is.numeric(T))
        stop(paste(sQuote("T"), "argument is not of numeric type."))
    if (length(T) > 1)
        stop(paste(sQuote("T"), "argument is not of length 1."))
    if (t0 >= T)
        stop("the final time instant is not greater than the initial time instant.")
    if (!is.numeric(x0))
        stop(paste(sQuote("x0"), "argument is not of numeric type."))
    if (length(x0) > 1)
        stop(paste(sQuote("x0"), "argument is not of length 1."))
    if (!is.numeric(S) & !is.character(S))
        stop(paste(sQuote("S"), "argument is not of numeric or character type."))
    if (length(S) > 1)
        stop(paste(sQuote("S"), "argument is not of length 1."))
    # NOTE(review): this branch is empty; the parse/derivative checks below
    # probably belonged inside it -- confirm against the package sources.
    if (is.character(S)){
    }
    if (inherits(try(parse(text = S), silent = TRUE), "try-error"))
        stop(paste("the mathematical expression of the boundary shows syntax errors.", sep = ""))
    if (inherits(try(D(parse(text = S), "t"), silent = FALSE), "try-error"))
        stop("R can not compute the symbolic derivative with respect to 't' of the mathematical expression of the boundary")
    if ((!is.list(env)) & (!is.null(env)))
        stop(paste(sQuote("env"), "argument is not NULL or a list object."))
    if (!all(is.element(sapply(env, mode), c("numeric", "character"))))
        stop(paste(sQuote("env"), "argument is not a list of numeric or character objects."))
    if (!is.numeric(n))
        stop(paste(sQuote("n"), "argument is not of numeric type."))
    if (round(n) != n)
        stop(paste(sQuote("n"), "argument is not an integer number."))
    if (length(n) > 1)
        stop(paste(sQuote("n"), "argument is not of length 1."))
    # ---- build boundary and FPTL expressions ----
    # Substitute character entries of env using its numeric entries, then
    # splice the results into the boundary S and the distribution dp$tpdF.
    env2 <- env
    logic <- unlist(lapply(env2, is.character))
    if (any(logic))
        env2[logic] <- lapply(env2[logic], function(x, env) eval(substitute(substitute(e, env), list(e = parse(text = x)[[1]]))), env = env2[!logic])
    exprS <- as.expression(eval(substitute(substitute(e, env2), list(e = parse(text = S)[[1]]))))
    exprFPTL <- as.expression(eval(substitute(substitute(e, env2), list(e = parse(text = dp$tpdF)[[1]]))))
    # The process must not start on the boundary.
    s0 <- eval(exprS, list(t = t0, t0 = t0, x0 = x0))
    if (x0 == s0)
        stop("the value of the boundary at the initial time instant is equal to the initial value of the process.")
    # ---- first pass: uniform grid on (t0, T] ----
    grid.t <- seq(t0, T, length = n)[-1]
    S.t <- eval(exprS, list(t = grid.t, t0 = t0, x0 = x0))
    y.t <- eval(exprFPTL, list(x = S.t, t = grid.t, y = x0, s = t0))
    # FPTL is 1 - F when the process starts below the boundary.
    if (x0 < s0) y.t <- 1 - y.t
    # ---- second pass: refine the grid inside the growth intervals ----
    G <- growth.intervals(grid.t, y.t)
    if (is.null(G))
        warning("the FPTL function is not growing.")
    else {
        # Sub-interval endpoints and their relative widths m.
        index <- c(t(G))
        endlimits <- grid.t[index]
        m <- diff(endlimits)/(T - t0)
        if (G[1, 1] > 1) {
            endlimits <- c(t0, endlimits)
            m <- c((grid.t[G[1, 1]] - t0)/(5 * (T - t0)), m)
        }
        if (G[length(G)] < (n - 1)) {
            i <- which.min(y.t[(1 + G[length(G)]):(n - 1)])
            endlimits <- c(endlimits, grid.t[i + G[length(G)]])
            m <- c(m, (grid.t[i + G[length(G)]] - grid.t[G[length(G)]])/(T - t0))
            j <- G[length(G)] + i
            if (j < n) {
                endlimits <- c(endlimits, T)
                m <- c(m, (T - grid.t[i + G[length(G)]])/(5 * (T - t0)))
            }
        }
        # Distribute the n-1 grid points among the sub-intervals
        # proportionally (at least one each), then fix rounding drift.
        m <- trunc(m * (n - 1)/sum(m))
        m <- (m + 1 + abs(m - 1))/2
        d <- (n - 1) - sum(m)
        if (d > 0) {
            j <- rep(order(m), length.out = d)
            m[j] <- m[j] + 1
        }
        if (d < 0) {
            i <- order(m, decreasing = T)
            j <- rep(i[m[i] > d], length.out = d)
            m[j] <- m[j] - 1
        }
        d <- (n - 1) - sum(m)
        if (d != 0)
            stop("n is too small.")
        # Rebuild the (non-uniform) grid and re-evaluate the FPTL on it.
        if (length(endlimits) == 2)
            grid.t <- seq(endlimits[1], endlimits[2], length.out = m + 1)[-1]
        else {
            v <- mapply(seq, endlimits[-length(endlimits)], endlimits[-1], length.out = m + 1, SIMPLIFY = FALSE)
            if (is.list(v))
                grid.t <- unlist(lapply(v, function(l) l[-1]))
            else grid.t <- as.vector(apply(v, 2, function(l) l[-1]))
        }
        S.t <- eval(exprS, list(t = grid.t, t0 = t0, x0 = x0))
        y.t <- eval(exprFPTL, list(x = S.t, t = grid.t, y = x0, s = t0))
        if (x0 < s0) y.t <- 1 - y.t
    }
    # ---- package the result with call bookkeeping ----
    fptl <- list(x = c(t0, grid.t), y = c(0, y.t))
    Call <- match.call()
    Args <- list(t0=t0, T=T, x0=x0, S=S, n=n)
    label <- intersect(c("t0", "T", "x0", "S", "n"), names(Call))
    Call[label] <- Args[label]
    # Remember the variables referenced through env so print/summary
    # methods can reconstruct the call later.
    if (is.name(Call$env)){
        attr(fptl, "vars") <- list(env)
        names(attr(fptl, "vars")) <- as.character(Call$env)
    }
    else{
        if (!is.null(env) & (length(env) > 0L)){
            logic <- (sapply(env, length) == 1L)
            if (any(logic)) Call$env[names(env[logic])] <- env[logic]
            label <- all.vars(Call$env)
            if (length(label) > 0L) attr(fptl, "vars") <- mget(label, inherits = TRUE, ifnotfound = NA)
        }
    }
    attr(fptl, "Call") <- Call
    attr(fptl, "dp") <- dp
    class(fptl) <- c("fptl", "list")
    return(fptl)
}
# Create a paws client for the AWS IoT Jobs Data Plane service.
# Credentials and region come from the usual paws discovery chain
# (environment variables, config files, instance metadata).
svc <- paws::iotjobsdataplane()
dcarthwrite <- function (x, mu=NULL, psi=NULL) {
  # Density of Cartwright's power-of-cosine circular distribution.
  #
  # x   - circular data at which the density is evaluated
  # mu  - mandatory scalar mean direction (circular)
  # psi - mandatory non-negative scalar shape parameter
  #
  # All angular inputs are normalised to radians (zero = 0, counterclockwise
  # rotation) and stripped to plain numerics before calling the evaluator.
  if (is.null(mu) || length(mu) != 1) {
    stop("the mean direction parameter 'mu' is mandatory and it must have length 1")
  }
  if (is.null(psi) || length(psi) != 1) {
    stop("the parameter 'psi' is mandatory and it must have length 1")
  }
  if (psi < 0) {
    stop("the parameter 'psi' must be non negative")
  }
  x <- conversion.circular(x, units = "radians", zero = 0, rotation = "counter")
  mu <- conversion.circular(mu, units = "radians", zero = 0, rotation = "counter")
  mu <- as.vector(mu)
  psi <- as.vector(psi)
  # Drop circular bookkeeping so arithmetic in the evaluator is plain numeric.
  attr(x, "class") <- attr(x, "circularp") <- NULL
  attr(mu, "class") <- attr(mu, "circularp") <- NULL
  DcarthwrightRad(x, mu, psi)
}
DcarthwrightRad <- function(x, mu, psi) {
  # Cartwright density evaluated in radians; no argument checking here.
  # f(x) = 2^(1/psi - 1) * Gamma(1 + 1/psi)^2 * (1 + cos(x - mu))^(1/psi)
  #        / (pi * Gamma(1 + 2/psi))
  inv <- 1 / psi
  numerator <- 2^(inv - 1) * gamma(1 + inv)^2 * (1 + cos(x - mu))^inv
  numerator / (pi * gamma(1 + 2 * inv))
}
# Score a set of imputation methods with I-Scores: proper-scoring-rule
# evaluation of multiple imputations based on random projections and
# random forests (the heavy lifting is delegated to doevaluation()).
#
# imputations - list (one element per method) of m imputed data sets each
# methods     - character vector naming the methods being scored
# X.NA        - the original data set containing the missing entries
# m           - number of multiple imputations per method
# num.proj    - number of random projections
# num.trees.per.proj - number of trees per projection
# min.node.size - minimal node size for the forests
# n.cores     - cores for parallel evaluation
# projection.function - optional custom projection generator
# rescale     - if TRUE, shift all scores so the best method scores 0
#
# Returns the scores (one column per method), rescaled when requested.
Iscores <- function(imputations,
                    methods,
                    X.NA,
                    m=length(imputations[[1]]),
                    num.proj=100,
                    num.trees.per.proj = 5,
                    min.node.size=10,
                    n.cores = 1,
                    projection.function = NULL,
                    rescale =TRUE
){
  Iscores.values <- doevaluation(imputations=imputations,
                                 methods=methods,
                                 X.NA=X.NA,
                                 m=m,
                                 num.proj=num.proj,
                                 num.trees.per.proj = num.trees.per.proj,
                                 min.node.size=min.node.size,
                                 n.cores = n.cores,
                                 projection.function = projection.function)
  Iscores.values<- do.call(rbind, Iscores.values)
  if(rescale==TRUE){
    names.methods <- colnames(Iscores.values)
    # NOTE(review): lapply() over a matrix iterates over individual cells;
    # the max-shift is applied cell-wise and the result is reassembled with
    # cbind before restoring the column names -- confirm this reshaping
    # matches the intended output layout before refactoring.
    Iscores.values <-lapply(Iscores.values, FUN=function(l){
      l-max(unlist(Iscores.values))
    })
    Iscores.values <- do.call(cbind,Iscores.values)
    colnames(Iscores.values) <- names.methods
  }
  return(Iscores.values)
}
# Convert many kinds of spatial input (sp classes, sf classes, numeric
# vectors, data.frames, lists, JSON strings) to a GeoJSON-style R list.
# Generic; dispatches on class(input) -- see the individual methods below.
geojson_list <- function(input, lat = NULL, lon = NULL, group = NULL,
  geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  UseMethod("geojson_list")
}
# sp SpatialPolygons -> GeoJSON list via the geojson_rw() round trip;
# the lat/lon/group/geometry/type arguments exist for S3 compatibility.
geojson_list.SpatialPolygons <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialPolygons")
}
# sp SpatialPolygonsDataFrame -> GeoJSON list via geojson_rw().
geojson_list.SpatialPolygonsDataFrame <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialPolygonsDataFrame")
}
# sp SpatialPoints -> GeoJSON list; first promoted to a
# SpatialPointsDataFrame with a dummy attribute column because the
# geojson_rw() round trip needs a data slot.
geojson_list.SpatialPoints <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  dat <- SpatialPointsDataFrame(input, data.frame(dat = 1:NROW(input@coords)))
  as.geo_list(geojson_rw(dat, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialPoints")
}
# sp SpatialPointsDataFrame -> GeoJSON list via geojson_rw().
geojson_list.SpatialPointsDataFrame <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialPointsDataFrame")
}
# sp SpatialLines -> GeoJSON list via geojson_rw().
geojson_list.SpatialLines <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialLines")
}
# sp SpatialLinesDataFrame -> GeoJSON list via geojson_rw().
geojson_list.SpatialLinesDataFrame <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialLinesDataFrame")
}
# sp SpatialGrid -> GeoJSON list via geojson_rw().
geojson_list.SpatialGrid <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialGrid")
}
# sp SpatialGridDataFrame -> GeoJSON list via geojson_rw().
geojson_list.SpatialGridDataFrame <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialGridDataFrame")
}
# sp SpatialPixels -> GeoJSON list via geojson_rw().
geojson_list.SpatialPixels <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type='FeatureCollection',
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialPixels")
}
# sp SpatialPixelsDataFrame -> GeoJSON list via geojson_rw().
geojson_list.SpatialPixelsDataFrame <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialPixelsDataFrame")
}
# rgeos SpatialRings -> GeoJSON list via geojson_rw().
geojson_list.SpatialRings <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type='FeatureCollection',
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialRings")
}
# rgeos SpatialRingsDataFrame -> GeoJSON list via geojson_rw().
geojson_list.SpatialRingsDataFrame <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  as.geo_list(geojson_rw(input, target = "list", precision = precision,
    convert_wgs84 = convert_wgs84, crs = crs), "SpatialRingsDataFrame")
}
# sp SpatialCollections -> GeoJSON list: convert each non-NULL component
# (points, lines, rings, polygons) separately via donotnull(), then drop
# the empty slots with tg_compact().
geojson_list.SpatialCollections <- function(input, lat = NULL, lon = NULL,
  group = NULL, geometry = "point", type = "FeatureCollection",
  convert_wgs84 = FALSE, crs = NULL, precision = NULL, ...) {
  pt <- donotnull(input@pointobj, geojson_rw, target = "list",
    convert_wgs84 = convert_wgs84, crs = crs,
    precision = precision)
  ln <- donotnull(input@lineobj, geojson_rw, target = "list",
    convert_wgs84 = convert_wgs84, crs = crs,
    precision = precision)
  rg <- donotnull(input@ringobj, geojson_rw, target = "list",
    convert_wgs84 = convert_wgs84, crs = crs,
    precision = precision)
  py <- donotnull(input@polyobj, geojson_rw, target = "list",
    convert_wgs84 = convert_wgs84, crs = crs,
    precision = precision)
  alldat <- tg_compact(list(SpatialPoints = pt, SpatialLines = ln,
    SpatialRings = rg, SpatialPolygons = py))
  as.geo_list(alldat, "SpatialCollections")
}
donotnull <- function(x, fun, ...) {
  # Apply `fun` (with extra arguments) to `x`, unless `x` is NULL,
  # in which case return NULL without calling `fun` at all.
  if (is.null(x)) {
    return(NULL)
  }
  fun(x, ...)
}
# sf data.frame -> GeoJSON FeatureCollection list: one Feature per row,
# non-geometry columns become each feature's properties.
geojson_list.sf <- function(input, lat = NULL, lon = NULL, group = NULL,
                            geometry = "point", type = "FeatureCollection",
                            convert_wgs84 = FALSE, crs = NULL, ...) {
  # Optionally reproject to WGS84 first.
  if (convert_wgs84) {
    input <- convert_wgs84(input, crs)
  }
  sf_col <- get_sf_column_name(input)
  # Geometry column as a plain list; remaining columns are the attributes.
  sfc <- unclass(input[[sf_col]])
  attr_df <- as.data.frame(input)[, setdiff(names(input), sf_col),
                                  drop = FALSE]
  type <- "FeatureCollection"
  features <- lapply(seq_len(nrow(input)),
                     function(i) {
                       list(type = "Feature",
                            properties = as.list(attr_df[i, , drop = FALSE]),
                            geometry = unclass(geojson_list(sfc[[i]]))
                       )
                     })
  out <- list(type = type, features = features)
  as.geo_list(tg_compact(out), from = "sf")
}
# sfc geometry column -> GeoJSON list. A single geometry is unwrapped and
# returned directly; several geometries become a GeometryCollection.
geojson_list.sfc <- function(input, lat = NULL, lon = NULL, group = NULL,
  geometry = "point", type = "FeatureCollection", convert_wgs84 = FALSE,
  crs = NULL, ...) {
  # Element names would leak into the JSON structure; drop them.
  names(input) <- NULL
  if (convert_wgs84) {
    input <- convert_wgs84(input, crs)
  }
  if (length(input) == 1) {
    return(geojson_list(input[[1]]))
  } else {
    out <- list(
      type = "GeometryCollection",
      geometries = lapply(input, function(x) unclass(geojson_list(x)))
    )
  }
  as.geo_list(out, from = "sfc")
}
# Single sfg geometry -> GeoJSON geometry list. Geometry collections recurse
# into their members; simple geometries get a coordinates array (with any M
# dimension stripped by make_coords()).
geojson_list.sfg <- function(input, lat = NULL, lon = NULL, group = NULL,
                             geometry = "point", type = "FeatureCollection",
                             convert_wgs84 = FALSE, crs = NULL, ...) {
  type <- switch_geom_type(get_geometry_type(input))
  if (type == "GeometryCollection") {
    geometries <- lapply(input, function(x) unclass(geojson_list(x)))
    out <- list(type = type, geometries = geometries)
  } else {
    coordinates <- make_coords(input)
    out <- list(type = type, coordinates = coordinates)
  }
  as.geo_list(out, from = "sfg")
}
switch_geom_type <- function(x) {
  # Map an sf geometry type tag (upper case) to its GeoJSON type name.
  # Unknown inputs fall through to NULL, as per switch() semantics.
  switch(x,
    POINT = "Point",
    LINESTRING = "LineString",
    POLYGON = "Polygon",
    MULTIPOINT = "MultiPoint",
    MULTILINESTRING = "MultiLineString",
    MULTIPOLYGON = "MultiPolygon",
    GEOMETRY = "GeometryCollection",
    GEOMETRYCOLLECTION = "GeometryCollection")
}
get_sf_column_name <- function(x) {
  # Name of the active geometry column recorded in an sf object's attributes.
  attr(x, "sf_column")
}
# Geometry type tag (e.g. "POINT") of an sf object; dispatches on sfc/sfg.
get_geometry_type <- function(x) UseMethod("get_geometry_type")
get_geometry_type.sfc <- function(x) {
  # The first class element of an sfc column is e.g. "sfc_POINT";
  # the geometry type is the part after the underscore.
  pieces <- strsplit(class(x)[1], "_")[[1]]
  pieces[2]
}
get_geometry_type.sfg <- function(x) {
  # An sfg geometry's class is c(<dim tag>, <type>, "sfg"),
  # e.g. c("XY", "POINT", "sfg"); the type is the second element.
  cls <- class(x)
  cls[2]
}
make_coords <- function(input) {
  # Extract raw coordinates from an sfg geometry. GeoJSON has no M
  # (measure) dimension, so when the dimension tag (first class element,
  # e.g. "XYM" or "XYZM") contains "M", that element/column is stripped
  # with a message; otherwise the unclassed coordinates are returned as-is.
  dim_tag <- class(input)[1]
  m_pos <- regexpr("M", dim_tag)
  if (m_pos > 0) {
    message("removing M dimension as not supported in GeoJSON format")
    return(drop_m(unclass(input), m_pos))
  }
  unclass(input)
}
# Strip the M (measure) dimension at position m_loc from coordinates;
# dispatches on the container type (list / numeric / matrix).
drop_m <- function(input, m_loc) UseMethod("drop_m")
drop_m.list <- function(input, m_loc) {
  # Recurse into nested coordinate lists, dropping M from every element.
  lapply(input, function(el) drop_m(el, m_loc = m_loc))
}
drop_m.numeric <- function(input, m_loc) {
  # A single coordinate vector: remove the M element by position.
  input[-m_loc]
}
drop_m.matrix <- function(input, m_loc) {
  # A coordinate matrix: remove the M column, keeping the matrix shape
  # even when a single column remains.
  input[, -m_loc, drop = FALSE]
}
# Numeric vector (coordinates) -> GeoJSON list via num_to_geo_list().
geojson_list.numeric <- function(input, lat = NULL, lon = NULL, group = NULL,
  geometry = "point", type = "FeatureCollection", ...) {
  as.geo_list(num_to_geo_list(input, geometry, type), "numeric")
}
# data.frame -> GeoJSON list; latitude/longitude columns are guessed from
# the column names when not given explicitly.
geojson_list.data.frame <- function(input, lat = NULL, lon = NULL, group = NULL,
  geometry = "point", type = "FeatureCollection", ...) {
  tmp <- guess_latlon(names(input), lat, lon)
  as.geo_list(df_to_geo_list(x = input, lat = tmp$lat, lon = tmp$lon,
    geometry = geometry, type = type, group = group), "data.frame")
}
# List input -> GeoJSON list. Named inner lists have lat/lon guessed from
# the first element's names; unnamed lists are treated positionally.
geojson_list.list <- function(input, lat = NULL, lon = NULL, group = NULL,
  geometry = "point", type = "FeatureCollection", ...) {
  # For polygons, verify the ring is closed before converting.
  if (geometry == "polygon") lint_polygon_list(input)
  tmp <- if (!is.named(input)) {
    list(lon = NULL, lat = NULL)
  } else {
    guess_latlon(names(input[[1]]), lat, lon)
  }
  as.geo_list(list_to_geo_list(input, lat = tmp$lat, lon = tmp$lon,
    geometry, type, !is.named(input), group), "list")
}
geojson_list.geo_list <- function(input, lat = NULL, lon = NULL, group = NULL,
  geometry = "point", type = "FeatureCollection", ...) {
  # Already a geo_list: identity pass-through.
  input
}
# JSON string -> GeoJSON list: parsed without simplification so the list
# structure mirrors the JSON exactly; `...` is forwarded to fromJSON().
geojson_list.json <- function(input, lat = NULL, lon = NULL, group = NULL,
  geometry = "point", type = "FeatureCollection", ...) {
  output_list <- jsonlite::fromJSON(input, FALSE, ...)
  as.geo_list(output_list, from = "json")
}
as.geo_list <- function(x, from) {
  # Tag `x` as a geo_list, recording which class it was converted from.
  structure(x, class = "geo_list", from = from)
}
lint_polygon_list <- function(x) {
  # A GeoJSON polygon ring must be closed: its first and last vertices
  # must be identical. Errors (without a call) otherwise.
  first_pt <- x[[1]]
  last_pt <- x[[length(x)]]
  if (!identical(first_pt, last_pt)) {
    stop("First and last point in a polygon must be identical",
      call. = FALSE)
  }
}
# Extract acquisition parameters from JCAMP-DX metadata lines.
#
# md    - character vector of metadata lines from the file header
# mode  - one of "XYY", "NMR_1D", "NMR_2D", "LC_MS", "XYXY"; selects which
#         parameter set is located and returned
# SOFC  - "stop on failed check": if TRUE, a missing required field stops()
# debug - when 2, the extracted parameter vector is printed
#
# Returns a named numeric vector of parameters appropriate to `mode`.
#
# NOTE(review): every grep()/sub() pattern below was truncated by text
# extraction -- each string literal stops exactly where a '#' (the JCAMP
# "##TAG=" marker) would begin, leaving unterminated strings. The code is
# reproduced byte-for-byte; recover the original patterns from the package
# sources before modifying anything in this function.
extractParams <- function(md, mode, SOFC, debug = 0) {
  if (mode == "XYY") {
    firstX <- grep("^\\s*
    if (firstX == 0) stop("Couldn't find FIRSTX")
    firstX <- md[firstX]
    firstX <- sub("^\\s*
    firstX <- gsub(",", ".", firstX)
    firstX <- as.numeric(firstX)
    lastX <- grep("^\\s*
    if (lastX == 0) stop("Couldn't find LASTX")
    lastX <- md[lastX]
    lastX <- sub("^\\s*
    lastX <- gsub(",", ".", lastX)
    lastX <- as.numeric(lastX)
    npoints <- grep("^\\s*
    if (npoints == 0) stop("Couldn't find NPOINTS")
    npoints <- md[npoints]
    npoints <- sub("^\\s*
    npoints <- as.integer(npoints)
    factorX <- grep("^\\s*
    if (factorX == 0) stop("Couldn't find XFACTOR")
    factorX <- sub("^\\s*
    factorX <- gsub(",", ".", factorX)
    factorX <- as.numeric(factorX)
    factorY <- grep("^\\s*
    if (factorY == 0) stop("Couldn't find YFACTOR")
    factorY <- sub("^\\s*
    factorY <- gsub(",", ".", factorY)
    factorY <- as.numeric(factorY)
    if (!SOFC) {
      firstY <- NA_real_
    }
    if (SOFC) {
      firstY <- grep("^\\s*
      if (firstY == 0) stop("Couldn't find FIRSTY")
      firstY <- md[firstY]
      firstY <- sub("^\\s*
      firstY <- gsub(",", ".", firstY)
      firstY <- as.numeric(firstY)
    }
    params <- c(as.numeric(npoints), firstX, lastX, firstY, factorX, factorY)
    names(params) <- c("npoints", "firstX", "lastX", "firstY", "factorX", "factorY")
    if (debug == 2) {
      cat("\nExtracted parameters:\n")
      print(params)
    }
  }
  if (mode == "NMR_1D") {
    npoints <- grep("^\\s*
    if (npoints == 0) stop("Couldn't find VAR_DIM")
    npoints <- md[npoints]
    npoints <- sub("^\\s*
    npoints <- as.numeric(unlist(strsplit(npoints, ",")))
    npoints <- npoints[1:3]
    firsts <- grep("^\\s*
    if (firsts == 0) stop("Couldn't find FIRST")
    firsts <- md[firsts]
    firsts <- sub("^\\s*
    firsts <- as.numeric(unlist(strsplit(firsts, ",")))
    firsts <- firsts[1:3]
    lasts <- grep("^\\s*
    if (lasts == 0) stop("Couldn't find LAST")
    lasts <- md[lasts]
    lasts <- sub("^\\s*
    lasts <- as.numeric(unlist(strsplit(lasts, ",")))
    lasts <- lasts[1:3]
    factors <- grep("^\\s*
    if (factors == 0) stop("Couldn't find FACTOR")
    factors <- md[factors]
    factors <- sub("^\\s*
    factors <- as.numeric(unlist(strsplit(factors, ",")))
    factors <- factors[1:3]
    pointsX <- npoints[1]
    pointsR <- npoints[2]
    pointsI <- npoints[3]
    firstX <- firsts[1]
    firstR <- firsts[2]
    firstI <- firsts[3]
    lastX <- lasts[1]
    lastR <- lasts[2]
    lastI <- lasts[3]
    factorX <- factors[1]
    factorR <- factors[2]
    factorI <- factors[3]
    params <- c(
      as.numeric(pointsX), as.numeric(pointsR), as.numeric(pointsI),
      firstX, firstR, firstI, lastX, lastR, lastI, factorX, factorR, factorI
    )
    names(params) <- c(
      "pointsX", "pointsR", "pointsI", "firstX", "firstR", "firstI",
      "lastX", "lastR", "lastI", "factorX", "factorR", "factorI"
    )
    if (debug == 2) {
      cat("\nExtracted parameters:\n")
      print(params)
    }
    if ((pointsX != pointsR) | (pointsX != pointsI)) stop("No. of frequency, real, imaginary points are not the same")
  }
  if (mode == "NMR_2D") {
    npoints <- grep("^\\s*
    if (npoints == 0) stop("Couldn't find VAR_DIM")
    npoints <- md[npoints]
    npoints <- sub("^\\s*
    npoints <- as.numeric(unlist(strsplit(npoints, ",")))
    npoints <- npoints[-length(npoints)]
    firsts <- grep("^\\s*
    if (length(firsts) == 0) stop("Couldn't find FIRST")
    firsts <- md[firsts]
    firsts <- sub("^\\s*
    firsts <- as.numeric(unlist(strsplit(firsts, ",")))
    lasts <- grep("^\\s*
    if (lasts == 0) stop("Couldn't find LAST")
    lasts <- md[lasts]
    lasts <- sub("^\\s*
    lasts <- as.numeric(unlist(strsplit(lasts, ",")))
    factors <- grep("^\\s*
    if (factors == 0) stop("Couldn't find FACTOR")
    factors <- md[factors]
    factors <- sub("^\\s*
    factors <- as.numeric(unlist(strsplit(factors, ",")))
    pointsF1 <- npoints[1]
    pointsF2 <- npoints[2]
    firstF1 <- firsts[1]
    firstF2 <- firsts[2]
    lastF1 <- lasts[1]
    lastF2 <- lasts[2]
    factorF1 <- factors[1]
    factorF2 <- factors[2]
    factorZ <- factors[3]
    params <- c(
      as.numeric(pointsF1), as.numeric(pointsF2),
      firstF1, firstF2, lastF1, lastF2, factorF1, factorF2, factorZ
    )
    names(params) <- c(
      "pointsF1", "pointsF2", "firstF1", "firstF2",
      "lastF1", "lastF2", "factorF1", "factorF2", "factorZ"
    )
    if (debug == 2) {
      cat("\nExtracted parameters:\n")
      print(params)
    }
  }
  if (mode == "LC_MS") {
    npoints <- grep("^\\s*
    if (SOFC) if (npoints == 0L) stop("Couldn't find VAR_DIM")
    if (npoints != 0L) {
      npoints <- md[npoints]
      npoints <- sub("^\\s*
      npoints <- as.numeric(unlist(strsplit(npoints, ",")))
    }
    firsts <- grep("^\\s*
    if (SOFC) if (length(firsts) == 0) stop("Couldn't find FIRST")
    if (firsts != 0L) {
      firsts <- md[firsts]
      firsts <- sub("^\\s*
      firsts <- as.numeric(unlist(strsplit(firsts, ",")))
    }
    lasts <- grep("^\\s*
    if (SOFC) if (lasts == 0) stop("Couldn't find LAST")
    if (lasts != 0L) {
      lasts <- md[lasts]
      lasts <- sub("^\\s*
      lasts <- as.numeric(unlist(strsplit(lasts, ",")))
    }
    time_points <- npoints[2]
    first_time <- firsts[3]
    last_time <- lasts[3]
    params <- c(
      as.numeric(time_points), as.numeric(first_time), as.numeric(last_time)
    )
    names(params) <- c("time_points", "first_time", "last_time")
    if (debug == 2) {
      cat("\nExtracted parameters:\n")
      print(params)
    }
  }
  if (mode == "XYXY") {
    npoints <- grep("^\\s*
    if (SOFC) if (npoints == 0) stop("Couldn't find NPOINTS")
    if (npoints != 0L) {
      npoints <- md[npoints]
      npoints <- sub("^\\s*
      npoints <- as.integer(npoints)
    }
    params <- npoints
    names(params) <- "npoints"
    if (debug == 2) {
      cat("\nExtracted parameters:\n")
      print(params)
    }
  }
  return(params)
}
# testthat suite for AIPW$stratified_fit() (R6 class from this package).
# Each test builds random binary vectors for outcome/exposure/covariates
# and fits with small SuperLearner libraries so the tests stay fast.
# Checks: fits run for k_split in {1, 2, 3}; libraries and observation-level
# estimates are populated; results stay NULL until summarised; verbose and
# progressr messaging; NA propagation for a missing outcome; and the
# stratified_fitted flag/object structure.
test_that("AIPW stratified_fit: SuperLeaner & k_split", {
  require(SuperLearner)
  vec <- function() sample(0:1,100,replace = T)
  sl.lib <- c("SL.glm")
  # k_split = 1 (no sample splitting) warns but fits.
  aipw <- AIPW$new(Y=vec(),
                   A=vec(),
                   W.Q =vec(),
                   W.g =vec(),
                   Q.SL.library=sl.lib,
                   g.SL.library=sl.lib,
                   k_split = 1,verbose = FALSE,
                   save.sl.fit = TRUE)
  expect_warning(aipw$stratified_fit())
  expect_false(any(sapply(aipw$libs, is.null)))
  expect_false(any(sapply(aipw$obs_est[1:4], is.na)))
  expect_true(any(sapply(aipw$obs_est[5:7], is.null)))
  expect_true(is.null(aipw$result))
  expect_true(is.null(aipw$estimate))
  aipw <- AIPW$new(Y=vec(),
                   A=vec(),
                   W.Q =vec(),
                   W.g =vec(),
                   Q.SL.library=sl.lib,
                   g.SL.library=sl.lib,
                   k_split = 2,verbose = FALSE)
  # k_split = 3 with saved fits.
  aipw <- AIPW$new(Y=vec(),
                   A=vec(),
                   W.Q =vec(),
                   W.g =vec(),
                   Q.SL.library=sl.lib,
                   g.SL.library=sl.lib,
                   k_split = 3,verbose = FALSE,
                   save.sl.fit = TRUE)
  expect_warning(aipw$stratified_fit())
  expect_false(any(sapply(aipw$libs, is.null)))
  expect_false(any(sapply(aipw$obs_est[1:4], is.na)))
  expect_true(any(sapply(aipw$obs_est[5:7], is.null)))
  expect_true(is.null(aipw$result))
  expect_true(is.null(aipw$estimate))
})
# Messaging: "Done!" when verbose, and progressr integration is detected.
test_that("AIPW stratified_fit: verbose", {
  library(SuperLearner)
  vec <- function() sample(0:1,100,replace = T)
  sl.lib <- c("SL.mean")
  aipw <- AIPW$new(Y=vec(),
                   A=vec(),
                   W.Q =vec(),
                   W.g =vec(),
                   Q.SL.library=sl.lib,
                   g.SL.library=sl.lib,
                   k_split = 1,verbose = T)
  expect_message(aipw$stratified_fit(),regexp = "Done!")
  library(progressr)
  expect_message(aipw$stratified_fit())
  expect_true(aipw$.__enclos_env__$private$isLoaded_progressr)
})
# A missing outcome must propagate NA into mu estimates but not into the
# raw propensity score, for every k_split.
test_that("AIPW stratified_fit: missing outcome", {
  require(SuperLearner)
  vec <- function() sample(0:1,100,replace = T)
  sl.lib <- c("SL.mean")
  expect_warning(aipw <- AIPW$new(Y=c(NA,vec()[2:100]),
                                  A=c(1,vec()[2:100]),
                                  W =vec(),
                                  Q.SL.library=sl.lib,
                                  g.SL.library=sl.lib,
                                  k_split = 1,verbose = FALSE)$stratified_fit())
  expect_true(is.na(aipw$obs_est$mu0[1]))
  expect_true(is.na(aipw$obs_est$mu1[1]))
  expect_true(is.na(aipw$obs_est$mu[1]))
  expect_false(is.na(aipw$obs_est$raw_p_score[1]))
  expect_warning(aipw <- AIPW$new(Y=c(NA,vec()[2:100]),
                                  A=c(1,vec()[2:100]),
                                  W =vec(),
                                  Q.SL.library=sl.lib,
                                  g.SL.library=sl.lib,
                                  k_split = 2,verbose = FALSE)$stratified_fit())
  expect_true(is.na(aipw$obs_est$mu0[1]))
  expect_true(is.na(aipw$obs_est$mu1[1]))
  expect_true(is.na(aipw$obs_est$mu[1]))
  expect_false(is.na(aipw$obs_est$raw_p_score[1]))
  expect_warning(aipw <- AIPW$new(Y=c(NA,vec()[2:100]),
                                  A=c(1,vec()[2:100]),
                                  W =vec(),
                                  Q.SL.library=sl.lib,
                                  g.SL.library=sl.lib,
                                  k_split = 3,verbose = FALSE)$stratified_fit())
  expect_true(is.na(aipw$obs_est$mu0[1]))
  expect_true(is.na(aipw$obs_est$mu1[1]))
  expect_true(is.na(aipw$obs_est$mu[1]))
  expect_false(is.na(aipw$obs_est$raw_p_score[1]))
})
# fit() leaves stratified_fitted FALSE; stratified_fit() sets it TRUE and
# stores one Q fit per split (each with 2 strata).
test_that("AIPW stratified_fit: object", {
  library(SuperLearner)
  vec <- function() sample(0:1,100,replace = T)
  sl.lib <- c("SL.mean")
  aipw <- AIPW$new(Y=vec(),
                   A=vec(),
                   W.Q =vec(),
                   W.g =vec(),
                   Q.SL.library=sl.lib,
                   g.SL.library=sl.lib,
                   k_split = 3,
                   verbose = F,
                   save.sl.fit = T)
  aipw$fit()
  expect_false(aipw$stratified_fitted)
  expect_equal(length(aipw$libs$Q.fit),3)
  aipw <- AIPW$new(Y=vec(),
                   A=vec(),
                   W.Q =vec(),
                   W.g =vec(),
                   Q.SL.library=sl.lib,
                   g.SL.library=sl.lib,
                   k_split = 3,
                   verbose = F,
                   save.sl.fit = T)
  aipw$stratified_fit()
  expect_true(aipw$stratified_fitted)
  expect_equal(length(aipw$libs$Q.fit),3)
  expect_equal(length(aipw$libs$Q.fit[[1]]),2)
})
fslsd = function(img, nonzero = FALSE, verbose = TRUE, ts = FALSE){
  # Standard deviation of voxel intensities in an image via FSL's fslstats.
  #
  # img     - image object or file path, passed through to fslstats()
  # nonzero - if TRUE use "-S" (restrict to nonzero voxels), else "-s"
  # verbose - forwarded to fslstats()
  # ts      - if TRUE, compute per-timepoint values (one result per volume)
  #
  # Returns a numeric vector, or (when fslstats emits several values per
  # line) the result of sapply over the split lines.
  #
  # Fix: the original used ifelse() on a length-1 condition, which is the
  # vectorized construct; a scalar if/else is the correct idiom here and
  # produces the same "-S"/"-s" choice.
  opts = if (nonzero) "-S" else "-s"
  val = fslstats(img, opts = opts, verbose = verbose, ts = ts)
  val = strsplit(val, " ")
  if (length(val) == 1) {
    val = as.numeric(val[[1]])
  } else {
    val = sapply(val, as.numeric)
  }
  val
}
prob_detect <- function(c, r, t, d, p, N, method) {
  # Probability of detection for a sampling plan with acceptance number `c`:
  # one minus the probability of acceptance, i.e. the chance of finding
  # more than `c` contaminated units. The per-count probabilities come
  # from prob_contaminant().
  acceptance <- 0
  for (k in 0:c) {
    acceptance <- acceptance + prob_contaminant(k, r, t, d, p, N, method)
  }
  1 - acceptance
}
# Downside risk (downside deviation): root mean square of returns below
# `targetReturn`, averaged over ALL n periods (not only the downside ones).
# periodPercentReturns is assumed to be a univariate timeSeries supporting
# matrix-style subsetting and nrow() -- TODO confirm via isUnivariate().
downsideRisk <-
function(periodPercentReturns, targetReturn = 0)
{
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns < 0, ]
    ans = sqrt(sum(Returns^2)/n)
    names(ans) = "% Downside Risk per Period"
    ans
}
# Downside variance: mean squared shortfall below `targetReturn`, averaged
# over ALL n periods (the square of downsideRisk).
downsideVariance <-
function(periodPercentReturns, targetReturn = 0)
{
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns < 0, ]
    ans = sum(Returns^2)/n
    names(ans) = "Downside Variance per Period"
    ans
}
downsidePotential <-
function(periodPercentReturns, targetReturn = 0)
{
    # Downside potential: mean shortfall below the target return (first
    # lower partial moment), averaged over ALL n periods. Note the value is
    # negative since the shortfalls themselves are negative deviations.
    # periodPercentReturns: univariate timeSeries of percent returns.
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns < 0, ]
    ans = sum(Returns)/n
    # Fix: the original label said "% Downside Risk per Period", copied
    # from downsideRisk(); this function measures downside potential.
    names(ans) = "% Downside Potential per Period"
    ans
}
# Upside risk: root mean square of returns above `targetReturn`, averaged
# over ALL n periods (mirror image of downsideRisk).
upsideRisk <-
function(periodPercentReturns, targetReturn = 0)
{
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns > 0, ]
    ans = sqrt(sum(Returns^2)/n)
    names(ans) = "% Upside Risk per Period"
    ans
}
# Upside variance: mean squared excess above `targetReturn`, averaged over
# ALL n periods (the square of upsideRisk).
upsideVariance <-
function(periodPercentReturns, targetReturn = 0)
{
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns > 0, ]
    ans = sum(Returns^2)/n
    names(ans) = "Upside Variance per Period"
    ans
}
# Upside potential: mean gain above the target return (first upper partial
# moment), averaged over ALL n periods.
upsidePotential <-
function(periodPercentReturns, targetReturn = 0)
{
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns > 0, ]
    ans = sum(Returns)/n
    names(ans) = "% Upside Potential per Period"
    ans
}
downsideFrequency <-
function(periodPercentReturns, targetReturn = 0)
{
    # Downside frequency: fraction of periods with returns below the target.
    # periodPercentReturns: univariate timeSeries of percent returns.
    # Fix: the original never defined `n` (total number of periods); it
    # must be taken from the full series BEFORE filtering out the upside.
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns < 0, ]
    nDownside = nrow(Returns)
    ans = nDownside / n
    names(ans) = "Downside Frequency"
    ans
}
upsideFrequency <-
function(periodPercentReturns, targetReturn = 0)
{
    # Upside frequency: fraction of periods with returns above the target.
    # Fixes vs the original: (1) it filtered `Returns < 0` (the downside)
    # instead of `Returns > 0`; (2) it divided the undefined symbol
    # `nDownside` by n rather than the upside count it just computed.
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    n = nrow(Returns)
    Returns = Returns[Returns > 0, ]
    nUpside = nrow(Returns)
    ans = nUpside / n
    names(ans) = "Upside Frequency"
    ans
}
omegaRatio <-
function(periodPercentReturns, targetReturn = 0)
{
    # Omega ratio: sum of gains above the target divided by the absolute
    # sum of losses below it. By definition Omega is non-negative.
    stopifnot(isUnivariate(periodPercentReturns))
    Returns = periodPercentReturns - targetReturn
    Return1 = sum(Returns[Returns > 0, ])
    # Fix: the selected losses are negative, so their sum must be negated
    # to give a positive denominator; the original returned negative Omega.
    # (`-Returns > 0` selects exactly the periods with Returns < 0.)
    Return2 = -sum(Returns[-Returns > 0, ])
    ans = Return1 / Return2
    names(ans) = "Omega Ratio"
    ans
}
bernardoLedoitRatio <-
function(periodPercentReturns)
{
    # Bernardo-Ledoit gain/loss ratio: the Omega ratio at target return 0.
    # (The original assigned two unused locals, R and targetReturn; they
    # are dropped here -- the computation is unchanged.)
    stopifnot(isUnivariate(periodPercentReturns))
    ans = omegaRatio(periodPercentReturns, targetReturn = 0)
    names(ans) = "Bernardo Ledoit Ratio"
    ans
}
# d ratio: (number of losing periods / number of winning periods) divided
# by the Bernardo-Ledoit gain/loss ratio. Periods with exactly zero return
# are counted on neither side.
dRatio <-
function(periodPercentReturns)
{
    stopifnot(isUnivariate(periodPercentReturns))
    R = periodPercentReturns
    # nrow() of the filtered series counts periods below/above zero;
    # assumes timeSeries-style matrix subsetting -- TODO confirm.
    nd = nrow(R[R < 0, ])
    nu = nrow(R[R > 0, ])
    ans = (nd/nu) / bernardoLedoitRatio(R)
    names(ans) = "d Ratio"
    ans
}
# Omega-Sharpe ratio: mean excess return over the target, divided by the
# average shortfall below the target.
omegaSharpeRatio <-
function(periodPercentReturns, targetReturn = 0)
{
    stopifnot(isUnivariate(periodPercentReturns))
    Return = averageReturn(periodPercentReturns) - targetReturn
    # Shortfalls: positive values of (target - return).
    Returns = targetReturn - periodPercentReturns
    Returns = Returns[Returns > 0]
    # NOTE(review): n counts only the shortfall periods, so Risk is the
    # mean shortfall conditional on falling short; many texts divide the
    # sum by the total number of periods instead -- confirm the intent.
    n = NROW(Returns)
    Risk = sum(Returns) / n
    ans = Return / Risk
    names(ans) = "Omega Sharpe Ratio"
    ans
}
# Sortino ratio: mean excess return over the target divided by the
# downside risk (downside deviation) with the same target.
sortinoRatio <-
function(periodPercentReturns, targetReturn = 0)
{
    stopifnot(isUnivariate(periodPercentReturns))
    Return = averageReturn(periodPercentReturns) - targetReturn
    Risk = downsideRisk(periodPercentReturns, targetReturn)
    ans = Return / Risk
    names(ans) = "Sortino Ratio"
    ans
}
kappaRatio <-
function(periodPercentReturns, targetReturn = 0, a = 1)
{
    # Kappa ratio of order `a` (Kaplan & Knowles): mean excess return over
    # the target, divided by the a-th lower partial moment of the returns.
    # NOTE(review): the textbook Kappa divides by LPM^(1/a) and averages the
    # LPM over all periods; this implementation divides by the raw LPM over
    # the shortfall periods only -- left unchanged to preserve behavior.
    stopifnot(isUnivariate(periodPercentReturns))
    Return = averageReturn(periodPercentReturns) - targetReturn
    Returns = targetReturn - periodPercentReturns
    Returns = Returns[Returns > 0]^a
    n = NROW(Returns)
    Risk = sum(Returns) / n
    ans = Return / Risk
    # Fix: the label pasted the undefined symbol `kappa`, which resolves to
    # base::kappa (a function) and makes paste() fail with "cannot coerce
    # type 'closure'"; the order parameter is `a`.
    names(ans) = paste("a =", a, "Kappa Ratio")
    ans
}
upsidePotentialRatio <-
function(periodPercentReturns, targetReturn = 0)
{
    # Upside Potential Ratio (Sortino, van der Meer & Plantinga 1999):
    # expected gain above the target, averaged over all periods, divided
    # by the downside risk at the same target.
    # Fixes vs the original (which errored at runtime): it referenced the
    # undefined symbols `n` and `Return`, and its numerator accumulated
    # shortfalls (targetReturn - R) instead of gains (R - targetReturn).
    stopifnot(isUnivariate(periodPercentReturns))
    R = periodPercentReturns
    n = nrow(R)
    Returns = R - targetReturn
    Returns = Returns[Returns > 0]
    upside = sum(Returns) / n
    Risk = downsideRisk(R, targetReturn)
    ans = upside / Risk
    names(ans) = "Upside Potential Ratio"
    ans
}
volatilitySkewness <-
function(periodPercentReturns, targetReturn = 0)
{
    # Volatility skewness: upside variance over downside variance.
    # Fix: the `targetReturn` argument was silently ignored (both calls
    # hard-coded targetReturn = 0); it is now forwarded. The default
    # behavior (targetReturn = 0) is unchanged.
    stopifnot(isUnivariate(periodPercentReturns))
    ans =
        upsideVariance(periodPercentReturns, targetReturn = targetReturn) /
        downsideVariance(periodPercentReturns, targetReturn = targetReturn)
    names(ans) = "Volatility Skewness"
    ans
}
variabilitySkewness <-
function(periodPercentReturns, targetReturn = 0)
{
    # Variability skewness: upside risk over downside risk.
    # Fix: the `targetReturn` argument was silently ignored (both calls
    # hard-coded targetReturn = 0); it is now forwarded. The default
    # behavior (targetReturn = 0) is unchanged.
    stopifnot(isUnivariate(periodPercentReturns))
    R = periodPercentReturns
    ans = upsideRisk(R, targetReturn = targetReturn) /
        downsideRisk(R, targetReturn = targetReturn)
    names(ans) = "Variability Skewness"
    ans
}
adjustedSharpeRatio <-
function(periodPercentReturns, riskFreeRate = 0,
    method = c("geometric", "arithmetic"),
    scale = c("quarterly", "monthly", "weekly", "daily"))
{
    # Adjusted Sharpe ratio (Pezier & White):
    #   ASR = SR * (1 + S*SR/6 - (K - 3)*SR^2/24)
    # where S is the skewness and K the (Pearson) kurtosis of the returns.
    # Fix: the original referenced the undefined symbols `nSkewness` and
    # `K`, so it errored at runtime; both moments are now computed from the
    # return series using population moment estimators.
    stopifnot(isUnivariate(periodPercentReturns))
    SR = sharpeRatio(periodPercentReturns, riskFreeRate, method, scale)
    x = as.vector(periodPercentReturns)
    m = mean(x)
    m2 = mean((x - m)^2)
    S = mean((x - m)^3) / m2^1.5   # population skewness
    K = mean((x - m)^4) / m2^2     # population kurtosis (K - 3 = excess)
    ans = SR * ( 1 + S*SR/6 - (K-3)*(SR^2)/24 )
    names(ans) = "Adjusted Sharpe Ratio"
    ans
}
# Skewness-kurtosis ratio -- NOT IMPLEMENTED: returns a named NA
# placeholder after validating the input.
skewnessKurtosisRatio <-
function(periodPercentReturns)
{
    stopifnot(isUnivariate(periodPercentReturns))
    R = periodPercentReturns
    # Placeholder: the actual computation is missing in this version.
    ans = NA
    names(ans) = "Skewness Kurtosis Ratio"
    ans
}
# Prospect ratio -- NOT IMPLEMENTED: returns a named NA placeholder after
# validating the input.
prospectRatio <-
function(periodPercentReturns)
{
    stopifnot(isUnivariate(periodPercentReturns))
    R = periodPercentReturns
    # Placeholder: the actual computation is missing in this version.
    ans = NA
    names(ans) = "Prospect Ratio"
    ans
}
esest_nsig <- function(yi, vi, int, tau.int, ycv, method, con)
{
if (method == "ML")
{
tau.est <- con$stval.tau
int <- con$int
tau.int <- con$tau.int
est.ci <- con$est.ci
tau.ci <- con$tau.ci
tol <- con$tol
max.iter <- con$max.iter
verbose <- con$verbose
stay <- TRUE
est <- -999
i <- 0
while(stay) {
i <- i+1
old <- est
tau.old <- tau.est
est <- suppressWarnings(optimize(ml_est, int, tau.est, yi, vi, ycv,
maximum = TRUE)$maximum)
tau.est <- suppressWarnings(optimize(ml_tau, tau.int, est, yi, vi, ycv,
maximum = TRUE)$maximum)
if (verbose == TRUE)
{
cat("est = ", est, "tau.est = ", tau.est, fill = TRUE)
}
stay <- ifelse(abs(tau.est-tau.old) < tol & abs(est-old) < tol |
i == max.iter, FALSE, TRUE)
}
if (i == max.iter | any(round(est, 3) %in% int | round(tau.est, 3) %in%
tau.int) & round(tau.est, 3) != 0)
{
est <- NA
tau.est <- NA
lb <- NA
ub <- NA
tau.lb <- NA
tau.ub <- NA
} else
{
tmp.lb <- suppressWarnings(try(uniroot(get_LR_est, interval = c(est-est.ci[1], est),
yi = yi, vi = vi, est = est, tau.est = tau.est,
ycv = ycv)$root, silent = TRUE))
lb <- ifelse(class(tmp.lb) == "try-error", NA, tmp.lb)
tmp.ub <- suppressWarnings(try(uniroot(get_LR_est, interval = c(est, est+est.ci[2]),
yi = yi, vi = vi, est = est, tau.est = tau.est,
ycv = ycv)$root, silent = TRUE))
ub <- ifelse(class(tmp.ub) == "try-error", NA, tmp.ub)
if (get_LR_tau(prof.tau = 0, yi = yi, vi = vi, est = est, tau.est = tau.est, ycv = ycv) < 0)
{
tau.lb <- 0
} else
{
tmp.lb <- suppressWarnings(try(uniroot(get_LR_tau, interval = c(max(0, tau.est-tau.ci[1]),
tau.est), yi = yi,
vi = vi, est = est, tau.est = tau.est, ycv = ycv)$root,
silent = TRUE))
tau.lb <- ifelse(class(tmp.lb) == "try-error", NA, tmp.lb)
}
tmp.ub <- suppressWarnings(try(uniroot(get_LR_tau, interval = c(tau.est, tau.est+tau.ci[2]),
yi = yi, vi = vi, est = est, tau.est = tau.est, ycv = ycv)$root,
silent = TRUE))
tau.ub <- ifelse(class(tmp.ub) == "try-error", NA, tmp.ub)
}
} else if (method == "P" | method == "LNP")
{
bounds.int <- con$bounds.int
tau.int <- con$tau.int
est.ci <- con$est.ci
tau.ci <- con$tau.ci
tol <- con$tol
max.iter <- con$max.iter
verbose <- con$verbose
tau.est <- 0
est <- 0
stay <- TRUE
i <- 0
est.max <- taus.max <- numeric(10)
while(stay)
{
i <- i+1
if (i > max.iter-99)
{
tau.dif.new <- round(abs(tau.est-tau.old), 2)
if (tau.dif.new == tau.dif)
{
tau.est <- tau.est+(abs(tau.old-tau.est))/2
}
}
if (i > max.iter-100)
{
tau.dif <- round(abs(tau.old-tau.est), 2)
}
int <- bounds_nsig(yi = yi, vi = vi, tau.est = tau.est, ycv = ycv,
method = method, bounds.int = c(bounds.int[1], sort(yi),
bounds.int[2]))
old <- est
tau.old <- tau.est
est <- try(uniroot(pdist_nsig, interval = c(int[1], int[2]), tau = tau.est, yi = yi,
vi = vi, param = "est", ycv = ycv, method = method, val = "es",
cv_P = 0)$root, silent = TRUE)
if (class(est) == "try-error")
{
est <- NA
tau.est <- NA
break
}
if (i == 1)
{
tau0 <- round(pdist_nsig(est = est, tau = 0, yi = yi, vi = vi, param = "tau",
ycv = ycv, method = method, val = "es", cv_P = 0), 3)
if (method == "P" & tau0 <= 0 | method == "LNP" & tau0 >= 0)
{
tau.est <- 0
est <- try(uniroot(pdist_nsig, interval = c(int[1], int[2]), tau = tau.est, yi = yi,
vi = vi, param = "est", ycv = ycv, method = method, val = "es",
cv_P = 0)$root, silent = TRUE)
if (class(est) == "try-error")
{
est <- NA
tau.est <- NA
}
break
}
}
tau.est <- try(uniroot(pdist_nsig, interval = c(tau.int[1], tau.int[2]), est = est, yi = yi,
vi = vi, param = "tau", ycv = ycv, method = method, val = "es",
cv_P = 0)$root, silent = TRUE)
if (class(tau.est) == "try-error")
{
est <- NA
tau.est <- NA
break
}
if (con$verbose == TRUE)
{
cat("est = ", est, "tau.est = ", tau.est, fill = TRUE)
}
if (i > max.iter-10)
{
est.max[i-max.iter+10] <- est
taus.max[i-max.iter+10] <- tau.est
}
if (i == max.iter)
{
if(mean(abs(diff(est.max))) < 0.1)
{
tau.est <- mean(taus.max)
int <- bounds_nsig(yi = yi, vi = vi, ycv = ycv, method = method, tau.est = tau.est,
bounds.int = c(bounds.int[1], sort(yi), bounds.int[2]))
est <- try(uniroot(pdist_nsig, interval = c(int[1], int[2]), tau = tau.est, yi = yi,
vi = vi, param = "est", method = method, val = "es")$root, silent = TRUE)
if (class(est) == "try-error")
{
est <- NA
tau.est <- NA
break
}
} else
{
est <- NA
tau.est <- NA
}
}
stay <- ifelse(abs(tau.est-tau.old) < tol & abs(est-old) < tol | i == max.iter,
FALSE, TRUE)
}
if (any(round(est, 3) %in% int | round(tau.est, 3) %in% tau.int) & round(tau.est, 3) != 0)
{
tau.est <- NA
est <- NA
}
if (is.na(est) == TRUE & is.na(tau.est) == TRUE)
{
lb <- NA
ub <- NA
tau.lb <- NA
tau.ub <- NA
} else
{
lb <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(est-con$est.ci[1], est),
tau = tau.est, yi = yi, vi = vi, param = "est",
ycv = ycv, method = method, val = "ci.lb",
get_cv_P(length(yi)))$root, silent = TRUE))
if (class(lb) == "try-error")
{
lb <- NA
}
ub <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(est, est+con$est.ci[2]),
tau = tau.est, yi = yi, vi = vi, param = "est",
ycv = ycv, method = method, val = "ci.ub",
get_cv_P(length(yi)))$root, silent = TRUE))
if (class(ub) == "try-error")
{
ub <- NA
}
if (method == "P")
{
if (pdist_nsig(est = est, tau = 0, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.ub", cv_P = get_cv_P(length(yi))) < 0)
{
tau.lb <- tau.ub <- 0
} else if (pdist_nsig(est = est, tau = 0, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.lb", cv_P = get_cv_P(length(yi))) < 0)
{
tau.lb <- 0
tau.ub <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(0, tau.est+con$tau.ci[1]),
est = est, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.ub", cv_P = get_cv_P(length(yi)))$root,
silent = TRUE))
if (class(tau.ub) == "try-error")
{
tau.ub <- NA
}
} else
{
tau.lb <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(max(0, tau.est-con$tau.ci[2]), tau.est),
est = est, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.lb", cv_P = get_cv_P(length(yi)))$root,
silent = TRUE))
if (class(tau.lb) == "try-error")
{
tau.lb <- NA
}
tau.ub <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(0, tau.est+con$tau.ci[1]),
est = est, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.ub", cv_P = get_cv_P(length(yi)))$root,
silent = TRUE))
if (class(tau.ub) == "try-error")
{
tau.ub <- NA
}
}
} else if (method == "LNP")
{
if (pdist_nsig(est = est, tau = 0, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.ub", cv_P = get_cv_P(length(yi))) > 0)
{
tau.lb <- tau.ub <- 0
} else if (pdist_nsig(est = est, tau = 0, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.lb", cv_P = get_cv_P(length(yi))) > 0)
{
tau.lb <- 0
tau.ub <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(0, tau.est+con$tau.ci[1]),
est = est, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.ub", cv_P = get_cv_P(length(yi)))$root,
silent = TRUE))
if (class(tau.ub) == "try-error")
{
tau.ub <- NA
}
} else
{
if (con$tau.ci[2] == 0)
{
tau.lb <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(0, tau.est), est = est,
yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.lb", cv_P = get_cv_P(length(yi)))$root,
silent = TRUE))
} else
{
tau.lb <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(max(0, tau.est-con$tau.ci[2]), tau.est),
est = est, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.lb", cv_P = get_cv_P(length(yi)))$root,
silent = TRUE))
}
if (class(tau.lb) == "try-error")
{
tau.lb <- NA
}
tau.ub <- suppressWarnings(try(uniroot(pdist_nsig, interval = c(0, tau.est+con$tau.ci[1]),
est = est, yi = yi, vi = vi, param = "tau", ycv = ycv,
method = method, val = "ci.ub", cv_P = get_cv_P(length(yi)))$root,
silent = TRUE))
if (class(tau.ub) == "try-error")
{
tau.ub <- NA
}
}
}
}
}
return(data.frame(est = est, tau.est = tau.est, lb = lb, ub = ub, tau.lb = tau.lb,
tau.ub = tau.ub))
} |
context("date_y")
# date_y(year) is expected to resolve to January 1st of the given year.
test_that("date_y works as expected", {
  # Compare as Date objects so the check is class-exact, not string-based.
  # (Dropped the redundant c() wrapper around the single literal.)
  expect_identical(as.Date(date_y(2018)), as.Date("2018-01-01"))
})
devRateModel <- function(
  eq, temp, devRate, startValues, dfData = NULL, algo = "GN", ...){
  # Fit a temperature-dependent development rate equation to rate data.
  #
  # Args:
  #   eq:          equation object; a list whose first element is the model
  #                formula (or, for the two-phase equations eq040/eq150, a
  #                list of two formulas), with $id and $eqAlt components.
  #   temp:        numeric vector of temperatures (ignored if dfData given).
  #   devRate:     numeric vector of development rates (ignored if dfData given).
  #   startValues: starting values for nls; for eq040/eq150 a list of two
  #                start lists (one per phase). Ignored for eq030/eq110/
  #                eq120/eq130, which use fixed defaults (as before).
  #   dfData:      optional data.frame; column 1 = temperature, column 2 = rate.
  #   algo:        "GN" (Gauss-Newton via stats::nls) or
  #                "LM" (Levenberg-Marquardt via minpack.lm::nlsLM).
  #   ...:         forwarded to the fitting function.
  #
  # Returns: an nls fit, a list of two nls fits (eq040/eq150), or NULL with
  # a warning when algo is unknown.
  if (!is.null(dfData)){
    temp <- dfData[, 1]
    devRate <- dfData[, 2]
  }
  # Select the fitting backend once; switch() evaluates only the matched
  # branch, so minpack.lm is never touched unless algo == "LM".
  fitFun <- switch(algo,
                   "GN" = stats::nls,
                   "LM" = minpack.lm::nlsLM,
                   NULL)
  if (is.null(fitFun)){
    warning(paste0("error: algorithm ", algo," unknown"))
    return(NULL)
  }
  if (eq$id == "eq040" | eq$id == "eq150"){
    # Two-phase equations: split the data at the temperature of maximal
    # observed rate (tTh) into an ascending (<= tTh) and a descending
    # (>= tTh) part, fitted separately.
    tTh <- temp[devRate == max(devRate, na.rm = TRUE)]
    tTh <- unique(tTh[!is.na(tTh)])
    if (length(tTh) > 1){
      # Several candidate temperatures: keep the one whose mean rate is
      # highest (first one on ties).
      meanDevRates <- sapply(seq_along(tTh), function(ti){
        mean(devRate[temp == tTh[ti]], na.rm = TRUE)})
      tTh <- tTh[meanDevRates == max(meanDevRates)][1]
    }
    ascending <- temp <= tTh
    descending <- temp >= tTh
    nls_devRate1 <- fitFun(
      formula = eq[[1]][[1]],
      data = data.frame(rT = devRate[ascending], T = temp[ascending]),
      start = startValues[[1]],
      ...)
    # Substitute the first-phase coefficients (by name, in the original
    # replacement order) into the alternative equation for phase two.
    coefNames <- if (eq$id == "eq040"){
      c("C", "k1", "k2")
    } else {
      c("Rm", "Tmax", "To")
    }
    newEq <- eq$eqAlt[2]
    for (k in seq_along(coefNames)){
      newEq <- gsub(coefNames[k], stats::coef(nls_devRate1)[k], newEq)
    }
    newEq <- paste0("rT ~ ", newEq)
    # Fix: "..." is now forwarded to the second-phase fit for eq040 as
    # well; the original forwarded it only for eq150.
    nls_devRate2 <- fitFun(
      formula = newEq,
      data = data.frame(rT = devRate[descending], x = temp[descending]),
      start = startValues[[2]],
      ...)
    nls_devRate <- list(nls_devRate1, nls_devRate2)
  } else {
    # Single-phase equations: a few classic models get fixed default
    # starting values; every other equation uses the user-supplied
    # startValues. Because the if/else below is lazily evaluated, a
    # missing startValues argument is still tolerated for the fixed-start
    # equations (as in the original).
    fixedStarts <- list(
      eq030 = list(aa = 1, bb = 1),
      eq110 = list(a0 = 1, a1 = 1, a2 = 1),
      eq120 = list(a0 = 1, a1 = 1, a2 = 1, a3 = 1),
      eq130 = list(a0 = 1, a1 = 1, a2 = 1, a3 = 1, a4 = 1))
    startList <- if (eq$id %in% names(fixedStarts)){
      fixedStarts[[eq$id]]
    } else {
      startValues
    }
    nls_devRate <- fitFun(
      formula = eq[[1]],
      data = data.frame(rT = devRate, T = temp),
      start = startList, ...)
  }
  return(nls_devRate)
}
devRatePrint <- function(myNLS, doPlots = FALSE){
  # Print a goodness-of-fit report for a fitted development-rate model:
  # model summary, Wald confidence intervals, residual normality
  # (Shapiro-Wilk), a lag-1 residual independence check, and AIC/BIC.
  #
  # Args:
  #   myNLS:   an "nls" object fitted with variables named "T" (temperature)
  #            and "rT" (rate), as produced by devRateModel; the data are
  #            retrieved from the model's evaluation environment.
  #   doPlots: if TRUE, draw observed-vs-fitted and normal QQ plots
  #            side by side.
  #
  # Returns: a list with components sumNLS, confint, normRD, indTest,
  # AIC and BIC.
  temp <- get("T", myNLS$m$getEnv())
  devRate <- get("rT", myNLS$m$getEnv())
  # NOTE(review): the cat() section-header strings below were corrupted in
  # the source (unterminated string literals); the labels are
  # reconstructions — confirm against the package's original wording.
  cat("---- Model summary ----\n")
  print(summary(myNLS))
  cat("---- Confidence intervals (Wald, confint.default) ----\n")
  print(stats::confint.default(myNLS))
  cat("\n")
  cat("---- Residual diagnostics ----\n")
  cat("Normality of residuals (Shapiro-Wilk):\n")
  print(stats::shapiro.test(stats::residuals(myNLS)))
  if(doPlots == TRUE){
    opar <- graphics::par(mfrow = c(1, 2))
    # Restore graphics state even if a plotting call fails (the original
    # restored it only on the success path).
    on.exit(graphics::par(opar), add = TRUE)
    graphics::plot(temp,
                   devRate,
                   main = paste0(
                     "Obs. versus fitted (cor: ",
                     round(stats::cor(devRate, stats::predict(myNLS)), digits = 4),
                     ")")
    )
    graphics::points(temp,
                     stats::predict(myNLS),
                     lty = 2,
                     lwd = 2,
                     col = 2
    )
    stats::qqnorm(stats::residuals(myNLS))
    stats::qqline(stats::residuals(myNLS))
  }
  cat("---- Residual independence (lag-1 regression) ----\n")
  # Regress residuals at position i on residuals at i+1; a significant
  # slope suggests autocorrelated (non-independent) residuals.
  N <- length(stats::residuals(myNLS))
  indTest <- stats::lm(stats::residuals(myNLS)[-N] ~ stats::residuals(myNLS)[-1])
  print(summary(indTest))
  cat("---- Information criteria ----\n")
  cat(paste0("Akaike Information Criterion (AIC): ", stats::AIC(myNLS), "\n"))
  cat(paste0("Bayesian Information Criterion (BIC): ", stats::BIC(myNLS), "\n"))
  objReturn <- list(sumNLS = summary(myNLS),
                    confint = stats::confint.default(myNLS),
                    normRD = stats::shapiro.test(stats::residuals(myNLS)),
                    indTest = summary(indTest),
                    AIC = stats::AIC(myNLS),
                    BIC = stats::BIC(myNLS))
  return(objReturn)
}
# Non-code residue appended to the file (dataset-viewer boilerplate);
# kept below as comments so the file remains parseable R:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.