makeTrecanniFunction = function() {
makeSingleObjectiveFunction(
name = "Trecanni Function",
id = "trecanni_2d",
fn = function(x) {
assertNumeric(x, len = 2L, any.missing = FALSE, all.missing = FALSE)
x[1]^4 + 4 * (x[1]^3 + x[1]^2) + x[2]^2
},
par.set = makeNumericParamSet(
len = 2L,
id = "x",
lower = c(-5, -5),
upper = c(5, 5),
vector = TRUE
),
tags = c("continuous", "differentiable", "separable", "non-scalable", "unimodal"),
global.opt.params = matrix(
c(0, 0,
-2, 0),
ncol = 2L, byrow = TRUE),
global.opt.value = 0
)
}
class(makeTrecanniFunction) = c("function", "smoof_generator")
attr(makeTrecanniFunction, "name") = c("Trecanni")
attr(makeTrecanniFunction, "type") = c("single-objective")
attr(makeTrecanniFunction, "tags") = c("single-objective", "continuous", "differentiable", "separable", "non-scalable", "unimodal")
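
# Usage sketch (assumes the smoof package, which supplies
# makeSingleObjectiveFunction() and makeNumericParamSet()):
fn <- makeTrecanniFunction()
fn(c(0, 0))   # 0 at the first global optimum
fn(c(-2, 0))  # 0 at the second global optimum
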
library(wordcloud)
library(RColorBrewer)
library(SnowballC)
library(RCurl)
library(XML)
library(tm)
source('http://www.sthda.com/upload/rquery_wordcloud.r')
filePath <- "http://www.sthda.com/sthda/RDoc/example-files/martin-luther-king-i-have-a-dream-speech.txt"
res <- rquery.wordcloud(filePath, type = "file", lang = "english")
res
res <- rquery.wordcloud(filePath, type = "file", lang = "english", min.freq = 1, max.words = 200)
res <- rquery.wordcloud(filePath, type = "file", lang = "english", colorPalette = "Reds")
res <- rquery.wordcloud(filePath, type = "file", lang = "english", colorPalette = "RdBu")
res <- rquery.wordcloud(filePath, type = "file", lang = "english", colorPalette = "black")
tdm <- res$tdm
freqTable <- res$freqTable
head(freqTable, 10)
barplot(freqTable[1:10,]$freq, las = 2,
names.arg = freqTable[1:10,]$word,
col ="lightblue", main ="Most frequent words",
ylab = "Word frequencies")
findFreqTerms(tdm, lowfreq = 4)
findAssocs(tdm, terms = "freedom", corlimit = 0.3)
url <- "http://www.sthda.com/english/wiki/create-and-format-powerpoint-documents-from-r-software"
rquery.wordcloud(x = url, type = "url")
D_Jost <- function(x, hsht_mean = "arithmetic"){
mean_type <- match.arg(hsht_mean, c("arithmetic", "harmonic"))
mean_f <- if(mean_type == "arithmetic") mean else harmonic_mean
gn <- length(unique(pop(x)))
loci <- t(sapply(seploc(x), D.per.locus))
global_Hs <- mean_f(loci[, 1], na.rm = TRUE)
global_Ht <- mean_f(loci[, 2], na.rm = TRUE)
global_D <- (global_Ht - global_Hs)/(1 - global_Hs) * (gn/(gn - 1))
harm_D <- harmonic_mean(loci[,3])
return(list("per.locus"=loci[,3],
"global.het"=global_D,
"global.harm_mean" = harm_D
))
}
D.per.locus <- function(g) {
hets <- HsHt(g)
if(all(is.na(hets))){
return(hets)
}
Ht_est <- hets[[1]]
Hs_est <- hets[[2]]
n <- hets[[3]]
D <- (Ht_est-Hs_est)/(1-Hs_est) * (n/(n-1))
return(c(Hs_est, Ht_est, D))
}
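
# Usage sketch (a hypothetical call; assumes the adegenet package for pop(),
# seploc(), and the nancycats genind data, plus the HsHt() and harmonic_mean()
# helpers defined elsewhere in the mmod package):
library(adegenet)
data(nancycats)
D_Jost(nancycats, hsht_mean = "harmonic")$global.het
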
resource_frequency_activity <- function(eventlog) {
nr_of_resources <- NULL
eventlog %>%
group_by(!!activity_id_(eventlog), !!resource_id_(eventlog), !!activity_instance_id_(eventlog)) %>%
summarize() %>%
summarize(freq = n()) %>%
grouped_summary_statistics("freq", nr_of_resources = n()) %>%
select(!!activity_id_(eventlog), nr_of_resources, everything())
}
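
# Usage sketch (a sketch only: activity_id_(), resource_id_() and
# activity_instance_id_() are internal bupaR-ecosystem helpers assumed to be
# available; eventdataR::patients is an example event log):
library(dplyr)
library(bupaR)
resource_frequency_activity(eventdataR::patients)
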
context("Testing par is reset when using plot() on a S3 BayesMassBal object")
test_that("par is reset within plot.BayesMassBal",{
par_initial <- par(no.readonly = TRUE)
y <- importObservations(file = system.file("extdata", "twonode_example.csv",
package = "BayesMassBal"),
header = TRUE, csv.params = list(sep = ";"))
C <- matrix(c(1,-1,0,-1,0,0,1,-1,0,-1), byrow = TRUE, ncol = 5, nrow = 2)
X <- constrainProcess(C = C)
BMB_example <- BMB(X = X, y = y, cov.structure = "indep",
BTE = c(10,50,1), lml = FALSE, verb=0)
plot(BMB_example, sample.params = list(beta = list(CuFeS2 = 1:3, gangue = 1:3)),
layout = "trace", hdi.params = c(1, 0.95))
par_final <- par(no.readonly = TRUE)
expect_identical(par_initial, par_final)
})
label_box <- 'box(
title = "Closable box, with label",
closable = TRUE,
width = 12,
label = boxLabel(1, status = "danger"),
solidHeader = FALSE,
collapsible = TRUE,
p("Box Content")
)'
nonideal <- function(species, speciesprops, IS, T, P, A_DH, B_DH, m_star=NULL, method=thermo()$opt$nonideal) {
mettext <- function(method) {
mettext <- paste(method, "equation")
if(method=="Bdot0") mettext <- "B-dot equation (B-dot = 0)"
mettext
}
if(missing(speciesprops)) {
if(species[1] %in% c("Bdot", "Bdot0", "bgamma", "bgamma0", "Alberty")) {
thermo <- get("thermo", CHNOSZ)
oldnon <- thermo$opt$nonideal
thermo$opt$nonideal <- species[1]
assign("thermo", thermo, CHNOSZ)
message("nonideal: setting nonideal option to use ", mettext(species))
return(invisible(oldnon))
} else stop(species[1], " is not a valid nonideality setting (Bdot, Bdot0, bgamma, bgamma0, or Alberty)")
}
if(!method %in% c("Alberty", "Bdot", "Bdot0", "bgamma", "bgamma0")) {
if(missing(method)) stop("invalid setting (", thermo()$opt$nonideal, ") in thermo()$opt$nonideal")
else stop("invalid method (", method, ")")
}
Alberty <- function(prop = "loggamma", Z, I, T) {
B <- 1.6
alpha <- expression(3 * (-16.39023 + 261.3371/T + 3.3689633*log(T)- 1.437167*(T/100) + 0.111995*(T/100)^2))
DD <- function(expr, name, order = 1) {
if(order < 1) stop("'order' must be >= 1")
if(order == 1) D(expr, name)
else DD(D(expr, name), name, order - 1)
}
lngamma <- function(alpha, Z, I, B) - alpha * Z^2 * I^(1/2) / (1 + B * I^(1/2))
R <- 1.9872
if(prop=="loggamma") return(lngamma(eval(alpha), Z, I, B) / log(10))
else if(prop=="G") return(R * T * lngamma(eval(alpha), Z, I, B))
else if(prop=="H") return(- R * T^2 * lngamma(eval(DD(alpha, "T", 1)), Z, I, B))
else if(prop=="S") return( ( - R * T^2 * lngamma(eval(DD(alpha, "T", 1)), Z, I, B) - R * T * lngamma(eval(alpha), Z, I, B) ) / T)
else if(prop=="Cp") return(- 2 * R * T * lngamma(eval(DD(alpha, "T", 1)), Z, I, B) - R * T^2 * lngamma(eval(DD(alpha, "T", 2)), Z, I, B))
}
Helgeson <- function(prop = "loggamma", Z, I, T, A_DH, B_DH, acirc, m_star, bgamma) {
loggamma <- - A_DH * Z^2 * I^0.5 / (1 + acirc * B_DH * I^0.5) - log10(1 + 0.0180153 * m_star) + bgamma * I
R <- 1.9872
if(prop=="loggamma") return(loggamma)
else if(prop=="G") return(R * T * log(10) * loggamma)
}
Setchenow <- function(prop = "loggamma", I, T, m_star, bgamma) {
loggamma <- - log10(1 + 0.0180153 * m_star) + bgamma * I
R <- 1.9872
if(prop=="loggamma") return(loggamma)
else if(prop=="G") return(R * T * log(10) * loggamma)
}
if(!is.numeric(species[[1]])) species <- info(species, "aq")
Z <- numeric(length(species))
for(i in 1:length(species)) {
mkp <- makeup(c("Z0", species[i]), sum=TRUE)
thisZ <- mkp[match("Z", names(mkp))]
if(is.na(thisZ)) next
if(thisZ==0) next
Z[i] <- thisZ
}
formula <- get("thermo", CHNOSZ)$OBIGT$formula[species]
if(grepl("Bdot", method)) {
# effective ion size parameters (in angstroms; converted to cm below)
acircdat <- c("Rb+"=2.5, "Cs+"=2.5, "NH4+"=2.5, "Tl+"=2.5, "Ag+"=2.5,
"K+"=3, "Cl-"=3, "Br-"=3, "I-"=3, "NO3-"=3,
"OH-"=3.5, "F-"=3.5, "HS-"=3.5, "BrO3-"=3.5, "IO3-"=3.5, "MnO4-"=3.5,
"Na+"=4, "HCO3-"=4, "H2PO4-"=4, "HSO3-"=4, "Hg2+2"=4, "SO4-2"=4, "SeO4-2"=4, "CrO4-2"=4, "HPO4-2"=4, "PO4-3"=4,
"Pb+2"=4.5, "CO3-2"=4.5, "SO4-2"=4.5, "MoO4-2"=4.5,
"Sr+2"=5, "Ba+2"=5, "Ra+2"=5, "Cd+2"=5, "Hg+2"=5, "S-2"=5, "WO4-2"=5,
"Li+"=6, "Ca+2"=6, "Cu+2"=6, "Zn+2"=6, "Sn+2"=6, "Mn+2"=6, "Fe+2"=6, "Ni+2"=6, "Co+2"=6,
"Mg+2"=8, "Be+2"=8,
"H+"=9, "Al+3"=9, "Cr+3"=9, "La+3"=9, "Ce+3"=9, "Y+3"=9, "Eu+3"=9,
"Th+4"=11, "Zr+4"=11, "Ce+4"=11, "Sn+4"=11)
acirc <- as.numeric(acircdat[formula])
acirc[is.na(acirc)] <- 4.5
acirc <- acirc * 10^-8
} else if(grepl("bgamma", method)) {
acirc <- rep(3.72e-8, length(species))
}
if(method=="bgamma") bgamma <- bgamma(convert(T, "C"), P)
else if(method=="Bdot") bgamma <- Bdot(convert(T, "C"))
else if(method %in% c("Bdot0", "bgamma0")) bgamma <- 0
if(is.null(m_star)) m_star <- IS
iH <- info("H+")
ie <- info("e-")
speciesprops <- as.list(speciesprops)
icharged <- ineutral <- logical(length(species))
for(i in 1:length(species)) {
myprops <- speciesprops[[i]]
if(species[i] == iH & get("thermo", CHNOSZ)$opt$ideal.H) next
if(species[i] == ie & get("thermo", CHNOSZ)$opt$ideal.e) next
didcharged <- didneutral <- FALSE
if(Z[i]==0) {
for(j in 1:ncol(myprops)) {
pname <- colnames(myprops)[j]
if(!pname %in% c("G", "H", "S", "Cp")) next
if(identical(get("thermo", CHNOSZ)$opt$Setchenow, "bgamma")) {
myprops[, j] <- myprops[, j] + Setchenow(pname, IS, T, m_star, bgamma)
didneutral <- TRUE
} else if(identical(get("thermo", CHNOSZ)$opt$Setchenow, "bgamma0")) {
myprops[, j] <- myprops[, j] + Setchenow(pname, IS, T, m_star, bgamma = 0)
didneutral <- TRUE
}
}
} else {
for(j in 1:ncol(myprops)) {
pname <- colnames(myprops)[j]
if(!pname %in% c("G", "H", "S", "Cp")) next
if(method=="Alberty") {
myprops[, j] <- myprops[, j] + Alberty(pname, Z[i], IS, T)
didcharged <- TRUE
} else {
myprops[, j] <- myprops[, j] + Helgeson(pname, Z[i], IS, T, A_DH, B_DH, acirc[i], m_star, bgamma)
didcharged <- TRUE
}
}
}
if(didcharged) {
if(method=="Alberty") myprops <- cbind(myprops, loggam = Alberty("loggamma", Z[i], IS, T))
else myprops <- cbind(myprops, loggam = Helgeson("loggamma", Z[i], IS, T, A_DH, B_DH, acirc[i], m_star, bgamma))
}
if(didneutral) {
if(get("thermo", CHNOSZ)$opt$Setchenow == "bgamma") myprops <- cbind(myprops, loggam = Setchenow("loggamma", IS, T, m_star, bgamma))
else if(get("thermo", CHNOSZ)$opt$Setchenow == "bgamma0") myprops <- cbind(myprops, loggam = Setchenow("loggamma", IS, T, m_star, bgamma = 0))
}
speciesprops[[i]] <- myprops
if(didcharged) icharged[i] <- TRUE
if(didneutral) ineutral[i] <- TRUE
}
if(sum(icharged) > 0) message("nonideal: calculations for ", paste(formula[icharged], collapse=", "), " (", mettext(method), ")")
if(sum(ineutral) > 0) message("nonideal: calculations for ", paste(formula[ineutral], collapse=", "), " (Setchenow equation)")
return(speciesprops)
}
bgamma <- function(TC = 25, P = 1, showsplines = "") {
# TC is temperature in degrees C, P is pressure in bar; values are splined along isobars
T <- TC
uP <- unique(P)
is1 <- identical(uP, 1) & all(T==25)
is500 <- identical(uP, 500)
is1000 <- identical(uP, 1000)
is2000 <- identical(uP, 2000)
is3000 <- identical(uP, 3000)
is4000 <- identical(uP, 4000)
is5000 <- identical(uP, 5000)
is10000 <- identical(uP, 10000)
is20000 <- identical(uP, 20000)
is30000 <- identical(uP, 30000)
is40000 <- identical(uP, 40000)
is50000 <- identical(uP, 50000)
is60000 <- identical(uP, 60000)
isoP <- is1 | is500 | is1000 | is2000 | is3000 | is4000 | is5000 | is10000 | is20000 | is30000 | is40000 | is50000 | is60000
if(!isoP | showsplines != "") {
T0 <- c(23.8, 49.4, 98.9, 147.6, 172.6, 197.1, 222.7, 248.1, 268.7)
B0 <- c(4.07, 4.27, 4.30, 4.62, 4.86, 4.73, 4.09, 3.61, 1.56) / 100
S0 <- splinefun(T0, B0)
}
if(is500 | !isoP | showsplines != "") {
T0.5 <- seq(0, 400, 25)
B0.5 <- c(5.6, 7.1, 7.8, 8.0, 7.8, 7.5, 7.0, 6.4, 5.7, 4.8, 3.8, 2.6, 1.0, -1.2, -4.1, -8.4, -15.2) / 100
S0.5 <- splinefun(T0.5, B0.5)
if(is500) return(S0.5(T))
}
if(is1000 | !isoP | showsplines != "") {
T1 <- seq(0, 500, 25)
B1 <- c(6.6, 7.7, 8.7, 8.3, 8.2, 7.9, 7.5, 7.0, 6.5, 5.9, 5.2, 4.4, 3.5, 2.5, 1.1, -0.6, -2.8, -5.7, -9.3, -13.7, -19.2) / 100
S1 <- splinefun(T1, B1)
if(is1000) return(S1(T))
}
if(is2000 | !isoP | showsplines != "") {
T2 <- c(seq(0, 500, 25), 550, 600)
B2 <- c(7.4, 8.3, 8.8, 8.9, 8.9, 8.7, 8.5, 8.1, 7.8, 7.4, 7.0, 6.6, 6.2, 5.8, 5.2, 4.6, 3.8, 2.9, 1.8, 0.5, -1.0, -3.93, -4.87) / 100
S2 <- splinefun(T2, B2)
if(is2000) return(S2(T))
}
if(is3000 | !isoP | showsplines != "") {
T3 <- seq(0, 500, 25)
B3 <- c(6.5, 8.3, 9.2, 9.6, 9.7, 9.6, 9.4, 9.3, 9.2, 9.0, 8.8, 8.6, 8.3, 8.1, 7.8, 7.5, 7.1, 6.6, 6.0, 5.4, 4.8) / 100
S3 <- splinefun(T3, B3)
if(is3000) return(S3(T))
}
if(is4000 | !isoP | showsplines != "") {
T4 <- seq(0, 500, 25)
B4 <- c(4.0, 7.7, 9.5, 10.3, 10.7, 10.8, 10.8, 10.8, 10.7, 10.6, 10.5, 10.4, 10.3, 10.2, 10.0, 9.8, 9.6, 9.3, 8.9, 8.5, 8.2) / 100
S4 <- splinefun(T4, B4)
if(is4000) return(S4(T))
}
if(is5000 | !isoP | showsplines != "") {
T5 <- c(seq(0, 500, 25), 550, 600)
B5 <- c(0.1, 6.7, 9.6, 11.1, 11.8, 12.2, 12.4, 12.4, 12.4, 12.4, 12.4, 12.3, 12.3, 12.2, 12.1, 11.9, 11.8, 11.5, 11.3, 11.0, 10.8, 11.2, 12.52) / 100
S5 <- splinefun(T5, B5)
if(is5000) return(S5(T))
}
if(is10000 | !isoP | showsplines != "") {
T10 <- c(25, seq(300, 1000, 50))
B10 <- c(12, 17.6, 17.8, 18, 18.2, 18.9, 21, 23.3, 26.5, 28.8, 31.4, 34.1, 36.5, 39.2, 41.6, 44.1) / 100
S10 <- splinefun(T10, B10)
if(is10000) return(S10(T))
}
if(is20000 | !isoP | showsplines != "") {
T20 <- c(25, seq(300, 1000, 50))
B20 <- c(16, 21.2, 21.4, 22, 22.4, 23.5, 26.5, 29.2, 32.6, 35.2, 38.2, 41.4, 44.7, 47.7, 50.5, 53.7) / 100
S20 <- splinefun(T20, B20)
if(is20000) return(S20(T))
}
if(is30000 | !isoP | showsplines != "") {
T30 <- c(25, seq(300, 1000, 50))
B30 <- c(19, 23.9, 24.1, 24.6, 25.2, 26.7, 30.3, 32.9, 36.5, 39.9, 43, 46.4, 49.8, 53.2, 56.8, 60) / 100
S30 <- splinefun(T30, B30)
if(is30000) return(S30(T))
}
if(is40000 | !isoP | showsplines != "") {
T40 <- c(seq(300, 1000, 50))
B40 <- c(25.8, 26, 26.4, 27.2, 28.9, 33, 35.5, 39.2, 43.2, 46.4, 49.9, 53.4, 57.1, 61.2, 64.4) / 100
S40 <- splinefun(T40, B40)
if(is40000) return(S40(T))
}
if(is50000 | !isoP | showsplines != "") {
T50 <- c(seq(300, 1000, 50))
B50 <- c(27.1, 27.3, 27.7, 28.5, 30.5, 34.8, 37.3, 41.1, 45.5, 48.7, 52.4, 55.9, 59.8, 64.3, 67.5) / 100
S50 <- splinefun(T50, B50)
if(is50000) return(S50(T))
}
if(is60000 | !isoP | showsplines != "") {
T60 <- c(seq(300, 1000, 50))
B60 <- c(28, 28.2, 28.6, 29.5, 31.6, 36.1, 38.6, 42.5, 47.1, 50.4, 54.1, 57.6, 61.6, 66.5, 69.7) / 100
S60 <- splinefun(T60, B60)
if(is60000) return(S60(T))
}
if(showsplines == "T") {
thermo.plot.new(c(0, 1000), c(-.2, .7), xlab=axis.label("T"), ylab=expression(italic(b)[gamma]))
points(T0, B0, pch=0)
points(T0.5, B0.5, pch=1)
points(T1, B1, pch=1)
points(T2[-c(22:23)], B2[-c(22:23)], pch=1)
points(T2[c(22:23)], B2[c(22:23)], pch=2)
points(T3, B3, pch=1)
points(T4, B4, pch=1)
points(T5[-c(22:23)], B5[-c(22:23)], pch=1)
points(T5[c(22:23)], B5[c(22:23)], pch=2)
points(T10[-1], B10[-1], pch=2)
points(T20[-1], B20[-1], pch=2)
points(T30[-1], B30[-1], pch=2)
points(T10[1], B10[1], pch=5)
points(T20[1], B20[1], pch=5)
points(T30[1], B30[1], pch=5)
points(T40, B40, pch=6)
points(T50, B50, pch=6)
points(T60, B60, pch=6)
col <- rev(topo.colors(13))
T0 <- seq(0, 350, 5); lines(T0, S0(T0), col=col[1])
T0.5 <- seq(0, 500, 5); lines(T0.5, S0.5(T0.5), col=col[2])
T1 <- seq(0, 500, 5); lines(T1, S1(T1), col=col[3])
T2 <- seq(0, 600, 5); lines(T2, S2(T2), col=col[4])
T3 <- seq(0, 600, 5); lines(T3, S3(T3), col=col[5])
T4 <- seq(0, 600, 5); lines(T4, S4(T4), col=col[6])
T5 <- seq(0, 600, 5); lines(T5, S5(T5), col=col[7])
T10 <- c(25, seq(100, 1000, 5)); lines(T10, S10(T10), col=col[8])
T20 <- c(80, seq(100, 1000, 5)); lines(T20, S20(T20), col=col[9])
T30 <- c(125, seq(200, 1000, 5)); lines(T30, S30(T30), col=col[10])
T40 <- c(175, seq(300, 1000, 5)); lines(T40, S40(T40), col=col[11])
T50 <- c(225, seq(300, 1000, 5)); lines(T50, S50(T50), col=col[12])
T60 <- c(250, seq(300, 1000, 5)); lines(T60, S60(T60), col=col[13])
legend("topleft", pch=c(0, 1, 2, 5, 6), bty = "n",
legend=c("Helgeson, 1969", "Helgeson et al., 1981", "Manning et al., 2013", "spline control point", "high-P extrapolation"))
legend("bottomright", col=c(NA, rev(col)), lty=1, bty = "n",
legend=c("kbar", "60", "50", "40", "30", "20", "10", "5", "4", "3", "2", "1", "0.5", "Psat"))
title(main=expression("Deybe-H\u00FCckel extended term ("*italic(b)[gamma]*") parameter"))
} else if(showsplines=="P") {
thermo.plot.new(c(0, 5), c(-.2, .7), xlab=expression(log~italic(P)*"(bar)"), ylab=expression(italic(b)[gamma]))
P25 <- c(1, 500, 1000, 2000, 3000, 4000, 5000)
P100 <- c(1, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000)
P200 <- c(16, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000)
P300 <- c(86, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
P400 <- c(500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
P500 <- c(1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
P600 <- c(2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
P700 <- c(10000, 20000, 30000, 40000, 50000, 60000)
P800 <- c(10000, 20000, 30000, 40000, 50000, 60000)
P900 <- c(10000, 20000, 30000, 40000, 50000, 60000)
P1000 <- c(10000, 20000, 30000, 40000, 50000, 60000)
points(log10(P25), bgamma(25, P25))
points(log10(P100), bgamma(100, P100))
points(log10(P200), bgamma(200, P200))
points(log10(P300), bgamma(300, P300))
points(log10(P400), bgamma(400, P400))
points(log10(P500), bgamma(500, P500))
points(log10(P600), bgamma(600, P600))
points(log10(P700), bgamma(700, P700))
points(log10(P800), bgamma(800, P800))
points(log10(P900), bgamma(900, P900))
points(log10(P1000), bgamma(1000, P1000))
col <- tail(rev(rainbow(12)), -1)
P <- c(1, seq(50, 5000, 50)); lines(log10(P), bgamma(25, P), col=col[1])
P <- c(1, seq(50, 20000, 50)); lines(log10(P), bgamma(100, P), col=col[2])
P <- c(1, seq(50, 40000, 50)); lines(log10(P), bgamma(200, P), col=col[3])
P <- c(1, seq(50, 60000, 50)); lines(log10(P), bgamma(300, P), col=col[4])
P <- seq(500, 60000, 50); lines(log10(P), bgamma(400, P), col=col[5])
P <- seq(1000, 60000, 50); lines(log10(P), bgamma(500, P), col=col[6])
P <- seq(2000, 60000, 50); lines(log10(P), bgamma(600, P), col=col[7])
P <- seq(10000, 60000, 50); lines(log10(P), bgamma(700, P), col=col[8])
P <- seq(10000, 60000, 50); lines(log10(P), bgamma(800, P), col=col[9])
P <- seq(10000, 60000, 50); lines(log10(P), bgamma(900, P), col=col[10])
P <- seq(10000, 60000, 50); lines(log10(P), bgamma(1000, P), col=col[11])
legend("topleft", col=c(NA, col), lty=1, bty = "n", legend=c("degrees C", 25, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000))
legend("bottomright", pch=1, bty = "n", legend="points from iso-P splines")
title(main=expression("Deybe-H\u00FCckel extended term ("*italic(b)[gamma]*") parameter"))
} else {
ncond <- max(length(T), length(P))
T <- rep(T, length.out=ncond)
P <- rep(P, length.out=ncond)
bgamma <- numeric()
lastT <- NULL
for(i in 1:length(T)) {
if(T[i]==25 & P[i]==1) bgamma <- c(bgamma, 0.041)
else {
if(!identical(T[i], lastT)) {
if(T[i] >= 700) {
PT <- c(10000, 20000, 30000, 40000, 50000, 60000)
B <- c(S10(T[i]), S20(T[i]), S30(T[i]), S40(T[i]), S50(T[i]), S60(T[i]))
} else if(T[i] >= 600) {
PT <- c(2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
B <- c(S2(T[i]), S3(T[i]), S4(T[i]), S5(T[i]), S10(T[i]), S20(T[i]), S30(T[i]), S40(T[i]), S50(T[i]), S60(T[i]))
} else if(T[i] >= 500) {
PT <- c(1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
B <- c(S1(T[i]), S2(T[i]), S3(T[i]), S4(T[i]), S5(T[i]), S10(T[i]), S20(T[i]), S30(T[i]), S40(T[i]), S50(T[i]), S60(T[i]))
} else if(T[i] >= 400) {
PT <- c(500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
B <- c(S0.5(T[i]), S1(T[i]), S2(T[i]), S3(T[i]), S4(T[i]), S5(T[i]), S10(T[i]), S20(T[i]), S30(T[i]), S40(T[i]), S50(T[i]), S60(T[i]))
} else if(T[i] >= 300) {
PT <- c(86, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 60000)
B <- c(S0(T[i]), S0.5(T[i]), S1(T[i]), S2(T[i]), S3(T[i]), S4(T[i]), S5(T[i]), S10(T[i]), S20(T[i]), S30(T[i]), S40(T[i]), S50(T[i]), S60(T[i]))
} else if(T[i] >= 200) {
PT <- c(16, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000)
B <- c(S0(T[i]), S0.5(T[i]), S1(T[i]), S2(T[i]), S3(T[i]), S4(T[i]), S5(T[i]), S10(T[i]), S20(T[i]), S30(T[i]), S40(T[i]))
} else if(T[i] >= 100) {
PT <- c(1, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000)
B <- c(S0(T[i]), S0.5(T[i]), S1(T[i]), S2(T[i]), S3(T[i]), S4(T[i]), S5(T[i]), S10(T[i]), S20(T[i]))
} else if(T[i] >= 0) {
PT <- c(1, 500, 1000, 2000, 3000, 4000, 5000)
B <- c(S0(T[i]), S0.5(T[i]), S1(T[i]), S2(T[i]), S3(T[i]), S4(T[i]), S5(T[i]))
}
ST <- splinefun(PT, B)
lastT <- T[i]
}
bgamma <- c(bgamma, ST(P[i]))
}
}
return(bgamma)
}
}
Bdot <- function(TC) {
Bdot <- splinefun(c(25, 50, 100, 150, 200, 250, 300), c(0.0418, 0.0439, 0.0468, 0.0479, 0.0456, 0.0348, 0))(TC)
Bdot[TC > 300] <- 0
return(Bdot)
}
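
# Usage sketch for the helpers above: Bdot() is splined over 25-300 degrees C
# and clamped to zero above 300; bgamma() returns the extended-term parameter
# from the iso-P splines.
Bdot(c(25, 100, 300, 350))
bgamma(TC = 300, P = 1000)
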
aic_model <-
function(model){
# log(SSR) plus a 2*df/n penalty; uses the trace-based df (model$tS) if available
n <- length(model$Y)
log(model$SSR) + 2*(ifelse(is.null(model$tS), model$edf, model$tS))/n
}
skim <- skim_with()
skim_tee <- function(data, ..., skim_fun = skim) {
skimmed <- skim_fun(data, ...)
print(skimmed)
invisible(data)
}
skim_without_charts <- skim_with(
numeric = sfl(hist = NULL),
ts = sfl(line_graph = NULL)
)
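
# Usage sketch: skim_tee() prints the summary but returns its input invisibly,
# so it can sit in the middle of a pipeline; skim_without_charts() drops the
# sparkline columns (useful where unicode output misrenders).
skim_tee(iris)
skim_without_charts(iris)
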
context("extract_CrIs")
test_that("extract_CrIs return the expected credible intervals", {
samples <- data.frame(value = 1:10, type = "car")
summarised <- calc_CrIs(samples, summarise_by = "type",
CrIs = c(seq(0.1, 0.9, 0.1)))
expect_equal(extract_CrIs(summarised), seq(90, 10, -10))
})
"xysplom" <-
function(x, ...)
UseMethod("xysplom")
"xysplom.formula" <-
function(x, data=NULL,
na.action=na.pass, ...) {
dft <- do.formula.trellis.xysplom(x, data, na.action)
other <- list(...)
if (!("xlab" %in% names(list(...)))) other$xlab <- deparse(dft$x.formula[[2]])
if (!("ylab" %in% names(list(...)))) other$ylab <- deparse(dft$y.formula[[2]])
do.call("xysplom.default", c(dft[1:3], other))
}
"xysplom.default" <-
function(x, y=x, group, relation="free",
x.relation=relation, y.relation=relation,
xlim.in=NULL, ylim.in=NULL,
corr=FALSE, beta=FALSE, abline=corr||beta, digits=3,
x.between=NULL, y.between=NULL,
between.in=list(x=x.between, y=y.between),
scales.in=list(
x=list(relation=x.relation, alternating=FALSE),
y=list(relation=y.relation, alternating=FALSE)),
strip.in=strip.xysplom,
pch=16, cex=.75,
panel.input="panel.xysplom",
...,
cartesian=TRUE,
plot=TRUE) {
other <- list(...)
if (!("xlab" %in% names(list(...)))) other$xlab <- deparse(substitute(x))
if (!("ylab" %in% names(list(...)))) other$ylab <-
if (missing(y)) other$xlab
else deparse(substitute(y))
if (!is.null(xlim.in)) scales.in$x$limits <- xlim.in
if (!is.null(ylim.in)) scales.in$y$limits <- ylim.in
if (is.matrix(x) && !is.null(dimnames(x)[[2]])) {
dx2 <- dimnames(x)[[2]]
dx2.done <- TRUE
}
else
dx2.done <- FALSE
x <- as.data.frame(x)
n <- nrow(x)
if (!dx2.done) dx2 <- dimnames(x)[[2]]
dx2.tmp <- paste(deparse(substitute(x)), seq(length=ncol(x)), sep=".")
if (length(dx2))
dx2 <- ifelse(nchar(dx2), dx2, dx2.tmp)
else
dx2 <- dx2.tmp
if (missing(y)) dy2 <- dx2
else {
if (is.matrix(y) && !is.null(dimnames(y)[[2]])) {
dy2 <- dimnames(y)[[2]]
dy2.done <- TRUE
}
else
dy2.done <- FALSE
y <- as.data.frame(y)
if (!dy2.done) dy2 <- dimnames(y)[[2]]
dy2.tmp <- paste(deparse(substitute(y)), seq(length=ncol(y)), sep=".")
if (length(dy2))
dy2 <- ifelse(nchar(dy2), dy2, dy2.tmp)
else
dy2 <- dy2.tmp
}
y.cn <- rep(dy2, rep(n, ncol(y)))
x.cn <- rep(dx2, rep(n, ncol(x)))
old.warn <- options(warn=-1)
nxy <- n*ncol(x)*ncol(y)
if (cartesian)
ccd <- data.frame(y.list = unlist(rep(y, rep(ncol(x),ncol(y)))),
y = ordered(as.vector(sapply(
rep(as.data.frame(matrix(y.cn,n)),
rep(ncol(x),ncol(y))),
as.matrix)), dy2),
x.list = rep(unlist(x), length=nxy),
x = ordered(rep(x.cn, length=nxy), dx2),
original.row.names = rep(dimnames(x)[[1]], length=nxy))
else {
Lx <- length(unique(dx2))
Ly <- length(unique(dy2))
if (!(Lx == Ly || Lx==1 || Ly==1)) {
stop(paste("\nWhen 'cartesian==FALSE' the left-hand side '",
paste(dy2, collapse=" + "),
"' and right-hand side of the formula '",
paste(dx2, collapse=" + "),
"' must have the same number of variables."))
}
ccd <- data.frame(y.list = unlist(y),
y = ordered(y.cn, unique(dy2)),
x.list = unlist(x),
x = ordered(x.cn, unique(dx2)),
original.row.names = dimnames(x)[[1]])
ccd$y <- ordered(paste(as.character(ccd$y), as.character(ccd$x), sep=" ~ "),
paste(levels(ccd$y), levels(ccd$x), sep=" ~ "))
ccd$x <- "x"
}
if (missing(group) || is.null(group)) {
if (cartesian) formula <- y.list ~ x.list | x * y
else formula <- y.list ~ x.list | y
}
else {
group <- interaction(group)
if (cartesian) ccd$group <- rep(group, length=nxy)
else ccd$group <- rep(group, length=nxy/ncol(x))
if (cartesian) formula <- y.list ~ x.list | x * y * group
else formula <- y.list ~ x.list | y * group
}
options(old.warn)
switch(paste(c("corr", "beta")[c(corr, beta)], collapse="."),
corr={
ccd <- cbind(ccd,
corr=factor(rep(digits, nrow(ccd))))
if (missing(group) || is.null(group)) {
if (cartesian) formula <- y.list ~ x.list | x * y * corr
else formula <- y.list ~ x.list | y * corr
}
else {
if (cartesian) formula <- y.list ~ x.list | x * y * group * corr
else formula <- y.list ~ x.list | y * group * corr
}
},
beta={
ccd <- cbind(ccd,
beta=factor(rep(digits, nrow(ccd))))
if (missing(group) || is.null(group)) {
if (cartesian) formula <- y.list ~ x.list | x * y * beta
else formula <- y.list ~ x.list | y * beta
}
else {
if (cartesian) formula <- y.list ~ x.list | x * y * group * beta
else formula <- y.list ~ x.list | y * group * beta
}
},
corr.beta={
ccd <- cbind(ccd,
corr.beta=factor(rep(digits, nrow(ccd))))
if (missing(group) || is.null(group)) {
if (cartesian) formula <- y.list ~ x.list | x * y * corr.beta
else formula <- y.list ~ x.list | y * corr.beta
}
else {
if (cartesian) formula <- y.list ~ x.list | x * y * group * corr.beta
else formula <- y.list ~ x.list | y * group * corr.beta
}
}
)
panel.to.use <-
if (missing(panel.input) && abline)
function(x, y, ...) {
panel.xyplot(x, y, ...)
panel.abline(lm(y ~ x, na.action = na.exclude))
}
else panel.input
if (!cartesian)
formals(strip.in)$strip.names <- c(FALSE, FALSE)
result <- list(formula,
data=ccd,
between=between.in,
scales=scales.in,
panel=panel.to.use,
strip=strip.in,
pch=pch, cex=cex)
result <- c(result, other)
if (plot) do.call("xyplot", result)
else result
}
"strip.xysplom" <-
function(which.given,
which.panel,
var.name,
factor.levels,
shingle.intervals,
par.strip.text=trellis.par.get("add.text"),
strip.names=c(TRUE,TRUE),
style=1,
...) {
vnwg <- var.name[which.given]
if (match(vnwg, c("corr","beta","corr.beta"), 0)) {
{
which.parent <- 1
while(!(exists("rows.per.page", frame=which.parent)))
which.parent <- which.parent + 1
cell <- panel.number()
xy <- get("x",pos=sys.frame(which.parent))$panel.args[[cell]]
x <- xy$x
y <- xy$y
}
digits <- as.numeric(factor.levels[which.panel[which.given]])
if (vnwg != "beta") corr <- round(cor(na.exclude(cbind(x,y)))[1,2], digits)
if (vnwg != "corr") beta <- format(coef(lm(y ~ x, na.action=na.exclude))[2], digits=4)
strip.names <- c(TRUE,TRUE)
factor.levels[which.panel[which.given]] <-
switch(vnwg,
corr=corr,
beta=beta,
corr.beta={
strip.names <- c(FALSE,FALSE)
paste("corr: ", corr, " beta: ", beta, sep="")
})
}
strip.default(which.given=which.given,
which.panel=which.panel,
var.name=var.name,
factor.levels=factor.levels,
shingle.intervals=shingle.intervals,
par.strip.text=par.strip.text,
strip.names=strip.names,
style=style,
...)
}
"panel.xysplom" <-
function(corr, ...) panel.xyplot(...)
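
# Usage sketch (assumes the HH and lattice packages are attached): plot each
# left-hand variable against each right-hand variable, with the correlation
# displayed in the strip labels.
xysplom(Sepal.Length + Sepal.Width ~ Petal.Length + Petal.Width,
data = iris, corr = TRUE)
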
data <- data.frame(
a = lubridate::parse_date_time("7th of July 2020 06:15:30 pm", "%d%m%y%H%M%S%p"),
b = lubridate::parse_date_time("19700613", "%y%m%d")
)
stripcode <- function(x) {
attributes(x)$code <- NULL
x
}
test_that("Desired parts are extracted", {
expect_equal(
stripcode(extract_part(data, "a", "Year Month", "a.dt")),
data %>% tibble::add_column(
a.dt = factor(format(data$a, "%YM%m")),
.after = "a"
)
)
expect_equal(
stripcode(extract_part(data, "a", "Year Quarter", "a.dt")),
data %>% tibble::add_column(
a.dt = factor(format(zoo::as.yearqtr(data$a), '%YQ%q')),
.after = "a"
)
)
expect_equal(
stripcode(extract_part(data, "a", "Date only", "a.dt")),
data %>% tibble::add_column(
a.dt = as.Date("2020-07-07"),
.after = "a"
)
)
expect_equal(
stripcode(extract_part(data, "a", "Hour", "a.dt")),
data %>% tibble::add_column(
a.dt = as.numeric(format(data$a, "%H")),
.after = "a"
)
)
expect_true(is_dt(extract_part(data, "a", "Time only", "time")$time))
})
test_that("Invalid parts are handled", {
expect_equal(
stripcode(extract_part(data, "b", "Hour", "b.dt")),
data %>% tibble::add_column(b.dt = 0, .after = "b")
)
expect_equal(
stripcode(extract_part(data, "b", "Time only", "b.dt")),
data %>% tibble::add_column(b.dt = chron::times("00:00:00"), .after = "b")
)
})
months <- as.Date(paste("2019", 1:12, "01", sep = "-"))
weekdays <- as.character(
lubridate::wday(as.Date("2020-12-07") + 0:6, label = TRUE, abbr = FALSE)
)
test_that("Factor levels have correct order", {
expect_equal(
levels(extract_part(data, "a", "Month (full)", "month")$month),
format(months, "%B")
)
expect_equal(
levels(extract_part(data, "a", "Month (abbreviated)", "month")$month),
format(months, "%b")
)
expect_equal(
levels(extract_part(data, "a", "Day of the week (name)", "dow")$dow),
weekdays
)
expect_equal(
levels(extract_part(data, "a", "Day of the week (abbreviated)", "dow")$dow),
substr(weekdays, 1, 3)
)
})
"createModel" <-function(x, y, wts, method, tuneValue, obsLevels, pp = NULL, last = FALSE, sampling = NULL, classProbs, ...) {
if(is.data.frame(x) | is.matrix(x))
rownames(x) <- make.names(rownames(x), unique = TRUE)
if(!is.null(sampling) && sampling$first) {
tmp <- sampling$func(x, y)
x <- tmp$x
y <- tmp$y
rm(tmp)
}
if(!is.null(pp$options)) {
pp$method <- pp$options
pp$options <- NULL
if("ica" %in% pp$method) pp$n.comp <- pp$ICAcomp
pp$ICAcomp <- NULL
pp$x <- x
pp$outcome <- y
ppObj <- do.call("preProcess", pp)
ppObj$call <- "scrubbed"
x <- predict(ppObj, x)
rm(pp)
} else ppObj <- NULL
if(!is.null(sampling) && !sampling$first) {
tmp <- sampling$func(x, y)
x <- tmp$x
y <- tmp$y
rm(tmp)
}
modelFit <- method$fit(x = x,
y = y, wts = wts,
param = tuneValue, lev = obsLevels,
last = last,
classProbs = classProbs, ...)
if(is.null(method$label)) method$label <- ""
if(!isS4(modelFit) &&
!(method$label %in% c("Ensemble Partial Least Squares Regression",
"Ensemble Partial Least Squares Regression with Feature Selection"))) {
modelFit$xNames <- colnames(x)
modelFit$problemType <- if(is.factor(y)) "Classification" else "Regression"
modelFit$tuneValue <- tuneValue
modelFit$obsLevels <- obsLevels
modelFit$param <- list(...)
}
list(fit = modelFit, preProc = ppObj)
}
"rald" <- function(n, location = 0, scale = 1, p = 0.5) {
use.n <- if ((length.n <- length(n)) > 1)
length.n else if (!is.numeric(n))
stop("bad input for argument 'n'") else n
tau <- p
kappa <- sqrt(tau/(1 - tau))
location <- rep(location, length.out = use.n)
scale <- rep(scale, length.out = use.n)
tau <- rep(tau, length.out = use.n)
kappa <- rep(kappa, length.out = use.n)
ans <- location + scale * log(runif(use.n)^kappa/runif(use.n)^(1/kappa))/sqrt(2)
indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)
ans[!indexTF] <- NaN
ans
}
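
# Quick check sketch: with p = 0.5 the asymmetric Laplace is symmetric about
# 'location', so the sample median should land near 0.
set.seed(1)
median(rald(1e4, location = 0, scale = 1, p = 0.5))
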
get_counts_one_parent <- function(ploidy, gen.par.mk1, gen.par.mk2, gen.prog.mk1, gen.prog.mk2) {
res <- .Call("get_counts_one_parent_cpp", as.numeric(ploidy), as.numeric(gen.par.mk1), as.numeric(gen.par.mk2), as.numeric(gen.prog.mk1), as.numeric(gen.prog.mk2),
as.numeric(rep(0, ploidy + 1)), PACKAGE = "mappoly")
return(res[[6]])
}
get_counts_two_parents <- function(x = c(2, 2), ploidy, p.k, p.k1, q.k, q.k1, verbose = FALSE, joint.prob = FALSE) {
gen.prog.mk1 <- x[1]
gen.prog.mk2 <- x[2]
if (verbose) {
cat("Ploidy: ", ploidy, "\n")
M <- matrix(rep(letters[1:2], 2), ploidy, 4, byrow = TRUE)
M[1 + p.k, 1] <- "A"
M[1 + p.k1, 2] <- "B"
M[1 + q.k, 3] <- "A"
M[1 + q.k1, 4] <- "B"
format(apply(M, 1, function(x) cat(c("\t", x[1], "--------", x[2], " ", x[3], "--------", x[4], "\n"), collapse = "")))
cat("\n---------------------------------------------------\n\n")
}
dpk <- 0:length(p.k)
dpk1 <- 0:length(p.k1)
dqk <- 0:length(q.k)
dqk1 <- 0:length(q.k1)
comb.all.gam.k <- expand.grid(dpk, dqk)
comb.all.gam.k1 <- expand.grid(dpk1, dqk1)
pos.k <- comb.all.gam.k[apply(comb.all.gam.k, 1, sum) == x[1], ]
pos.k1 <- comb.all.gam.k1[apply(comb.all.gam.k1, 1, sum) == x[2], ]
r <- NULL
den <- 0
for (i in 1:nrow(pos.k)) {
b <- NULL
for (j in 1:nrow(pos.k1)) {
a1 <- get_counts_one_parent(ploidy, p.k, p.k1, pos.k[i, 1], pos.k1[j, 1])
a2 <- get_counts_one_parent(ploidy, q.k, q.k1, pos.k[i, 2], pos.k1[j, 2])
r <- rbind(r, kronecker(a1[-(2 + ploidy/2)], a2[-(2 + ploidy/2)]))
b <- c(b, a1[2 + ploidy/2] * a2[2 + ploidy/2])
}
den <- den + mean(b)
}
r <- apply(r, 2, sum)
y <- apply(expand.grid(0:(ploidy/2), 0:(ploidy/2)), 1, function(x) paste(sort(x), collapse = ""))
names(r) <- y
res <- NULL
for (i in sort(unique(y))) res <- c(res, sum(r[names(r) == i]))
if (!joint.prob)
res <- res/den
names(res) <- sort(unique(y))
res
}
get_counts <- function(ploidy, P.k = NULL, P.k1 = NULL, Q.k = NULL, Q.k1 = NULL, verbose = FALSE, make.names = FALSE, joint.prob = FALSE) {
if (verbose) {
cat("Ploidy: ", ploidy, "\n")
M <- matrix(rep(letters[1:2], 2), ploidy, 4, byrow = TRUE)
M[1 + P.k, 1] <- "A"
M[1 + P.k1, 2] <- "B"
M[1 + Q.k, 3] <- "A"
M[1 + Q.k1, 4] <- "B"
format(apply(M, 1, function(x) cat(c("\t", x[1], "--------", x[2], " ", x[3], "--------", x[4], "\n"), collapse = "")))
cat("\n---------------------------------------------------\n\n")
}
if (all(is.null(P.k)))
dP.k <- 0 else if (length(P.k) > ploidy/2)
dP.k <- (ploidy/2):(ploidy/2 + length(P.k) - ploidy) else dP.k <- 0:length(P.k)
if (all(is.null(P.k1)))
dP.k1 <- 0 else if (length(P.k1) > ploidy/2)
dP.k1 <- (ploidy/2):(ploidy/2 + length(P.k1) - ploidy) else dP.k1 <- 0:length(P.k1)
if (all(is.null(Q.k)))
dQ.k <- 0 else if (length(Q.k) > ploidy/2)
dQ.k <- (ploidy/2):(ploidy/2 + length(Q.k) - ploidy) else dQ.k <- 0:length(Q.k)
if (all(is.null(Q.k1)))
dQ.k1 <- 0 else if (length(Q.k1) > ploidy/2)
dQ.k1 <- (ploidy/2):(ploidy/2 + length(Q.k1) - ploidy) else dQ.k1 <- 0:length(Q.k1)
counts <- NULL
bla <- sort(unique(kronecker(dP.k, dQ.k, "+")))
ble <- sort(unique(kronecker(dP.k1, dQ.k1, "+")))
bli <- expand.grid(ble, bla)[, 2:1]
blo <- bli[1:ceiling(nrow(bli)/2), ]
if (make.names == TRUE)
counts <- matrix(NA, nrow = nrow(bli)) else {
counts <- t(apply(blo, 1, get_counts_two_parents, ploidy = ploidy, p.k = P.k, p.k1 = P.k1, q.k = Q.k, q.k1 = Q.k1, joint.prob = joint.prob))
if (nrow(bli) == 1) {
rownames(counts) <- apply(bli, 1, paste, collapse = " ")
return(counts)
}
if (nrow(bli)%%2 == 1) {
counts <- rbind(counts, counts[(nrow(counts) - 1):1, ])
} else {
counts <- rbind(counts, counts[nrow(counts):1, ])
}
}
rownames(counts) <- apply(bli, 1, paste, collapse = " ")
return(counts)
}
get_counts_all_phases <- function(x, ploidy, verbose = FALSE, make.names = FALSE, joint.prob = FALSE) {
pk <- x[1]
pk1 <- x[2]
qk <- x[3]
qk1 <- x[4]
if (any(is.na(c(ploidy, pk, pk1, qk, qk1))))
return(NULL)
if (any(c(pk, pk1) == 0))
sh.p <- 0 else {
sh.p <- min(pk, pk1):0
if (length(sh.p) > ploidy - max(pk, pk1))
sh.p <- sh.p[1:(ploidy - max(pk, pk1) + 1)]
}
if (any(c(qk, qk1) == 0))
sh.q <- 0 else {
sh.q <- min(qk, qk1):0
if (length(sh.q) > ploidy - max(qk, qk1))
sh.q <- sh.q[1:(ploidy - max(qk, qk1) + 1)]
}
if (pk == 0)
pk <- NULL else pk <- 0:(pk - 1)
if (pk1 == 0)
pk1 <- NULL else pk1 <- 0:(pk1 - 1)
if (qk == 0)
qk <- NULL else qk <- 0:(qk - 1)
if (qk1 == 0)
qk1 <- NULL else qk1 <- 0:(qk1 - 1)
pk.ph <- NULL
pk1.ph <- NULL
if (length(pk) < length(pk1)) {
for (i in 0:(length(sh.p) - 1)) {
pk.ph <- rbind(pk.ph, pk)
pk1.ph <- rbind(pk1.ph, pk1 + i)
}
} else {
for (i in 0:(length(sh.p) - 1)) {
if (!is.null(pk))
pk.ph <- rbind(pk.ph, pk + i)
pk1.ph <- rbind(pk1.ph, pk1)
}
}
qk.ph <- NULL
qk1.ph <- NULL
if (length(qk) < length(qk1)) {
for (i in 0:(length(sh.q) - 1)) {
qk.ph <- rbind(qk.ph, qk)
qk1.ph <- rbind(qk1.ph, qk1 + i)
}
} else {
for (i in 0:(length(sh.q) - 1)) {
if (!is.null(qk))
qk.ph <- rbind(qk.ph, qk + i)
qk1.ph <- rbind(qk1.ph, qk1)
}
}
pk.num <- NULL
if (any(is.null(pk.ph), is.null(pk1.ph)))
pk.num <- 0 else {
for (i in 1:nrow(pk.ph)) pk.num <- c(pk.num, sum(!is.na(match(pk.ph[i, ], pk1.ph[i, ]))))
}
qk.num <- NULL
if (any(is.null(qk.ph), is.null(qk1.ph)))
qk.num <- 0 else {
for (i in 1:nrow(qk.ph)) qk.num <- c(qk.num, sum(!is.na(match(qk.ph[i, ], qk1.ph[i, ]))))
}
a.names <- expand.grid(qk.num, pk.num)
a <- vector("list", length(pk.num) * length(qk.num))
names(a) <- apply(a.names, 1, function(x) paste(rev(x), collapse = "-"))
for (i in 1:length(pk.num)) {
for (j in 1:length(qk.num)) {
if (verbose)
print(names(a)[(i - 1) * length(qk.num) + j])
a[[(i - 1) * length(qk.num) + j]] <- get_counts(ploidy, pk.ph[i, ], pk1.ph[i, ], qk.ph[j, ], qk1.ph[j, ], verbose = verbose, make.names = make.names,
joint.prob = joint.prob)
}
}
a
}
isNumeric = function(par, include.int = TRUE) {
assert(checkClass(par, "Param"), checkClass(par, "ParamSet"))
UseMethod("isNumeric")
}
isNumeric.ParamSet = function(par, include.int = TRUE) {
all(vlapply(par$pars, isNumeric.Param, include.int = include.int))
}
isNumeric.Param = function(par, include.int = TRUE) {
isNumericTypeString(par$type, include.int)
}
isDiscrete = function(par, include.logical = TRUE) {
assert(checkClass(par, "Param"), checkClass(par, "ParamSet"))
UseMethod("isDiscrete")
}
isDiscrete.ParamSet = function(par, include.logical = TRUE) {
hasAllParamsOfTypes(par, getTypeStringsDiscrete(include.logical))
}
isDiscrete.Param = function(par, include.logical = TRUE) {
par$type %fin% getTypeStringsDiscrete(include.logical = include.logical)
}
isInteger = function(par) {
assert(checkClass(par, "Param"), checkClass(par, "ParamSet"))
UseMethod("isInteger")
}
isInteger.ParamSet = function(par) {
return(hasAllParamsOfTypes(par, getTypeStringsInteger()))
}
isInteger.Param = function(par) {
return(par$type %fin% c("integer", "integervector"))
}
isLogical = function(par) {
assert(checkClass(par, "Param"), checkClass(par, "ParamSet"))
UseMethod("isLogical")
}
isLogical.ParamSet = function(par) {
return(hasAllParamsOfTypes(par, getTypeStringsLogical()))
}
isLogical.Param = function(par) {
return(isLogicalTypeString(par$type))
}
isCharacter = function(par) {
assert(checkClass(par, "Param"), checkClass(par, "ParamSet"))
UseMethod("isCharacter")
}
isCharacter.ParamSet = function(par) {
return(hasAllParamsOfTypes(par, getTypeStringsCharacter()))
}
isCharacter.Param = function(par) {
return(isCharacterTypeString(par$type))
}
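
# Usage sketch (assumes ParamHelpers is attached):
ps <- makeParamSet(
makeNumericParam("x", lower = 0, upper = 1),
makeIntegerParam("k", lower = 1L, upper = 10L)
)
isNumeric(ps)                       # TRUE: numeric and integer parameters
isNumeric(ps, include.int = FALSE)  # FALSE once integers are excluded
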
seqentrans <- function(fsubseq, avg.occ=FALSE){
# number of transitions: count the "-"-separated elements of each subsequence
fsubseq$data$ntrans <- sapply(strsplit(as.character(fsubseq$subseq), "-"),
length)
# number of events: transitions plus the ","-separated events within states
fsubseq$data$nevent <- fsubseq$data$ntrans - 1 +
sapply(strsplit(as.character(fsubseq$subseq), ","), length)
if (avg.occ){
wtot <- sum(seqeweight(fsubseq$eseq))
fsubseq$data$Avg.occ <- fsubseq$data$Count/wtot
}
return(fsubseq)
}
dgev = function( x , loc = 0 , scale = 1 , shape = 0 , log = FALSE )
{
size_x = length(x)
loc = if( length(loc) == size_x ) loc else base::rep( loc[1] , size_x )
scale = if( length(scale) == size_x ) scale else base::rep( scale[1] , size_x )
shape = if( length(shape) == size_x ) shape else base::rep( shape[1] , size_x )
Z = ( x - loc ) / scale
valid = (1 + shape * Z > 0)
shape_zero = ( base::abs(shape) < 1e-10 )
cshape_zero = !shape_zero
TX = numeric(length(x)) + NA
if( base::any(shape_zero) )
{
TX[shape_zero] = base::exp( - Z[shape_zero] )
}
if( base::any(cshape_zero) )
{
TX[cshape_zero] = ( 1 + shape[cshape_zero] * Z[cshape_zero] )^( - 1. / shape[cshape_zero] )
}
out = TX^( shape + 1 ) * base::exp( - TX ) / scale
if( base::any(!valid) )
{
out[!valid] = 0
}
if( log )
return(base::log(out))
else
return(out)
}
pgev = function( q , loc = 0 , scale = 1 , shape = 0 , lower.tail = TRUE )
{
if( !lower.tail )
{
return( 1. - pgev( q , loc , scale , shape , lower.tail = TRUE ) )
}
size_q = base::length(q)
loc = if( length(loc) == size_q ) loc else base::rep( loc[1] , size_q )
scale = if( length(scale) == size_q ) scale else base::rep( scale[1] , size_q )
shape = if( length(shape) == size_q ) shape else base::rep( shape[1] , size_q )
shape_zero = ( base::abs(shape) < 1e-10 )
cshape_zero = !shape_zero
Z = ( q - loc ) / scale
out = numeric(size_q) + NA
if( base::any(shape_zero) )
{
out[shape_zero] = base::exp( - base::exp( - Z[shape_zero] ) )
}
if( base::any(cshape_zero) )
{
out[cshape_zero] = base::exp( - ( 1. + shape[cshape_zero] * Z[cshape_zero] )^( - 1. / shape[cshape_zero] ) )
}
valid = (1 + shape * Z > 0)
if( base::any(!valid) )
{
out[(shape > 0) & !valid] = 0
out[(shape < 0) & !valid] = 1
}
return(out)
}
qgev = function( p , loc = 0 , scale = 1 , shape = 0 , lower.tail = TRUE )
{
if( !lower.tail )
{
return( qgev( 1. - p , loc , scale , shape , lower.tail = TRUE ) )
}
size_p = base::length(p)
loc = if( length(loc) == size_p ) loc else base::rep( loc[1] , length(p) )
scale = if( length(scale) == size_p ) scale else base::rep( scale[1] , length(p) )
shape = if( length(shape) == size_p ) shape else base::rep( shape[1] , length(p) )
shape_zero = ( base::abs(shape) < 1e-10 )
cshape_zero = !shape_zero
out = numeric(length(p)) + NA
if( base::any(shape_zero) )
{
out[shape_zero] = loc[shape_zero] - scale[shape_zero] * base::log( - base::log(p[shape_zero]) )
}
if( base::any(cshape_zero) )
{
out[cshape_zero] = loc[cshape_zero] + scale[cshape_zero] * ( ( - base::log(p[cshape_zero]) )^(- shape[cshape_zero]) - 1. ) / shape[cshape_zero]
}
return(out)
}
rgev = function( n = 1 , loc = 0 , scale = 1 , shape = 0 )
{
p = stats::runif( n = n )
return( qgev( p , loc , scale , shape ) )
}
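
# Sanity sketch: qgev() and pgev() should invert each other in both the
# Gumbel (shape ~ 0) and the shaped branches.
p <- c(0.1, 0.5, 0.9)
all.equal(pgev(qgev(p, loc = 1, scale = 2, shape = 0.2),
loc = 1, scale = 2, shape = 0.2), p)
all.equal(pgev(qgev(p)), p)  # shape = 0 branch
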
library(checkargs)
context("isNumberOrNaScalar")
test_that("isNumberOrNaScalar works for all arguments", {
expect_identical(isNumberOrNaScalar(NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(TRUE, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(FALSE, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(NA, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(0, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(-1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(-0.1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(0.1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(NaN, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(-Inf, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(Inf, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar("", stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar("X", stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(TRUE, FALSE), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(FALSE, TRUE), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(NA, NA), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(0, 0), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(-1, -2), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(-0.1, -0.2), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(0.1, 0.2), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(1, 2), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(NaN, NaN), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(-Inf, -Inf), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c(Inf, Inf), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c("", "X"), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNumberOrNaScalar(c("X", "Y"), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_error(isNumberOrNaScalar(NULL, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(TRUE, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(FALSE, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNumberOrNaScalar(NA, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(0, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(-1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(-0.1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(0.1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNumberOrNaScalar(1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNumberOrNaScalar(NaN, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(-Inf, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(Inf, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar("", stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar("X", stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(TRUE, FALSE), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(FALSE, TRUE), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(NA, NA), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(0, 0), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(-1, -2), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(-0.1, -0.2), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(0.1, 0.2), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(1, 2), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(NaN, NaN), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(-Inf, -Inf), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c(Inf, Inf), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c("", "X"), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNumberOrNaScalar(c("X", "Y"), stopIfNot = TRUE, message = NULL, argumentName = NULL))
})
rm(list = ls())
s <- search()[-1]
s <- s[-match(c("package:base", "package:stats", "package:graphics", "package:grDevices",
"package:utils", "package:datasets", "package:methods", "Autoloads"), s)]
if (length(s) > 0) sapply(s, detach, character.only = TRUE)
if (!file.exists("tables")) dir.create("tables")
if (!file.exists("figures")) dir.create("figures")
set.seed(290875)
options(prompt = "R> ", continue = "+ ",
width = 63,
show.signif.stars = FALSE,
SweaveHooks = list(leftpar = function()
par(mai = par("mai") * c(1, 1.05, 1, 1)),
bigleftpar = function()
par(mai = par("mai") * c(1, 1.7, 1, 1))))
HSAURpkg <- require("HSAUR3")
if (!HSAURpkg) stop("cannot load package ", sQuote("HSAUR3"))
rm(HSAURpkg)
a <- Sys.setlocale("LC_ALL", "C")
book <- TRUE
refs <- cbind(c("AItR", "DAGD", "SI", "CI", "ANOVA", "MLR", "GLM",
"DE", "RP", "GAM", "SA", "ALDI", "ALDII", "SIMC", "MA", "PCA",
"MDS", "CA"), 1:18)
ch <- function(x) {
ch <- refs[which(refs[,1] == x),]
if (book) {
return(paste("Chapter~\\\\ref{", ch[1], "}", sep = ""))
} else {
return(paste("Chapter~", ch[2], sep = ""))
}
}
if (file.exists("deparse.R"))
source("deparse.R")
setHook(packageEvent("lattice", "attach"), function(...) {
lattice.options(default.theme =
function()
standard.theme("pdf", color = FALSE))
})
book <- FALSE
data("bp", package = "HSAUR3")
toLatex(HSAURtable(bp), pcol = 2,
caption = paste("Blood pressure data."),
label = "MV-bp-tab")
sapply(bp, function(x) sum(is.na(x)))
summary(bp$recovtime, na.rm = TRUE)
sd(bp$recovtime, na.rm = TRUE)
with(bp, cor(bloodp, recovtime, use = "complete.obs"))
with(bp, cor(logdose, recovtime, use = "complete.obs"))
layout(matrix(1:3, nrow = 1))
plot(bloodp ~ logdose, data = bp)
plot(recovtime ~ bloodp, data = bp)
plot(recovtime ~ logdose, data = bp)
summary(lm(recovtime ~ bloodp + logdose, data = bp))
library("mice")
imp <- mice(bp, method = "mean", m = 1, maxit = 1)
with(imp, summary(recovtime))
with(imp, sd(recovtime))
with(imp, cor(bloodp, recovtime))
with(imp, cor(logdose, recovtime))
layout(matrix(1:2, nrow = 1))
plot(recovtime ~ bloodp, data = complete(imp),
pch = is.na(bp$recovtime) + 1)
plot(recovtime ~ logdose, data = complete(imp),
pch = is.na(bp$recovtime) + 1)
legend("topleft", pch = 1:2, bty = "n",
legend = c("original", "imputed"))
with(imp, summary(lm(recovtime ~ bloodp + logdose)))
imp_ppm <- mice(bp, m = 10, method = "pmm",
print = FALSE, seed = 1)
layout(matrix(1:2, nrow = 1))
plot(recovtime ~ bloodp, data = complete(imp_ppm),
pch = is.na(bp$recovtime) + 1)
plot(recovtime ~ logdose, data = complete(imp_ppm),
pch = is.na(bp$recovtime) + 1)
legend("topleft", pch = 1:2, bty = "n",
legend = c("original", "imputed"))
summary(unlist(with(imp_ppm, mean(recovtime))$analyses))
summary(unlist(with(imp_ppm, sd(recovtime))$analyses))
summary(unlist(with(imp_ppm,
cor(bloodp, recovtime))$analyses))
summary(unlist(with(imp_ppm,
cor(logdose, recovtime))$analyses))
fit <- with(imp_ppm, lm(recovtime ~ bloodp + logdose))
summary(pool(fit))
with(bp, t.test(recovtime, mu = 27))
with(imp, t.test(recovtime, mu = 27))$analyses[[1]]
fit <- with(imp_ppm, lm(I(recovtime - 27) ~ 1))
summary(pool(fit))
data("UStemp", package = "HSAUR3")
toLatex(HSAURtable(UStemp),
caption = "Lowest temperatures in Fahrenheit recorded in various months for cities in the US.",
label = "MI-UStemp-tab", rownames = TRUE) |
JtTest <- function(formula, data, alpha = 0.05, na.rm = TRUE, verbose = TRUE) {
dp <- as.character(formula)
DNAME <- paste(dp[[2L]], "and", dp[[3L]])
METHOD <- "Jonckheere-Terpstra Test"
TEST <- "JT"
if (na.rm){
completeObs <- complete.cases(data)
data <- data[completeObs,]
}
if (any(colnames(data)==dp[[3L]])==FALSE) stop("The name of group variable does not match the variable names in the data. The group variable must be one factor.")
if (any(colnames(data)==dp[[2L]])==FALSE) stop("The name of response variable does not match the variable names in the data.")
y = data[, dp[[2L]]]
group = data[, dp[[3L]]]
if (!is.factor(group)) stop("The group variable must be a factor.")
if (!is.numeric(y)) stop("The response must be a numeric variable.")
n <- length(y)
x.levels <- levels(factor(group))
k <- NROW(x.levels)
JT <- 0; s <- 1
y.n <- NULL
for (i in x.levels) {
y.n[i] <- length(y[group==i])
}
for (i in x.levels[1:(k - 1)]) {
s <- s + 1
for (j in x.levels[s:k]) {
A <- rank(y[group == i | group == j])
JT <- JT + sum(A[(y.n[i] + 1):(y.n[i] + y.n[j])]) - NROW(y[group == j])*(NROW(y[group == j]) + 1)/2
}
}
a <- sum(y.n^2)
b <- sum((y.n^2)*(2*y.n + 3))
EJT <- (n^2 - a)/4
VJT <- (n^2*(2*n + 3) - b)/72
Z <- (JT - EJT)/sqrt(VJT)
p.value <- pnorm(Z, lower.tail = FALSE)
if (verbose) {
cat("---------------------------------------------------------","\n", sep = " ")
cat(" Test :", METHOD, "\n", sep = " ")
cat(" data :", DNAME, "\n\n", sep = " ")
cat(" Statistic =", JT, "\n", sep = " ")
cat(" Mean =", EJT, "\n", sep = " ")
cat(" Variance =", VJT, "\n", sep = " ")
cat(" Z =", Z, "\n", sep = " ")
cat(" Asymp. p-value =", p.value, "\n\n", sep = " ")
cat(if (p.value > alpha) {" Result : Null hypothesis is not rejected."}
else {" Result : Null hypothesis is rejected."}, "\n")
cat("---------------------------------------------------------","\n\n", sep = " ")
}
result <- list()
result$statistic <- JT
result$mean <- EJT
result$variance <- VJT
result$Z <- Z
result$p.value <- p.value
result$alpha <- alpha
result$method <- METHOD
result$data <- data
result$formula <- formula
attr(result, "class") <- "owt"
invisible(result)
}
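
# Usage sketch with made-up data: a three-level ordered grouping and a
# response with an increasing trend across groups.
set.seed(123)
d <- data.frame(dose = factor(rep(c("low", "mid", "high"), each = 10),
levels = c("low", "mid", "high")),
y = rnorm(30) + rep(0:2, each = 10))
out <- JtTest(y ~ dose, data = d)
out$p.value
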
add.rule <- function(strategy
, name
, arguments
, parameters=NULL
, label=NULL
, type=c(NULL,"risk","order","rebalance","exit","enter","chain")
, parent=NULL
, ...
, enabled=TRUE
, indexnum=NULL
, path.dep=TRUE
, timespan=NULL
, store=FALSE
, storefun=TRUE) {
if (!is.strategy(strategy)) {
strategy<-try(getStrategy(strategy))
if(inherits(strategy,"try-error"))
stop ("You must supply an object or the name of an object of type 'strategy'.")
store=TRUE
}
type=type[1]
if(is.null(type)) stop("You must specify a type")
if(is.na(charmatch(type,c("risk","order","rebalance","exit","enter","chain","pre","post")))) stop(paste("type:",type,' must be one of "risk", "order", "rebalance", "exit", "enter", "chain", "pre", or "post"'))
tmp_rule<-list()
if(!is.function(name) && isTRUE(storefun)) {
if(exists(name, mode="function")) {
fn <- get(name, mode="function")
} else {
rule.name <- paste("rule", name, sep=".")
if(exists(rule.name, mode="function")) {
fn <- get(rule.name, mode="function")
name <- rule.name
} else {
message("Skipping rule ", name,
" because there is no function by that name to call")
}
}
} else {
fn <- name
}
tmp_rule$name<-fn
tmp_rule$type<-type
if(type == 'chain')
{
if(is.null(parent)) stop("You must specify the label of the parent rule if ruletype=='chain'")
tmp_rule$parent<-parent
}
tmp_rule$enabled<-enabled
if (!is.list(arguments)) stop("arguments must be passed as a named list")
if(is.null(label)) label = paste(name,"rule",sep='.')
tmp_rule$label<-label
tmp_rule$arguments<-arguments
if(!is.null(parameters)) tmp_rule$parameters = parameters
if(!is.null(timespan)) tmp_rule$timespan = timespan
tmp_rule$path.dep<-path.dep
if(length(list(...))) tmp_rule<-c(tmp_rule,list(...))
tmp_rule$call<-match.call()
class(tmp_rule)<-'trade_rule'
if(!hasArg(indexnum) || (hasArg(indexnum) && is.null(indexnum))) indexnum = length(strategy$rules[[type]])+1
strategy$rules[[type]][[indexnum]]<-tmp_rule
strategy$trials <- strategy$trials+1
if (store) assign(strategy$name,strategy,envir=as.environment(.strategy))
else return(strategy)
strategy$name
}
enable.rule <- function(strategy, type=c(NULL,"risk","order","rebalance","exit","enter","chain"), label, enabled=TRUE, store=FALSE)
{
if (!is.strategy(strategy)) {
strategy<-try(getStrategy(strategy))
if(inherits(strategy,"try-error"))
stop ("You must supply an object or the name of an object of type 'strategy'.")
store=TRUE
}
for(i in 1:length(strategy$rules[[type]]))
if(grepl(label, strategy$rules[[type]][[i]]$label))
strategy$rules[[type]][[i]]$enabled <- enabled
if (store) assign(strategy$name,strategy,envir=as.environment(.strategy))
else return(strategy)
strategy$name
}
applyRules <- function(portfolio,
symbol,
strategy,
mktdata,
indicators=NULL,
signals=NULL,
parameters=NULL,
...,
path.dep=TRUE,
rule.order=NULL,
rule.subset=NULL,
debug=FALSE) {
if(any(diff(.index(mktdata)) == 0)) {
warning("'mktdata' index contains duplicates; calling 'make.index.unique'")
mktdata <- make.index.unique(mktdata)
}
if(!is.null(rule.subset)){
first.index <- which(index(mktdata)==first(index(mktdata[rule.subset])))
last.index <- which(index(mktdata)==last(index(mktdata[rule.subset])))
} else {
first.index <- 1
last.index <- which(index(mktdata)==last(index(mktdata)))
}
.Data <- new.env()
get.dindex <- function() get("dindex",pos=.Data)
assign.dindex <- function(dindex, lindex=last.index) {
dindex <- unique(dindex)
if(!isOrdered(dindex))
dindex <- sort(dindex)
dindex <- dindex[dindex <= lindex]
assign("dindex", dindex, .Data)
}
if (!is.strategy(strategy)) {
strategy<-try(getStrategy(strategy))
if(inherits(strategy,"try-error"))
stop ("You must supply an object of type 'strategy'.")
}
ret <- NULL
Dates <- index(mktdata)
if(isTRUE(path.dep)){
dindex<-c(first.index,last.index)
assign.dindex(dindex)
for ( type in names(strategy$rules)){
if(type=='rebalance') next()
if(length(strategy$rules[[type]])>=1){
for (rule in strategy$rules[[type]]){
sigcol <- rule$arguments$sigcol
sigval <- rule$arguments$sigval
if (!is.null(sigcol) && !sigcol %in% colnames(mktdata)) {
stop("mktdata does not contain 'sigcol': ", sigcol)
}
if(isTRUE(rule$path.dep)){
if(is.null(sigcol) || is.null(sigval)) {
if(is.null(rule$timespan)) {
assign.dindex(1:length(Dates))
} else {
assign.dindex(c(get.dindex(), which(.index(mktdata) %in% .index(mktdata[rule$timespan]))))
}
} else {
if(is.null(rule$timespan)) {
assign.dindex(c(get.dindex(),which(mktdata[, sigcol] == sigval)))
} else {
assign.dindex(c(get.dindex(),which(merge(.xts(,.index(mktdata)),mktdata[rule$timespan, sigcol]) == sigval)))
}
}
}
}
}
}
dindex<-get.dindex()
if(!is.null(rule.subset)){
assign.dindex(c(mktdata[rule.subset,which.i=TRUE][1]
,dindex[which(dindex %in% mktdata[rule.subset,which.i=TRUE])]))
dindex <- get.dindex()
}
if(length(dindex)==0) return(NULL)
} else {
Dates=''
dindex=first.index
}
dindexOrderProc <- function(Order, mktPrices, curIndex) {
out <- list()
orderQty <- Order[1L,'Order.Qty']
if (orderQty=='all' || orderQty=='trigger' || orderQty=='0'){
side <- Order[1L,'Order.Side']
if(side=='long')
orderQty <- -1
else
orderQty <- 1
}
orderQty <- as.numeric(orderQty)
orderPrice <- as.numeric(Order[1L,'Order.Price'])
orderType <- Order[1L,'Order.Type']
mktPrice <- mktPrices[[orderType]]$price
if (orderQty > 0) {
relationship <-
switch(orderType,
limit = if(mktPrices$isOHLC) 'lt' else 'lte',
stoptrailing = 'gte',
stoplimit = if(mktPrices$isOHLC) 'gt' else 'gte')
if(mktPrices$isOHLC || mktPrices$isBBO)
mktPrice <- mktPrices[[orderType]]$posQty
low_mktPrice <- mktPrices[[orderType]]$negQty
high_mktPrice <- mktPrices[[orderType]]$posQty
} else {
relationship <-
switch(orderType,
limit = if(mktPrices$isOHLC) 'gt' else 'gte',
stoptrailing = 'lte',
stoplimit = if(mktPrices$isOHLC) 'lt' else 'lte')
if(mktPrices$isOHLC || mktPrices$isBBO)
mktPrice <- mktPrices[[orderType]]$negQty
low_mktPrice <- mktPrices[[orderType]]$negQty
high_mktPrice <- mktPrices[[orderType]]$posQty
}
if (is.null(mktPrice) || (length(mktPrice) == 1L && is.na(mktPrice)))
stop("no price discernable for ", orderType, " in applyRules")
if(orderType %in% "stoptrailing") {
if(orderQty > 0) {
out$cross <- .firstCross(high_mktPrice, orderPrice, relationship, start=curIndex+1L)
} else if(orderQty < 0) {
out$cross <- .firstCross(low_mktPrice, orderPrice, relationship, start=curIndex+1L)
}
} else {
out$cross <- .firstCross(mktPrice, orderPrice, relationship, start=curIndex+1L)
}
out$move_order <- FALSE
if(orderType %in% "stoptrailing") {
orderThreshold <- as.numeric(Order[1L,'Order.Threshold'])
if(orderQty > 0) {
relationship <- "lt"
newOrderPrice <- orderPrice - abs(orderThreshold)
mktPrice <- low_mktPrice
} else {
relationship <- "gt"
newOrderPrice <- orderPrice + abs(orderThreshold)
mktPrice <- high_mktPrice
}
out$move_order <- .firstCross(mktPrice, newOrderPrice, relationship, start=curIndex+1L)
}
out
}
nextIndex <- function(curIndex, ..., mktPrices){
if (!isTRUE(path.dep)){
curIndex = FALSE
return(curIndex)
}
hasmktord <- FALSE
nidx=FALSE
neworders=NULL
orderbook <- getOrderBook(portfolio)
ordersubset <- orderbook[[portfolio]][[symbol]]
oo.idx <- getOrders(portfolio=portfolio, symbol=symbol, status="open",which.i=TRUE)
if(length(oo.idx)==0){
nidx=FALSE
} else {
timespan<-paste(timestamp,"::",sep='')
if(nrow(ordersubset[oo.idx,][timespan])==0 &&
!any(ordersubset[oo.idx,"Order.Type"] %in% "stoptrailing"))
{
nidx=FALSE
} else {
openOrderSubset <- ordersubset[oo.idx,]
if(any('market'==openOrderSubset[,'Order.Type']))
{
hasmktord <- TRUE
}
openOrders <- which(openOrderSubset[,'Order.Type'] %in% c("limit","stoplimit"))
if(length(openOrders) > 0) {
newIndex <- sapply(openOrders, function(i) dindexOrderProc(openOrderSubset[i,], mktPrices, curIndex)$cross)
assign.dindex(c(get.dindex(),newIndex))
}
openOrders <- which(openOrderSubset[,'Order.Type'] %in% "stoptrailing")
for(openOrder in openOrders)
{
dindex <- get.dindex()
dindexNext <- dindex[.firstCross(dindex, curIndex, "gt")]
newIndex <- dindexOrderProc(openOrderSubset[openOrder,], mktPrices, curIndex)
if(newIndex$move_order < dindexNext || newIndex$cross < dindex[length(dindex)]) {
assign.dindex(c(dindex, min(newIndex$move_order, newIndex$cross, na.rm=TRUE)))
}
}
}
}
if(curIndex){
if(hasmktord) {
curIndex <- curIndex+1
} else {
dindex<-get.dindex()
dindexNext <- dindex[.firstCross(dindex, curIndex, "gt")]
if (dindexNext < dindex[length(dindex)]) {
curIndex <- dindexNext
} else {
curIndex <- FALSE
}
}
}
if (is.na(curIndex) || curIndex > length(Dates)) curIndex=FALSE
return(curIndex)
}
hold=FALSE
holdtill=first(time(Dates))-1
mktinstr<-getInstrument(symbol)
curIndex<-first.index
if(nrow(mktdata)>1)
freq <- periodicity(mktdata)
else {
freq <- structure(list(difftime = structure(NA, units="secs", class="difftime"),
frequency=1, start=start(mktdata), end=end(mktdata), units="secs",
scale="seconds", label="second"), class="periodicity")
}
if(is.BBO(mktdata)) {
mktPrices <- list(
stoplimit = list(
posQty = mktdata[,has.Ask(mktdata,which=TRUE)[1]],
negQty = mktdata[,has.Bid(mktdata,which=TRUE)[1]]),
limit = list(
posQty = mktdata[,has.Ask(mktdata,which=TRUE)[1]],
negQty = mktdata[,has.Bid(mktdata,which=TRUE)[1]]),
stoptrailing = list(
posQty = getPrice(mktdata, prefer='offer')[,1],
negQty = getPrice(mktdata, prefer='bid')[,1]))
} else if (is.OHLC(mktdata)) {
mktPrices <- list(
stoplimit = list(
posQty = mktdata[,has.Hi(mktdata,which=TRUE)[1]],
negQty = mktdata[,has.Lo(mktdata,which=TRUE)[1]]),
limit = list(
posQty = mktdata[,has.Lo(mktdata,which=TRUE)[1]],
negQty = mktdata[,has.Hi(mktdata,which=TRUE)[1]]),
stoptrailing = list(
posQty = getPrice(mktdata, prefer='high')[,1],
negQty = getPrice(mktdata, prefer='low')[,1]))
} else {
prefer <- if(hasArg("prefer")) match.call(expand.dots=TRUE)$prefer else NULL
mktPrices <- list(
stoplimit = list(
price = getPrice(mktdata, prefer=prefer)[,1]),
limit = list(
price = getPrice(mktdata, prefer=prefer)[,1]),
stoptrailing = list(
price = getPrice(mktdata, prefer=prefer)[,1]))
}
mktPrices$isOHLC <- is.OHLC(mktdata)
mktPrices$isBBO <- is.BBO(mktdata)
while(curIndex){
timestamp=Dates[curIndex]
if(isTRUE(hold) & holdtill<timestamp){
hold=FALSE
holdtill=NULL
}
if(is.null(rule.order)){
types <- sort(factor(names(strategy$rules), levels=c("pre","risk","order","rebalance","exit","enter","chain","post")))
} else {
print("Be aware that order of operations matters, and poor choices in rule order can create unintended consequences.")
types <- rule.order
}
for ( type in types ) {
switch( type ,
pre = {
if(length(strategy$rules[[type]])>=1){
ruleProc(strategy$rules$pre,timestamp=timestamp, path.dep=path.dep, mktdata=mktdata,portfolio=portfolio, symbol=symbol, ruletype=type, mktinstr=mktinstr, parameters=parameters, curIndex=curIndex, ...)
}
},
risk = {
if(length(strategy$rules$risk)>=1){
ruleProc(strategy$rules$risk,timestamp=timestamp, path.dep=path.dep, mktdata=mktdata,portfolio=portfolio, symbol=symbol, ruletype=type, mktinstr=mktinstr,parameters=parameters, curIndex=curIndex, ...)
}
},
order = {
if(length(strategy$rules[[type]])>=1) {
ruleProc(strategy$rules[[type]],timestamp=timestamp, path.dep=path.dep, mktdata=mktdata,portfolio=portfolio, symbol=symbol, ruletype=type, mktinstr=mktinstr, parameters=parameters, curIndex=curIndex, ...)
} else {
if (isTRUE(path.dep))
timespan <- format(timestamp, "::%Y-%m-%d %H:%M:%OS6")
else
timestamp=NULL
closed.orders <- ruleOrderProc(portfolio=portfolio, symbol=symbol, mktdata=mktdata, timestamp=timestamp, periodicity=freq, curIndex=curIndex, ...)
}
},
chain = {
if(!is.null(closed.orders))
{
chain.rules <- strategy$rules[[type]]
chain.rule.names <- sapply(chain.rules, '[[', 'parent')
closed.chain <- closed.orders[closed.orders$Rule %in% chain.rule.names]
for(i in seq_len(nrow(closed.chain))) {
rules <- chain.rules[chain.rule.names %in% closed.chain$Rule[i]]
for(j in seq_along(rules)) {
txns <- getTxns(Portfolio=portfolio, Symbol=symbol, Dates=timestamp)
txn.price <- last(txns$Txn.Price)
ruleProc(rules[j], timestamp=timestamp, path.dep=path.dep, mktdata=mktdata, portfolio=portfolio, symbol=symbol, ruletype=type, mktinstr=mktinstr, parameters=list(chain.price=txn.price), curIndex=curIndex)
}
}
}
},
exit = , enter = {
if(isTRUE(hold)) next()
if(isTRUE(path.dep)) openOrdersLen <- length(getOrders(portfolio=portfolio, symbol=symbol, status="open", timespan=timestamp,which.i=TRUE))
if(length(strategy$rules[[type]])>=1) {
ruleProc(strategy$rules[[type]],timestamp=timestamp, path.dep=path.dep, mktdata=mktdata,portfolio=portfolio, symbol=symbol, ruletype=type, mktinstr=mktinstr, parameters=parameters, curIndex=curIndex, ...)
}
if(isTRUE(path.dep) && length(getOrders(portfolio=portfolio, symbol=symbol, status="open", timespan=timestamp,which.i=TRUE)) != openOrdersLen) {
assign.dindex(c(get.dindex(),curIndex+1))
}
},
post = {
if(length(strategy$rules$post)>=1) {
ruleProc(strategy$rules$post,timestamp=timestamp, path.dep=path.dep, mktdata=mktdata,portfolio=portfolio, symbol=symbol, ruletype=type, mktinstr=mktinstr, parameters=parameters, curIndex=curIndex, ...)
}
}
)
}
if(isTRUE(path.dep)) curIndex <- nextIndex(curIndex, ..., mktPrices=mktPrices)
else curIndex=FALSE
}
if(isTRUE(debug)){
mktdata<<-mktdata
if(is.null(ret)) {
return(mktdata)
}
else return(ret)
} else {
return(NULL)
}
}
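## applyRules() is rarely called by hand: applyStrategy() invokes it once per
## symbol after indicators and signals have been evaluated. A hedged sketch of
## the usual entry point (assumes the standard quantstrat portfolio/account
## setup has already been done):
if (FALSE) {
  out <- applyStrategy(strategy = "macd", portfolios = "macd")
}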
ruleProc <- function (ruletypelist,timestamp=NULL, path.dep, ruletype, ..., parameters=NULL){
for (rule in ruletypelist){
if (!rule$path.dep==path.dep) next()
if(is.function(rule$name)) {
ruleFun <- rule$name
} else {
if(exists(rule$name, mode="function")) {
ruleFun <- get(rule$name, mode="function")
} else {
rule.name <- paste("rule", rule$name, sep=".")
if(exists(rule.name, mode="function")) {
ruleFun <- get(rule.name, mode="function")
rule$name <- rule.name
} else {
message("Skipping rule ", rule$name,
" because there is no function by that name to call")
}
}
}
if(!isTRUE(rule$enabled)) next()
if(!is.null(rule$timespan)) {
if(hasArg(curIndex))
curIndex <- eval(match.call(expand.dots=TRUE)$curIndex, parent.frame())
else
curIndex <- timestamp
if(nrow(mktdata[curIndex][rule$timespan])==0)
next()
}
rule$arguments$timestamp = timestamp
rule$arguments$ruletype = ruletype
rule$arguments$label = rule$label
.formals <- formals(rule$name)
.formals <- modify.args(.formals, rule$arguments, dots=TRUE)
.formals <- modify.args(.formals, parameters, dots=TRUE)
.formals <- modify.args(.formals, NULL, ..., dots=TRUE)
.formals$`...` <- NULL
if(!is.null(rule$arguments$prefer)) .formals$prefer = rule$arguments$prefer
tmp_val <- do.call(ruleFun, .formals, envir=parent.frame(1))
}
} |
elicitMixture <- function(){
ui <- shinyUI(fluidPage(
titlePanel("SHELF: extension method - mixture distributions"),
sidebarLayout(
sidebarPanel(
conditionalPanel(condition="input.myTab==1",
wellPanel(
h4("Extension variable"),
numericInput("nY", label = h5("Number of possible values"),
value = 3, min = 1)
)
),
conditionalPanel(condition="input.myTab==2",
wellPanel(
h4("Conditional probability judgements"),
radioButtons("entry", "Input method",
choices = c("Quantiles", "Roulette")),
conditionalPanel(
condition = "input.entry == 'Quantiles'",
textInput("probs", label = h5("Cumulative probabilities"),
value = "0.25, 0.5, 0.75")),
conditionalPanel(
condition = "input.entry == 'Roulette'",
numericInput("nBins", label = h5("Number of bins"), value = 10),
textInput("limits", label = h5("Parameter limits"), value = "0, 100")
)
)),
conditionalPanel(condition="input.myTab==2 | input.myTab==3",
wellPanel(
conditionalPanel(condition="input.myTab==2",
h4("Conditional density plots"),
uiOutput("extensionValue"),
selectInput("dist", label = h5("Distribution"),
choices = list(Histogram = "hist",
Normal = "normal",
'Student-t' = "t",
Gamma = "gamma",
'Log normal' = "lognormal",
'Log Student-t' = "logt",
Beta = "beta",
'Mirror gamma' = "mirrorgamma",
'Mirror log normal' = "mirrorlognormal",
'Mirror log Student-t' = "mirrorlogt",
'Best fitting' = "best"),
selected = "normal"
)),
conditionalPanel(condition="input.myTab==3",
h4("Marginal density plot"),
uiOutput("allDistributions")),
hr(),
uiOutput("setPDFxaxisLimits"),
textInput("fq", label = h5("Feedback quantiles"),
value = "0.05, 0.95")
)
)
),
mainPanel(
wellPanel(
fluidRow(
column(3, selectInput("outFormat", label = "Report format",
choices = list('html' = "html_document",
'pdf' = "pdf_document",
'Word' = "word_document"))
),
column(3, offset = 1,
numericInput("fs", label = "Font size", value = 12)
),
conditionalPanel(condition = "input.myTab == 3",
column(3,
numericInput("sampleSize",
label = "sample size",
value = 1000, min = 1, step = 1)
))
),
fluidRow(
column(3, downloadButton("report", "Download report")
),
column(3, offset = 1, actionButton("exit", "Quit")
),
conditionalPanel(condition = "input.myTab == 3",
column(3,
downloadButton("downloadData", "Download sample")))
)
),
hr(),
tabsetPanel(id = "myTab",
tabPanel("Extension variable", value = 1,
helpText("Specify the marginal probability distribution of
the extension variable: the probability of the
extension variable taking each of its possible values.
Click in the cells to edit. You can also click on the
column names to change them."),
uiOutput("ExtensionProbs"),
plotOutput("barplot")),
tabPanel("Judgements", value = 2,
conditionalPanel(
condition = "input.entry == 'Quantiles'",
helpText("Enter the judgements in the table below,
one column per value of the extension variable. Each column gives judgements
about the target variable,
conditional on the corresponding value of the extension variable.
Enter lower plausible limits in the first row,
upper plausible limits in the last row, and quantile values in between,
corresponding to the cumulative probabilities."),
uiOutput("EnterQuantiles"),
fluidRow(
column(3, downloadButton("saveQuantiles", "Download judgements") ),
column(3, fileInput("loadQuantiles", label = NULL,
buttonLabel = "Upload judgements") )
)),
conditionalPanel(
condition = "input.entry == 'Roulette'",
helpText("Enter the number of chips allocated to each bin, one row per value of
the extension variable. Each row corresponds to a conditional distribution
of the target variable, given the respective value of the extension variable.
The bins are defined to have equal size, starting and ending at the specified
values in 'Parameter limits'. To fit distributions,
each row must have at least three non-empty bins."),
uiOutput("EnterChips"),
fluidRow(
column(3, downloadButton("saveChips", "Download judgements") ),
column(3, fileInput("loadChips", label = NULL,
buttonLabel = "Upload judgements") )
)
),
helpText("You can save and load judgements as .csv files.
When loading a file, make sure the Input
method, Number of possible values for the extension variable,
Cumulative probabilities/Number of bins
and Parameter limits are correctly specified in the
control panel."),
hr(),
plotOutput("condDistPlot")
),
tabPanel("PDF", value = 3,
plotOutput("distPlot"),
fluidRow(
column(4,
tableOutput("bestFittingDistributions")
),
column(4,
tableOutput('lpquantiles'))
)
)
)
)
)
))
server <- shinyServer(function(input, output) {
probability <- value <- NULL
pQuantile <- reactive({
pQ <- tryCatch(eval(parse(text = paste("c(",
input$probs, ")"))),
error = function(e){NULL})
if(!is.null(pQ)){
if(min(pQ) > 0.4 | max(pQ) < 0.6){
showNotification("Smallest cumulative probability needs to be less than 0.4,
and largest needs to be greater than 0.6.",
type = "error",
duration = 10)
}
}
pQ
})
pChip <- reactive({
req(input$myChips)
check <- apply(input$myChips > 0, 1, sum)
if(min(check) < 3 | min(input$myChips) < 0){
return(NULL)
}else{
rouletteP <- apply(input$myChips, 1, cumsum) /
matrix(apply(input$myChips, 1, sum),
ncol(input$myChips), nExp(), byrow = TRUE)
rownames(rouletteP) <- NULL
return(rouletteP)
}
})
fq <- reactive({
feedbackq <- tryCatch(eval(parse(text=paste("c(",input$fq,")"))),
error = function(e){NULL})
if(!is.null(feedbackq)){
if(min(feedbackq)<=0 | max(feedbackq)>=1 | length(feedbackq) !=2){
return(NULL)
}
if(length(feedbackq) == 2 & feedbackq[1] >= feedbackq[2]){
return(NULL)
}
}
return(feedbackq)
})
nExp <- reactive({
req(input$nY)
if(input$nY <=0 | !is.integer(input$nY)){
return(NULL)}else{
return(input$nY)
}
})
limits <- reactive({
tryCatch(eval(parse(text=paste("c(",input$limits,")"))),
error = function(e){NULL})
})
boundaries <- reactive({
req(limits(), input$nBins)
if(length(limits()) == 1){return(NULL)}
if(is.integer(input$nBins) & input$nBins > 0 & limits()[1] < limits()[2]){
return(signif(seq(from = limits()[1],
to = limits()[2],
length = 1 + input$nBins),
3))
}else{
return(NULL)
}
})
l <- reactive({
if(input$entry == "Quantiles"){
return(input$myvals[1, ])}
if(input$entry == "Roulette"){
return(boundaries()[1])}
})
u <- reactive({
if(input$entry == "Quantiles"){
return(input$myvals[nrow(input$myvals), ])}
if(input$entry == "Roulette"){
return(boundaries()[1 + input$nBins])}
})
vQuantile <- reactive({
req(input$myvals)
n <- nrow(input$myvals)
as.matrix(input$myvals[2:(n - 1), ])
})
vChip <- reactive({
req(boundaries())
matrix(boundaries()[2:(input$nBins +1)],
input$nBins,
input$nY)
})
myfitQuantile <- reactive({
req(pQuantile(), vQuantile(), l(), u())
tryCatch(fitdist(vals = vQuantile(),
probs = pQuantile(),
lower = l(),
upper = u(),
expertnames = colnames(input$extensionProbs),
tdf = 10),
error = function(e){NULL})
})
myfitChip <- reactive({
if(is.null(pChip())){return(NULL)}else{
      return(tryCatch(fitdist(vals = vChip(),
                              probs = pChip(),
                              lower = l(),
                              upper = u(),
                              expertnames = colnames(input$extensionProbs)),
                      error = function(e){NULL}))}
})
myfit <- reactive({
if(input$entry == "Quantiles"){mf <- myfitQuantile()}
if(input$entry == "Roulette"){mf <- myfitChip()}
mf
})
initialPy <- reactive({
Py <- matrix(round(1 / input$nY, 2), 1, input$nY)
Py[1, 1] <- 1 - sum(Py[1, -1 ])
rownames(Py) <- "probability"
colnames(Py) <- paste0("Y=", 1:input$nY)
Py
})
newFile <- reactiveValues(chips = TRUE,
quantiles = TRUE)
validPy <- reactiveValues(valid = TRUE)
initialChips <- reactive({
req(boundaries(), input$nBins, nExp())
inFile <- input$loadChips
if (is.null(inFile) | isolate(newFile$chips)){
initialdf <- matrix(0, nExp(), input$nBins)
}else{
initialdf <- as.matrix(utils::read.csv(inFile$datapath, row.names = 1))
newFile$chips <- TRUE
if(nrow(initialdf) != nExp() | ncol(initialdf) != input$nBins){
showNotification("Make sure selected Number of experts and selected Number of bins
match the numbers of rows and columns in the input file. Then try
uploading the file again.",
type = "error",
duration = 60)
initialdf <- matrix(0, nExp(), input$nBins)
}
}
rownames(initialdf) <- colnames(input$extensionProbs)
colnames(initialdf)<- paste0("(",
boundaries()[1:input$nBins],
"-",
boundaries()[2:(1 + input$nBins)], "]")
initialdf
})
initialVals <- reactive({
inFile <- input$loadQuantiles
if (is.null(inFile) | isolate(newFile$quantiles)){
initialdf <- matrix(rep(1:(2 + length(pQuantile())), input$nY),
2 + length(pQuantile()),
input$nY)
}else{
initialdf <- as.matrix(utils::read.csv(inFile$datapath, row.names = 1))
newFile$quantiles <- TRUE
if(nrow(initialdf) != (2 + length(pQuantile())) |
ncol(initialdf) != input$nY){
showNotification("Check that the dimensions and row/column headings of the table in the input file
match those displayed in the table here. Adjust the number of values
for the extension variable (on the extension variable tab) and/or Cumulative probabilities
as appropriate, then try uploading the file again.",
type = "error",
duration = 60)
initialdf <- matrix(rep(1:(2 + length(pQuantile())), input$nY),
2 + length(pQuantile()),
input$nY)
}
}
colnames(initialdf) <- colnames(input$extensionProbs)
rownames(initialdf) <- c("L", pQuantile(), "U")
initialdf
})
xSample <- reactive({
req(myfit(), input$dist1)
xMatrix <- matrix(0, input$sampleSize, input$nY )
for(i in 1:input$nY){
dist <- eval(parse(text = paste0("input$dist", i)))
if(dist == "best"){
dist <- as.character(myfit()$best.fitting[i, 1])
}
xMatrix[, i] <- sampleFit(myfit(), n = input$sampleSize, expert = i)[, dist]
}
data.frame(X = apply(xMatrix,
1,
sample,
size = 1,
prob = input$extensionProbs[1, ]))
})
quantileValues <- reactive({
distVector <- rep("best", input$nY)
for(i in 1:input$nY){
distVector[i] <- eval(parse(text = paste0("input$dist", i)))
}
req(input$extensionProbs[1, ], myfit(), fq())
values <- qlinearpool(myfit(), fq(),
d = distVector,
w = input$extensionProbs[1, ])
data.frame(quantiles = fq(), values=values)
})
xlimPDF <- reactive({
tryCatch(eval(parse(text = paste("c(", input$xlimPDF, ")"))),
error = function(e){NULL})
})
output$extensionValue <- renderUI({
selectInput("extensionVariableValue",
label = h5("Extension variable value"),
choices = colnames(input$extensionProbs))
})
output$ExtensionProbs <- renderUI({
shinyMatrix::matrixInput(inputId = "extensionProbs", value = initialPy(),
class = "numeric",
cols = list(names = TRUE, editableNames = TRUE),
rows = list(names = TRUE),
paste = TRUE,
copy = TRUE)
})
output$EnterQuantiles <- renderUI({
shinyMatrix::matrixInput(inputId = "myvals", value = initialVals(),
class = "numeric",
cols = list(names = TRUE),
rows = list(names = TRUE),
paste = TRUE,
copy = TRUE)
})
output$EnterChips <- renderUI({
shinyMatrix::matrixInput(inputId = "myChips", value = initialChips(),
class = "numeric",
cols = list(names = TRUE),
rows = list(names = TRUE),
paste = TRUE,
copy = TRUE)
})
output$setPDFxaxisLimits <- renderUI({
req(input$myvals, boundaries())
if(input$entry == "Quantiles"){
initialRange <- range(input$myvals)
}
if(input$entry == "Roulette"){
initialRange <- range(boundaries())
}
textInput("xlimPDF", label = h5("x-axis limits"),
paste(initialRange, collapse = ", "))
})
output$allDistributions <- renderUI({
distnames <- paste0("dist", 1:input$nY)
allControls <- vector("list", input$nY)
for(i in 1:input$nY){
allControls[[i]] <- selectInput(distnames[i],
label = colnames(input$extensionProbs)[i],
choices = list(Histogram = "hist",
Normal = "normal",
'Student-t' = "t",
Gamma = "gamma",
'Log normal' = "lognormal",
'Log Student-t' = "logt",
Beta = "beta",
'Mirror gamma' = "mirrorgamma",
'Mirror log normal' = "mirrorlognormal",
'Mirror log Student-t' = "mirrorlogt",
'Best fitting' = "best"),
selected = "best")
}
tagList(allControls)
})
observeEvent(input$extensionProbs,{
pY <- input$extensionProbs[1, ]
    if(min(pY) < 0 | max(pY) > 1 | sum(pY) != 1){
showNotification("Make sure probabilities are between 0 and 1, and sum
to 1.",
type = "error",
duration = 10)
validPy$valid <- NULL
}else{
validPy$valid <- TRUE
}
}
)
observeEvent(input$loadQuantiles,{
newFile$quantiles <- FALSE
}, priority = 1
)
observeEvent(input$loadChips,{
newFile$chips <- FALSE
}
)
observeEvent(input$exit, {
stopApp(list(fit = myfit(), extensionProbs = input$extensionProbs))
})
output$lpquantiles <- renderTable({
req(quantileValues(), validPy$valid)
quantileValues()
})
output$bestFittingDistributions <- renderTable({
req(myfit(), nExp(), input$extensionProbs, validPy$valid)
df <- data.frame(Y = colnames(input$extensionProbs),
weight = input$extensionProbs[1, ],
bf = myfit()$best.fitting[, 1])
colnames(df) <- c("Y", "weight", "best fit")
df
})
output$barplot <- renderPlot({
req(input$extensionProbs, validPy$valid)
dat <- data.frame(
value = factor(colnames(input$extensionProbs),
levels = colnames(input$extensionProbs)),
probability = input$extensionProbs[1, ]
)
ggplot(data=dat, aes(x=value, y=probability)) +
geom_bar(stat="identity", fill = "
col = "
ylim(0, 1) +
theme(text = element_text(size = input$fs))
})
output$condDistPlot <- renderPlot({
req(myfit(), fq(), xlimPDF(), input$fs)
xlimits <- xlimPDF()
suppressWarnings(plotfit(myfit(), d = input$dist,
ex = which(input$extensionVariableValue ==
colnames(input$extensionProbs))[1],
ql = fq()[1], qu = fq()[2],
xl = xlimPDF()[1], xu = xlimPDF()[2],
fs = input$fs))
})
output$distPlot <- renderPlot({
req(myfit(), fq(), xlimPDF(), input$fs, validPy$valid)
distVector <- rep("best", input$nY)
for(i in 1:input$nY){
distVector[i] <- eval(parse(text = paste0("input$dist", i)))
}
xlimits <- xlimPDF()
print(makeLinearPoolPlot(myfit(), xl = xlimits[1],
xu = xlimits[2],
d=distVector,
w = input$extensionProbs[1, ], lwd = 1,
xlab = "x",
ylab = expression(f[X](x)),
ql = quantileValues()[1, 2],
qu = quantileValues()[2, 2],
addquantile = TRUE,
fs = input$fs,
expertnames = colnames(input$extensionProbs),
lpname = "marginal"))
})
output$Tertiles <- renderPlot({
req(myfit(), input$fs)
tertilevals <- matrix(0, 3, input$nY)
for(i in 1:input$nY){
if(input$entry == "Quantiles"){
tertilevals[, i] <- approx(c(0, pQuantile(), 1),
input$myvals[, i],
c(1/3, 0.5, 2/3))$y}
if(input$entry == "Roulette"){
tertilevals[, i] <- approx(pChip()[, i],
vChip()[, i],
c(1/3, 0.5, 2/3))$y}
}
plotTertiles(tertilevals, l(), u(), fs = input$fs)
})
output$Quartiles <- renderPlot({
req(myfit(), input$fs)
quartilevals <- matrix(0, 3, input$nY)
for(i in 1:input$nY){
if(input$entry == "Quantiles"){
quartilevals[, i] <- approx(c(0, pQuantile(), 1),
input$myvals[, i],
c(0.25, 0.5, 0.75))$y}
if(input$entry == "Roulette"){
quartilevals[, i] <- approx(pChip()[, i],
vChip()[, i],
c(0.25, 0.5, 0.75))$y}
}
plotQuartiles(quartilevals, l(), u(), fs = input$fs)
})
output$saveQuantiles <- downloadHandler(
filename = function() {
paste('judgements-', Sys.Date(), '.csv', sep='')
},
content = function(file) {
utils::write.csv(input$myvals, file)
}
)
output$saveChips <- downloadHandler(
filename = function() {
paste('judgements-', Sys.Date(), '.csv', sep='')
},
content = function(file) {
utils::write.csv(input$myChips, file)
}
)
output$report <- downloadHandler(
filename = function(){switch(input$outFormat,
html_document = "distributions-report.html",
pdf_document = "distributions-report.pdf",
word_document = "distributions-report.docx")},
content = function(file) {
tempReport <- file.path(tempdir(), "elicitationShinySummaryMixture.Rmd")
file.copy(system.file("shinyAppFiles", "elicitationShinySummaryMixture.Rmd",
package="SHELF"),
tempReport, overwrite = TRUE)
params <- list(fit = myfit(),
entry = input$entry,
chips = input$myChips,
probY = input$extensionProbs)
rmarkdown::render(tempReport, output_file = file,
params = params,
output_format = input$outFormat,
envir = new.env(parent = globalenv())
)
}
)
output$downloadData <- downloadHandler(
filename = "marginal-sample.csv",
content = function(file) {
utils::write.csv(xSample(), file, row.names = FALSE)
}
)
})
runApp(list(ui=ui, server=server), launch.browser = TRUE)
} |
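## Usage sketch: the app runs interactively and, when the user presses Quit,
## stopApp() returns the fitted conditional distributions together with the
## elicited extension-variable probabilities.
if (interactive()) {
  result <- elicitMixture()
  result$fit             # fitdist output: one conditional distribution per value of Y
  result$extensionProbs  # elicited P(Y = y)
}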
library(dplyr)
library(tidyr)
library(forcats)
library(ggplot2)
library(tidypaleo)
theme_set(theme_paleo(8))
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
fig.height = 3,
fig.width = 6,
dpi = 96
)
data("long_lake_plottable")
data("alta_lake_geochem")
data("keji_lakes_plottable")
data("halifax_lakes_plottable")
alta_lake_geochem
alta_plot <- ggplot(alta_lake_geochem, aes(x = value, y = depth)) +
geom_lineh() +
geom_point() +
scale_y_reverse() +
facet_geochem_gridh(vars(param)) +
labs(x = NULL, y = "Depth (cm)")
alta_plot
alta_plot +
geom_hline(yintercept = c(4, 16), col = "red", lty = 2, alpha = 0.7)
zone_data <- tibble(ymin = 4, ymax = 16, xmin = -Inf, xmax = Inf)
alta_plot +
geom_rect(
mapping = aes(ymin = ymin, ymax = ymax, xmin = xmin, xmax = xmax),
data = zone_data,
alpha = 0.2,
fill = "blue",
inherit.aes = FALSE
)
cu_standard_data <- tibble(param = "Cu", xmin = 35.7, xmax = Inf, ymin = -Inf, ymax = Inf)
alta_plot +
geom_rect(
mapping = aes(ymin = ymin, ymax = ymax, xmin = xmin, xmax = xmax),
data = cu_standard_data,
alpha = 0.2,
fill = "red",
inherit.aes = FALSE
)
alta_plot +
geom_errorbarh(aes(xmin = value - stdev, xmax = value + stdev), height = 0.5)
alta_lake_geochem %>%
mutate(param = fct_relevel(param, "Ti", "Cu", "C/N")) %>%
ggplot(aes(x = value, y = depth)) +
geom_lineh() +
geom_point() +
scale_y_reverse() +
facet_geochem_gridh(vars(param)) +
labs(x = NULL, y = "Depth (cm)")
alta_lake_geochem %>%
filter(param %in% c("d15N", "d13C", "C/N")) %>%
ggplot(aes(x = value, y = depth)) +
geom_lineh() +
geom_point() +
scale_y_reverse() +
facet_geochem_gridh(vars(param)) +
labs(x = NULL, y = "Depth (cm)")
alta_adm <- age_depth_model(
alta_lake_bacon_ages,
depth = depth_cm,
age = 1950 - age_weighted_mean_year_BP
)
alta_plot +
scale_y_depth_age(
alta_adm,
age_name = "Age (Year AD)"
)
alta_plot +
facet_geochem_gridh(
vars(param),
units = c("C/N" = NA, "Cu" = "ppm", "d13C" = "‰", "d15N" = "‰"),
default_units = "%"
)
combined_data <- bind_rows(long_lake_plottable, alta_lake_geochem)
combined_data
ggplot(combined_data, aes(x = value, y = depth)) +
geom_lineh() +
geom_point() +
scale_y_reverse() +
facet_geochem_gridh(vars(param), grouping = vars(location), scales = "free") +
labs(x = NULL, y = "Depth (cm)")
alta_plot_1 <- combined_data %>%
filter(location == "ALGC2") %>%
ggplot(aes(x = value, y = depth)) +
geom_lineh() +
geom_point() +
scale_y_reverse() +
facet_geochem_gridh(vars(param), scales = "free") +
labs(x = NULL, y = "Depth (cm)", title = "Alta Lake")
long_plot_2 <- combined_data %>%
filter(location == "LL PC2") %>%
ggplot(aes(x = value, y = depth)) +
geom_lineh() +
geom_point() +
scale_y_reverse() +
facet_geochem_gridh(vars(param), scales = "free") +
labs(x = NULL, y = "Depth (cm)", title = "Long Lake")
library(patchwork)
wrap_plots(alta_plot_1, long_plot_2, ncol = 1)
coniss <- alta_lake_geochem %>%
nested_data(qualifiers = c(age, depth), key = param, value = value, trans = scale) %>%
nested_chclust_coniss()
alta_plot +
layer_dendrogram(coniss, aes(y = depth), param = "CONISS") +
layer_zone_boundaries(coniss, aes(y = depth))
alta_plot +
facet_geochem_wraph(vars(param), rotate_axis_labels = 0, ncol = 4)
ggplot(alta_lake_geochem, aes(x = age, y = value)) +
geom_line() +
geom_point() +
scale_y_reverse() +
facet_geochem_grid(vars(param)) +
labs(x = "Age (Year AD)", y = NULL)
data("keji_lakes_plottable")
keji_lakes_plottable
keji_plot <- ggplot(keji_lakes_plottable, aes(x = rel_abund, y = depth)) +
geom_col_segsh() +
scale_y_reverse() +
facet_abundanceh(vars(taxon), grouping = vars(location)) +
labs(x = "Relative abundance (%)", y = "Depth (cm)")
keji_plot
ggplot(keji_lakes_plottable, aes(x = rel_abund, y = depth)) +
geom_areah() +
scale_y_reverse() +
facet_abundanceh(vars(taxon), grouping = vars(location)) +
labs(x = "Relative abundance (%)", y = "Depth (cm)")
ggplot(keji_lakes_plottable, aes(x = rel_abund, y = depth)) +
geom_col_segsh() +
geom_lineh() +
scale_y_reverse() +
facet_abundanceh(vars(taxon), grouping = vars(location)) +
labs(x = "Relative abundance (%)", y = "Depth (cm)")
keji_plot +
geom_lineh_exaggerate(exaggerate_x = 5, col = "grey70", lty = 2)
keji_pca_scores <- keji_lakes_plottable %>%
group_by(location) %>%
nested_data(qualifiers = depth, key = taxon, value = rel_abund, trans = sqrt) %>%
nested_prcomp() %>%
unnest(qualifiers, scores) %>%
gather(key = component, value = value, starts_with("PC")) %>%
filter(component %in% c("PC1", "PC2"))
keji_pca_plot <- ggplot(keji_pca_scores, aes(x = value, y = depth)) +
geom_lineh() +
geom_point() +
scale_y_reverse() +
facet_geochem_gridh(vars(component), grouping = vars(location)) +
labs(x = NULL)
library(patchwork)
wrap_plots(
keji_plot +
theme(strip.background = element_blank(), strip.text.y = element_blank()),
keji_pca_plot +
theme(axis.text.y.left = element_blank(), axis.ticks.y.left = element_blank()) +
labs(y = NULL),
nrow = 1,
widths = c(4, 1)
)
keji_coniss <- keji_lakes_plottable %>%
group_by(location) %>%
nested_data(qualifiers = depth, key = taxon, value = rel_abund) %>%
nested_chclust_coniss()
library(patchwork)
wrap_plots(
keji_plot +
theme(strip.background = element_blank(), strip.text.y = element_blank()),
keji_pca_plot +
layer_dendrogram(keji_coniss, component = "CONISS", aes(y = depth)) +
theme(axis.text.y.left = element_blank(), axis.ticks.y.left = element_blank()) +
labs(y = NULL),
nrow = 1,
widths = c(2, 1)
)
coniss_plot <- ggplot() +
layer_dendrogram(keji_coniss, aes(y = depth)) +
scale_y_reverse() +
facet_geochem_gridh(vars("CONISS"), grouping = vars(location)) +
labs(x = NULL)
wrap_plots(
keji_plot +
theme(strip.background = element_blank(), strip.text.y = element_blank()),
coniss_plot +
theme(axis.text.y.left = element_blank(), axis.ticks.y.left = element_blank()) +
labs(y = NULL),
nrow = 1,
widths = c(6, 1)
)
data("halifax_lakes_plottable")
halifax_lakes_plottable
halifax_plot <- ggplot(halifax_lakes_plottable, aes(x = rel_abund, y = location, fill = sample_type)) +
geom_colh(width = 0.5, position = "dodgev") +
facet_abundanceh(vars(taxon)) +
labs(x = "Relative abundance (%)", y = NULL, fill = "Sample Type")
halifax_plot
halifax_lakes_plottable %>%
mutate(location = fct_relevel(location, "Bell Lake", "Bayers", "Little Springfield") %>% fct_rev()) %>%
ggplot(aes(x = rel_abund, y = location, fill = sample_type)) +
geom_colh(width = 0.5, position = "dodgev") +
facet_abundanceh(vars(taxon)) +
labs(x = "Relative abundance (%)", y = NULL, fill = "Sample Type")
halifax_clust <- halifax_lakes_plottable %>%
filter(sample_type == "top") %>%
nested_data(qualifiers = location, key = taxon, value = rel_abund) %>%
nested_hclust(method = "average")
dendro_order <- halifax_clust %>%
unnest(qualifiers, dendro_order) %>%
arrange(dendro_order) %>%
pull(location)
library(patchwork)
wrap_plots(
halifax_plot +
scale_y_discrete(limits = dendro_order) +
theme(legend.position = "left"),
ggplot() +
layer_dendrogram(halifax_clust, aes(y = location)) +
scale_y_discrete(limits = dendro_order) +
labs(x = "Dispersion", y = NULL) +
theme(axis.text.y.left = element_blank(), axis.ticks.y.left = element_blank()),
widths = c(4, 1)
) |
makeNhoods <- function(x, prop=0.1, k=21, d=30, refined=TRUE, reduced_dims="PCA", refinement_scheme = "reduced_dim") {
if(is(x, "Milo")){
message("Checking valid object")
if(!.valid_graph(graph(x))){
stop("Not a valid Milo object - graph is missing. Please run buildGraph() first.")
}
graph <- graph(x)
if(isTRUE(refined) & refinement_scheme == "reduced_dim"){
X_reduced_dims <- reducedDim(x, reduced_dims)
if (d > ncol(X_reduced_dims)) {
warning("Specified d is higher than the total number of dimensions in reducedDim(x, reduced_dims).
Falling back to using",ncol(X_reduced_dims),"dimensions\n")
d <- ncol(X_reduced_dims)
}
X_reduced_dims <- X_reduced_dims[,seq_len(d)]
mat_cols <- ncol(x)
match.ids <- all(rownames(X_reduced_dims) == colnames(x))
if(!match.ids){
stop("Rownames of reduced dimensions do not match cell IDs")
}
}
} else if(is(x, "igraph")){
if(isTRUE(refined) & refinement_scheme == "reduced_dim" & !is.matrix(reduced_dims)) {
stop("No reduced dimensions matrix provided - required for refined sampling with refinement_scheme = reduced_dim.")
}
graph <- x
if(isTRUE(refined) & refinement_scheme == "reduced_dim"){
X_reduced_dims <- reduced_dims
mat_cols <- nrow(X_reduced_dims)
if(is.null(rownames(X_reduced_dims))){
stop("Reduced dim rownames are missing - required to assign cell IDs to neighbourhoods")
}
}
if(isTRUE(refined) & refinement_scheme == "graph" & is.matrix(reduced_dims)){
warning("Ignoring reduced dimensions matrix because refinement_scheme = graph was selected.")
}
} else{
stop("Data format: ", class(x), " not recognised. Should be Milo or igraph.")
}
random_vertices <- .sample_vertices(graph, prop, return.vertices = TRUE)
if (isFALSE(refined)) {
sampled_vertices <- random_vertices
} else if (isTRUE(refined)) {
if(refinement_scheme == "reduced_dim"){
sampled_vertices <- .refined_sampling(random_vertices, X_reduced_dims, k)
} else if (refinement_scheme == "graph") {
sampled_vertices <- .graph_refined_sampling(random_vertices, graph)
} else {
stop("When refined == TRUE, refinement_scheme must be one of \"reduced_dim\" or \"graph\".")
}
} else {
stop("refined must be TRUE or FALSE")
}
sampled_vertices <- unique(sampled_vertices)
if(is(x, "Milo")){
nh_mat <- Matrix(data = 0, nrow=ncol(x), ncol=length(sampled_vertices), sparse = TRUE)
} else if(is(x, "igraph")){
nh_mat <- Matrix(data = 0, nrow=length(V(x)), ncol=length(sampled_vertices), sparse = TRUE)
}
v.class <- V(graph)$name
if(is(x, "Milo")){
rownames(nh_mat) <- colnames(x)
} else if(is(x, "igraph")){
if(is.null(v.class) & refinement_scheme == "reduced_dim"){
rownames(nh_mat) <- rownames(X_reduced_dims)
} else if(!is.null(v.class)){
rownames(nh_mat) <- V(graph)$name
}
}
for (X in seq_len(length(sampled_vertices))){
nh_mat[unlist(neighborhood(graph, order = 1, nodes = sampled_vertices[X])), X] <- 1
}
colnames(nh_mat) <- as.character(sampled_vertices)
if(is(x, "Milo")){
nhoodIndex(x) <- as(sampled_vertices, "list")
nhoods(x) <- nh_mat
return(x)
} else {
return(nh_mat)
}
}
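## Usage sketch following the miloR workflow (the object names and parameter
## values are illustrative assumptions):
if (FALSE) {
  library(miloR)
  milo <- Milo(sce)  # sce: a SingleCellExperiment with a "PCA" reducedDim
  milo <- buildGraph(milo, k = 21, d = 30)
  milo <- makeNhoods(milo, prop = 0.1, k = 21, d = 30,
                     refined = TRUE, refinement_scheme = "reduced_dim")
  dim(nhoods(milo))  # cells x sampled neighbourhoods
}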
.refined_sampling <- function(random_vertices, X_reduced_dims, k){
message("Running refined sampling with reduced_dim")
vertex.knn <-
findKNN(
X = X_reduced_dims,
k = k,
subset = as.vector(random_vertices),
get.index = TRUE,
get.distance = FALSE
)
nh_reduced_dims <- t(apply(vertex.knn$index, 1, function(x) colMedians(X_reduced_dims[x,])))
if(is.null(rownames(X_reduced_dims))){
warning("Rownames not set on reducedDims - setting to row indices")
rownames(X_reduced_dims) <- as.character(seq_len(nrow(X_reduced_dims)))
}
colnames(nh_reduced_dims) <- colnames(X_reduced_dims)
rownames(nh_reduced_dims) <- paste0('nh_', seq_len(nrow(nh_reduced_dims)))
all_reduced_dims <- rbind(nh_reduced_dims, X_reduced_dims)
nn_mat <- findKNN(all_reduced_dims,
k = nrow(nh_reduced_dims) + 1,
subset = rownames(nh_reduced_dims))[["index"]]
nh_ixs <- seq_len(nrow(nh_reduced_dims))
i = 1
sampled_vertices <- rep(0, nrow(nn_mat))
while (any(sampled_vertices <= max(nh_ixs))) {
update_ix <- which(sampled_vertices <= max(nh_ixs))
sampled_vertices[update_ix] <- nn_mat[update_ix, i]
i <- i + 1
}
sampled_vertices <- sampled_vertices - max(nh_ixs)
return(sampled_vertices)
}
.valid_graph <- function(x){
if(isTRUE(is_igraph(x))){
TRUE
} else{
FALSE
}
}
.sample_vertices <- function(graph, prop, return.vertices=FALSE){
random.vertices <- sample(V(graph), size=floor(prop*length(V(graph))))
if(isTRUE(return.vertices)){
return(random.vertices)
} else{
message("Finding neighbours of sampled vertices")
vertex.list <- sapply(seq_len(length(random.vertices)), FUN=function(X) neighbors(graph, v=random.vertices[X]))
return(list(random.vertices, vertex.list))
}
}
.graph_refined_sampling <- function(random_vertices, graph){
message("Running refined sampling with graph")
random_vertices <- as.vector(random_vertices)
X_graph <- set_vertex_attr(graph, "name", value = 1:length(V(graph)))
refined_vertices <- lapply(seq_along(random_vertices), function(i){
target_vertices <- unlist(neighborhood(X_graph, order = 1, nodes = random_vertices[i]))
target_vertices <- target_vertices[-1]
rv_induced_subgraph <- induced_subgraph(graph = X_graph, vids = target_vertices)
triangles <- count_triangles(rv_induced_subgraph)
max_triangles <- max(triangles)
max_triangles_indices <- which(triangles == max_triangles)
resulting_vertices <- V(rv_induced_subgraph)[max_triangles_indices]$name[1]
return(resulting_vertices)
}) %>% unlist() %>% as.integer()
return(refined_vertices)
} |
HKMpred <- function(blk,At,par,rp,Rd,sigmu,X,Z,invZchol){
Zinv <- matrix(list(),nrow=nrow(blk),1)
dd <- matrix(list(),nrow=nrow(blk),1)
gamx <- matrix(list(),nrow=nrow(blk),1)
gamz <- matrix(list(),nrow=nrow(blk),1)
for(p in 1:nrow(blk)){
n <- sum(blk[[p,2]])
numblk <- max(dim(as.matrix(blk[[p,2]])))
if(blk[[p,1]] == "l"){
Zinv[[p,1]] <- 1/Z[[p,1]]
dd[[p,1]] <- X[[p,1]]/Z[[p,1]]
}else if(blk[[p,1]] == "q"){
gaptmp <- qops(blk,p,as.matrix(X[[p]]),as.matrix(Z[[p]]),1)
gamz2 <- qops(blk,p,as.matrix(Z[[p]]), as.matrix(Z[[p]]),2)
gamz[[p]] <- sqrt(gamz2)
Zinv[[p]] <- qops(blk,p,-1/gamz2,as.matrix(Z[[p]]),4)
dd[[p]] <- qops(blk,p,gaptmp/gamz2, matrix(1,n,1),4)
}else if(blk[[p,1]] == "s"){
if(numblk == 1){
Zinv[[p,1]] <- Prod2(blk,p,invZchol[[p,1]],t(invZchol[[p,1]]),1)
        if((par$iter == 2 | par$iter == 3) & !is(Zinv[[p]], "sparseMatrix")){
Zinv[[p]] <- Zinv[[p]] + 1e-16
}
}else{
Zinv[[p,1]] <- Prod2(blk,p,invZchol[[p,1]],t(invZchol[[p,1]]),1)
}
}
}
par$Zinv <- Zinv
par$gamx <- gamx
par$gamz <- gamz
par$dd <- dd
m <- length(rp)
schur <- matrix(0,m,m)
UU <- c()
EE <- c()
Afree <- c()
dX <- matrix(list(),nrow(blk),1)
dy <- c()
dZ <- matrix(list(),nrow(blk),1)
for(p in 1:nrow(blk)){
if(blk[[p,1]] == "l"){
out <- schurmat_lblk(blk,At,par,schur,UU,EE,p,par$dd)
schur <- out$schur
UU <- out$UU
EE <- out$EE
}else if(blk[[p,1]] == "q"){
out <- schurmat_qblk(blk,At,par,schur,UU,EE,p,par$dd, par$Zinv,X)
schur <- out$schur
UU <- out$UU
EE <- out$EE
}else if(blk[[p,1]] == "s"){
if(length(get("schurfun", pos=sys.frame(which = -2))[[p]]) == 0){
schur <- schurmat_sblk(blk,At,par,schur,p,X,par$Zinv)
}else if(is.character(get("schurfun", pos=sys.frame(which = -2))[[p]])){
schurtmp <- Matrix(0,m,m,sparse=TRUE)
if(length(par$permZ[[p]]) > 0){
Zpinv <- Zinv[[p]][par$permZ[[p]], par$permZ[[p]]]
Xp <- X[[p]][par$permZ[[p]], par$permZ[[p]]]
}else{
Xp <- X[[p]]
Zpinv <- Zinv[[p]]
}
schurfun_input <- get("schurfun", pos=sys.frame(which = -2))
schurfun_tmp <- match.fun(schurfun_input[[p]])
schurtmp <- schurfun_tmp(Xp,Zpinv,get("schurfun_par", pos=sys.frame(which = -2))[p,])
schur <- schur + schurtmp
}
}else if(blk[[p,1]] == "u"){
Afree <- cbind(Afree,t(At[[p]]))
}
}
out <- HKMrhsfun(blk,At,par,X,Z,rp,Rd,sigmu)
rhs <- out$rhs
EinvRc <- out$EinvRc
hRd <- out$hRd
out <- linsysolve(par, schur, UU, Afree, EE, rhs)
xx <- as.matrix(out$xx)
coeff <- out$coeff
L <- out$L
flag <- out$flag
if(flag == 1){
return(list(par=par,dX=c(),dy=c(),dZ=c(),coeff=c(),L=c(),hRd=c()))
}
out <- HKMdirfun(blk,At,par,Rd,EinvRc,X,xx,m)
dX <- out$dX
dy <- out$dy
dZ <- out$dZ
return(list(par=par,dX=dX,dy=dy,dZ=dZ,coeff=coeff,L=L,hRd=hRd))
} |
NULL
osf_dev_on <- function() {
renviron <- normalizePath(".Renviron")
stopifnot(file.exists(renviron))
stopifnot(readRenviron(renviron))
Sys.setenv(OSF_SERVER = "test")
message("osfr development mode enabled.")
osf_auth()
}
osf_dev_off <- function() {
renviron <- normalizePath("~/.Renviron")
stopifnot(file.exists(renviron))
stopifnot(readRenviron(renviron))
Sys.unsetenv("OSF_SERVER")
message("osfr development mode disabled.")
osf_auth()
} |
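## Usage sketch: switch osfr over to OSF's test server and back (assumes a
## .Renviron holding test-server credentials, as required above):
if (FALSE) {
  osf_dev_on()               # subsequent osfr calls hit the test server
  osf_retrieve_user("me")
  osf_dev_off()              # restore the production server
}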
expected <- TRUE
test(id=3, code={
argv <- list(c(1L, 0L, NA, 1L))
do.call('is.integer', argv);
}, o = expected);
|
test_that("use_vars_template works", {
flatly <- file.path(tempdir(), "flatly.scss")
default <- file.path(tempdir(), "default.scss")
use_vars_template(
output_file = flatly,
theme = "flatly"
)
use_vars_template(
output_file = default,
theme = "default"
)
expect_true(file.exists(flatly))
expect_true(file.exists(default))
unlink(flatly)
unlink(default)
})
test_that("vars_file works", {
vars <- bs_vars_file(input_file = system.file(
"assets/bootstrap-3.4.1/default/stylesheets/bootstrap/_variables.scss",
package = "fresh"
))
expect_is(vars, "sass_file")
expect_is(vars, "bootstrap_vars_file")
})
test_that("create_theme works with bs_vars_file", {
my_vars <- file.path(tempdir(), "custom-vars.scss")
my_theme <- file.path(tempdir(), "theme.css")
use_vars_template(
output_file = my_vars,
theme = "flatly"
)
expect_true(file.exists(my_vars))
create_theme(
theme = "flatly",
bs_vars_file(input_file = my_vars),
output_file = my_theme
)
expect_true(file.exists(my_theme))
unlink(my_vars)
unlink(my_theme)
})
test_that("create_theme works with bs_vars_file and vars", {
my_vars <- file.path(tempdir(), "custom-vars.scss")
my_theme <- file.path(tempdir(), "theme.css")
use_vars_template(
output_file = my_vars,
theme = "flatly"
)
expect_true(file.exists(my_vars))
create_theme(
theme = "default",
bs_vars_alert(padding = "5px"),
bs_vars_file(input_file = my_vars),
output_file = my_theme
)
expect_true(file.exists(my_theme))
unlink(my_vars)
unlink(my_theme)
}) |
library(datasets)
data(attitude)
attitude
dat = attitude[,c(3,4)]
plot(dat, main = "% of favourable responses to
Learning and Privilege", pch =20, cex =2)
set.seed(7)
km2 = kmeans(dat, 2, nstart=100)
km2$withinss
km2$tot.withinss
plot(dat, col =(km2$cluster +1) , main="K-Means result with 2 clusters", pch=20, cex=2)
km3 = kmeans(dat, 3, nstart=100)
km3$withinss
km3$tot.withinss
mydata <- dat
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:15)
wss[i] <- sum(kmeans(mydata, centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares", main="Assessing the Optimal Number of Clusters with the Elbow Method", pch=20, cex=2)
set.seed(7)
km6 = kmeans(dat, 6, nstart=100)
km10 = kmeans(dat, 10, nstart=100)
km6
km2$tot.withinss; km3$tot.withinss;
km6$tot.withinss; km10$tot.withinss
plot(dat, col =(km6$cluster +1) , main="K-Means result with 6 clusters", pch=20, cex=2)
?kmeans |
FUP<-function(data, thetaInit, item, startvals, k = 0){
P <- function(m){
1/(1+exp(-m))
}
fncFUP<- function(bParam, k=1, data, item, thetaInit){
    if(k>3) stop("\n\n*** FATAL ERROR: k must be <= 3 ***\n\n")
if(k==0){
b0 = bParam[1]
b1 = bParam[2]
y = data[,item]
m = b0 + b1*thetaInit
Pm <- P(m);
Pm[Pm==1] <-1-1e-12
Pm[Pm<1e-12]<-1e-12
return(-mean( y*log(Pm) + (1 - y) * log(1 - Pm) ))
}
if(k==1){
b0 = bParam[1]
b1 = bParam[2]
b2 = bParam[3]
b3 = bParam[4]
y = data[,item]
m = b0 + b1*thetaInit + b2*thetaInit^2 + b3*thetaInit^3
Pm <- P(m);
Pm[Pm=="NaN"]<-.5
Pm[Pm==1] <-1-1e-12
Pm[Pm<1e-12]<-1e-12
return(-mean( y*log(Pm) + (1 - y) * log(1 - Pm) ))
}
if(k==2){
b0 = bParam[1]
b1 = bParam[2]
b2 = bParam[3]
b3 = bParam[4]
b4 = bParam[5]
b5 = bParam[6]
y = data[,item]
m = b0 + b1*thetaInit + b2*thetaInit^2 + b3*thetaInit^3 + b4*thetaInit^4 + b5*thetaInit^5
Pm <- P(m);
Pm[Pm=="NaN"]<-.5
Pm[Pm==1] <-1-1e-12
Pm[Pm<1e-12]<-1e-12
return(-mean( y*log(Pm) + (1 - y) * log(1 - Pm) ))
}
if(k==3){
b0 = bParam[1]
b1 = bParam[2]
b2 = bParam[3]
b3 = bParam[4]
b4 = bParam[5]
b5 = bParam[6]
b6 = bParam[7]
b7 = bParam[8]
y = data[,item]
m = b0 + b1*thetaInit + b2*thetaInit^2 + b3*thetaInit^3 +
b4*thetaInit^4 + b5*thetaInit^5 +
b6*thetaInit^6 + b7*thetaInit^7
Pm <- P(m);
Pm[Pm=="NaN"]<-.5
Pm[Pm==1] <-1-1e-12
Pm[Pm<1e-12]<-1e-12
return(-mean( y*log(Pm) + (1 - y) * log(1 - Pm) ))
}
}
out <- optim(par=startvals,
fn=fncFUP,
method="BFGS",
k=k,
data=data,
item=item,
thetaInit=thetaInit,
               control=list(reltol=1e-12,  # method "BFGS" reads 'reltol'; 'factr' applies only to L-BFGS-B
maxit=1000))
NSubj <- nrow(data)
q <- 2 * k + 2
AIC <- 2 * out$value + (2/NSubj) * q
BIC <- 2 * out$value + (log(NSubj)/NSubj) * q
list( b = out$par,
FHAT=out$value,
counts=out$counts,
AIC = AIC,
BIC = BIC,
convergence = out$convergence)
} |
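## Usage sketch with simulated data (all values below are illustrative): fit
## the k = 0 model, i.e. a logistic curve linear in theta, for a single item.
if (FALSE) {
  set.seed(1)
  N <- 500
  theta <- rnorm(N)
  p <- 1 / (1 + exp(-(0.25 + 1.2 * theta)))   # true response probabilities
  dat <- matrix(rbinom(N, 1, p), ncol = 1)
  FUP(data = dat, thetaInit = theta, item = 1, startvals = c(0, 1), k = 0)
}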
all_easylinear <- function(...) UseMethod("all_easylinear")
all_easylinear.formula <- function(formula, data, h = 5, quota = 0.95,
subset = NULL, ...) {
if (inherits(data, "tbl_df")) data <- as.data.frame(data)
X <- get_all_vars(formula, data)
if (!is.null(subset)) X <- X[subset, ]
all_easylinear.data.frame(data = X, grouping = formula, h = h, quota = quota)
}
all_easylinear.data.frame <-
function(data, grouping, time = "time", y = "value", h = 5, quota = 0.95, ...) {
if (inherits(data, "tbl_df")) data <- as.data.frame(data)
splitted.data <- multisplit(data, grouping)
if (inherits(grouping, "formula")) {
p <- parse_formula(grouping)
time <- p$timevar
y <- p$valuevar
grouping <- p$groups
}
fits <- lapply(splitted.data,
function(tmp)
suppressWarnings(fit_easylinear(
tmp[,time], tmp[,y], h = h, quota = quota
)))
new("multiple_easylinear_fits", fits = fits, grouping = grouping)
} |
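## Usage sketch with the example data shipped with the growthrates package
## (dataset name and grouping formula follow that package's documentation;
## treat them as assumptions):
if (FALSE) {
  library(growthrates)
  data(bactgrowth)
  fits <- all_easylinear(value ~ time | strain + conc + replicate,
                         data = bactgrowth, h = 5, quota = 0.95)
  coef(fits)   # one row of growth parameters per strain/conc/replicate group
}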
context("Checking likert")
test_that("likert ...",{
}) |
expected <- eval(parse(text="structure(list(Df = NULL, `Sum Sq` = NULL, `Mean Sq` = NULL, `F value` = NULL, `Pr(>F)` = NULL), .Names = c(\"Df\", \"Sum Sq\", \"Mean Sq\", \"F value\", \"Pr(>F)\"), class = c(\"anova\", \"data.frame\"), row.names = c(\"(Intercept) \", \"rate \", \"additive \", \"rate:additive\", \"Residuals \"))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(list(Df = c(1, 1, 1, 1, 16), `Sum Sq` = c(309.6845, 0.420500000000001, 4.90050000000001, 3.9605, 64.924), `Mean Sq` = c(309.6845, 0.420500000000001, 4.90050000000001, 3.9605, 4.05775), `F value` = c(76.3192656028586, 0.103628858357464, 1.20768899020393, 0.976033516111146, NA), `Pr(>F)` = c(1.73825946976405e-07, 0.751685166772039, 0.288052080502172, 0.337885793589305, NA)), .Names = c(\"Df\", \"Sum Sq\", \"Mean Sq\", \"F value\", \"Pr(>F)\"), class = c(\"anova\", \"data.frame\"), row.names = c(\"(Intercept) \", \"rate \", \"additive \", \"rate:additive\", \"Residuals \")), structure(list(Df = NULL, `Sum Sq` = NULL, `Mean Sq` = NULL, `F value` = NULL, `Pr(>F)` = NULL), .Names = c(\"Df\", \"Sum Sq\", \"Mean Sq\", \"F value\", \"Pr(>F)\"), class = c(\"anova\", \"data.frame\"), row.names = c(\"(Intercept) \", \"rate \", \"additive \", \"rate:additive\", \"Residuals \")))"));
.Internal(copyDFattr(argv[[1]], argv[[2]]));
}, o=expected); |
segment_snags = function(las, algorithm, attribute = "snagCls")
{
UseMethod("segment_snags", las)
}
segment_snags.LAS = function(las, algorithm, attribute = "snagCls")
{
assert_is_algorithm(algorithm)
assert_is_algorithm_sng(algorithm)
stopif_forbidden_name(attribute)
lidR.context <- "segment_snags"
snags <- algorithm(las)
las <- add_lasattribute(las, snags, attribute, "Number identifying a snag class")
return(las)
}
segment_snags.LAScatalog = function(las, algorithm, attribute = "snagCls")
{
assert_is_algorithm(algorithm)
assert_is_algorithm_sng(algorithm)
stopif_forbidden_name(attribute)
opt_select(las) <- "*"
options <- list(need_buffer = TRUE, drop_null = TRUE, need_output_file = TRUE)
output <- catalog_map(las, segment_snags, algorithm = algorithm, .options = options)
return(output)
} |
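## Usage sketch based on the lidR documentation pattern (the sample file and
## the wing2015() algorithm parameters are assumptions drawn from that
## package, not from this source):
if (FALSE) {
  library(lidR)
  LASfile <- system.file("extdata", "MixedConifer.laz", package = "lidR")
  las <- readLAS(LASfile, select = "xyzi")
  bbpr <- matrix(0.62, nrow = 3, ncol = 4)   # hypothetical BBPR thresholds
  las <- segment_snags(las, wing2015(neigh_radii = c(1.5, 1, 2),
                                     BBPRthrsh_mat = bbpr))
  table(las$snagCls)                         # snag class per point
}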
tabsetPanel(
type = "tabs",
tabPanel(
"File",
verticalLayout(
br(),
uiOutput('fileInputSelectCtrl'),
fluidRow(
column(4, uiOutput('fileInputHeaderCtrl')),
column(4, uiOutput('fileInputQuoteCtrl')),
column(4, uiOutput('fileInputSepCtrl')))),
value='addFileTab'),
tabPanel(
"Database",
br(),
splitLayout(
cellWidths = c("25%", "75%"),
uiOutput('dbDriverTypeCtrl'),
div(
fluidRow(
column(8, textInput('dbHost', 'Host', 'db4free.net')),
column(4, textInput('dbPort', 'Port', '3307'))),
fluidRow(
column(6, textInput('dbUser', 'User', 'gray')),
column(6, passwordInput('dbPass', 'Password', '12348888'))),
textInput('dbName', 'Db name', 'ggtest'),
style='padding-right: 15px;')),
br(),
fluidRow(
column(9, textAreaInput('dbSqlQuery', 'Sql query', 'select * from diam_db_short',
width='430px')),
column(3, uiOutput('dbExecuteBtn'), style='padding-top:25px')),
value='addDbTab'),
id = 'addDatasetTabset',
selected = 'addDbTab'
) |
test_that("indention character can be arbitrary", {
sg <- function(indent_by = 1) {
create_style_guide(
indention = list(purrr::partial(indent_braces, indent_by = indent_by)),
indent_character = "\t",
style_guide_name = "test",
style_guide_version = 1
)
}
expect_equal(
style_text("{\n1\n}", style = sg) %>%
as.character(),
c("{", "\t1", "}")
)
}) |
SubATA.Forecast <- function(ata_output, hh=NULL, initialLevel)
{
X <- as.numeric(ata_output$actual)
ph <- ata_output$p
qh <- ata_output$q
phih <- ata_output$phi
modelType <- ata_output$model.type
lenX <- length(X)
if(is.null(hh)){
hh <- ata_output$h
}
if (initialLevel==TRUE){
Xobs <- mean(X)
}else {
Xobs <- X[lenX]
}
ata.forecast.fitted <- rep(NA, hh)
if (modelType=="A"){
coefph <- abs(ph/lenX)
coefqh <- abs(qh/lenX)
T_1 <- ata_output$trend[lenX-1]
S_1 <- ata_output$level[lenX-1]
ata_output$level[lenX] <- S <- coefph * Xobs + (1-coefph)*(S_1 + phih * T_1)
ata_output$trend[lenX] <- T <- coefqh * (S-S_1) + (1-coefqh) * (phih * T_1)
ata.forecast.fitted[1] <- S + (phih * T)
phiTotal <- phih
if (hh > 1){
for (h in 2:hh){
phiTotal <- phiTotal + (phih^h)
ata.forecast.fitted[h] <- S + (phiTotal * T)
}
}
}
if (modelType=="M"){
coefph <- abs(ph/lenX)
coefqh <- abs(qh/lenX)
T_1 <- ata_output$trend[lenX-1]
S_1 <- ata_output$level[lenX-1]
ata_output$level[lenX] <- S <- coefph * Xobs + (1-coefph)* S_1 * (T_1^phih)
ata_output$trend[lenX] <- T <- coefqh * (S/S_1) + (1-coefqh) * (T_1^phih)
ata.forecast.fitted[1] <- S * (T^phih)
phiTotal <- phih
if (hh > 1){
for (h in 2:hh){
phiTotal <- phiTotal + (phih^h)
ata.forecast.fitted[h] <- S * (T^phiTotal)
}
}
}
my_list <- ata_output
my_list$forecast <- ata.forecast.fitted
return(my_list)
} |
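## Usage sketch: SubATA.Forecast() extends a fitted ATA model list (carrying
## $actual, $level, $trend, $p, $q, $phi, $model.type and $h) with an
## hh-step-ahead $forecast component. Call shape only; 'fitted_ata' is a
## hypothetical fitted object:
if (FALSE) {
  fc <- SubATA.Forecast(fitted_ata, hh = 12, initialLevel = FALSE)
  fc$forecast   # numeric vector of length 12
}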
BH_competitive_ability <- function(lambda, pair_matrix){
if(all(pair_matrix >= 0)){
(lambda - 1)/sqrt(pair_matrix[1,1] * pair_matrix[1,2])
}else{
NA_real_
}
} |
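## Worked example (illustrative numbers): a focal species with fecundity
## lambda = 100, intraspecific coefficient 0.2 and interspecific coefficient
## 0.05 has competitive ability (100 - 1) / sqrt(0.2 * 0.05) = 990.
if (FALSE) {
  BH_competitive_ability(lambda = 100,
                         pair_matrix = matrix(c(0.2, 0.05), nrow = 1))
}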
extension <- function(filename, value=NULL, maxchar=10) {
if (!is.null(value)) {
extension(filename) <- value
return(filename)
}
lfn <- nchar(filename)
ext <- list()
for (f in 1:length(filename)) {
extstart <- -1
for (i in lfn[f] : 2) {
if (substr(filename[f], i, i) == ".") {
extstart <- i
break
}
}
if (extstart > 0) {
ext[f] <- substr(filename[f], extstart, lfn[f])
} else {
ext[f] <- ""
}
}
ext <- unlist(ext)
ext[nchar(ext) > maxchar] <- ''
return(ext)
}
'extension<-' <- function(filename, value) {
value <- trim(value)
if (value != "" & substr(value, 1, 1) != ".") {
value <- paste(".", value, sep="")
}
lfn <- nchar(filename)
fname <- list()
for (f in 1:length(filename)) {
extstart <- -1
for (i in lfn[f] : 2) {
if (substr(filename[f], i, i) == ".") {
extstart <- i
break
}
}
if (extstart > 0 & (lfn[f] - extstart) < 8) {
fname[f] <- paste(substr(filename[f], 1, extstart-1), value, sep="")
} else {
fname[f] <- paste(filename[f], value, sep="")
}
}
return( unlist(fname) )
}
.getExtension <- function(f, format) {
if (.setfileext()) {
def <- .defaultExtension(format, f)
if (def != '') {
extension(f) <- def
}
}
return(f)
}
.defaultExtension <- function(format=.filetype(), filename="") {
format <- toupper(format)
if (format == 'RASTER') { return('.grd')
} else if (format == 'GTIFF') {
e <- extension(filename)
if (tolower(e) %in% c(".tiff", ".tif")) {
return (e)
} else {
return('.tif')
}
} else if (format == 'CDF') { return('.nc')
} else if (format == 'KML') { return('.kml')
} else if (format == 'KMZ') { return('.kmz')
} else if (format == 'BIL') { return('.bil')
} else if (format == 'BSQ') { return('.bsq')
} else if (format == 'BIP') { return('.bip')
} else if (format == 'ASCII') { return('.asc')
} else if (format == 'RST') { return('.rst')
} else if (format == 'ILWIS') { return('.mpr')
} else if (format == 'SAGA') { return('.sdat')
} else if (format == 'BMP') { return('.bmp')
} else if (format == 'ADRG') { return('.gen')
} else if (format == 'BT') { return('.bt')
} else if (format == 'EHdr') { return('.bil')
} else if (format == 'ENVI') { return('.envi')
} else if (format == 'ERS') { return('.ers')
} else if (format == 'GSBG') { return('.grd')
} else if (format == 'HFA') { return( '.img')
} else if (format == 'IDA') { return( '.img')
} else if (format == 'RMF') { return('.rsw')
} else { return('') }
} |
LKDiag <- function(entries, nrow, diags = NULL, ncol = nrow, full = FALSE) {
entries = as.double(entries)
nEntries = as.integer(length(entries))
nrow = as.integer(nrow)
ncol = as.integer(ncol)
mat <- as.double(matrix(0, nrow=nrow, ncol=ncol))
if (is.null(diags)) {
if (nEntries %% 2 == 0) {
diags = c((-nEntries/2) : -1, 1 : (nEntries/2))
} else {
diags = (-(nEntries-1)/2) : ((nEntries-1)/2)
}
}
diags = as.integer(diags)
  if(max(diags) >= ncol || min(diags) <= -nrow) {
    warning("One of the given diagonals is outside the matrix, will be ignored")
    keep = (diags < ncol) & (diags > -nrow)   # '&' (vectorised), not '&&'
    if (nEntries == length(diags)) { entries = entries[keep]; nEntries = as.integer(length(entries)) }
    diags = diags[keep]
  }
if (length(entries) == 1) {
entries = as.double(rep(entries[1], length(diags)))
nEntries = as.integer(length(entries))
}
if (length(entries) != length(diags)) {
stop("The length of entries and length of diagonals don't match")
}
if(full) {
out <- .Fortran("LKDiag", entries = entries, nEntries = nEntries, diags = diags,
nRow = nrow, nCol = ncol, matrix = mat, PACKAGE="LatticeKrig")
return(matrix(out$mat, nrow=nrow, ncol=ncol))
} else {
ind <- NULL
ra <- NULL
da <- c(nrow, ncol)
for(idx in 1:length(diags)) {
diag <- diags[idx];
entry <- entries[idx];
if (diag < 0) {
diagLength <- min(ncol, nrow + diag)
startRow <- 1 - diag
} else {
diagLength <- min(nrow, ncol - diag)
startRow <- 1
}
rowInd <- (1:diagLength) - 1 + startRow
colInd <- rowInd + diag
diagInd <- cbind(rowInd, colInd)
ind <- rbind(ind, diagInd)
ra <- c(ra, rep(entry, diagLength))
}
ord <- order(ind[,1], ind[,2])
ind <- ind[ord,]
ra <- ra[ord]
return(spind2spam(list(ind = ind, da = da, ra = ra)))
}
} |
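## Usage sketch: build a 6 x 6 second-difference (tridiagonal) matrix. With
## full = TRUE a dense matrix is returned; otherwise the same pattern comes
## back in spam sparse format via spind2spam().
if (FALSE) {
  LKDiag(c(-1, 2, -1), nrow = 6, diags = c(-1, 0, 1), full = TRUE)
  LKDiag(c(-1, 2, -1), nrow = 6, diags = c(-1, 0, 1))   # sparse version
}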
autoencoder_denoising <- function(network, loss = "mean_squared_error", noise_type = "zeros", ...) {
autoencoder(network, loss) %>%
make_denoising(noise_type, ...)
}
make_denoising <- function(learner, noise_type = "zeros", ...) {
learner$filter <- noise(noise_type, ...)
learner
}
is_denoising <- function(learner) {
(!is.null(learner$filter)) & (ruta_noise %in% class(learner$filter))
} |
teamBowlersWicketKindOppnAllMatches <- function(matches,main,opposition,plot=1){
team=bowler=ball=NULL
ggplotly=NULL
runs=over=runsConceded=NULL
byes=legbyes=noballs=wides=runConceded=NULL
extras=wicketFielder=wicketKind=wicketPlayerOut=NULL
a <-filter(matches,team !=main)
b <- a %>%
select(bowler,ball,noballs,wides,runs,wicketKind,wicketPlayerOut) %>%
mutate(over=gsub("1st\\.","",ball)) %>%
mutate(over=gsub("\\.\\d+","",over))
c <- summarise(group_by(b,bowler,over),sum(runs,wides,noballs))
names(c) <- c("bowler","over","runsConceded")
d <-summarize(group_by(c,bowler),maidens=sum(runsConceded==0))
e <- summarize(group_by(c,bowler),runs=sum(runsConceded))
h <- b %>%
select(bowler,wicketKind,wicketPlayerOut) %>%
filter(wicketPlayerOut != "nobody")
i <- summarise(group_by(h,bowler),wickets=length(unique(wicketPlayerOut)))
r <- full_join(h,e,by="bowler")
if(sum(is.na(r$wicketKind)) != 0){
r[is.na(r$wicketKind),]$wicketKind="noWicket"
}
if(sum(is.na(r$wicketPlayerOut)) !=0){
r[is.na(r$wicketPlayerOut),]$wicketPlayerOut="noWicket"
}
if(plot == 1){
plot.title = paste("Wicket kind taken by bowlers -",main," Vs ",opposition,"(all matches)",sep="")
ggplot(data=r,aes(x=wicketKind,y=runs,fill=factor(wicketKind))) +
facet_wrap( ~ bowler,scales = "fixed", ncol=8) +
geom_bar(stat="identity") +
xlab("Wicket kind") + ylab("Runs conceded") +
ggtitle(bquote(atop(.(plot.title),
atop(italic("Data source:http://cricsheet.org/"),"")))) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
} else if(plot == 2){
plot.title = paste("Wicket kind taken by bowlers -",main," Vs ",opposition,"(all matches)",sep="")
g <- ggplot(data=r,aes(x=wicketKind,y=runs,fill=factor(wicketKind))) +
facet_wrap( ~ bowler,scales = "fixed", ncol=8) +
geom_bar(stat="identity") +
xlab("Wicket kind") + ylab("Runs conceded") +
ggtitle(plot.title) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
ggplotly(g,height=500)
}
else{
r
}
} |
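# Usage sketch (matches is assumed to be a yorkr-style ball-by-ball
# data frame for all matches between the two teams):
# teamBowlersWicketKindOppnAllMatches(matches, main = "India",
#                                     opposition = "Australia", plot = 1)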
fitted.indscal <-
function(object,...){
mydim <- c(rep(nrow(object$B),2),nrow(object$C))
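  # slice k of the fitted array is B %*% diag(C[k, ]) %*% t(B); the
  # Khatri-Rao product krprod(C, B) stacks these slices column-wise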
array(tcrossprod(object$B,krprod(object$C,object$B)),dim=mydim)
} |
read_exercise <- function(file, markup = NULL)
{
x <- readLines(file)
if(is.null(markup)) markup <- switch(tolower(tools::file_ext(file)),
"tex" = "latex",
"rtex" = "latex",
"rnw" = "latex",
"md" = "markdown",
"rmd" = "markdown"
)
if(markup == "latex") {
sep <- "\\007\\007\\007\\007\\007"
end <- c("\\end{answerlist}", "\\end")
end1 <- paste(end, collapse = "")
end2 <- paste(end, collapse = sep)
x <- as.list(x)
x <- lapply(x, function(y) {
if(grepl(end1, y, fixed = TRUE)) {
y <- gsub(end1, end2, y, fixed = TRUE)
strsplit(y, sep, fixed = TRUE)[[1L]]
} else {
y
}
})
x <- unlist(x)
}
zap_text_if_empty <- function(x) {
if(length(x) < 1L) return("")
if(all(grepl("^[[:space:]]*$", x))) return("")
return(x)
}
question <- extract_environment(x, "question", markup = markup)
questionlist <- ql <- extract_environment(question, "answerlist", value = FALSE, markup = markup)
if(!is.null(questionlist)) {
qli <- if(markup == "latex") (ql[1L] + 1L):(ql[2L] - 1L) else (ql[1L] + 2L):ql[2L]
questionlist <- extract_items(question[qli], markup = markup)
question <- zap_text_if_empty(question[-(ql[1L]:ql[2L])])
}
solution <- extract_environment(x, "solution", markup = markup)
solutionlist <- sl <- extract_environment(solution, "answerlist", value = FALSE, markup = markup)
if(!is.null(solutionlist)) {
sli <- if(markup == "latex") (sl[1L] + 1L):(sl[2L] - 1L) else (sl[1L] + 2L):sl[2L]
solutionlist <- extract_items(solution[sli], markup = markup)
solution <- zap_text_if_empty(solution[-(sl[1L]:sl[2L])])
}
metainfo <- read_metainfo(file)
if(!is.null(questionlist) && metainfo$type %in% c("schoice", "mchoice") && metainfo$length != length(questionlist))
warning("length of exsolution and questionlist does not match")
if(!is.null(solutionlist) && metainfo$type %in% c("schoice", "mchoice") && metainfo$length != length(solutionlist))
warning("length of exsolution and solutionlist does not match")
if(!identical(metainfo$shuffle, FALSE) & metainfo$type %in% c("schoice", "mchoice")) {
o <- sample(metainfo$length)
if(is.numeric(metainfo$shuffle)) {
ns <- min(c(metainfo$length, metainfo$shuffle))
os <- c(
if(any(metainfo$solution)) which.max(metainfo$solution[o]),
if(any(!metainfo$solution)) which.max(!metainfo$solution[o])
)
nos <- if(metainfo$type == "mchoice") {
seq_along(o)[-os]
} else {
seq_along(o)[-unique(c(os, which(metainfo$solution[o])))]
}
      os <- c(os, nos[seq_len(max(0L, min(ns - length(os), length(nos))))])
o <- o[sample(os)]
if(length(o) < metainfo$shuffle) warning(sprintf("%s shuffled answers requested, only %s available", metainfo$shuffle, length(o)))
}
questionlist <- questionlist[o]
solutionlist <- solutionlist[o]
metainfo$solution <- metainfo$solution[o]
metainfo$tolerance <- metainfo$tolerance[o]
metainfo$string <- if(metainfo$type == "schoice") {
paste(metainfo$name, ": ", which(metainfo$solution), sep = "")
} else {
paste(metainfo$name, ": ", paste(if(any(metainfo$solution)) which(metainfo$solution) else "-", collapse = ", "), sep = "")
}
metainfo$length <- length(questionlist)
}
if(!identical(metainfo$shuffle, FALSE) & metainfo$type == "cloze") {
gr <- rep.int(1L:metainfo$length, sapply(metainfo$solution, length))
questionlist <- split(questionlist, gr)
solutionlist <- split(solutionlist, gr)
for(i in which(metainfo$clozetype %in% c("schoice", "mchoice"))) {
o <- sample(length(questionlist[[i]]))
if(is.numeric(metainfo$shuffle)) {
ns <- min(c(length(questionlist[[i]]), metainfo$shuffle))
os <- c(
if(any(metainfo$solution[[i]])) which.max(metainfo$solution[[i]]),
if(any(!metainfo$solution[[i]])) which.max(!metainfo$solution[[i]])
)
        nos <- seq_along(o)[-os]
        os <- c(os, nos[seq_len(max(0L, min(ns - length(os), length(nos))))])
o <- o[sample(os)]
}
questionlist[[i]] <- questionlist[[i]][o]
solutionlist[[i]] <- solutionlist[[i]][o]
metainfo$solution[[i]] <- metainfo$solution[[i]][o]
}
questionlist <- unlist(questionlist)
solutionlist <- unlist(solutionlist)
metainfo$string <- paste(metainfo$name, ": ", paste(sapply(metainfo$solution, paste, collapse = ", "), collapse = " | "), sep = "")
}
list(
question = question,
questionlist = questionlist,
solution = solution,
solutionlist = solutionlist,
metainfo = metainfo
)
} |
IC_clean_data <- function(data=stop("A dataframe object is required"),
use = c("pairwise.complete.obs", "everything",
"all.obs", "complete.obs", "na.or.complete"),
method=c("pearson", "kendall", "spearman"),
variable.retain=NULL,
test.partial.correlation=TRUE,
progress=TRUE, debug=FALSE) {
method <- method[1]
use <- use[1]
if (!(is.element("ppcor", installed.packages()[,1]))) {
ginv <- function (X, tol = sqrt(.Machine$double.eps))
{
if (length(dim(X)) > 2L || !(is.numeric(X) || is.complex(X)))
stop("'X' must be a numeric or complex matrix")
if (!is.matrix(X))
X <- as.matrix(X)
Xsvd <- svd(X)
if (is.complex(X))
Xsvd$u <- Conj(Xsvd$u)
Positive <- Xsvd$d > max(tol * Xsvd$d[1L], 0)
if (all(Positive))
Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u))
else if (!any(Positive))
array(0, dim(X)[2L:1L])
else Xsvd$v[, Positive, drop = FALSE] %*% ((1/Xsvd$d[Positive]) *
t(Xsvd$u[, Positive, drop = FALSE]))
}
pcor <- function (x, method = c("pearson", "kendall", "spearman"))
{
method <- match.arg(method)
if (is.data.frame(x))
x <- as.matrix(x)
if (!is.matrix(x))
stop("supply a matrix-like 'x'")
if (!(is.numeric(x) || is.logical(x)))
stop("'x' must be numeric")
stopifnot(is.atomic(x))
n <- dim(x)[1]
gp <- dim(x)[2] - 2
cvx <- cov(x, method = method)
if (det(cvx) < .Machine$double.eps) {
warning("The inverse of variance-covariance matrix is calculated using Moore-Penrose generalized matrix invers due to its determinant of zero.")
icvx <- ginv(cvx)
}
else icvx <- solve(cvx)
pcor <- -cov2cor(icvx)
diag(pcor) <- 1
if (method == "kendall") {
statistic <- pcor/sqrt(2 * (2 * (n - gp) + 5)/(9 * (n -
gp) * (n - 1 - gp)))
p.value <- 2 * pnorm(-abs(statistic))
}
else {
statistic <- pcor * sqrt((n - 2 - gp)/(1 - pcor^2))
p.value <- 2 * pt(-abs(statistic), (n - 2 - gp))
}
diag(statistic) <- 0
diag(p.value) <- 0
list(estimate = pcor, p.value = p.value, statistic = statistic,
n = n, gp = gp, method = method)
}
} else {
pcor <- getFromNamespace("pcor", ns="ppcor")
}
zc_y <- data
if (debug) cat(paste0("The data have ", ncol(zc_y), " variables\n"))
if (debug) cat("I remove the columns with only NA\n")
if (debug) {cat(colnames(zc_y)[apply(X = zc_y, MARGIN = 2, FUN = function(x) all(is.na(x)))]); cat("\n")}
zc_y <- zc_y[, !apply(X = zc_y, MARGIN = 2, FUN = function(x) all(is.na(x)))]
if (debug) cat(paste0("The data have ", ncol(zc_y), " variables\n"))
if (debug) cat("I remove the columns with no variability\n")
if (debug) {cat(colnames(zc_y)[apply(X = zc_y, MARGIN = 2, FUN = function(x) all(na.omit(x)==na.omit(x)[1]))]); cat("\n")}
zc_y <- zc_y[, !apply(X = zc_y, MARGIN = 2, FUN = function(x) all(na.omit(x)==na.omit(x)[1]))]
if (debug) cat(paste0("The data have ", ncol(zc_y), " variables\n"))
if (debug) cat("I remove the non-numeric columns\n")
  zc_y <- zc_y[, sapply(zc_y, is.numeric)]
if (debug) cat(paste0("The data have ", ncol(zc_y), " variables\n"))
if (debug) cat("I remove the columns producing an error for correlations estimations\n")
repeat {
c <- suppressWarnings(expr=cor(zc_y, use=use, method=method))
k <- apply(c, MARGIN=1, FUN=function(x) sum(ifelse(is.na(x), 1, 0)))
if (!is.null(variable.retain)) k[variable.retain] <- 0
km <- max(k)
if (km == 0) break
if (debug) cat(paste0("Remove: ", colnames(zc_y)[which.max(k)], "\n"))
zc_y <- zc_y[, -which.max(k)]
}
if (debug) cat(paste0("The data have ", ncol(zc_y), " variables\n"))
if (test.partial.correlation) {
if (debug) cat("I remove the columns producing an error for partial correlations estimations\n")
lapp <- lapply
if (is.element("parallel", installed.packages()[,1])) lapp <- getFromNamespace("mclapply", ns="parallel")
if (progress & (is.element("pbmcapply", installed.packages()[,1]))) lapp <- getFromNamespace("pbmclapply", ns="pbmcapply")
options(mc.cores = getFromNamespace("detectCores", ns="parallel")())
cname <- colnames(zc_y)
lc <- length(cname)
llc <- 1:lc
df <- expand.grid(e=1:(lc-2), f=2:(lc-1), g=3:lc)
df <- df[(df[, 1]<df[, 2]) & (df[, 2]<df[, 3]), ]
z <- lapp(1:nrow(df), FUN = function(x) {
e <- df[x, 1]
f <- df[x, 2]
g <- df[x, 3]
dfg <- na.omit(zc_y[, c(e, f, g)])
pc <- try(suppressWarnings(pcor(dfg, method=method)),
silent=TRUE)
outg <- (class(pc)=="try-error")
if (!outg) outg <- any(!is.finite(pc$estimate))
return(!outg)
}
)
zul <- unlist(z)
asupprimer <- NULL
repeat {
if (all(zul)) break
zdf <- as.data.frame(table(as.vector(as.matrix(df[!zul, ]))), stringsAsFactors = FALSE)
if (!is.null(variable.retain)) {
zdf <- cbind(zdf, name=colnames(zc_y)[as.numeric(zdf$Var1)])
zdf[zdf$name == variable.retain, "Freq"] <- 0
}
as <- as.numeric(zdf$Var1[which.max(zdf$Freq)])
if (debug) cat(paste0("Remove: ", colnames(zc_y)[as], "\n"))
zul[which(df[,1]==as)] <- TRUE
zul[which(df[,2]==as)] <- TRUE
zul[which(df[,3]==as)] <- TRUE
asupprimer <- c(asupprimer, as)
}
if (!is.null(asupprimer)) zc_y <- zc_y[, -asupprimer]
if (debug) cat(paste0("The data have ", ncol(zc_y), " variables\n"))
}
return(zc_y)
} |
context("Check package name for bad words")
test_that("Catches bad word", {
skip_on_cran()
expect_identical("hell", get_bad_words("hell")[[1L]])
})
test_that("Passes safe word", {
skip_on_cran()
expect_true(length(get_bad_words("happy")) == 0)
}) |
Create.twomode.twitter <- function(datasource, type, removeTermsOrHashtags = NULL, weighted = TRUE,
verbose = FALSE, ...) {
cat("Generating twitter 2-mode network...")
if (verbose) { cat("\n") }
if (!requireNamespace("tidytext", quietly = TRUE)) {
stop(paste0("Please install the tidytext package before calling Create.twomode.twitter.", call. = FALSE))
}
if (verbose) { df_stats <- networkStats(NULL, "collected tweets", nrow(datasource)) }
class(datasource) <- rmCustCls(class(datasource))
datasource <- datasource %>% dplyr::select(.data$status_id, .data$user_id, .data$screen_name,
.data$text, .data$created_at, .data$is_retweet,
.data$is_quote)
datasource$text = HTMLdecode(datasource$text)
capture.output(
tokens_df <- datasource %>% tidytext::unnest_tokens(.data$word, .data$text, token = "tweets", to_lower = TRUE)
, type = "output")
tokens_df %<>% dplyr::mutate(at_name = paste0("@", tolower(.data$screen_name)))
if (!is.null(removeTermsOrHashtags) && length(removeTermsOrHashtags) > 0) {
removeTermsOrHashtags <- unlist(lapply(removeTermsOrHashtags, tolower))
token_count <- nrow(tokens_df)
if (verbose) {
cat(paste0("Removing terms and hashtags: ",
paste0(as.character(removeTermsOrHashtags), collapse = ", "),
"\n"))
}
tokens_df %<>% dplyr::filter(!(.data$word %in% removeTermsOrHashtags) &
!(.data$user_id %in% removeTermsOrHashtags) &
!(tolower(.data$screen_name) %in% removeTermsOrHashtags) &
!(.data$at_name %in% removeTermsOrHashtags))
if (verbose) {
df_stats <- networkStats(df_stats, "removed specified", token_count - nrow(tokens_df), FALSE)
}
}
tokens_df %<>% dplyr::mutate(type = if_else(grepl("^
if_else(grepl("^@.*", .data$word), "user", "term"))) %>%
dplyr::filter(.data$type %in% c("hashtag", "user") & .data$at_name != .data$word)
if (verbose) {
df_stats <- networkStats(df_stats, "users", nrow(tokens_df %>% dplyr::filter(.data$type == "user")), FALSE)
df_stats <- networkStats(df_stats, "hashtags", nrow(tokens_df %>% dplyr::filter(.data$type == "hashtag")), FALSE)
}
edges <- tokens_df %>% dplyr::mutate(from = .data$at_name, to = .data$word) %>%
dplyr::select(.data$from, .data$to, .data$status_id, .data$created_at, .data$is_retweet, .data$is_quote)
if (weighted) {
edges %<>% dplyr::count(.data$from, .data$to, name = "weight")
}
nodes <- dplyr::distinct(tibble::tibble(name = c(edges$to, edges$from))) %>%
dplyr::left_join(tokens_df %>% dplyr::select(.data$at_name, .data$user_id) %>%
dplyr::distinct(), by = c("name" = "at_name"))
if (verbose) {
df_stats <- networkStats(df_stats, "nodes", nrow(nodes))
df_stats <- networkStats(df_stats, "edges", nrow(edges))
networkStats(df_stats, print = TRUE)
}
func_output <- list(
"nodes" = nodes,
"edges" = edges
)
class(func_output) <- append(class(func_output), c("network", "twomode", "twitter"))
cat("Done.\n")
func_output
} |
polarDiff <- function(before, after, pollutant = "nox",
x = "ws",
limits = NA, ...) {
Args <- list(...)
before <- checkPrep(before, c(x, "wd", pollutant),
"default", remove.calm = FALSE)
after <- checkPrep(after, c(x, "wd", pollutant),
"default", remove.calm = FALSE)
Args$new_limits <- limits
Args$limits <- NA
before <- mutate(before, period = "before")
after <- mutate(after, period = "after")
all_data <- bind_rows(before, after)
polar_plt <- polarPlot(all_data,
pollutant = pollutant,
x = x,
type = "period",
...)
polar_data <- pivot_wider(polar_plt$data,
id_cols = u:v,
names_from = period,
values_from = z) %>%
mutate(!!(sym(pollutant)) := after - before,
!!(sym(x)) := (u ^ 2 + v ^ 2) ^ 0.5,
wd = 180 * atan2(u, v) / pi,
wd = ifelse(wd < 0, wd + 360, wd))
Args$cols <- if ("cols" %in% names(Args)) {
Args$cols
} else {
c("
"
}
lims_adj <- pretty(seq(0, max(abs(polar_data[[pollutant]]), na.rm = TRUE), 5))
lims_adj <- lims_adj[length(lims_adj) - 1]
Args$limits <- if (is.na(Args$new_limits[1])) {
c(-lims_adj, lims_adj)
} else {
Args$new_limits
}
polarPlot(polar_data, pollutant = pollutant,
x = x,
cols = Args$cols,
limits = Args$limits,
force.positive = FALSE)
output <- list(data = polar_data, call = match.call())
invisible(output)
} |
demOptimiseGp <-
function(path=getwd(), filename='demOptimiseGp', png=FALSE, gif=FALSE) {
options = gpOptions()
options$kern$comp = list("rbf","white")
lengthScale = c(0.05, 0.1, 0.25, 0.5, 1, 2, 4, 8, 16)
figNo = 0
x = matrix(seq(-1, 1, length=6), ncol=1)
xtest = matrix(seq(-1.5, 1.5, length=200),ncol=1)
trueKern = kernCreate(x, list(type="cmpnd",comp=list("rbf", "white")))
kern = trueKern
K = kernCompute(trueKern, x)
y = t(gaussSamp(matrix(0,6,1), K, 1))
y = scale(y,scale=FALSE)
model = gpCreate(dim(x)[2], dim(y)[2], x, y, options)
graphics.off();
ll=c(); llLogDet=c(); llFit=c();
for (i in 1:length(lengthScale)) {
inithypers = log(c(1/(lengthScale[i]), 1, model$kern$comp[[2]]$variance))
model = gpExpandParam(model, inithypers)
invK = model$invK_uu
logDetK = model$logDetK_uu
ll[i] = gpLogLikelihood(model)
llLogDet[i] = -.5 * (logDetK + dim(y)[1] * log(2*pi))
llFit[i] = -.5 * t(y) %*% invK %*% y
meanVar = gpPosteriorMeanVar(model, xtest, varsigma.return=TRUE)
dev.new(); plot.new()
gpPlot(model, xtest, meanVar$mu, meanVar$varsigma, ylim=c(-2.5,2.5), xlim=range(xtest), col='black')
figNo = figNo + 1
if (png) {
dev.copy2eps(file=paste(path,'/',filename,'1_', as.character(figNo), '.eps', sep='') )
system(paste('eps2png ',path,'/',filename,'1_',as.character(figNo),'.eps',sep=''))
}
dev.new(); plot.new()
matplot(lengthScale[1:i], cbind(ll[1:i], llLogDet[1:i], llFit[1:i]), type="l", lty=c(1,2,3), log="x",
xlab='length-scale', ylab='log-probability')
legend(x='topleft',c('marginal likelihood','minus complexity penalty','data-fit'),
lty=c(1,2,3),col=c('black','red','green'))
if (png) {
dev.copy2eps(file = paste(path,'/',filename,'2_', as.character(figNo), '.eps', sep=''))
system(paste('eps2png ',path,'/',filename,'2_', as.character(figNo), '.eps',sep=''))
}
}
if (gif) {
system(paste('convert -delay 80 ',path,'/',filename,'1_*.png ', path,'/',filename,'1.gif', sep=''))
system(paste('convert -delay 80 ',path,'/',filename,'2_*.png ', path,'/',filename,'2.gif', sep=''))
}
} |
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
options(tibble.print_min = 4L, tibble.print_max = 4L)
library(LMest)
library(knitr)
opts_chunk$set(fig.align = "center",
fig.width = 6, fig.height = 5,
dev.args = list(pointsize=10),
collapse = TRUE, par = TRUE,
warning = FALSE, message = FALSE,
highlight = FALSE)
set.seed(1945)
library(LMest)
cat(LMest:::Startup.Message(), sep="")
data("RLMSlong")
dim(RLMSlong)
str(RLMSlong)
data("PSIDlong")
dim(PSIDlong)
str(PSIDlong)
data(data_criminal_sim)
dim(data_criminal_sim)
str(data_criminal_sim)
data("NLSYlong")
dim(NLSYlong)
dt <- lmestData(data = NLSYlong, id = "id", time="time",
responsesFormula= anti+self ~NULL)
summary(dt, dataSummary="responses", varType=rep("c",ncol(dt$Y)))
data_criminal_sim<-data.frame(data_criminal_sim)
crimf <- data_criminal_sim[data_criminal_sim$sex == 2,]
dt1 <- lmestData(data = crimf, id = "id", time = "time")
summary(dt1, varType = rep("d",ncol(dt1$Y)))
fmBasic <- lmestFormula(data = RLMSlong, response = "value")
fmLatent <- lmestFormula(data = PSIDlong, response = "Y",
LatentInitial = "X", LatentTransition ="X")
fmLatent2 <- lmestFormula(data = PSIDlong, response = "Y",
LatentInitial = c("X1Race","X2Age","X3Age2","X9Income"),
LatentTransition =c("X1Race","X2Age","X3Age2","X9Income"))
mod <- lmest(responsesFormula = fmLatent$responsesFormula,
index = c("id","time"),
data = PSIDlong, k = 2)
mod <- lmest(responsesFormula = fmLatent$responsesFormula,
index = c("id","time"),
data = PSIDlong, k = 1:3)
print(mod)
se(mod)
mod2 <- lmest(responsesFormula = fmLatent$responsesFormula,
latentFormula = fmLatent$latentFormula,
index = c("id","time"),
data = PSIDlong, k = 2,
paramLatent = "multilogit",
start = 0, out_se=TRUE)
summary(mod2)
plot(mod2, what = "CondProb")
plot(mod2, what="transitions")
plot(mod2, what="marginal")
modc <- lmestCont(responsesFormula = anti + self ~ NULL,
latentFormula = ~ gender + childage + hispanic + black + pov +
momwork + married| gender + childage + hispanic + black+ pov+
momwork + married,
index = c("id", "time"),
data = dt$data,
k = 1:3,
modBasic=1,
output = TRUE,
tol=10^-1)
plot(modc,what="modSel")
plot(modc, what="density")
plot(modc,what="density",components=c(1,2))
plot(modc,what="transitions")
semodc<-se(modc)
TabBe <-cbind(modc$Be, semodc$seBe, modc$Be/semodc$seBe)
colnames(TabBe) <- c("estBe", "s.e.Be","t-test")
round(TabBe,3)
TabGa1 <- cbind(modc$Ga,semodc$seGa,modc$Ga/semodc$seGa)
colnames(TabGa1) <- c("estGa(1)","estGa(2)", "s.e.Ga(1)","s.e.Ga(2)", "t-test(1)","t-test(2)")
round(TabGa1,3)
responsesFormula <- lmestFormula(data = crimf,response = "y")$responsesFormula
modm <- lmestMixed(responsesFormula =responsesFormula,
index = c("id","time"),
k1 = 2, k2 = 2,
tol = 10^-3,
data = crimf)
summary(modm)
round(modm$Psi[2, , ], 3)
out <- lmestSearch(responsesFormula = fmBasic$responsesFormula,
index = c("id","time"),
data = RLMSlong,version ="categorical", k = 1:4,
modBasic = 1, seed = 123)
summary(out)
mod4 <- out$out.single[[4]]
summary(mod4)
plot(mod4, what="CondProb")
dec <- lmestDecoding(mod)
head(dec$Ul)
head(dec$Ug)
mboot <- bootstrap(modc, n = 581, B = 2, seed = 172)
mboot$seMu
draw3 <- drawLMbasic(est = mod4, format = "matrices", seed = 4321, n = 100)
head(draw3$Y) |
context("test-rx_range")
test_that("range rule works", {
expect_equal(rx_range(value = "") %>% as.character(), "[]")
expect_length(
regmatches(
rx_range(value = c(1, 2, 3)),
gregexpr(rx_digit(), rx_range(value = c(1, 2, 3))))[[1]], 2
)
expect_equal(
regmatches(
rx_range(value = c(1, 2, 3)),
gregexpr(rx_digit(), rx_range(value = c(1, 2, 3))))[[1]], c("1", "2")
)
}) |
context("Ex script 8/9 (cladocera)")
test_that("Cladocera ex works",{
mix.filename <- system.file("extdata", "cladocera_consumer.csv", package = "MixSIAR")
mix <- load_mix_data(filename=mix.filename,
iso_names=c("c14.0","c16.0","c16.1w9","c16.1w7","c16.2w4",
"c16.3w3","c16.4w3","c17.0","c18.0","c18.1w9",
"c18.1w7","c18.2w6","c18.3w6","c18.3w3","c18.4w3",
"c18.5w3","c20.0","c22.0","c20.4w6","c20.5w3",
"c22.6w3","BrFA"),
factors="id",
fac_random=FALSE,
fac_nested=FALSE,
cont_effects=NULL)
source.filename <- system.file("extdata", "cladocera_sources.csv", package = "MixSIAR")
source <- load_source_data(filename=source.filename,
source_factors=NULL,
conc_dep=FALSE,
data_type="means",
mix)
discr.filename <- system.file("extdata", "cladocera_discrimination.csv", package = "MixSIAR")
discr <- load_discr_data(filename=discr.filename, mix)
model_filename <- "MixSIAR_model.txt"
resid_err <- FALSE
process_err <- TRUE
write_JAGS_model(model_filename, resid_err, process_err, mix, source)
run <- list(chainLength=3, burn=1, thin=1, chains=3, calcDIC=TRUE)
invisible(capture.output(
jags.1 <- run_model(run, mix, source, discr, model_filename)
))
expect_is(jags.1,"rjags")
file.remove(model_filename)
}) |
glmRob.mallows <- function(x, y, control, offset, null.dev, family, Terms)
{
the.call <- match.call()
family.name <- family$family
if(casefold(family.name) != "binomial")
stop(paste("The mallows method is not implemented for the ",
family.name, " family.", sep = ""))
if(is.factor(y))
y <- as.numeric(y != levels(y)[1])
else
y <- as.vector(y)
if(any(y > 1) || is.matrix(y))
stop("Response doesn't look Bernoulli. The mallows method is only implemented for Bernoulli responses.")
wt.fn <- control$wt.fn
b <- control$wt.tuning
x0 <- x
tmp <- dimnames(x0)[[2]] == "(Intercept)"
if(any(tmp)) x0 <- x0[,!tmp, drop = FALSE]
n <- dim(x0)[1]
p <- dim(x0)[2]
tmp <- covMcd(x0, use.correction = FALSE)
mu <- tmp$center
V <- tmp$cov
x1 <- scale(x0, center = mu, scale = rep(1,p))
tmp2 <- solve(V) %*% t(x1)
d1 <- diag(x1 %*% tmp2)
d1 <- sqrt(d1 / p)
w <- wt.fn(d1, b)
old.warn <- options()$warn
on.exit(options(warn = old.warn))
options(warn = -1)
w.glm.fit <- glm.fit(x = x, y = y, weights = w, offset = offset,
family = binomial())
w.glm.fit$call <- the.call
w.glm.fit$control <- control
w.glm.fit$prior.weights <- NULL
if(any(offset) && attr(Terms, "intercept")) {
if(length(Terms)) {
null.deviance <- glm.fit(x[, "(Intercept)", drop = FALSE],
y, w, offset = offset, family = family)$deviance
}
else
null.deviance <- w.glm.fit$deviance
w.glm.fit$null.deviance <- null.deviance
}
p <- dim(x)[2]
n <- dim(x)[1]
tmp1 <- tmp2 <- matrix(0, p, p)
tmp3 <- x %*% w.glm.fit$coef
for(i in 1:n) {
tmp <- x[i,] %*% t(x[i,])
tmp <- tmp * glmRob.misclass.f( tmp3[i] )
tmp1 <- tmp1 + tmp * w[i]
tmp2 <- tmp2 + tmp * w[i] * w[i]
}
tmp1 <- tmp1 / n
tmp2 <- tmp2 / n
cov <- solve(tmp1) %*% tmp2 %*% solve(tmp1)
cov <- cov / n
xn <- dimnames(x)[[2]]
dimnames(cov) <- list(xn, xn)
w.glm.fit$cov <- cov
c(w.glm.fit, list(mallows.weights = w))
} |
add_data_from_standard_block <- function(data, block){
id <- get_last_id(data) + 1
if (missing(block)) {
data <- add_node(
data,
id,
block_type = "standard",
code_str = "")
} else {
code_str <- sapply(as.list(block), robust_deparse)
code_str <- styler::style_text(code_str)
code_str <- paste(code_str, collapse = "\n")
data <- add_node(
data,
id,
block_type = "standard",
code_str = code_str,
label = attr(block, "label"))
}
data <- add_edge(data, from = id, to = id + 1)
data
} |
class.Lee <-
  function(cutscore, ip, ability = NULL, rdm = NULL, quadrature = NULL, D = 1.7){
    if (is.null(quadrature)) {
      if (is.null(ability)) {
        theta <- MLE(rdm, ip, D)
      } else {
        theta <- ability
      }
      results <- Lee.P(cutscore, ip, theta, D)
      results
    } else {
      if (length(quadrature) != 2) stop("quadrature points and weights must be a list of length 2")
      if (length(quadrature[[1]]) != length(quadrature[[2]])) stop("number of quadrature points and weights do not match")
      results <- Lee.D(cutscore, ip, quadrature, D)
      results
    }
  }
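# Usage sketch (item_params is a hypothetical item-parameter matrix in
# whatever form Lee.P/Lee.D expect): normal quadrature over theta.
# qp <- seq(-4, 4, length.out = 41)
# class.Lee(cutscore = 10, ip = item_params,
#           quadrature = list(qp, dnorm(qp) / sum(dnorm(qp))))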
lsp_add_examples = function(x, y, window = NULL) UseMethod("lsp_add_examples")
lsp_add_examples.lsp = function(x, y, window = NULL){
if (inherits(y, "SpatRaster")){
y = stars::st_as_stars(y)
}
windows_sf = lsp_add_sf(x = x, window = window)
x$region = vector(mode = "list", length = nrow(x))
for (i in seq_len(nrow(x))){
windows_sf_id = windows_sf[windows_sf$id == x$id[[i]], ]
if (is.null(window)){
x$region[[i]] = stars::st_as_stars(y[sf::st_bbox(windows_sf_id)])
} else {
x$region[[i]] = stars::st_as_stars(y[windows_sf_id])
}
}
x
}
lsp_add_examples.sf = function(x, y, window = NULL){
if (!inherits(x, "tbl_df")){
x = sf::st_as_sf(tibble::as_tibble(x))
}
if (inherits(y, "SpatRaster")){
y = stars::st_as_stars(y)
}
windows_sf = x
x$region = vector(mode = "list", length = nrow(x))
for (i in seq_len(nrow(x))){
windows_sf_id = windows_sf[windows_sf$id == x$id[[i]], ]
x$region[[i]] = stars::st_as_stars(y[windows_sf_id])
}
x
} |
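# Usage sketch (assumes the motif workflow; landcover is a hypothetical
# stars/SpatRaster object): attach the underlying raster region to each
# row of a landscape-signature object.
# sig <- lsp_signature(landcover, type = "cove", window = 200)
# sig <- lsp_add_examples(sig, landcover)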
library("scales")
x <- seq(0, 1, length = 25)
r <- sqrt(outer(x ^ 2, x ^ 2, "+"))
palettes <-
ggthemes_data[["tableau"]][["color-palettes"]][["ordered-sequential"]]
for (palname in names(palettes)) {
col <- tableau_seq_gradient_pal(palname)(seq(0, 1, length = 12))
image(r, col = col)
title(main = palname)
} |
compare_endpoints <- function(x, comparison = "nec", ecx_val = 10,
type = "absolute", hormesis_def = "control",
sig_val = 0.01, precision, x_range = NA) {
  if (any(is.na(x_range))) {
x_range <- return_x_range(x)
}
if (comparison == "nec") {
posterior_list <- lapply(x, return_nec_post, xform = NA)
}
if (comparison == "ecx") {
posterior_list <- lapply(x, ecx, ecx_val = ecx_val, precision = precision,
posterior = TRUE, type = type,
hormesis_def = hormesis_def, x_range = x_range)
}
if (comparison == "nsec") {
posterior_list <- lapply(x, nsec, sig_val = sig_val, precision = precision,
posterior = TRUE, hormesis_def = hormesis_def,
x_range = x_range)
}
names(posterior_list) <- names(x)
n_samples <- min(sapply(posterior_list, length))
r_posterior_list <- lapply(posterior_list, function(m, n_samples) {
m[sample(seq_len(n_samples), replace = FALSE)]
}, n_samples = n_samples)
posterior_data <- do.call("cbind", r_posterior_list) %>%
data.frame %>%
pivot_longer(cols = everything(), names_to = "model") %>%
arrange(.data$model) %>%
data.frame
all_combn <- combn(names(x), 2, simplify = FALSE)
diff_list <- lapply(all_combn, function(a, r_list) {
r_list[[a[1]]] - r_list[[a[2]]]
}, r_list = r_posterior_list)
names(diff_list) <- sapply(all_combn, function(m) paste0(m[1], "-", m[2]))
diff_data_out <- bind_rows(diff_list, .id = "comparison") %>%
pivot_longer(everything(), names_to = "comparison", values_to = "diff") %>%
data.frame
prob_diff <- lapply(diff_list, function(m) {
m[m > 0] <- 1
m[m <= 0] <- 0
data.frame(prob = mean(m))
})
prob_diff_out <- bind_rows(prob_diff, .id = "comparison") %>%
data.frame
list(posterior_list = posterior_list, posterior_data = posterior_data,
diff_list = diff_list, diff_data = diff_data_out,
prob_diff = prob_diff_out)
} |
MultiIdeal <- function(dataset,col.p,col.j,id.recogn,level.search.desc=0.2,correct=FALSE,nbchoix=NULL,nbsimul=500,coord=c(1,2)){
hotelling2 <- function(d1, d2, n1 = nrow(d1), n2 = nrow(d2)) {
k <- ncol(d1)
xbar1 <- apply(d1, 2, mean)
xbar2 <- apply(d2, 2, mean)
dbar <- xbar2 - xbar1
if (n1 + n2 < 3)
return(NA)
v <- ((n1 - 1) * var(d1) + (n2 - 1) * var(d2))/(n1 +n2 - 2)
if (sum(v^2) < 1/10^10){
return(NA)
} else {
t2 <- n1 * n2 * dbar %*% solve(v) %*% dbar/(n1 +n2)
}
f <- (n1 + n2 - k - 1) * t2/((n1 + n2 - 2) * k)
return(pf(f, k, n1 + n2 - k - 1, lower.tail = FALSE))
}
"ellipse2" <- function(loc, cov, alpha) {
A <- cov
detA <- A[1, 1] * A[2, 2] - A[1, 2]^2
dist <- sqrt(qchisq(1 - alpha/2, 2))
ylimit <- sqrt(A[2, 2]) * dist
y <- seq(-ylimit, ylimit, 0.01 * ylimit)
sqrt.discr <- sqrt(detA/A[2,2]^2*abs(A[2,2]*dist^2-y^2))
sqrt.discr[c(1, length(sqrt.discr))] <- 0
b <- loc[1] + A[1, 2]/A[2, 2] * y
x1 <- b - sqrt.discr
x2 <- b + sqrt.discr
y <- loc[2] + y
return(rbind(cbind(x1, y), cbind(rev(x2), rev(y))))
}
dataset[,col.j] <- as.factor(dataset[,col.j])
juge <- levels(dataset[,col.j])
nbjuge <- length(juge)
dataset[,col.p] <- as.factor(dataset[,col.p])
product <- levels(dataset[,col.p])
nbprod <- length(product)
info <- dataset[,c(col.j,col.p)]
id.pos <- grep(id.recogn,colnames(dataset))
intensity <- dataset[,id.pos-1]
attribut <- colnames(intensity)
nbatt <- length(attribut)
ideal <- dataset[,id.pos]
id.data <- cbind(info,ideal)
int.data <- cbind(info,intensity)
if (level.search.desc < 1){
att.rm <- NULL
res.aov <- vector("list",1)
for (i in 1:nbatt){
res.aov[1] <- as.matrix(summary(aov(int.data[,i+2]~int.data[,2]+int.data[,1])))
if (res.aov[[1]][1,5]>level.search.desc){
att.rm <- c(att.rm,i+2)
print(paste("The attribute ",attribut[i]," is removed from the analysis.",sep=""))
}
}
if (!is.null(att.rm)){
int.data <- int.data[,-att.rm]
id.data <- id.data[,-att.rm]
attribut <- attribut[-(att.rm-2)]
nbatt <- length(attribut)
intensity <- intensity[,-(att.rm-2)]
ideal <- ideal[,-(att.rm-2)]
}
}
if (!correct){
int.avg <- averagetable(int.data,formul=paste("~",colnames(info)[2],"+",colnames(info)[1],sep=""),firstvar=3)
colnames(ideal) <- colnames(int.avg)
data.pca <- rbind(int.avg,ideal)
} else {
int.p.avg <- averagetable(int.data,formul=paste("~",colnames(info)[2],"+",colnames(info)[1],sep=""),firstvar=3)
int.j.avg <- averagetable(int.data,formul=paste("~",colnames(info)[1],"+",colnames(info)[2],sep=""),firstvar=3)
ideal.juge <- vector("list",nbjuge)
names(ideal.juge) <- juge
for (j in 1:nbjuge){
ideal.j <- id.data[id.data[,1]==juge[j],]
rownames(ideal.j) <- ideal.j[,2]
ideal.j <- ideal.j[,-c(1,2)]
temp <- as.matrix(int.j.avg[j,])
ideal.juge[[j]] <- sweep(ideal.j,2,as.vector(as.matrix(int.j.avg[j,])),FUN="-")
}
data.j.cplt <- matrix(0,0,nbatt)
colnames(data.j.cplt) <- attribut
l=0
for (j in 1:nbjuge){
data.j.cplt <- rbind(data.j.cplt,ideal.juge[[j]])
rownames(data.j.cplt)[c((l+1):nrow(data.j.cplt))] <- paste(rownames(ideal.juge[[j]]),"_",juge[j],sep="")
l=nrow(data.j.cplt)
}
colnames(data.j.cplt) <- colnames(int.p.avg)
data.pca <- rbind.data.frame(scale(int.p.avg),data.j.cplt)
}
res.pca <- PCA(data.pca,ind.sup=c((nbprod+1):nrow(data.pca)),graph=F)
ncp=0
eig=res.pca$eig[1,1]
while(eig>1 && ncp<nrow(res.pca$eig)){
ncp=ncp+1
eig <- res.pca$eig[ncp+1,1]
}
res.pca <- PCA(data.pca,ind.sup=c((nbprod+1):nrow(data.pca)),graph=F, ncp=max(ncp,2))
for (i in 1:2)
if (!coord[i] %in% c(1:ncp)){
warning("The dimensions 1 and 2 will be used in this analysis!")
coord=c(1,2)
}
ind.sup <- cbind(info,res.pca$ind.sup$coord)
ideal.p.dim <- vector("list",nbprod)
names(ideal.p.dim) <- product
for (p in 1:nbprod){
ideal.p.dim[[p]] <- ind.sup[ind.sup[,2]==product[p],-c(1,2)]
rownames(ideal.p.dim[[p]]) <- paste(product[p],"_",juge,sep="")
}
if (is.null(nbchoix))
nbchoix=nbjuge
simul <- matrix(0,nbchoix,0)
rownames(simul) <- paste("prod",1:nbchoix,sep="")
for (sim in 1:nbsimul)
simul <- cbind(simul,as.matrix(sample(nbprod,nbchoix,replace=T)))
colnames(simul) <- paste("Simul.",1:nbsimul,sep="")
coord.ellipse <- array(0,dim=c(nbprod,nbsimul,length(coord)))
for (p in 1:nbprod)
for (sim in 1:nbsimul)
for (l in 1:length(coord))
coord.ellipse[p,sim,l] <- mean(ideal.p.dim[[p]][simul[,sim],coord[l]])
res.simul <- list()
res.simul$sample <- t(simul)
matP <- matrix(0,nbprod,length(coord)+1)
rownames(matP) <- product
colnames(matP) <- c(colnames(ideal.p.dim[[1]])[coord],"name.prod")
for (p in 1:nbprod){
matP[p,1:length(coord)] <- round(apply(ideal.p.dim[[p]][,coord],2,mean),3)
matP[p,ncol(matP)] <- names(ideal.p.dim)[p]
}
matP <- as.data.frame(matP)
for (l in 1:length(coord))
matP[,l] <- as.numeric(as.character(matP[,l]))
res.simul$moy$P <- matP
matJP <- matrix(0,0,length(coord)+1)
for (p in 1:nbprod)
matJP <- rbind(matJP,cbind(ideal.p.dim[[p]][,coord],as.matrix(rep(product[p],nrow(ideal.p.dim[[p]])))))
colnames(matJP) <- colnames(matP)
matJP <- as.data.frame(matJP)
for (l in 1:length(coord))
matJP[,l] <- as.numeric(as.character(matJP[,l]))
res.simul$moy$JP <- as.data.frame(matJP)
res.simul2 <- matrix(0,0,length(coord)+1)
for (p in 1:nbprod){
res.simul2.temp <- matrix(0,nbsimul,0)
for (l in 1:length(coord))
res.simul2.temp <- cbind(res.simul2.temp,coord.ellipse[p,,l])
res.simul2 <- rbind(res.simul2,cbind(res.simul2.temp,as.matrix(rep(product[p],nrow(res.simul2.temp)))))
}
colnames(res.simul2) <- colnames(matP)
res.simul2 <- as.data.frame(res.simul2)
for (l in 1:length(coord))
res.simul2[,l] <- as.numeric(as.character(res.simul2[,l]))
res.simul$moy$simul <- res.simul2
mat <- res.simul
alpha=0.05
eig=res.pca$eig
matP = cbind.data.frame(mat$moy$P[,1:2],mat$moy$P[,ncol(mat$moy$P)])
matJP = cbind.data.frame(mat$moy$JP[,1:2],mat$moy$JP[,ncol(mat$moy$JP)])
matsimul = cbind.data.frame(mat$moy$simul[,1:2],mat$moy$simul[,ncol(mat$moy$simul)])
nbj <- nrow(matP)
coord.ellipse.a.tracer <- matrix(0, 402, 2 * nbj)
p <- 2
for (i in 1:nbj) {
VX <- var(matsimul[((i-1)*nbsimul+1):(i*nbsimul),1:2])
coord.ellipse.a.tracer[,(1+2*(i-1)):(2*i)] <- ellipse2(as.numeric(t(matP[i,1:2])),VX,alpha)
}
minx <- min(min(coord.ellipse.a.tracer[,1+2*(0:(nbj-1))],na.rm=T),min(res.pca$ind$coord[,1]))
maxx <- max(max(coord.ellipse.a.tracer[,1+2*(0:(nbj-1))],na.rm=T),max(res.pca$ind$coord[,1]))
miny <- min(min(coord.ellipse.a.tracer[,2*(1:nbj)],na.rm=T),min(res.pca$ind$coord[,2]))
maxy <- max(max(coord.ellipse.a.tracer[,2*(1:nbj)],na.rm=T),max(res.pca$ind$coord[,2]))
if (!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new()
plot(res.pca,choix="ind",invisible="ind.sup",xlim=c(minx,maxx),ylim=c(miny,maxy),title="Single vs. Multiple Ideal",axes=coord, graph.type = "classic")
text(matP[,1],matP[,2],matP[,ncol(matP)],cex=0.7,pos=4,offset=0.2,col="blue3")
for (p in 1:nbprod) {
points(matP[p,1],matP[p,2],cex=1,col="blue3",pch=20)
lines(coord.ellipse.a.tracer[,(1+2*(p-1)):(2*p)],col="blue3",lty=2)
}
res.hotelling <- matrix(1,nbprod,nbprod)
rownames(res.hotelling) <- colnames(res.hotelling) <- product
prod.sup <- averagetable(ind.sup,formul=paste("~",colnames(dataset)[col.p],"+",colnames(dataset)[col.j],sep=""),firstvar=3,method="mean")
prod.sup <- cbind(prod.sup,as.matrix(rownames(prod.sup)),matrix("mean",nbprod,1))
colnames(prod.sup)[(ncol(prod.sup)-1):ncol(prod.sup)] <- c("product","juge")
ind.sup <- cbind(ind.sup[,-c(1,2)],ind.sup[,2],ind.sup[,1])
colnames(ind.sup)[(ncol(ind.sup)-1):ncol(ind.sup)] <- c("product","juge")
data.hotelling <- rbind(prod.sup,ind.sup)
labprod <- data.hotelling[data.hotelling[,ncol(data.hotelling)]=="mean",ncol(data.hotelling)-1]
aa = data.hotelling[-(1:nbprod), ]
for (i in 1:(nbprod-1))
for (j in (i+1):nbprod)
res.hotelling[i,j]=res.hotelling[j,i]=hotelling2(aa[aa[,ncol(aa)-1]==labprod[i],1:(ncol(aa)-2)],aa[aa[,ncol(aa)-1]==labprod[j],1:(ncol(aa)-2)],nbchoix,nbchoix)
return(res.hotelling)
} |
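# Usage sketch (hypothetical column positions and ideal-column suffix;
# the data layout must match the sensory format this function expects):
# res.hot <- MultiIdeal(dataset, col.p = 2, col.j = 1, id.recogn = "_id",
#                       nbsimul = 500, coord = c(1, 2))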
test_that("fredr_releases()", {
skip_if_no_key()
release <- fredr_releases(limit = 10L)
expect_s3_class(release, c("tbl_df", "tbl", "data.frame"))
expect_true(ncol(release) == 7)
expect_true(nrow(release) == 10)
}) |
interpret.addreg.smooth <- function(formula) {
p.env <- environment(formula)
tf <- terms.formula(formula, specials = c("B","Iso"))
if (attr(tf, "intercept") != 1) stop("models without intercept are not supported by addreg.smooth")
if (any(attr(tf,"order") > 1)) stop("models with interactions are not supported by addreg.smooth")
if (attr(tf, "response") == 0) stop("missing response")
terms <- attr(tf, "term.labels")
vars <- attr(tf, "variables")
nt <- length(terms)
respvar <- attr(tf, "response")
response <- as.character(vars[respvar+1L])
full.formula <- fake.formula <- paste(response, "~", sep = "")
Bp <- attr(tf, "specials")$B
Isop <- attr(tf, "specials")$Iso
off <- attr(tf, "offset")
vtab <- attr(tf, "factors")
ns <- length(Bp) + length(Isop)
if(ns == 0) stop("formula does not include any semi-parametric terms. Use 'addreg' instead.")
kp <- 1
smooth.ind <- NULL
smooth.spec <- list()
for(i in seq_len(nt)) {
varind <- which(as.logical(vtab[,i]))
if((varind %in% Bp) || (varind %in% Isop)) {
st <- eval(parse(text = terms[i]), envir = p.env)
full.newterm <- st$termlabel
fake.newterm <- st$term
smooth.ind <- c(smooth.ind, i)
smooth.spec[[st$term]] <- st
} else full.newterm <- fake.newterm <- as.character(vars[varind+1L])
if(kp > 1) {
full.formula <- paste(full.formula, "+", full.newterm, sep = "")
fake.formula <- paste(fake.formula, "+", fake.newterm, sep = "")
} else {
full.formula <- paste(full.formula, full.newterm, sep = "")
fake.formula <- paste(fake.formula, fake.newterm, sep = "")
}
kp <- kp + 1
}
if(!is.null(off)) {
if (kp > 1)
fake.formula <- paste(fake.formula, "+", sep = "")
fake.formula <- paste(fake.formula, paste(as.character(vars[off+1L]),collapse="+"), sep = "")
kp <- kp + 1
}
full.formula <- as.formula(full.formula, p.env)
fake.formula <- as.formula(fake.formula, p.env)
list(full.formula = full.formula, fake.formula = fake.formula,
smooth.spec = smooth.spec, smooth.ind = smooth.ind,
terms = tf)
} |
knitr::opts_chunk$set(echo = TRUE, comment = "#>")
library(popdemo)
options(digits = 4)
an_input <- function() cat("an output")
an_input()
data(Tort); Tort
Tortpic <- magick::image_read("https://upload.wikimedia.org/wikipedia/commons/thumb/1/1e/Baby_Desert_Tortoise_%2816490346262%29.jpg/1024px-Baby_Desert_Tortoise_%2816490346262%29.jpg")
par(mar = c(0, 0, 0, 0))
plot(as.raster(Tortpic))
text(10, 10, adj = c (0, 0), col = "white",
"A baby desert tortoise")
par(mar = c(5, 4, 2, 2) + 0.1)
Tortvec1 <- runif(8)
Tortvec1 <- Tortvec1/sum(Tortvec1)
( Tortp1.1 <- project(Tort, Tortvec1, time = 100) )
plot(Tortp1.1, log = "y")
vec(Tortp1.1)[1:11, ]
vec(Tortp1.1)[ , 2]
eigs(Tort, "all")
par(mar = c(5, 4, 2, 2) + 0.1); plot(Tortp1.1, log = "y")
Tortw <- eigs(Tort, "ss")
Tortpw <- project(Tort, Tortw, time = 100)
lines(0:100, Tortpw, lty = 2)
par(mar = c(5, 4, 2, 2) + 0.1)
Tortp1.1s <- project(Tort, Tortvec1, time = 100,
standard.A = TRUE, standard.vec = TRUE)
Tortpws <- project(Tort, Tortw, time = 100,
standard.A = TRUE, standard.vec = TRUE)
plot(Tortp1.1s, log = "y")
lines(Tortpws, lty = 2)
par(mar = c(5, 4, 2, 2) + 0.1); plot(Tortp1.1s, log = "y"); lines(Tortpws, lty = 2)
( r1 <- reac(Tort, Tortvec1) )
( i1 <- inertia(Tort, Tortvec1) )
points(c(1, 100), c(r1, i1), pch = 3, col = "red")
par(mar = c(5, 4, 2, 2) + 0.1)
TortAMP <- c(1, 1, 2, 3, 5, 8, 13, 21)
TortATT <- c(21, 13, 8, 5, 3, 2, 1, 1)
TortBTH <- c(0, 0, 0, 1, 0, 0, 0, 0)
Tortvec3 <- cbind(AMP = TortAMP,
ATT = TortATT,
BTH = TortBTH)
Tortp3.1 <- project(Tort, Tortvec3, time = 100,
standard.A = TRUE, standard.vec = TRUE)
plot(Tortp3.1, log = "y"); lines(Tortpws, lty = 2)
( r3 <- apply(Tortvec3, 2, reac, A = Tort) )
( r3t <- rep(1, 3) )
( i3 <- apply(Tortvec3, 2, inertia, A = Tort) )
( i3t <- rep(100, 3) )
( max3 <- apply(Tortvec3[,c(1,3)], 2, maxamp, A = Tort) )
( max3t <- apply(Tortvec3[,c(1,3)], 2, function(x){
maxamp(vector = x, A = Tort, return.t = TRUE)$t}) )
( min3 <- apply(Tortvec3[,c(2,3)], 2, maxatt, A = Tort) )
( min3t <- apply(Tortvec3[,c(2,3)], 2, function(x){
maxatt(vector = x, A = Tort, return.t = TRUE )$t}) )
points(c(r3t, i3t, max3t, min3t),
c(r3, i3, max3, min3),
pch = 3, col = "red")
plot(Tortp1.1s, log = "y", bounds = TRUE)
plot(project(Tort, standard.A = TRUE), log = "y")
par(mar = c(5, 4, 2, 2) + 0.1)
plot(Tortp3.1, log = "y", bounds = TRUE)
lines(Tortpws, lty = 2)
( ruprB <- reac(Tort, bound = "upper") )
( rlwrB <- reac(Tort, bound = "lower") )
( iuprB <- inertia(Tort, bound = "upper") )
( ilwrB <- inertia(Tort, bound = "lower") )
( maxB <- maxamp(Tort, return.t = TRUE) )
( minB <- maxatt(Tort, return.t = TRUE) )
points(c(rep(1, 5), rep(100, 5), max3t, maxB$t, min3t, minB$t),
c(r3, ruprB, rlwrB, i3, iuprB, ilwrB, max3, maxB$maxamp, min3, minB$maxatt),
pch = 3, col = "red",
lwd = c(rep(c(1, 1, 1, 2, 2), 2), rep(c(1, 1, 2) ,2 )) )
par(mar = c(5, 4, 2, 2) + 0.1)
Tortpd <- project(Tort, "diri", time = 31,
standard.A = TRUE)
plot(Tortpd, plottype = "shady", bounds = T, log = "y")
delta <- seq(0, 4*Tort[1, 6], 0.1)
Tort_delta <- Tort
lambda_delta <- numeric(length(delta))
for(i in 1:length(delta)){
Tort_delta[1, 6] <- Tort[1, 6] + delta[i]
lambda_delta[i] <- eigs(Tort_delta, "lambda")
}
plot(delta, lambda_delta, type = "l")
sens(Tort)
elas(Tort)
delta <- seq(-0.2, 0.4, 0.01)
d1 <- c(0, 0, 0, 0, 0, 0, 1, 0)
e1 <- c(0, 0, 0, 0, 0, 1, 0, 0)
tf1 <- tfa_lambda(Tort, d = d1, e = e1, prange = delta)
plot(tf1)
s76 <- sens(Tort)[7, 6]
abline(eigs(Tort, "lambda"), s76, lty = 2)
tfml <- tfam_lambda(Tort)
plot(tfml)
Tortvec <- c(1, 1, 2, 3, 5, 8, 13, 21)
tfsm_inertia(Tort, Tortvec, tolerance=1e-5)
tfsm_inertia(Tort, bound="upper", tolerance=1e-5)
tfsm_inertia(Tort, bound="lower", tolerance=1e-5)
tfmi <- tfam_inertia(Tort, vector = Tortvec)
plot(tfmi)
tfmi_upr <- tfam_inertia(Tort, bound="upper")
plot(tfmi_upr)
tfmi_lwr<-tfam_inertia(Tort, bound="lower")
plot(tfmi_lwr)
data(Pbear); Pbear
Pbearvec <- c(0.106, 0.068, 0.106, 0.461, 0.151, 0.108)
Pbearpic <- magick::image_read("https://upload.wikimedia.org/wikipedia/commons/thumb/7/7b/Polar_Bears_Across_the_Arctic_Face_Shorter_Sea_Ice_Season_%2829664357826%29_%282%29.jpg/1024px-Polar_Bears_Across_the_Arctic_Face_Shorter_Sea_Ice_Season_%2829664357826%29_%282%29.jpg")
par(mar = c(0, 0, 0, 0))
plot(as.raster(Pbearpic))
text(10, 10, adj = c (0, 0), col = "white",
"Polar bear (photo by NASA Goddard Space Flight Center from Greenbelt, MD, USA)")
Pbearp1.1 <- project(Pbear, Pbearvec, time = 50)
plot(Pbearp1.1, log = "y")
Aseq(Pbearp1.1)
nmat(Pbearp1.1)
mat(Pbearp1.1)
p1 <- 0.4
( PbearM1 <- matrix(rep(c((1-p1)/3, (1-p1)/3, (1-p1)/3, p1/2, p1/2), 5), 5, 5) )
Pbearp2.1 <- project(Pbear, Pbearvec, Aseq = PbearM1, time = 50)
plot(Pbearp2.1, log = "y")
p2 <- 0.5
( PbearM2 <- matrix(rep(c((1-p2)/3, (1-p2)/3, (1-p2)/3, p2/2, p2/2), 5), 5, 5) )
Pbearp2.2 <- project(Pbear, Pbearvec, Aseq = PbearM2, time = 50)
plot(Pbearp2.2, log = "y")
p3 <- 0.8
( PbearM3 <- matrix(rep(c((1-p3)/3, (1-p3)/3, (1-p3)/3, p3/2, p3/2), 5), 5, 5) )
Pbearp2.3 <- project(Pbear, Pbearvec, Aseq = PbearM3, time = 50)
plot(Pbearp2.3, log = "y")
p4 <- 0.2
q4 <- 0.8
( PbearM4 <- matrix(c(rep(c((1-p4)/3, (1-p4)/3, (1-p4)/3, p4/2, p4/2), 3),
rep(c((1-q4)/3, (1-q4)/3, (1-q4)/3, q4/2, q4/2), 2)),
5, 5) )
Pbearp2.4 <- project(Pbear, Pbearvec, Aseq = PbearM4, time = 50)
plot(Pbearp2.4, log = "y")
p5 <- 0.5
q5 <- 0.8
( PbearM5 <- matrix(c(rep(c((1-p5)/3, (1-p5)/3, (1-p5)/3, p5/2, p5/2), 3),
rep(c((1-q5)/3, (1-q5)/3, (1-q5)/3, q5/2, q5/2), 2)),
5, 5) )
Pbearp2.5 <- project(Pbear, Pbearvec, Aseq = PbearM5, time = 50)
plot(Pbearp2.5, log = "y")
( Pbearseq <- rep(1:5, 10) )
Pbearp3.1 <- project(Pbear, Pbearvec, Aseq = Pbearseq)
plot(Pbearp3.1, log = "y")
( Thistle <- Matlab2R("[0.5 0 2.8; 0.25 0.222 0; 0 0.667 0]") )
Thistlevec <- runif(3); Thistlevec <- Thistlevec / sum(Thistlevec)
x<- seq(0, 1, 0.01)
plot(x, dbeta(x, shape1 = 12, shape2 = 5), type = "l", ylab = "beta PDF")
ThistleS <- rep(list(Thistle), 30)
times <- 30
pflwr <- rbeta(times, shape1 = 12, shape2 = 5)
a32 <- 0.889*pflwr
a22 <- 0.889*(1-pflwr)
ThistleS <- mapply(function(A, r){A[3,2] <- r; A}, ThistleS, a32, SIMPLIFY = FALSE)
ThistleS <- mapply(function(A, r){A[2,2] <- r; A}, ThistleS, a22, SIMPLIFY = FALSE)
Thistlep1.1 <- project(ThistleS, vector = Thistlevec, Aseq = 1:30)
plot(Thistlep1.1, log = "y")
lines(0:30, project(Thistle, Thistlevec, time = 30), lty = 3)
stoch(Pbear, c("lambda", "var"), vector = Pbearvec, Aseq = PbearM1,
iterations = 3000, discard = 100)
stoch(Pbear, c("lambda", "var"), vector = Pbearvec, Aseq = PbearM2,
iterations = 3000, discard = 100)
stoch(Pbear, c("lambda", "var"), vector = Pbearvec, Aseq = PbearM3,
iterations = 3000, discard = 100)
stoch(Pbear, c("lambda", "var"), vector = Pbearvec, Aseq = PbearM4,
iterations = 3000, discard = 100)
stoch(Pbear, c("lambda", "var"), vector = Pbearvec, Aseq = PbearM5,
iterations = 3000, discard = 100)
eigs(PbearM2, "ss")
eigs(PbearM4, "ss")
eigs(PbearM5, "ss") |
logFileRead <- function(fileName, columnList=c("MSTimestamp", "userip", "url", "httpcode", "elapsedms"), logTimeZone = "", timeFormat = "")
{
columnDefinitions = data.frame(
name = c("ApacheTimestamp", "MSTimestamp", "servername", "serverip", "httpop", "url", "parms", "port", "username", "userip", "useragent", "httpcode", "windowscode", "windowssubcode", "responsebytes", "requestbytes", "elapsedms", "elapsedus", "elapseds", "ignore", "jsessionid"),
type = c("character", "character", "character", "character", "character", "character", "character", "numeric", "character", "character", "character", "numeric", "character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "character", "character"),
minimum = c(FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE)
)
minimumColumnList = columnDefinitions[columnDefinitions$minimum,]$name
baseColumnList = columnDefinitions$name
if(!setequal(intersect(minimumColumnList, columnList), minimumColumnList)) {
stop("specified column set does not include all of the minimum required columns: ",toString(intersect(setdiff(minimumColumnList, columnList), minimumColumnList)), " missing")
}
if(!(is.element("ApacheTimestamp", columnList) | is.element("MSTimestamp", columnList))) {
stop("specified column set does not include a timestamp: ApacheTimestamp or MSTimestamp")
}
if(!(is.element("elapsedms", columnList) | is.element("elapseds", columnList)| is.element("elapsedus", columnList))) {
stop("specified column set does not include a duration: elapsedms, elaspedus, elapseds")
}
ignores = setdiff(columnList, baseColumnList)
for(i in ignores) {
if(substr(i,1,6) != "ignore")
{
stop("column name \"",i,"\" is neither a defined column name nor begins with the text 'ignore'")
}
}
ignores = append(ignores, grep( "ignore.*",columnList, value=TRUE))
wkTypeList = list()
wkColumnList = list()
isMSTimestamp = FALSE
isApacheTimestamp = FALSE
for(i in columnList)
{
if(i == "ApacheTimestamp") {
wkColumnList = append(wkColumnList, c("apachetimestamp", "apachetzoffset"))
wkTypeList = append(wkTypeList, c("character", "character"))
isApacheTimestamp = TRUE
}else {
if(i == "MSTimestamp") {
wkColumnList = append(wkColumnList, c("msdatepart", "mstimepart"))
wkTypeList = append(wkTypeList, c("character", "character"))
isMSTimestamp = TRUE
}
else {
if(substr(i,1,6) == "ignore")
{
wkColumnList = append(wkColumnList, i)
wkTypeList = append(wkTypeList, c("character"))
}
else
{
wkColumnList = append(wkColumnList, i)
wkTypeList = append(wkTypeList, as.character(columnDefinitions[columnDefinitions$name == i,]$type))
}
}
}
}
logRecs = withCallingHandlers(read.table(fileName,
header=FALSE,
quote="\"'",
col.names = wkColumnList,
colClasses = wkTypeList,
na.strings="-",
stringsAsFactors = TRUE),
error=function(e)
{
message("")
message("Error Trapped reading data file:")
message(toString(e))
message("Columns: ",toString(wkColumnList))
message("Types: ",toString(wkTypeList))
colCount = length(wkColumnList)
fieldCounts = count.fields(fileName)
distinctCountRows = which(!duplicated(fieldCounts))
if(length(distinctCountRows) > 1)
{
message("Different Field Counts (", toString(unique(fieldCounts)),") found in file ", fileName, " first instances only shown")
for(r in distinctCountRows)
{
message("Row: ", r)
recs = readLines(fileName, n=r+1)
message(recs[r])
}
} else {
if(any(length(wkColumnList) != fieldCounts))
{
message("The number of fields in the file (", toString(unique(fieldCounts)), ") does not match the number implied by the column list (", length(wkColumnList), ")")
}
}
stop(e)
}
)
ignores = append(ignores, grep( "ignore.*",names(logRecs), value=TRUE))
logRecs = logRecs[,!(names(logRecs) %in% ignores)]
if(is.element("elapsedms",columnList)){
logRecs$elapsed = logRecs$elapsedms
ignores = append(ignores,"elapsedms")
}
if(is.element("elapsedus",columnList)){
logRecs$elapsed = logRecs$elapsedus/1000
ignores = append(ignores,"elapsedus")
}
if(is.element("elapseds",columnList)){
logRecs$elapsed = logRecs$elapseds*1000
ignores = append(ignores,"elapseds")
}
if(is.element("jsessionid", columnList)){
logRecs$serverid = sub("[^:]*:(.*)","\\1", logRecs$jsessionid)
}
if(isMSTimestamp) {
if(timeFormat == "") {
timeFormat = "%Y-%m-%d %H:%M:%S"
}
if(logTimeZone != "") {
logRecs["ts"] = as.POSIXct(as.POSIXlt(as.POSIXct(paste(logRecs$msdatepart,logRecs$mstimepart), tz=logTimeZone, timeFormat ),tz=""))
}
else {
logRecs["ts"] = as.POSIXct(paste(logRecs$msdatepart, logRecs$mstimepart), tz="", timeFormat )
}
ignores = append(ignores, "msdatepart")
ignores = append(ignores, "mstimepart")
}
if(isApacheTimestamp)
{
if(timeFormat == "") {
timeFormat = "[%d/%b/%Y:%H:%M:%S"
}
if(logTimeZone != "") {
logRecs["ts"] = as.POSIXct(as.POSIXlt(as.POSIXct(logRecs$apachetimestamp, tz=logTimeZone, timeFormat),tz=""))
}
else {
logRecs["ts"] = as.POSIXct(logRecs$apachetimestamp, "", timeFormat)
}
ignores = append(ignores, "apachetimestamp")
ignores = append(ignores, "apachetzoffset")
}
logRecs$status = "Unknown"
logRecs[logRecs$httpcode >= 100,"status"] = "Informational"
logRecs[logRecs$httpcode >= 200,"status"] = "Success"
logRecs[logRecs$httpcode >= 300,"status"] = "Redirect"
logRecs[logRecs$httpcode >= 400,"status"] = "Client Error"
logRecs[logRecs$httpcode >= 500,"status"] = "Server Error"
logRecs[logRecs$httpcode >= 600,"status"] = "Unknown"
logRecs = logRecs[,!(names(logRecs) %in% ignores)]
return(logRecs)
} |
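# Example (a sketch; "access.log" is a hypothetical IIS-style log whose
# space-separated fields match the column list below):
# recs <- logFileRead("access.log",
#                     columnList = c("MSTimestamp", "userip", "url",
#                                    "httpcode", "elapsedms"),
#                     logTimeZone = "UTC")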
library(nleqslv)
packageVersion("nleqslv")
.libPaths()
dslnex <- function(x) {
y <- numeric(2)
y[1] <- x[1]^2 + x[2]^2 - 2
y[2] <- exp(x[1]-1) + x[2]^3 - 2
y
}
jacdsln <- function(x) {
n <- length(x)
Df <- matrix(numeric(n*n),n,n)
Df[1,1] <- 2*x[1]
Df[1,2] <- 2*x[2]
Df[2,1] <- exp(x[1]-1)
Df[2,2] <- 3*x[2]^2
Df
}
xstart <- c(2,0.5)
nleqslv(xstart,dslnex, global="none", jacobian=TRUE, control=list(trace=1,stepmax=1)) |
Derv1 <- function(penden.env, temp = FALSE, lambda = NULL) {
  if (!temp) {
    assign("Derv1.pen",
           matrix(colSums(get("tilde.PSI.d.D", penden.env) /
                            kronecker(get("f.hat.val", penden.env),
                                      matrix(1, 1, dim(get("tilde.PSI.d.D", penden.env))[2]))),
                  get("DD", penden.env), 1) -
             get("lambda", penden.env) * get("DDD.sum", penden.env) %*% get("ck.val", penden.env),
           penden.env)
  }
  else {
    if (is.null(lambda)) lambda <- get("lambda", penden.env)
    assign("Derv1.pen.temp",
           matrix(colSums(get("tilde.PSI.d.D", penden.env) /
                            kronecker(get("f.hat.val.temp", penden.env),
                                      matrix(1, 1, dim(get("tilde.PSI.d.D", penden.env))[2]))),
                  get("DD", penden.env), 1) -
             lambda * get("DDD.sum", penden.env) %*% get("ck.val.temp", penden.env),
           penden.env)
  }
}
lambdax <- function(rt, rx, ry, theta, revents)
{
if (rt < revents[1,1])
return(0)
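  # parameters are handed to the C routine on the square-root scale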
theta <- sqrt(theta)
storage.mode(revents) <- "double"
.Call("clambdax", as.double(rt), as.double(rx), as.double(ry),
as.double(theta), revents, PACKAGE="ETAS")[[1]]
} |
rcorpus = function(nwords=50, alphabet=letters, minwordlen=1, maxwordlen=6)
{
check.is.posint(nwords)
check.is.strings(alphabet)
check.is.posint(minwordlen)
check.is.posint(maxwordlen)
if (minwordlen > maxwordlen)
stop("Argument 'maxwordlen' must be at least 'minwordlen'.")
sizes = as.integer(runif(nwords, minwordlen, maxwordlen+1))
words = sapply(sizes, function(size) paste0(sample(alphabet, size=size, replace=TRUE), collapse=""))
paste0(words, collapse=" ")
} |
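# Example (needs the package's check.is.* validators that rcorpus calls):
# set.seed(42)
# rcorpus(nwords = 10, alphabet = c("a", "b", "c"), maxwordlen = 4)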
mean_pa <- function(lengths){
result <- mean(lengths[[3]])
return(result)
} |
unzip <-
function(zipfile, files = NULL, list = FALSE, overwrite = TRUE,
junkpaths = FALSE, exdir = ".", unzip = "internal",
setTimes = FALSE)
{
if(identical(unzip, "internal")) {
if(!list && !missing(exdir))
dir.create(exdir, showWarnings = FALSE, recursive = TRUE)
res <- .External(C_unzip, zipfile, files, exdir, list, overwrite,
junkpaths, setTimes)
if(list) {
dates <- as.POSIXct(res[[3]], "%Y-%m-%d %H:%M", tz="UTC")
data.frame(Name = res[[1]], Length = res[[2]], Date = dates,
stringsAsFactors = FALSE)
} else invisible(attr(res, "extracted"))
} else {
WINDOWS <- .Platform$OS.type == "windows"
if(!is.character(unzip) || length(unzip) != 1L || !nzchar(unzip))
stop("'unzip' must be a single character string")
zipfile <- path.expand(zipfile)
if (list) {
res <- if (WINDOWS)
system2(unzip, c("-l", shQuote(zipfile)), stdout = TRUE)
else
system2(unzip, c("-l", shQuote(zipfile)), stdout = TRUE,
env = c("TZ=UTC"))
l <- length(res)
res2 <- res[-c(1,3, l-1, l)]
con <- textConnection(res2); on.exit(close(con))
z <- read.table(con, header=TRUE, as.is=TRUE)
dt <- paste(z$Date, z$Time)
formats <-
if (max(nchar(z$Date) > 8))
c("%Y-%m-%d", "%d-%m-%Y", "%m-%d-%Y") else
c("%m-%d-%y", "%d-%m-%y", "%y-%m-%d")
slash <- any(grepl("/", z$Date))
if (slash) formats <- gsub("-", "/", formats)
formats <- paste(formats, "%H:%M")
for (f in formats) {
zz <- as.POSIXct(dt, tz="UTC", format = f)
if (all(!is.na(zz))) break
}
z[, "Date"] <- zz
z[c("Name", "Length", "Date")]
} else {
args <- c("-oq", shQuote(zipfile))
if (length(files)) args <- c(args, shQuote(files))
if (exdir != ".") args <- c(args, "-d", shQuote(exdir))
if (WINDOWS)
system2(unzip, args, stdout = NULL, stderr = NULL,
invisible = TRUE)
else
system2(unzip, args, stdout = NULL, stderr = NULL)
invisible(NULL)
}
}
}
zip <- function(zipfile, files, flags = "-r9X", extras = "",
zip = Sys.getenv("R_ZIPCMD", "zip"))
{
if (missing(flags) && (!is.character(files) || !length(files)))
stop("'files' must a character vector specifying one or more filepaths")
args <- c(flags, shQuote(path.expand(zipfile)),
shQuote(files), extras)
if (.Platform$OS.type == "windows")
invisible(system2(zip, args, invisible = TRUE))
else invisible(system2(zip, args))
} |
.new_rendo_boots <- function(
call, F.formula, mf,
coefficients,
names.main.coefs,
fitted.values, residuals,
boots.params,
subclass = character(),
...){
return(.new_rendo_base(
call = call,
F.formula = as.Formula(F.formula),
mf = mf,
coefficients = coefficients,
names.main.coefs = names.main.coefs,
residuals = residuals,
fitted.values = fitted.values,
subclass = c(subclass, "rendo.boots"),
boots.params=boots.params,
...))
} |
scalingRegistry <- registry::registry(registry_class = "scaling_registry",
entry_class = "scale")
scalingRegistry$set_field("method",
type = "character",
is_key = TRUE,
index_FUN = registry::match_partial_ignorecase)
scalingRegistry$set_field("fun",
type = "function",
is_key = FALSE)
scalingRegistry$set_field("description",
type = "character",
is_key = FALSE)
scalingRegistry$set_entry(method = "Nominal",
fun = nominal_scaling,
description = "Nominal scaling")
scalingRegistry$set_entry(method = "Ordinal",
fun = ordinal_scaling,
description = "Ordinal scaling")
scalingRegistry$set_entry(method = "Interordinal",
fun = interordinal_scaling,
description = "Interordinal scaling")
scalingRegistry$set_entry(method = "Biordinal",
fun = biordinal_scaling,
description = "Biordinal scaling")
scalingRegistry$set_entry(method = "Interval",
fun = interval_scaling,
description = "Interval scaling") |
cache <- new.env(parent = emptyenv())
available_packages_set <- function(repos, type, db) {
signature <- rawToChar(serialize(list(repos, type), NULL, ascii = TRUE))
if (is.null(cache[[signature]])) {
cache[[signature]] <- db
}
cache[[signature]]
}
available_packages_reset <- function() {
rm(list = ls(envir = cache), envir = cache)
}
available_packages <- function(repos = getOption("repos"), type = getOption("pkgType")) {
available_packages_set(
repos, type,
suppressWarnings(utils::available.packages(utils::contrib.url(repos, type), type = type))
)
} |
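# Note: the db argument is a lazily evaluated promise, so the
# available.packages() download is only forced on a cache miss; later
# calls with the same repos/type reuse the stored index. A sketch:
# db1 <- available_packages(repos = "https://cloud.r-project.org", type = "source")
# db2 <- available_packages(repos = "https://cloud.r-project.org", type = "source")  # cached
# identical(db1, db2)  # TRUE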
v2 <- new.env()
source("../v0.2.8.R", v2)
isNumMatrix <- v2$isNumMatrix
x <- seq.int(0, 1, 0.05)
ord <- 4
aKnots <- c(rep(0, ord), rep(1, ord))
expect_equivalent(dbs(x, derivs = 1, intercept = TRUE),
splines::splineDesign(aKnots, x = x, derivs = 1))
expect_equivalent(dbs(x, derivs = 2, intercept = TRUE),
splines::splineDesign(aKnots, x = x, derivs = 2))
expect_equivalent(
dbs(x, derivs = 3, intercept = TRUE)[- length(x), ],
splines::splineDesign(aKnots, derivs = 3, x = x)[- length(x), ]
)
knots <- c(0.2, 0.4, 0.7)
aKnots <- c(rep(0, ord), na.omit(knots), rep(1, ord))
expect_equivalent(dbs(x, derivs = 1, knots = knots, intercept = TRUE),
splines::splineDesign(aKnots, x = x, derivs = 1))
expect_equivalent(dbs(x, derivs = 2, knots = knots, intercept = TRUE),
splines::splineDesign(aKnots, x = x, derivs = 2))
expect_equivalent(
dbs(x, derivs = 3, knots = knots, intercept = TRUE)[- length(x), ],
splines::splineDesign(aKnots, x = x, derivs = 3)[- length(x), ]
)
knots <- c(0.3, 0.6)
ord <- 5
aKnots <- c(rep(0, ord), na.omit(knots), rep(1, ord))
expect_equivalent(dbs(x, 1, knots = knots, degree = 4, intercept = TRUE),
splines::splineDesign(aKnots, x, ord, derivs = 1))
expect_equivalent(dbs(x, 2, knots = knots, degree = 4, intercept = TRUE),
splines::splineDesign(aKnots, x, ord, derivs = 2))
expect_equivalent(dbs(x, 3, knots = knots, degree = 4, intercept = TRUE),
splines::splineDesign(aKnots, x, ord, derivs = 3))
expect_equivalent(
dbs(x, 4, knots = knots, degree = 4, intercept = TRUE)[- length(x), ],
splines::splineDesign(aKnots, x, ord, derivs = 4)[- length(x), ]
)
expect_error(dbs(x, 1, df = 1, intercept = TRUE))
expect_error(dbs(x, 1, df = 2, intercept = TRUE))
expect_error(dbs(x, 1, df = 3, intercept = TRUE))
expect_error(dbs(x, 2, df = 3, intercept = TRUE))
expect_true(
isNumMatrix(dbs(x, 1, df = 1, degree = 0, intercept = TRUE), 21L, 1L)
)
expect_true(isNumMatrix(dbs(x, 1, df = 4), 21L, 4L))
expect_true(isNumMatrix(dbs(x, 1, df = 4, intercept = TRUE), 21L, 4L))
expect_true(isNumMatrix(dbs(x, 1, df = 5), 21L, 5L))
expect_true(isNumMatrix(dbs(x, 1, df = 5, intercept = TRUE), 21L, 5L))
expect_true(isNumMatrix(dbs(x, 1, df = 5, degree = 0), 21L, 5L))
expect_true(
isNumMatrix(dbs(x, 1, df = 5, degree = 0, intercept = TRUE), 21L, 5L)
)
x <- c(NA, seq.int(0, 0.5, 0.1), NA, seq.int(0.6, 1, 0.1), NA)
knots <- c(0.25, 0.5, 0.75)
x2 <- c(- 1, 2, x)
b_knots <- c(0, 1)
expect_equivalent(dbs(x), v2$dbs(x))
expect_equivalent(dbs(x, df = 5),
v2$dbs(x, df = 5))
expect_equivalent(dbs(x, knots = knots),
v2$dbs(x, knots = knots))
expect_equivalent(dbs(x, degree = 2L),
v2$dbs(x, degree = 2L))
expect_equivalent(dbs(x, intercept = TRUE),
v2$dbs(x, intercept = TRUE))
expect_equivalent(dbs(x, knots = knots, intercept = TRUE),
v2$dbs(x, knots = knots, intercept = TRUE))
expect_equivalent(dbs(x, df = 6, intercept = TRUE),
v2$dbs(x, df = 6, intercept = TRUE))
expect_equivalent(dbs(x, df = 5, degree = 0),
v2$dbs(x, df = 5, degree = 0))
expect_equivalent(dbs(x, df = 5, degree = 0, intercept = TRUE),
v2$dbs(x, df = 5, degree = 0, intercept = TRUE))
bsMat0a <- dbs(x, degree = 0, intercept = TRUE)
bsMat0b <- dbs(x, df = 5, degree = 0)
bsMat0c <- dbs(x, df = 5, degree = 0, intercept = TRUE)
bsMat0d <- dbs(x, knots = knots, degree = 0)
bsMat0e <- dbs(x, knots = knots, degree = 0, intercept = TRUE)
expect_true(isNumMatrix(bsMat0a, 14L, 1L))
expect_equal(sum(is.na(bsMat0b)), 15L)
expect_true(isNumMatrix(bsMat0b, 14L, 5L))
expect_true(isNumMatrix(bsMat0c, 14L, 5L))
expect_true(isNumMatrix(bsMat0d, 14L, 3L))
expect_true(isNumMatrix(bsMat0e, 14L, 4L))
expect_true(isNumMatrix(
dbs(x, df = 10, knots = knots, degree = 0L),
14L, 3L))
expect_true(isNumMatrix(
dbs(x, df = 10, knots = knots,
degree = 0, intercept = TRUE),
14L, 4L))
suppressWarnings({
expect_equivalent(
dbs(x2, df = 6, degree = 3, Boundary.knots = b_knots),
v2$dbs(x2, df = 6, degree = 3, Boundary.knots = b_knots)
)
})
suppressWarnings({
expect_equivalent(
dbs(x2, knots = knots, degree = 3, Boundary.knots = b_knots),
v2$dbs(x2, knots = knots, degree = 3, Boundary.knots = b_knots)
)
})
names(x) <- sample(LETTERS, length(x), replace = TRUE)
expect_equal(rownames(dbs(x)), names(x))
expect_error(dbs(c(NA_real_, NA_real_), degree = 0))
expect_error(dbs(c(NA, NA), df = 5))
expect_error(dbs(x, degree = - 1))
expect_error(dbs(x, degree = NA))
expect_error(dbs(x, df = - 1))
expect_error(dbs(x, df = NA))
expect_error(dbs(x, knots = c(0.1, 0.5, NA)))
expect_error(dbs(x, Boundary.knots = c(0.1, 0.5, NA)))
expect_error(dbs(x, Boundary.knots = 0.1))
expect_error(dbs(x, Boundary.knots = c(0.1, 0.1)))
expect_error(dbs(x, Boundary.knots = c(0.1, 0.5, 1)))
expect_true(isNumMatrix(dbs(x, degree = 0, intercept = TRUE),
length(x), 1))
expect_error(dbs(x, degree = 0))
expect_error(dbs(x, knots = c(- 0.1, 0.5), degree = 0))
expect_warning(dbs(c(x, 10), knots = knots, degree = 0,
Boundary.knots = c(0, 1)))
expect_warning(dbs(c(x, 10), knots = knots, degree = 3,
Boundary.knots = c(0, 1))) |
library(DoE.multi.response)
context("UF - CCD")
x<-matrix(c(1,1,1,0,0,
0,1,1,1,0,
1,0,1,0,1,
1,0,0,1,0), nrow = 4,byrow = TRUE)
ufccdx<-ufccd(x)
test_that("correct number of axial points", {
expect_equal(sum(ufccdx[,-1]>1) ,5)
expect_equal(sum(ufccdx[,-1]< -1),5)
expect_equal(sum(ufccdx[,2]>1) ,1)
expect_equal(sum(ufccdx[,2]< -1) ,1)
})
test_that("correct number of cube points", {
expect_equal(sum(ufccdx[,-1]==1) ,40)
expect_equal(sum(ufccdx[,-1]== -1),40)
expect_equal(sum(ufccdx[,2]==1) ,8)
expect_equal(sum(ufccdx[,2]== -1) ,8)
})
test_that("correct aliasing", {
expect_equal(ufccdx$X2 ,ufccdx$X5)
}) |
search_tweets <- function(q, n = 100,
type = c("mixed", "recent", "popular"),
include_rts = TRUE,
geocode = NULL,
since_id = NULL,
max_id = NULL,
parse = TRUE,
token = NULL,
retryonratelimit = NULL,
verbose = TRUE,
...) {
params <- search_params(q,
type = type,
include_rts = include_rts,
geocode = geocode,
...
)
result <- TWIT_paginate_max_id(token, "/1.1/search/tweets", params,
get_id = function(x) x$statuses$id_str,
page_size = 100,
n = n,
since_id = since_id,
max_id = max_id,
retryonratelimit = retryonratelimit,
verbose = verbose
)
if (parse) {
tweets <- lapply(result, "[[", "statuses")
result <- tweets_with_users(tweets)
}
result
}
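## Usage sketch (illustrative; requires a configured auth token):
# rt <- search_tweets("#rstats", n = 200, include_rts = FALSE)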
search_params <- function(q,
type = c("mixed", "recent", "popular"),
include_rts = TRUE,
geocode = NULL,
max_id = NULL,
...) {
if (missing(q) && !is.null(geocode)) {
q <- ""
}
stopifnot(is.atomic(q), length(q) == 1L, is.atomic(max_id))
type <- arg_match(type)
if (nchar(q) > 600) {
stop("q cannot exceed 500 characters.", call. = FALSE)
}
if (!include_rts) {
q <- paste0(q, " -filter:retweets")
}
if (!is.null(geocode) && inherits(geocode, "coords")) {
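    # Approximate the bounding box as a radius in miles: the north-south span
    # uses ~69 miles per degree of latitude, the east-west span applies a
    # latitude correction, and the two spans are then averaged and rescaled.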
mls1 <- abs(geocode$box[2] - geocode$box[4]) * 69
mls2 <- abs(geocode$box[1] - geocode$box[3]) *
(69 - abs(.093 * geocode$point[1])^2)
mls <- (mls1/1.8 + mls2/1.8) / 1.8
mls <- round(mls, 3)
geocode <- paste0(paste(geocode$point, collapse = ","), ",", mls, "mi")
}
list(
q = q,
result_type = type,
max_id = max_id,
tweet_mode = "extended",
include_ext_alt_text = "true",
geocode = geocode,
...
)
}
search_tweets2 <- function(...) {
dots <- match_fun(list(...), "search_tweets")
q <- dots[["q"]]
dots[["q"]] <- NULL
parse <- dots[["parse"]]
rt <- Map("search_tweets", q, MoreArgs = dots)
if (!parse) {
return(rt)
}
kp <- lengths(rt) > 0L
if (sum(kp, na.rm = TRUE) == 0L) return(data.frame())
rt <- rt[kp]
q <- q[kp]
rt <- Map("add_var", rt, query = q)
do_call_rbind(rt)
}
add_var <- function(x, ...) {
dots <- list(...)
if (!is.null(names(dots))) {
varname <- names(dots)
} else {
varname <- deparse(substitute(...))
}
x[[varname]] <- unlist(dots, use.names = FALSE)
x
}
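# match_fun (roughly): align captured `dots` with the formals of `fun` --
# named arguments keep their names, unnamed ones inherit the corresponding
# formal's name, unsupplied formals fall back to their defaults, and
# arguments matching no formal pass through (destined for ...).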
match_fun <- function(dots, fun) {
rfuns <- names(formals(fun))
nms <- match(names(dots), rfuns)
nms[names(dots) != ""] <- names(dots)[names(dots) != ""]
is_na <- function(x) is.na(x) | x == "NA"
nms[is_na(nms) & names(dots) == ""] <- names(
formals(fun))[which(is_na(nms) & names(dots) == "")]
names(dots) <- nms
names(dots)[is.na(names(dots))] <- ""
fmls <- formals(fun)
dotsdots <- dots[!names(dots) %in% names(fmls)]
dots <- dots[names(dots) %in% names(fmls)]
fmls <- fmls[!names(fmls) %in% names(dots) & names(fmls) != "..."]
c(dots, fmls, dotsdots)
} |
prob_trans_stsl <- function(
eta,
chi,
rho,
M
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(chi), chi >= 0 & chi <= 1)) stop('chi must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
(eta * rho) /
((eta * rho) + ((1 - chi^(M-2)) * (1 - eta) * rho) + ((1 - chi^(M-1)) * (1 - rho)))
}
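## Illustrative example (parameter values are assumptions, not from the
## original file):
# prob_trans_stsl(eta = 0.95, chi = 0.999, rho = 0.5, M = 100)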
obs_pairs_stsl <- function(
eta,
chi,
rho,
M
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(chi), chi >= 0 & chi <= 1)) stop('chi must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
(M / 2) * ((eta * rho) + (rho * (1 - eta) * (1 - chi^(M-2))) + ((1 - rho) * (1 - chi^(M-1))))
}
true_pairs_stsl <- function(
eta,
rho,
M
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
(M / 2) * eta * rho
}
prob_trans_mtsl <- function(
chi,
eta,
rho,
M,
R
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(chi), chi >= 0 & chi <= 1)) stop('chi must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
if (!all(is.numeric(R), R > 0)) stop('Reproductive number (R) must be numeric greater than 0')
if (!all(is.numeric(R), R <= 1)) warning('Reproductive number (R) is usually less than 1 for finite outbreaks')
(1 - exp(-rho * eta * (R+1))) /
(1 - ((chi^(M-1)) * exp(rho * (R+1) * ((1-eta)/chi - 1))))
}
obs_pairs_mtsl <- function(
chi,
eta,
rho,
M,
R
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(chi), chi >= 0 & chi <= 1)) stop('chi must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
if (!all(is.numeric(R), R > 0)) stop('Reproductive number (R) must be numeric greater than 0')
if (!all(is.numeric(R), R <= 1)) warning('Reproductive number (R) is usually less than 1 for finite outbreaks')
(M * rho * (R+1) * eta * (1 - ((chi^(M-1))) * exp(rho * (R+1) * (((1-eta)/chi) -1)))) /
(2 * (1 - exp(-rho * (R+1) * eta)))
}
true_pairs_mtsl <- function(
eta,
rho,
M,
R
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
if (!all(is.numeric(R), R > 0)) stop('Reproductive number (R) must be numeric greater than 0')
if (!all(is.numeric(R), R <= 1)) warning('Reproductive number (R) is usually less than 1 for finite outbreaks')
(M * rho * (R+1) * eta) / 2
}
prob_trans_mtml <- function(
eta,
chi,
rho,
M,
R
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(chi), chi >= 0 & chi <= 1)) stop('chi must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
if (!all(is.numeric(R), R > 0)) stop('Reproductive number (R) must be numeric greater than 0')
if (!all(is.numeric(R), R <= 1)) warning('Reproductive number (R) is usually less than 1 for finite outbreaks')
(eta * rho * (R+1)) /
((eta * rho * (R+1)) + ((1-chi) * (M - 1 - (rho * (R+1)))))
}
obs_pairs_mtml <- function(
chi,
eta,
rho,
M,
R
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(chi), chi >= 0 & chi <= 1)) stop('chi must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
if (!all(is.numeric(R), R > 0)) stop('Reproductive number (R) must be numeric greater than 0')
if (!all(is.numeric(R), R <= 1)) warning('Reproductive number (R) is usually less than 1 for finite outbreaks')
(M / 2) * ((eta * rho * (R+1)) + ((1-chi) * (M - 1- (rho * (R+1)))))
}
true_pairs_mtml <- function(
eta,
rho,
M,
R
){
if (!all(is.numeric(eta), eta >= 0 & eta <= 1)) stop('eta must be numeric between 0 and 1')
if (!all(is.numeric(rho), rho > 0 & rho <= 1)) stop('rho must be numeric > 0 and <= 1')
if (!all(is.numeric(M) | is.integer(M), M >= 0)) stop('Sample size (M) must be integer or numeric greater than 0')
if (!all(is.numeric(R), R > 0)) stop('Reproductive number (R) must be numeric greater than 0')
if (!all(is.numeric(R), R <= 1)) warning('Reproductive number (R) is usually less than 1 for finite outbreaks')
(M * rho * (R+1) * eta) / 2
}
truediscoveryrate <- function(
eta,
chi,
rho,
M,
R=NULL,
assumption='mtml'
){
if (assumption == 'stsl') {
message('Calculating true discovery rate assuming single-transmission and single-linkage')
out <- prob_trans_stsl(eta=eta, chi=chi, rho=rho, M=M)
} else if (assumption == 'mtsl') {
message('Calculating true discovery rate assuming multiple-transmission and single-linkage')
out <- prob_trans_mtsl(eta=eta, chi=chi, rho=rho, M=M, R=R)
} else if (assumption == 'mtml') {
message('Calculating true discovery rate assuming multiple-transmission and multiple-linkage')
out <- prob_trans_mtml(eta=eta, chi=chi, rho=rho, M=M, R=R)
} else {
stop("Incorrect assumption argument")
}
return(out)
}
falsediscoveryrate <- function(
eta,
chi,
rho,
M,
R=NULL,
assumption='mtml'
){
suppressMessages(
1 - truediscoveryrate(eta=eta,
chi=chi,
rho=rho,
M=M,
R=R,
assumption=assumption)
)
}
exp_links <- function(
eta,
chi,
rho,
M,
R=NULL,
assumption='mtml'
){
if (assumption == 'stsl') {
message('Calculating expected number of links assuming single-transmission and single-linkage')
out <- obs_pairs_stsl(eta=eta, chi=chi, rho=rho, M=M)
} else if (assumption == 'mtsl') {
message('Calculating expected number of links assuming multiple-transmission and single-linkage')
out <- obs_pairs_mtsl(eta=eta, chi=chi, rho=rho, M=M, R=R)
} else if (assumption == 'mtml') {
message('Calculating expected number of links assuming multiple-transmission and multiple-linkage')
out <- obs_pairs_mtml(eta=eta, chi=chi, rho=rho, M=M, R=R)
} else {
stop("Incorrect assumption argument")
}
return(out)
}
true_pairs <- function(
eta,
rho,
M,
R=NULL,
assumption='mtml'
){
if (assumption == 'stsl') {
message('Calculating expected number of links assuming single-transmission and single-linkage')
out <- true_pairs_stsl(eta=eta, rho=rho, M=M)
} else if (assumption == 'mtsl') {
message('Calculating expected number of links assuming multiple-transmission and single-linkage')
out <- true_pairs_mtsl(eta=eta, rho=rho, M=M, R=R)
} else if (assumption == 'mtml') {
message('Calculating expected number of links assuming multiple-transmission and multiple-linkage')
out <- true_pairs_mtml(eta=eta, rho=rho, M=M, R=R)
} else {
stop("Incorrect assumption argument")
}
return(out)
}
samplesize <- function(
eta,
chi,
N,
R=NULL,
phi,
min_pairs=1,
assumption='mtml'
){
if (!(is.numeric(phi) & phi >= 0 & phi <= 1)) {stop('phi must be numeric between 0 and 1')}
samplesize_found <- FALSE
for (i in 2:N) {
tdr <- suppressMessages(truediscoveryrate(eta=eta, chi=chi, rho=i/N, M=i, R=R, assumption=assumption))
obs_pairs = suppressMessages(exp_links(eta=eta, chi=chi, rho=i/N, M=i, R=R, assumption=assumption))
if (tdr >= phi & obs_pairs >= min_pairs) {
samplesize_found <- TRUE
break
}
}
if (samplesize_found) { return(i) }
else { stop("Input values do no produce a viable solution") }
}
gen_dists <- function(
mut_rate,
mean_gens_pdf,
max_link_gens=1,
max_gens=NULL,
max_dist=NULL
) {
if (!all(is.numeric(mut_rate), mut_rate >= 0)) stop('Mutation rate must have a positive value')
if(is.null(max_gens)) max_gens <- which(mean_gens_pdf != 0)[length(which(mean_gens_pdf != 0))]
if(is.null(max_dist)) max_dist <- suppressWarnings(max_gens*stats::qpois(.999, mut_rate))
if (!all(is.numeric(max_gens), max_gens > 0)) stop('Maximum number of generations to consider must be numeric greater than zero')
if (!all(is.numeric(max_dist), max_dist >= 0)) stop('Maximum distance to consider must have a positive value')
if (!all(is.numeric(max_link_gens), max_link_gens > 0)) stop('Maximum number of generations to consider linked must be numeric greater than zero')
if(sum(mean_gens_pdf) <= 0) stop('Generation distribution must have at least one non-zero value')
if(any(mean_gens_pdf < 0)) stop('Generation distribution cannot contain negative probabilities')
gendist <- matrix(0,nrow=max_dist+1, ncol=3)
colnames(gendist) <- c("dist","linked_prob","unlinked_prob")
gendist[,1] <- 0:max_dist
mean_gens_cdf <- cumsum(mean_gens_pdf)/sum(mean_gens_pdf)
for (i in 1:(max_dist+1)){
for (j in 1:max_link_gens){
gendist[i,2] <- gendist[i,2] + mean_gens_pdf[j] * stats::dpois(i-1,j*mut_rate)
}
}
for (i in 1:(max_dist+1)){
for (j in (max_link_gens+1):max_gens){
gendist[i,3] <- gendist[i,3] + mean_gens_pdf[j] * stats::dpois(i-1,j*mut_rate)
}
}
gendist[,"linked_prob"] <- gendist[,"linked_prob"]/sum(gendist[,"linked_prob"])
gendist[,"unlinked_prob"] <- gendist[,"unlinked_prob"]/sum(gendist[,"unlinked_prob"])
return(gendist)
}
sens_spec_calc <- function(
cutoff,
mut_rate,
mean_gens_pdf,
max_link_gens=1,
max_gens=NULL,
max_dist=NULL
) {
if(is.null(max_gens)) max_gens <- which(mean_gens_pdf != 0)[length(which(mean_gens_pdf != 0))]
if(is.null(max_dist)) max_dist <- suppressWarnings(max_gens*stats::qpois(.999, mut_rate))
if (max_dist<max(cutoff+1)){warning("Nonsensical cutoff given distances considered")}
gendist <- gen_dists(mut_rate = mut_rate, mean_gens_pdf = mean_gens_pdf,
max_link_gens = max_link_gens, max_gens = max_gens, max_dist = max_dist)
linked_pdf <- gendist[,"linked_prob"]
unlinked_pdf <- gendist[,"unlinked_prob"]
linked_cdf <- cumsum(linked_pdf)/sum(linked_pdf)
unlinked_cdf <- cumsum(unlinked_pdf)/sum(unlinked_pdf)
get_sens_spec <- function(cutoff) {
spec <- 1 - unlinked_cdf[cutoff]
sens <- 1 - (1 - linked_cdf[cutoff])
return(c(sens,spec))
}
rc <- t(sapply(cutoff, get_sens_spec))
rc <- cbind(cutoff, rc)
colnames(rc) <- c("cutoff","sensitivity", "specificity")
return(rc)
}
sens_spec_roc <- function(
cutoff,
mut_rate,
mean_gens_pdf,
max_link_gens=1,
max_gens=NULL,
max_dist=NULL
){
if(is.null(max_gens)) max_gens <- which(mean_gens_pdf != 0)[length(which(mean_gens_pdf != 0))]
if(is.null(max_dist)) max_dist <- suppressWarnings(max_gens*stats::qpois(.999, mut_rate))
rc <- sens_spec_calc(cutoff,mut_rate,mean_gens_pdf,max_link_gens,max_gens,max_dist)
rc <- as.data.frame(rc)
rc$specificity <- 1-rc$specificity
rc <- rbind(c(-1,0,0), rc ,c(Inf,1,1))
return(rc)
}
get_optim_roc <- function(roc) {
roc <- roc[-1,]
dist <- sqrt((1-roc$sensitivity)^2 + (roc$specificity)^2)
as.list(roc[dist == min(dist),])
} |
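## Usage sketch (illustrative values): build the ROC across candidate genetic
## distance cutoffs, then take the cutoff closest to the top-left corner.
# roc <- sens_spec_roc(cutoff = 1:10, mut_rate = 1,
#                      mean_gens_pdf = c(0.02, 0.08, 0.15, 0.75))
# get_optim_roc(roc)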
context("Test estimate_risk_spread()")
data("Brazil_epiflows")
ef.es <- epicontacts::thin(Brazil_epiflows[, j = "Espirito Santo"])
codes <- get_id(ef.es)
test_that("an error will be thrown if all arguments are missing", {
expect_error(estimate_risk_spread(),
"'location_code' is missing. Please supply a character value.")
})
test_that("an error will be thrown if arguments are misspelled", {
expect_error({
outcome <- estimate_risk_spread(
loocation_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 1e3,
location_population = get_pop_size(ef.es)["Espirito Santo"],
num_cases_time_window = get_vars(ef.es, "num_cases_time_window", id = FALSE)["Espirito Santo", ],
first_date_cases = get_vars(ef.es, "first_date_cases", id = FALSE)["Espirito Santo", ],
last_date_cases = get_vars(ef.es, "last_date_cases", id = FALSE)["Espirito Santo", ],
num_travellers_to_other_locations = get_n(ef.es, from = "Espirito Santo"),
num_travellers_from_other_locations = get_n(ef.es, to = "Espirito Santo"),
avg_length_stay_days = na.omit(get_vars(ef.es, "duration_stay", id = FALSE, vector = TRUE))
)
},
regexp = "'location_code' is missing. Please supply a character value.\n.+Unmatched arguments: loocation_code")
})
test_that("avg_length_stay_days can take a single value as input", {
expect_warning({
set.seed(9000)
res1 <- estimate_risk_spread(Brazil_epiflows,
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 99,
avg_length_stay_days = rep(2, 10)
)
},
regexp = "number of simulations"
)
expect_warning({
set.seed(9000)
res2 <- estimate_risk_spread(Brazil_epiflows,
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 99,
avg_length_stay_days = 2
)
},
regexp = "number of simulations"
)
expect_identical(res1, res2)
})
test_that("avg_length_stay_days will bork if given an incorrect length", {
expect_error({
set.seed(9000)
suppressWarnings({
res2 <- estimate_risk_spread(Brazil_epiflows,
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 99,
avg_length_stay_days = c(2, 2)
)
})
},
regexp = "avg_length_stay_days must have length equal to 1 or to the number of locations"
)
})
test_that("Correct value is returned", {
set.seed(9000)
outcome <- estimate_risk_spread(
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 1e3,
location_population = get_pop_size(ef.es)["Espirito Santo"],
num_cases_time_window = get_vars(ef.es, "num_cases_time_window", id = FALSE)["Espirito Santo", ],
first_date_cases = get_vars(ef.es, "first_date_cases", id = FALSE)["Espirito Santo", ],
last_date_cases = get_vars(ef.es, "last_date_cases", id = FALSE)["Espirito Santo", ],
num_travellers_to_other_locations = get_n(ef.es, from = "Espirito Santo"),
num_travellers_from_other_locations = get_n(ef.es, to = "Espirito Santo"),
avg_length_stay_days = na.omit(get_vars(ef.es, "duration_stay", id = FALSE, vector = TRUE))
)
expect_true(all(rownames(outcome) %in% codes[-1]))
})
test_that("a matrix of simulations is returned if requested", {
expect_warning({
outcome <- estimate_risk_spread(
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 99,
location_population = get_pop_size(ef.es)["Espirito Santo"],
num_cases_time_window = get_vars(ef.es, "num_cases_time_window", id = FALSE)["Espirito Santo", ],
first_date_cases = get_vars(ef.es, "first_date_cases", id = FALSE)["Espirito Santo", ],
last_date_cases = get_vars(ef.es, "last_date_cases", id = FALSE)["Espirito Santo", ],
num_travellers_to_other_locations = get_n(ef.es, from = "Espirito Santo"),
num_travellers_from_other_locations = get_n(ef.es, to = "Espirito Santo"),
avg_length_stay_days = na.omit(get_vars(ef.es, "duration_stay", id = FALSE, vector = TRUE)),
return_all_simulations = TRUE
)},
regexp = "number of simulations"
)
expect_is(outcome, "matrix")
expect_equal(dim(outcome), c(99, 10))
})
test_that("estimate_risk_spread works on epiflow objects", {
expect_warning({
set.seed(9000)
res <- estimate_risk_spread(Brazil_epiflows,
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 99
)
},
regexp = "number of simulations"
)
expect_true(all(rownames(res) %in% codes[-1]))
})
test_that("estimate_risk_spread arguments can be overridden", {
expect_warning({
set.seed(9000)
res1 <- estimate_risk_spread(Brazil_epiflows,
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 9
)
},
regexp = "number of simulations"
)
expect_warning({
set.seed(9000)
res2 <- estimate_risk_spread(Brazil_epiflows,
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 9,
num_cases_time_window = 2.6e5
)
},
regexp = "number of simulations"
)
expect_true(all(res2 > res1))
})
test_that("errors are thrown for ambiguities or wrong arguments", {
expect_error({
set.seed(9000)
res2 <- estimate_risk_spread(Brazil_epiflows,
location_code = "Espirito Santo",
r_incubation = function(n) rlnorm(n, 1.46, 0.35),
r_infectious = function(n) rnorm(n, 4.5, 1.5/1.96),
n_sim = 9,
num = 2.6e5,
grind = 1,
core = TRUE
)
},
regexp = "Unmatched arguments: grind, core\n Matched multiple arguments: num")
}) |
library(aster)
alpha <- 2.222
ifam <- fam.negative.binomial(alpha)
p <- seq(0.9, 0.1, -0.1)
theta <- log(1 - p)
qq <- exp(theta)
pp <- (- expm1(theta))
all.equal(p, pp)
all.equal(pp, 1 - qq)
zeroth <- double(length(theta))
first <- double(length(theta))
second <- double(length(theta))
for (i in seq(along = theta)) {
zeroth[i] <- famfun(ifam, 0, theta[i])
first[i] <- famfun(ifam, 1, theta[i])
second[i] <- famfun(ifam, 2, theta[i])
}
all.equal(zeroth, alpha * (- log(pp)))
all.equal(first, alpha * qq / pp)
all.equal(second, alpha * qq / pp^2)
rm(p)
theta <- seq(-100, -10, 10)
qq <- exp(theta)
pp <- (- expm1(theta))
zeroth <- double(length(theta))
first <- double(length(theta))
second <- double(length(theta))
for (i in seq(along = theta)) {
zeroth[i] <- famfun(ifam, 0, theta[i])
first[i] <- famfun(ifam, 1, theta[i])
second[i] <- famfun(ifam, 2, theta[i])
}
all.equal(zeroth, alpha * (- log(pp)))
all.equal(first, alpha * qq / pp)
all.equal(second, alpha * qq / pp^2)
theta <- (- 10^(- c(1:9, seq(10, 100, 10))))
qq <- exp(theta)
pp <- (- expm1(theta))
zeroth <- double(length(theta))
first <- double(length(theta))
second <- double(length(theta))
for (i in seq(along = theta)) {
zeroth[i] <- famfun(ifam, 0, theta[i])
first[i] <- famfun(ifam, 1, theta[i])
second[i] <- famfun(ifam, 2, theta[i])
}
all.equal(zeroth, alpha * (- log(pp)))
all.equal(first, alpha * qq / pp)
all.equal(second, alpha * qq / pp^2)
nind <- 50
theta <- rep(- 1.75, nind)
pred <- 0
fam <- 1
root <- seq(1, by = 0.5, length = nind)
theta <- cbind(theta)
root <- cbind(root)
set.seed(42)
rout <- raster(theta, pred, fam, root, famlist = list(ifam))
set.seed(42)
rout.too <- rnbinom(nind, size = alpha * as.numeric(root),
prob = as.numeric(1 - exp(theta)))
all.equal(as.numeric(rout), rout.too) |
local({
defrocked <- c("anyNA", "as.character", "seq.int")
rm(list = defrocked[defrocked %in% ls(.GenericArgsEnv)], envir = .GenericArgsEnv)
rm(list = defrocked[defrocked %in% ls(.ArgsEnv)], envir = .ArgsEnv)
.S3PrimitiveGenerics <<- .S3PrimitiveGenerics[ !( .S3PrimitiveGenerics %in% defrocked ) ]
}) |
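## downdateR: delete column k from the upper-triangular factor R (via delcol),
## drop the now-redundant last row, and record the reduced rank.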
downdateR <-
function(R, k = p)
{
p <- dim(R)[1]
if(p == 1)
return(NULL)
R <- delcol(R, rep(1, p), k)[[1]][ - p, , drop = FALSE]
attr(R, "rank") <- p - 1
R
} |
calculate_frequency <- function(datalist, purelist, let) {
  for (i in seq_along(let)) {
l <- let[i]
purelist[[l]] <- rbind(purelist[[l]], datalist)
purelist[[l]] <- purelist[[l]][with(purelist[[l]],
order(purelist[[l]]$date,
- as.numeric(purelist[[l]]$n))), ]
}
return(purelist)
} |
get.feature.names <- function(object, gff.file, chr){
chr <- as.character(chr)
region <- .Call("find_lines_GFF_Human2",gff.file,chr)
start <- region[1]
end <- region[2]
gff.table <- read.table(gff.file,sep="\t",colClasses=c("NULL","NULL","character","numeric","numeric",rep("NULL",3),"character"),
skip = start - 1, nrows = end - start + 1)
  region.pos <- strsplit(object@region.names," - ")
  feature.names <- character(length(object@region.names))
for(xx in 1:length(feature.names)){
pos <- as.numeric(region.pos[[xx]])
left <- pos[1]==gff.table[,2]
right <- pos[2]==gff.table[,3]
match.pos <- which(left&right)
if(length(match.pos)>0){
attr <- paste(gff.table[match.pos,1],gff.table[match.pos,4],sep="-->")
attr <- paste(attr,collapse=";")
}else{next}
feature.names[xx] <- attr
}
return(feature.names)
} |
context("checkIntegerish")
test_that("checkIntegerish", {
myobj = 1
expect_succ_all(Integerish, myobj)
myobj = 3.3
expect_fail_all(Integerish, myobj)
x = 1 - 0.9 -.1
expect_true(testIntegerish(integer(0)))
expect_false(testIntegerish(NULL))
expect_false(testIntegerish(TRUE))
expect_false(testIntegerish(FALSE))
expect_true(testIntegerish(1L))
expect_true(testIntegerish(c(-1, 0, 1)))
expect_true(testIntegerish(1.))
expect_true(testIntegerish(x))
expect_true(testIntegerish(NA))
expect_true(testIntegerish(NaN))
expect_true(testIntegerish(c(1L, NA)))
expect_true(testIntegerish(c(1, NA)))
expect_true(testIntegerish(c(1, NaN)))
expect_false(testIntegerish(1:2 + 0.0001))
expect_false(testIntegerish(-Inf))
expect_false(testIntegerish(Inf))
expect_true(testIntegerish(3+0i))
expect_false(testIntegerish(3-1i))
expect_true(testIntegerish(as.complex(NA)))
expect_false(testIntegerish(3+2i))
expect_false(testIntegerish(list()))
max = as.double(.Machine$integer.max)
min = as.double(-.Machine$integer.max)
expect_true(testIntegerish(min))
expect_true(testIntegerish(max))
expect_false(testIntegerish(min-1))
expect_false(testIntegerish(max+1))
expect_false(testIntegerish(min-.1))
expect_false(testIntegerish(max+.1))
expect_false(testIntegerish(NA, any.missing = FALSE))
expect_false(testIntegerish(NA, all.missing = FALSE))
expect_error(assertIntegerish(x, tol=0), "integerish")
expect_false(is.integer(assertIntegerish(5)))
expect_true(is.integer(assertIntegerish(5, coerce = TRUE)))
})
test_that("bounds of vectors with only missings are not checked", {
expect_true(checkInteger(NA, lower = 1))
expect_true(checkInteger(NA_character_, upper = 10))
expect_fail_all(Integerish, 0, lower = 1L)
expect_fail_all(Integerish, 100, upper = 10L)
})
test_that("isIntegerish internal function", {
expect_true(isIntegerish(1))
expect_true(isIntegerish(1.))
expect_false(isIntegerish(1.1))
})
test_that("sorted works", {
expect_true(checkIntegerish(1:3, sorted = TRUE))
expect_true(grepl("sorted", checkIntegerish(3:1, sorted = TRUE), fixed = TRUE))
})
test_that("informative error messages", {
x = checkIntegerish((.Machine$integer.max + as.double(-3:3)))
expect_string(x, fixed = "element 5")
expect_string(x, fixed = "integer range")
x = checkIntegerish((-.Machine$integer.max - 1))
expect_string(x, fixed = "element 1")
expect_string(x, fixed = "integer range")
x = checkIntegerish(0.5)
expect_string(x, fixed = "close to an integer")
x = checkIntegerish(3 + 1i)
expect_string(x, fixed = "imaginary part")
})
test_that("factors are detected (
x = factor(letters)
expect_error(assertIntegerish(x), "factor")
})
test_that("0 tolerance works (
expect_true(isIntegerish(1, tol = 0))
})
test_that("coerce rounds to next integer", {
x = 1 - sqrt(.Machine$double.eps) / 10
y = assert_integerish(x, coerce = TRUE)
expect_identical(y, 1L)
})
test_that("typed.missing", {
expect_true(testIntegerish(NA_character_))
expect_true(testIntegerish(NA_character_, typed.missing = FALSE))
expect_false(testIntegerish(NA_character_, typed.missing = TRUE))
expect_true(testIntegerish(character()))
expect_true(testIntegerish(character(), typed.missing = FALSE))
expect_false(testIntegerish(character(), typed.missing = TRUE))
}) |
library('ggplot2')
df <- read.csv(file.path('data', 'df.csv'))
logit.fit <- glm(Label ~ X + Y,
family = binomial(link = 'logit'),
data = df)
logit.predictions <- ifelse(predict(logit.fit) > 0, 1, 0)
mean(with(df, logit.predictions == Label))
mean(with(df, 0 == Label))
library('e1071')
svm.fit <- svm(Label ~ X + Y, data = df)
svm.predictions <- ifelse(predict(svm.fit) > 0, 1, 0)
mean(with(df, svm.predictions == Label))
library("reshape")
df <- cbind(df,
data.frame(Logit = ifelse(predict(logit.fit) > 0, 1, 0),
SVM = ifelse(predict(svm.fit) > 0, 1, 0)))
predictions <- melt(df, id.vars = c('X', 'Y'))
ggplot(predictions, aes(x = X, y = Y, color = factor(value))) +
geom_point() +
facet_grid(variable ~ .)
df <- df[, c('X', 'Y', 'Label')]
linear.svm.fit <- svm(Label ~ X + Y, data = df, kernel = 'linear')
with(df, mean(Label == ifelse(predict(linear.svm.fit) > 0, 1, 0)))
polynomial.svm.fit <- svm(Label ~ X + Y, data = df, kernel = 'polynomial')
with(df, mean(Label == ifelse(predict(polynomial.svm.fit) > 0, 1, 0)))
radial.svm.fit <- svm(Label ~ X + Y, data = df, kernel = 'radial')
with(df, mean(Label == ifelse(predict(radial.svm.fit) > 0, 1, 0)))
sigmoid.svm.fit <- svm(Label ~ X + Y, data = df, kernel = 'sigmoid')
with(df, mean(Label == ifelse(predict(sigmoid.svm.fit) > 0, 1, 0)))
df <- cbind(df,
data.frame(LinearSVM = ifelse(predict(linear.svm.fit) > 0, 1, 0),
PolynomialSVM = ifelse(predict(polynomial.svm.fit) > 0, 1, 0),
RadialSVM = ifelse(predict(radial.svm.fit) > 0, 1, 0),
SigmoidSVM = ifelse(predict(sigmoid.svm.fit) > 0, 1, 0)))
predictions <- melt(df, id.vars = c('X', 'Y'))
ggplot(predictions, aes(x = X, y = Y, color = factor(value))) +
geom_point() +
facet_grid(variable ~ .)
polynomial.degree3.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'polynomial',
degree = 3)
with(df, mean(Label != ifelse(predict(polynomial.degree3.svm.fit) > 0, 1, 0)))
polynomial.degree5.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'polynomial',
degree = 5)
with(df, mean(Label != ifelse(predict(polynomial.degree5.svm.fit) > 0, 1, 0)))
polynomial.degree10.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'polynomial',
degree = 10)
with(df, mean(Label != ifelse(predict(polynomial.degree10.svm.fit) > 0, 1, 0)))
polynomial.degree12.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'polynomial',
degree = 12)
with(df, mean(Label != ifelse(predict(polynomial.degree12.svm.fit) > 0, 1, 0)))
df <- df[, c('X', 'Y', 'Label')]
df <- cbind(df,
data.frame(Degree3SVM = ifelse(predict(polynomial.degree3.svm.fit) > 0,
1,
0),
Degree5SVM = ifelse(predict(polynomial.degree5.svm.fit) > 0,
1,
0),
Degree10SVM = ifelse(predict(polynomial.degree10.svm.fit) > 0,
1,
0),
Degree12SVM = ifelse(predict(polynomial.degree12.svm.fit) > 0,
1,
0)))
predictions <- melt(df, id.vars = c('X', 'Y'))
ggplot(predictions, aes(x = X, y = Y, color = factor(value))) +
geom_point() +
facet_grid(variable ~ .)
radial.cost1.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'radial',
cost = 1)
with(df, mean(Label == ifelse(predict(radial.cost1.svm.fit) > 0, 1, 0)))
radial.cost2.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'radial',
cost = 2)
with(df, mean(Label == ifelse(predict(radial.cost2.svm.fit) > 0, 1, 0)))
radial.cost3.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'radial',
cost = 3)
with(df, mean(Label == ifelse(predict(radial.cost3.svm.fit) > 0, 1, 0)))
radial.cost4.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'radial',
cost = 4)
with(df, mean(Label == ifelse(predict(radial.cost4.svm.fit) > 0, 1, 0)))
df <- df[, c('X', 'Y', 'Label')]
df <- cbind(df,
data.frame(Cost1SVM = ifelse(predict(radial.cost1.svm.fit) > 0, 1, 0),
Cost2SVM = ifelse(predict(radial.cost2.svm.fit) > 0, 1, 0),
Cost3SVM = ifelse(predict(radial.cost3.svm.fit) > 0, 1, 0),
Cost4SVM = ifelse(predict(radial.cost4.svm.fit) > 0, 1, 0)))
predictions <- melt(df, id.vars = c('X', 'Y'))
ggplot(predictions, aes(x = X, y = Y, color = factor(value))) +
geom_point() +
facet_grid(variable ~ .)
sigmoid.gamma1.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'sigmoid',
gamma = 1)
with(df, mean(Label == ifelse(predict(sigmoid.gamma1.svm.fit) > 0, 1, 0)))
sigmoid.gamma2.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'sigmoid',
gamma = 2)
with(df, mean(Label == ifelse(predict(sigmoid.gamma2.svm.fit) > 0, 1, 0)))
sigmoid.gamma3.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'sigmoid',
gamma = 3)
with(df, mean(Label == ifelse(predict(sigmoid.gamma3.svm.fit) > 0, 1, 0)))
sigmoid.gamma4.svm.fit <- svm(Label ~ X + Y,
data = df,
kernel = 'sigmoid',
gamma = 4)
with(df, mean(Label == ifelse(predict(sigmoid.gamma4.svm.fit) > 0, 1, 0)))
df <- df[, c('X', 'Y', 'Label')]
df <- cbind(df,
data.frame(Gamma1SVM = ifelse(predict(sigmoid.gamma1.svm.fit) > 0, 1, 0),
Gamma2SVM = ifelse(predict(sigmoid.gamma2.svm.fit) > 0, 1, 0),
Gamma3SVM = ifelse(predict(sigmoid.gamma3.svm.fit) > 0, 1, 0),
Gamma4SVM = ifelse(predict(sigmoid.gamma4.svm.fit) > 0, 1, 0)))
predictions <- melt(df, id.vars = c('X', 'Y'))
ggplot(predictions, aes(x = X, y = Y, color = factor(value))) +
geom_point() +
facet_grid(variable ~ .)
load(file.path('data', 'dtm.RData'))
set.seed(1)
training.indices <- sort(sample(1:nrow(dtm), round(0.5 * nrow(dtm))))
test.indices <- which(! 1:nrow(dtm) %in% training.indices)
train.x <- dtm[training.indices, 3:ncol(dtm)]
train.y <- dtm[training.indices, 1]
test.x <- dtm[test.indices, 3:ncol(dtm)]
test.y <- dtm[test.indices, 1]
rm(dtm)
library('glmnet')
regularized.logit.fit <- glmnet(train.x, train.y, family = c('binomial'))
lambdas <- regularized.logit.fit$lambda
performance <- data.frame()
for (lambda in lambdas)
{
predictions <- predict(regularized.logit.fit, test.x, s = lambda)
predictions <- as.numeric(predictions > 0)
mse <- mean(predictions != test.y)
performance <- rbind(performance, data.frame(Lambda = lambda, MSE = mse))
}
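# note: `MSE` here is the 0/1 misclassification rate, not a squared error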
ggplot(performance, aes(x = Lambda, y = MSE)) +
geom_point() +
scale_x_log10()
best.lambda <- with(performance, max(Lambda[which(MSE == min(MSE))]))
mse <- with(subset(performance, Lambda == best.lambda), MSE)
mse
library('e1071')
linear.svm.fit <- svm(train.x, train.y, kernel = 'linear')
predictions <- predict(linear.svm.fit, test.x)
predictions <- as.numeric(predictions > 0)
mse <- mean(predictions != test.y)
mse
radial.svm.fit <- svm(train.x, train.y, kernel = 'radial')
predictions <- predict(radial.svm.fit, test.x)
predictions <- as.numeric(predictions > 0)
mse <- mean(predictions != test.y)
mse
library('class')
knn.fit <- knn(train.x, test.x, train.y, k = 50)
predictions <- as.numeric(as.character(knn.fit))
mse <- mean(predictions != test.y)
mse
performance <- data.frame()
for (k in seq(5, 50, by = 5))
{
knn.fit <- knn(train.x, test.x, train.y, k = k)
predictions <- as.numeric(as.character(knn.fit))
mse <- mean(predictions != test.y)
performance <- rbind(performance, data.frame(K = k, MSE = mse))
}
best.k <- with(performance, K[which(MSE == min(MSE))])
best.mse <- with(subset(performance, K == best.k), MSE)
best.mse |
"G_alumni_111" |
setGeneric("alphacut",
function(object, alpha) standardGeneric("alphacut"))
setMethod(
f="alphacut",
signature(object="FuzzyNumber", alpha="numeric"),
definition=function(object, alpha)
{
x <- matrix(NA_real_, nrow=length(alpha), ncol=2,
dimnames=list(format(alpha), c("L", "U")))
wh <- which(alpha >= 0 & alpha <= 1)
x[wh, ] <-
c(
object@a1+(object@a2-object@a1)*object@lower(alpha[wh]),
object@a3+(object@a4-object@a3)*object@upper(alpha[wh])
)
x[wh,2] <- pmax(x[wh,1], x[wh,2])
x
}
) |
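## Illustrative example (assumes the trapezoidal constructor available in
## this package; values are arbitrary):
# A <- TrapezoidalFuzzyNumber(1, 2, 4, 5)
# alphacut(A, c(0, 0.5, 1))  # 3x2 matrix of lower/upper alpha-cut bounds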
library(wsjplot)
test_that("label prefix prepends to only the largest number", {
new_lables <- label_wsj()(c(0, 10, 100))
expect_equal(new_lables[3], "$100")
})
test_that("label prefix prepends to only the largest number and ignores NA", {
new_lables <- label_wsj()(c(0, 10, 100, NA))
expect_equal(new_lables[3], "$100")
})
test_that("default label drops the lowest number", {
new_lables <- label_wsj()(c(5, 10, 100))
expect_equal(new_lables[1], "")
})
test_that("default label accuracy calculation recognizes one decimal place", {
new_lables <- label_wsj()(c(-0.5, 0, 0.5))
expect_equal(new_lables, c("", "0.0", "$0.5"))
})
test_that("default label accuracy calculation recognizes the need for
two decimal place based on spacing", {
new_lables <- label_wsj()(c(-0.5, 0, 0.05, 0.1))
expect_equal(new_lables, c("", "0.00", "0.05","$0.1"))
})
test_that("default label accuracy calculation recognizes two decimal places", {
new_lables <- label_wsj()(c(-0.05, 0, 0.05))
expect_equal(new_lables, c("", "0.00", "$0.05"))
})
test_that("default label accuracy calculation recognizes three decimal places", {
new_lables <- label_wsj()(c(-0.05, 0, 0.005))
expect_equal(new_lables, c("", "0.000", "$0.005"))
})
test_that("Suffix adds white space for spacing", {
new_lables <- label_wsj(suffix = " M")(c(0, 5, 10))
expect_equal(new_lables, c(" ", "5 ", "$10 M"))
}) |
csReport <-
function(csCompareObj = NULL,
csSensitivityObj = NULL,
save = FALSE,
fileName = "report",
alphaLevel = 0.05,
interpretation = FALSE) {
if (is.null(csCompareObj) && is.null(csSensitivityObj)) {
rep <- "[No report was produced.]"
}
if (!is.null(csCompareObj)) {
if (class(csCompareObj) != "csCompare") {
stop("The csCompareObj is not of class csCompare.")
} else {
for (i in 1:ncol(csCompareObj$freq.results)) {
assign(names(csCompareObj$freq.results)[i],
csCompareObj$freq.results[[i]])
}
for (i in 1:ncol(csCompareObj$bayes.results)) {
assign(names(csCompareObj$bayes.results)[i],
csCompareObj$bayes.results[[i]])
}
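      # no-op self-assignments: make the objects created via assign() above
      # explicit (and fail fast if an expected column was missing)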
t.statistic <- t.statistic
p.value <- p.value
rscale <- rscale
bf10 <- bf10
bf01 <- bf01
df <- df
cohenD <- cohenD
cohenDM <- cohenDM
cs1 <- rownames(csCompareObj$descriptives)[1]
cs2 <- rownames(csCompareObj$descriptives)[2]
r.p.value <- ifelse (p.value < 0.001, "< 0.001",
paste(" = ", round(p.value, 3)))
alternative <- paste(strsplit(as.character(alternative), ".",
fixed = TRUE)[[1]], collapse = " ")
method <- tolower(method)
if (grepl("welch", method)) {
method <- gsub("welch", "Welch", method)
}
repF <- paste0(
"We performed a ",
alternative,
" ",
method,
". The results are t (",
round(df, 3),
") ",
"= ",
round(t.statistic, 3),
", p ",
r.p.value,
", Cohen's d = ",
round(cohenD, 3),
" (",
cohenDM,
" effect size)."
)
if (interpretation) {
paired <-
ifelse(as.character(method) == "paired t-test", TRUE, FALSE)
p.val <- as.numeric(as.character(p.value))
if (paired && p.val < alphaLevel) {
inter <-
paste0(
" These results suggest that there are statistically significant differences between ",
cs1,
" and ",
cs2,
" for an alpha level of ",
alphaLevel,
"."
)
} else if (paired && p.val >= alphaLevel) {
inter <-
paste0(
" These results suggest that there are no statistically significant differences between ",
cs1,
" and ",
cs2,
" for an alpha level of ",
alphaLevel,
"."
)
} else if (!paired && p.val < alphaLevel) {
inter <-
paste0(
" These results suggest that there are statistically significant between group differences, for an alpha level of ",
alphaLevel,
"."
)
} else if (!paired && p.val >= alphaLevel) {
inter <-
paste0(
" These results suggest that there are no statistically significant between group differences, for an alpha level of ",
alphaLevel,
"."
)
}
repF <- paste0(repF, inter, sep = "\n\n")
}
repB <- paste0(
"\nWe performed a ",
alternative,
" Bayesian t-test, with a Cauchy prior, with its width set to ",
rscale,
". The BF01 was: ",
condir::roundBF(bf01, rscale, BF01 = TRUE),
". The BF10 was: ",
condir::roundBF(bf10, rscale, BF01 = FALSE),
"."
)
if (interpretation) {
if (bf10 > 0 && bf10 < 1) {
interbf10 <- "no"
} else if (bf10 >= 1 && bf10 < 3) {
interbf10 <- "anecdotal"
} else if (bf10 >= 3 && bf10 < 10) {
interbf10 <- "substantial"
} else if (bf10 >= 10 && bf10 < 30) {
interbf10 <- "strong"
} else if (bf10 >= 30 && bf10 < 100) {
interbf10 <- "very strong"
      } else if (bf10 >= 100) {
interbf10 <- "decisive"
}
interbf10 <-
paste0("The results suggest that there is ",
interbf10,
" evidence for H1, relative to H0.")
if (bf01 > 0 && bf01 < 1) {
interbf01 <- "no"
      } else if (bf01 >= 1 && bf01 < 3) {
interbf01 <- "anecdotal"
} else if (bf01 >= 3 && bf01 < 10) {
interbf01 <- "substantial"
} else if (bf01 >= 10 && bf01 < 30) {
interbf01 <- "strong"
} else if (bf01 >= 30 && bf01 < 100) {
interbf01 <- "very strong"
      } else if (bf01 >= 100) {
interbf01 <- "decisive"
}
interbf01 <-
paste("The results suggest that there is",
interbf01,
"evidence for H0, relative to H1.")
repB <- paste(repB, interbf10, interbf01, sep = "\n\n")
}
repCompare <- paste(repF, repB, collapse = " ")
}
}
if (!is.null(csSensitivityObj)) {
if (class(csSensitivityObj) != "csSensitivity") {
stop("The csSensitivityObj is not of class csSensitivity.")
} else {
csSensitivityElement = csSensitivityObj[[1]]
for (i in 1:ncol(csSensitivityElement)) {
assign(names(csSensitivityElement)[i], csSensitivityElement[, i])
}
repB <-
paste0(
"We performed a Sensitivity Analysis using the scaling factors: ",
paste(rscale, collapse = ", "),
". The results for BF01 were: ",
paste(
mapply(
condir::roundBF,
as.numeric(as.character(bf01)),
rscale,
BF01 = TRUE
),
collapse = ", "
),
" respectively.",
" The results for BF10 were: ",
paste(
mapply(
condir::roundBF,
as.numeric(as.character(bf10)),
rscale,
BF01 = FALSE
),
collapse = ", "
),
" respectively."
)
}
repSensitivity <- paste(repB, collapse = " ")
}
if (!is.null(csCompareObj) && !is.null(csSensitivityObj)) {
rep <- paste(repCompare, repSensitivity)
} else if (!is.null(csCompareObj) && is.null(csSensitivityObj)) {
rep <- repCompare
} else if (is.null(csCompareObj) && !is.null(csSensitivityObj)) {
rep <- repSensitivity
}
if (!is.null(csCompareObj$res.out)) {
report.outliers <- paste(csReport(csCompareObj$res.out))
rep <- paste0("Main analyses\n",
rep,
"\n\n**Outliers report**\n",
report.outliers)
}
if (!is.null(csSensitivityObj$res.out)) {
csSensOut <- list(csSensitivityObj$res.out)
attr(csSensOut, "class") <- "csSensitivity"
report.outliers <- paste(csReport(csSensitivityObj = csSensOut))
rep <- paste0(
"Sensitivity analyses\n",
rep,
"\n\n**Sensitivity analyses - Outliers report**\n",
report.outliers
)
}
if (save) {
cat(rep, file = paste0(fileName, ".txt"))
cat("Report file saved in the following directory: ", getwd())
} else {
cat(rep)
invisible(rep)
}
} |
mle_hetop <- function(ngk, fixedcuts, svals=NULL, iterlim = 1500, ...){
if(!is.numeric(ngk)){
stop("ngk must be a GxK numeric matrix of category counts")
}
if(!is.matrix(ngk)){
stop("ngk must be a GxK numeric matrix of category counts")
}
if(any(is.na(ngk))){
stop("ngk cannot contain missing values")
}
if(any(ngk < 0)){
stop("ngk must contain only non-negative values")
}
if(any(apply(ngk, 1, sum) <= 0)){
stop("ngk contains at least one row with insufficient data")
}
G <- nrow(ngk)
K <- ncol(ngk)
if(K <= 2){
stop("Function requires K >= 3 categories")
}
if(!is.numeric(fixedcuts)){
stop("fixedcuts must be a numeric vector of length 2")
}
if(length(fixedcuts) != 2){
stop("fixedcuts must be a numeric vector of length 2")
}
if(any(is.na(fixedcuts))){
stop("fixedcuts cannot contain missing values")
}
if(fixedcuts[1] >= fixedcuts[2]){
stop("fixedcuts[1] must be strictly less than fixedcuts[2]")
}
pstatus <- as.data.frame(matrix("est", ncol=2, nrow=G), stringsAsFactors=FALSE)
names(pstatus) <- c("mug","sigmag")
pstatus$sigmag[which( apply(ngk, 1, function(x){ sum(x > 0) }) <= 2 )] <- "mean"
pstatus$mug[which(apply(ngk, 1, function(x){ (sum(x > 0) == 1) && (x[1] > 0) }))] <- "min"
pstatus$mug[which(apply(ngk, 1, function(x){ (sum(x > 0) == 1) && (x[K] > 0) }))] <- "max"
n_m <- sum(pstatus$mug == "est")
n_s <- sum(pstatus$sigmag == "est")
if(!( (n_m > 0) && (n_s > 0) )){
stop("Insufficient data for estimation - too many groups with sparse counts")
}
n_param <- n_m + n_s + (K-3)
if(!is.null(svals) && (length(svals) != n_param)){
stop("length of svals inconsistent with number of estimable parameters")
}
if(!is.null(svals) && any(is.na(svals))){
stop("svals contains missing values")
}
negll <- function(param){
.mug <- .sigmag <- rep(-99.0, G)
.mug[which(pstatus$mug == "est")] <- param[1:n_m]
.mug[which(pstatus$mug == "min")] <- min(param[1:n_m])
.mug[which(pstatus$mug == "max")] <- max(param[1:n_m])
.lsigs <- param[(n_m + 1):(n_m + n_s)]
.sigmag[which(pstatus$sigmag == "est")] <- exp(.lsigs)
.sigmag[which(pstatus$sigmag == "mean")] <- exp(mean(.lsigs))
if(K==3){
.cutpoints <- fixedcuts
} else {
.cutpoints <- c(fixedcuts, fixedcuts[2] + cumsum(exp(param[(n_m + n_s + 1):length(param)])))
}
pgk <- matrix(0, ncol=K, nrow=G)
for(g in 1:G){
tmp <- c(pnorm(.cutpoints, mean=.mug[g], sd = .sigmag[g]), 1)
pgk[g,] <- c(tmp[1], diff(tmp))
}
stopifnot(max(abs(apply(pgk, 1, sum) - 1)) < 1e-8)
pgk[which(pgk <= 1e-300)] <- 1e-300
pgk[which(pgk >= 1 - 1e-300)] <- 1 - 1e-300
-sum(c(ngk)*log(c(pgk)))
}
if(is.null(svals)){
cats <- 1:K
mu <- apply(ngk, 1, function(x){ weighted.mean(cats, w = x) })
mu <- mu[which(pstatus$mug == "est")]
logsig <- apply(ngk[which(pstatus$sigmag == "est"),,drop=F], 1, function(x){ log(sqrt(weighted.mean(cats^2, w = x) - (weighted.mean(cats, w = x))^2)) })
if(G==1){
mu <- 0.0
} else {
mu <- (mu - mean(mu)) / sd(mu)
}
logsig <- logsig - mean(logsig)
svals <- c(mu, logsig, rep(0, K-3))
}
res <- nlm(f = negll, p = svals, iterlim = iterlim, ...)
if(res$code > 1){
warning("optimization algorithm may not have converged properly; see 'nlmdetails' element of object")
}
param <- res$estimate
if(K==3){
cutpoints <- fixedcuts
} else {
cutpoints <- c(fixedcuts, fixedcuts[2] + cumsum(exp(param[(n_m + n_s + 1):length(param)])))
}
mug <- sigmag <- rep(-99.0, G)
mug[which(pstatus$mug == "est")] <- param[1:n_m]
mug[which(pstatus$mug == "min")] <- min(param[1:n_m])
mug[which(pstatus$mug == "max")] <- max(param[1:n_m])
.lsigs <- param[(n_m + 1):(n_m + n_s)]
sigmag[which(pstatus$sigmag == "est")] <- exp(.lsigs)
sigmag[which(pstatus$sigmag == "mean")] <- exp(mean(.lsigs))
est_fc <- list(mug = mug, sigmag = sigmag, cutpoints = cutpoints)
ng <- as.vector(apply(ngk, 1, sum))
pg <- ng / sum(ng)
est_fc$icc <- (sum(pg * mug^2) - (sum(pg * mug))^2) / (sum(pg * mug^2) - (sum(pg * mug))^2 + sum(pg * sigmag^2))
a <- sum(pg * est_fc$mug)
b <- exp(sum(pg * log(est_fc$sigmag)))
est_zero <- list(mug = (est_fc$mug - a) / b,
sigmag = est_fc$sigmag / b,
cutpoints = (est_fc$cutpoints - a) / b)
est_zero$icc <- (sum(pg * est_zero$mug^2) - (sum(pg * est_zero$mug))^2) / (sum(pg * est_zero$mug^2) - (sum(pg * est_zero$mug))^2 + sum(pg * est_zero$sigmag^2))
a <- sum(pg * est_zero$mug)
b <- sqrt(sum(pg * ( (est_zero$mug - a)^2 + est_zero$sigmag^2)))
est_star <- list(mug = (est_zero$mug - a) / b,
sigmag = est_zero$sigmag / b,
cutpoints = (est_zero$cutpoints - a) / b)
est_star$icc <- (sum(pg * est_star$mug^2) - (sum(pg * est_star$mug))^2) / (sum(pg * est_star$mug^2) - (sum(pg * est_star$mug))^2 + sum(pg * est_star$sigmag^2))
  if(abs(sum(pg * (est_star$mug^2 + est_star$sigmag^2)) - 1) > 1e-8){
stop("numerical problem in translating estimates to different scales")
}
if(any(ng <= 1)){
est_starbc <- NULL
} else {
N <- sum(ng)
ntilde <- 1/sum((1/(ng-1))*(1/G))
mprime <- est_zero$mug
sprime <- est_zero$sigmag
ssw_pw <- sum( (ntilde/(1+ntilde)) * pg * sprime^2 )
ssb_pw <- sum( pg * (mprime - sum(pg * mprime) )^2 ) - sum( (ntilde/(1+ntilde)) * (1/N) * (1-pg) * sprime^2)
b <- sqrt(ssb_pw + ssw_pw)
est_starbc <- list(mug = (mprime - sum(pg*mprime)) / b,
sigmag = sprime / b,
cutpoints = (est_zero$cutpoints - sum(pg*mprime)) / b)
est_starbc$icc <- ssb_pw / (ssb_pw + ssw_pw)
}
tmp <- list(est_fc, est_zero, est_star)
if(sd(sapply(tmp, function(x){ x$icc})) > 1e-8){
stop("numerical problem with ICC calculation on different scales")
}
if(!any(ng <= 1)){
tmp[[4]] <- est_starbc
}
tmp <- do.call("rbind",lapply(tmp, function(x){
c(sapply(1:(K-1), function(k){ (x$mug - x$cutpoints[k]) / x$sigmag}))
}))
if( max(apply(tmp, 2, sd)) > 1e-8){
stop("numerical problem in translating estimates to different scales")
}
return(list(est_fc = est_fc,
est_zero = est_zero,
est_star = est_star,
est_starbc = est_starbc,
nlmdetails = res,
pstatus = pstatus))
} |
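## Usage sketch (toy counts; all values are illustrative assumptions):
# ngk <- rbind(c(10, 20, 30, 40),
#              c(25, 25, 25, 25),
#              c(40, 30, 20, 10))
# fit <- mle_hetop(ngk, fixedcuts = c(0, 0.5))
# fit$est_star$mug  # group means on the variance-one "star" scale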
`anyReps` <-
function(X) {
  X = as.matrix(X)
  if (dim(X)[1] < 2) return (FALSE)  # a single row cannot contain repeats
  for (i in 1:(dim(X)[1]-1)) {
for (j in (i+1):dim(X)[1]) {
if (identical(X[i,], X[j,])) return (TRUE)
}
}
return (FALSE)
} |
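## Vectorized equivalent (a sketch): duplicated() on a matrix compares rows,
## so any repeated row shows up via anyDuplicated().
# anyRepsFast <- function(X) anyDuplicated(as.matrix(X)) > 0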
alt_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
choice <- array(-1, c(n_subj, t_max))
outcome <- array( 0, c(n_subj, t_max))
blue_punish <- array(0, c(n_subj, t_max))
orange_punish <- array(0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
choice[i, 1:t] <- DT_subj$choice
outcome[i, 1:t] <- DT_subj$outcome
blue_punish[i, 1:t] <- DT_subj$bluepunish
orange_punish[i, 1:t] <- DT_subj$orangepunish
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
choice = choice,
outcome = outcome,
bluePunish = blue_punish,
orangePunish = orange_punish
)
return(data_list)
}
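## These preprocess functions share one convention: per-subject trial data
## are padded out to t_max so Stan receives rectangular arrays, with -1
## marking padded choice entries and 0 marking padded outcomes.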
bandit2arm_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
choice <- array(-1, c(n_subj, t_max))
outcome <- array( 0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
choice[i, 1:t] <- DT_subj$choice
outcome[i, 1:t] <- DT_subj$outcome
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
choice = choice,
outcome = outcome
)
return(data_list)
}
bandit4arm2_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
choice <- array(-1, c(n_subj, t_max))
outcome <- array( 0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
choice[i, 1:t] <- DT_subj$choice
outcome[i, 1:t] <- DT_subj$outcome
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
choice = choice,
outcome = outcome
)
return(data_list)
}
bandit4arm_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
rew <- array( 0, c(n_subj, t_max))
los <- array( 0, c(n_subj, t_max))
choice <- array(-1, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
rew[i, 1:t] <- DT_subj$gain
los[i, 1:t] <- -1 * abs(DT_subj$loss)
choice[i, 1:t] <- DT_subj$choice
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
rew = rew,
los = los,
choice = choice
)
return(data_list)
}
bart_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
pumps <- array(0, c(n_subj, t_max))
explosion <- array(0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
pumps[i, 1:t] <- DT_subj$pumps
explosion[i, 1:t] <- DT_subj$explosion
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
P = max(pumps) + 1,
pumps = pumps,
explosion = explosion
)
return(data_list)
}
choiceRT_preprocess_func <- function(raw_data, general_info, RTbound = 0.1) {
raw_data <- as.data.frame(raw_data)
subjs <- general_info$subjs
n_subj <- general_info$n_subj
Nu <- with(raw_data, aggregate(choice == 2, by = list(y = subjid), FUN = sum)[["x"]])
Nl <- with(raw_data, aggregate(choice == 1, by = list(y = subjid), FUN = sum)[["x"]])
RTu <- array(-1, c(n_subj, max(Nu)))
RTl <- array(-1, c(n_subj, max(Nl)))
for (i in 1:n_subj) {
subj <- subjs[i]
subj_data <- subset(raw_data, raw_data$subjid == subj)
if (Nu[i] > 0)
RTu[i, 1:Nu[i]] <- subj_data$rt[subj_data$choice == 2]
if (Nl[i] > 0)
RTl[i, 1:Nl[i]] <- subj_data$rt[subj_data$choice == 1]
}
minRT <- with(raw_data, aggregate(rt, by = list(y = subjid), FUN = min)[["x"]])
data_list <- list(
N = n_subj,
Nu_max = max(Nu),
Nl_max = max(Nl),
Nu = Nu,
Nl = Nl,
RTu = RTu,
RTl = RTl,
minRT = minRT,
RTbound = RTbound
)
return(data_list)
}
choiceRT_single_preprocess_func <- function(raw_data, general_info, RTbound = 0.1) {
DT_upper <- raw_data[raw_data$choice == 2]
DT_lower <- raw_data[raw_data$choice == 1]
data_list <- list(
Nu = nrow(DT_upper),
Nl = nrow(DT_lower),
RTu = DT_upper$rt,
RTl = DT_lower$rt,
minRT = min(raw_data$rt),
RTbound = RTbound
)
return(data_list)
}
cra_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
choice <- array(0, c(n_subj, t_max))
prob <- array(0, c(n_subj, t_max))
ambig <- array(0, c(n_subj, t_max))
reward_var <- array(0, c(n_subj, t_max))
reward_fix <- array(0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
choice[i, 1:t] <- DT_subj$choice
prob[i, 1:t] <- DT_subj$prob
ambig[i, 1:t] <- DT_subj$ambig
reward_var[i, 1:t] <- DT_subj$rewardvar
reward_fix[i, 1:t] <- DT_subj$rewardfix
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
choice = choice,
prob = prob,
ambig = ambig,
reward_var = reward_var,
reward_fix = reward_fix
)
return(data_list)
}
dbdm_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
opt1hprob <- array( 0, c(n_subj, t_max))
opt2hprob <- array( 0, c(n_subj, t_max))
opt1hval <- array( 0, c(n_subj, t_max))
opt1lval <- array( 0, c(n_subj, t_max))
opt2hval <- array( 0, c(n_subj, t_max))
opt2lval <- array( 0, c(n_subj, t_max))
choice <- array(-1, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
opt1hprob[i, 1:t] <- DT_subj$opt1hprob
opt2hprob[i, 1:t] <- DT_subj$opt2hprob
opt1hval[i, 1:t] <- DT_subj$opt1hval
opt1lval[i, 1:t] <- DT_subj$opt1lval
opt2hval[i, 1:t] <- DT_subj$opt2hval
opt2lval[i, 1:t] <- DT_subj$opt2lval
choice[i, 1:t] <- DT_subj$choice
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
opt1hprob = opt1hprob,
opt2hprob = opt2hprob,
opt1hval = opt1hval,
opt1lval = opt1lval,
opt2hval = opt2hval,
opt2lval = opt2lval,
choice = choice
)
return(data_list)
}
dd_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
delay_later <- array( 0, c(n_subj, t_max))
amount_later <- array( 0, c(n_subj, t_max))
delay_sooner <- array( 0, c(n_subj, t_max))
amount_sooner <- array( 0, c(n_subj, t_max))
choice <- array(-1, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
delay_later[i, 1:t] <- DT_subj$delaylater
amount_later[i, 1:t] <- DT_subj$amountlater
delay_sooner[i, 1:t] <- DT_subj$delaysooner
amount_sooner[i, 1:t] <- DT_subj$amountsooner
choice[i, 1:t] <- DT_subj$choice
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
delay_later = delay_later,
amount_later = amount_later,
delay_sooner = delay_sooner,
amount_sooner = amount_sooner,
choice = choice
)
return(data_list)
}
dd_single_preprocess_func <- function(raw_data, general_info) {
t_subjs <- general_info$t_subjs
delay_later <- raw_data$delaylater
amount_later <- raw_data$amountlater
delay_sooner <- raw_data$delaysooner
amount_sooner <- raw_data$amountsooner
choice <- raw_data$choice
data_list <- list(
Tsubj = t_subjs,
delay_later = delay_later,
amount_later = amount_later,
delay_sooner = delay_sooner,
amount_sooner = amount_sooner,
choice = choice
)
return(data_list)
}
gng_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
cue <- array( 1, c(n_subj, t_max))
pressed <- array(-1, c(n_subj, t_max))
outcome <- array( 0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
cue[i, 1:t] <- DT_subj$cue
pressed[i, 1:t] <- DT_subj$keypressed
outcome[i, 1:t] <- DT_subj$outcome
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
cue = cue,
pressed = pressed,
outcome = outcome
)
return(data_list)
}
igt_preprocess_func <- function(raw_data, general_info, payscale = 100) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
Ydata <- array(-1, c(n_subj, t_max))
RLmatrix <- array( 0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
Ydata[i, 1:t] <- DT_subj$choice
RLmatrix[i, 1:t] <- DT_subj$gain - abs(DT_subj$loss)
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
choice = Ydata,
outcome = RLmatrix / payscale,
sign_out = sign(RLmatrix)
)
return(data_list)
}
peer_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
condition <- array( 0, c(n_subj, t_max))
p_gamble <- array( 0, c(n_subj, t_max))
safe_Hpayoff <- array( 0, c(n_subj, t_max))
safe_Lpayoff <- array( 0, c(n_subj, t_max))
risky_Hpayoff <- array( 0, c(n_subj, t_max))
risky_Lpayoff <- array( 0, c(n_subj, t_max))
choice <- array(-1, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
condition[i, 1:t] <- DT_subj$condition
p_gamble[i, 1:t] <- DT_subj$pgamble
safe_Hpayoff[i, 1:t] <- DT_subj$safehpayoff
safe_Lpayoff[i, 1:t] <- DT_subj$safelpayoff
risky_Hpayoff[i, 1:t] <- DT_subj$riskyhpayoff
risky_Lpayoff[i, 1:t] <- DT_subj$riskylpayoff
choice[i, 1:t] <- DT_subj$choice
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
condition = condition,
p_gamble = p_gamble,
safe_Hpayoff = safe_Hpayoff,
safe_Lpayoff = safe_Lpayoff,
risky_Hpayoff = risky_Hpayoff,
risky_Lpayoff = risky_Lpayoff,
choice = choice
)
return(data_list)
}
prl_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
choice <- array(-1, c(n_subj, t_max))
outcome <- array( 0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
choice[i, 1:t] <- DT_subj$choice
outcome[i, 1:t] <- sign(DT_subj$outcome)
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
choice = choice,
outcome = outcome
)
return(data_list)
}
prl_multipleB_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
b_subjs <- general_info$b_subjs
b_max <- general_info$b_max
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
choice <- array(-1, c(n_subj, b_max, t_max))
outcome <- array( 0, c(n_subj, b_max, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
blocks_of_subj <- unique(DT_subj$block)
for (b in 1:b_subjs[i]) {
curr_block <- blocks_of_subj[b]
DT_curr_block <- DT_subj[DT_subj$block == curr_block]
t <- t_subjs[i, b]
choice[i, b, 1:t] <- DT_curr_block$choice
outcome[i, b, 1:t] <- sign(DT_curr_block$outcome)
}
}
data_list <- list(
N = n_subj,
B = b_max,
Bsubj = b_subjs,
T = t_max,
Tsubj = t_subjs,
choice = choice,
outcome = outcome
)
return(data_list)
}
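# Note (illustrative, not package code): prl_multipleB extends the padding
# pattern to three dimensions, subject x block x trial. Each subject's blocks
# are filled one ragged slice at a time:
#   choice[i, b, 1:t] <- DT_curr_block$choice
# so Bsubj and Tsubj let the Stan model skip the padded cells.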
pst_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
option1 <- array(-1, c(n_subj, t_max))
option2 <- array(-1, c(n_subj, t_max))
choice <- array(-1, c(n_subj, t_max))
reward <- array(-1, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
option1[i, 1:t] <- DT_subj$type %/% 10
option2[i, 1:t] <- DT_subj$type %% 10
choice[i, 1:t] <- DT_subj$choice
reward[i, 1:t] <- DT_subj$reward
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
option1 = option1,
option2 = option2,
choice = choice,
reward = reward
)
return(data_list)
}
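# The two-digit `type` column in the PST data encodes both stimuli of a pair:
# the tens digit is option 1 and the ones digit is option 2, hence the integer
# division and modulo above. Quick check with an illustrative value:
# 12 %/% 10   # => 1 (option 1)
# 12 %%  10   # => 2 (option 2)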
ra_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
gain <- array( 0, c(n_subj, t_max))
loss <- array( 0, c(n_subj, t_max))
cert <- array( 0, c(n_subj, t_max))
gamble <- array(-1, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
gain[i, 1:t] <- DT_subj$gain
loss[i, 1:t] <- abs(DT_subj$loss)
cert[i, 1:t] <- DT_subj$cert
gamble[i, 1:t] <- DT_subj$gamble
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
gain = gain,
loss = loss,
cert = cert,
gamble = gamble
)
return(data_list)
}
rdt_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
gain <- array( 0, c(n_subj, t_max))
loss <- array( 0, c(n_subj, t_max))
cert <- array( 0, c(n_subj, t_max))
type <- array(-1, c(n_subj, t_max))
gamble <- array(-1, c(n_subj, t_max))
outcome <- array( 0, c(n_subj, t_max))
happy <- array( 0, c(n_subj, t_max))
RT_happy <- array( 0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
gain[i, 1:t] <- DT_subj$gain
loss[i, 1:t] <- abs(DT_subj$loss)
cert[i, 1:t] <- DT_subj$cert
type[i, 1:t] <- DT_subj$type
gamble[i, 1:t] <- DT_subj$gamble
outcome[i, 1:t] <- DT_subj$outcome
happy[i, 1:t] <- DT_subj$happy
RT_happy[i, 1:t] <- DT_subj$rthappy
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
gain = gain,
loss = loss,
cert = cert,
type = type,
gamble = gamble,
outcome = outcome,
happy = happy,
RT_happy = RT_happy
)
return(data_list)
}
task2AFC_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
signal <- array(0, c(n_subj))
noise <- array(0, c(n_subj))
h <- array(0, c(n_subj))
f <- array(0, c(n_subj))
for (i in 1:n_subj) {
subj <- subjs[i]
subj_data <- subset(raw_data, raw_data$subjid == subj)
    signal[i] <- nrow(subset(subj_data, subj_data$stimulus == 1))
    noise[i] <- nrow(subset(subj_data, subj_data$stimulus == 0))
    h[i] <- nrow(subset(subj_data, subj_data$stimulus == 1 & subj_data$response == 1))
    f[i] <- nrow(subset(subj_data, subj_data$stimulus == 0 & subj_data$response == 1))
}
data_list <- list(
N = n_subj,
signal = signal,
noise = noise,
h = h,
f = f
)
return(data_list)
}
ts_preprocess_func <- function(raw_data, general_info, trans_prob = 0.7) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
level1_choice <- array(1, c(n_subj, t_max))
level2_choice <- array(1, c(n_subj, t_max))
reward <- array(0, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
level1_choice[i, 1:t] <- DT_subj$level1choice
level2_choice[i, 1:t] <- DT_subj$level2choice
reward[i, 1:t] <- DT_subj$reward
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
level1_choice = level1_choice,
level2_choice = level2_choice,
reward = reward,
trans_prob = trans_prob
)
return(data_list)
}
ug_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
offer <- array( 0, c(n_subj, t_max))
accept <- array(-1, c(n_subj, t_max))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
offer[i, 1:t] <- DT_subj$offer
accept[i, 1:t] <- DT_subj$accept
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
offer = offer,
accept = accept
)
return(data_list)
}
wcs_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
 t_max <- 128  # the WCST answer sheet below is fixed at 128 trials
answersheet <- system.file("extdata", "wcs_answersheet.txt", package = "hBayesDM")
answer <- read.table(answersheet, header = TRUE)
choice <- array( 0, c(n_subj, 4, t_max))
outcome <- array(-1, c(n_subj, t_max))
choice_match_att <- array( 0, c(n_subj, t_max, 1, 3))
deck_match_rule <- array( 0, c(t_max, 3, 4))
for (i in 1:n_subj) {
subj <- subjs[i]
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subj]
DT_subj_choice <- DT_subj$choice
DT_subj_outcome <- DT_subj$outcome
for (tr in 1:t) {
ch <- DT_subj_choice[tr]
ou <- DT_subj_outcome[tr]
choice[i, ch, tr] <- 1
outcome[i, tr] <- ou
choice_match_att[i, tr, 1, ] <- answer[, tr] == ch
}
}
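 # The answer sheet `answer` has one row per sorting rule (three rules,
 # conventionally color/form/number) and one column per trial; the loop below
 # records, for each trial and rule, which of the four decks would be the
 # correct match under that rule.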
for (tr in 1:t_max) {
for (ru in 1:3) {
deck_match_rule[tr, ru, answer[ru, tr]] <- 1
}
}
data_list <- list(
N = n_subj,
T = t_max,
Tsubj = t_subjs,
choice = choice,
outcome = outcome,
choice_match_att = choice_match_att,
deck_match_rule = deck_match_rule
)
return(data_list)
}
cgt_preprocess_func <- function(raw_data, general_info) {
subjs <- general_info$subjs
n_subj <- general_info$n_subj
t_subjs <- general_info$t_subjs
t_max <- general_info$t_max
uniq_bet <- unique(raw_data$percentagestaked)
n_bets <- length(uniq_bet)
bets_asc <- sort(uniq_bet / 100)
 bets_dsc <- sort(uniq_bet / 100, decreasing = TRUE)
bet_delay <- (1:n_bets - 1) / 4
bet_time <- raw_data$percentagestaked / 100
for (b in 1:n_bets) {
bet_time[bet_time == bets_asc[b]] <- b
}
raw_data$bet_time <- ifelse(raw_data$gambletype == 0,
n_bets + 1 - bet_time,
bet_time)
col_chosen <- array(0, c(n_subj, t_max))
bet_chosen <- array(0, c(n_subj, t_max))
prop_red <- array(0, c(n_subj, t_max))
prop_chosen <- array(0, c(n_subj, t_max))
gain <- array(0, c(n_subj, t_max, n_bets))
loss <- array(0, c(n_subj, t_max, n_bets))
for (i in 1:n_subj) {
t <- t_subjs[i]
DT_subj <- raw_data[raw_data$subjid == subjs[i]]
col_chosen [i, 1:t] <- ifelse(DT_subj$redchosen == 1, 1, 2)
bet_chosen [i, 1:t] <- DT_subj$bet_time
prop_red [i, 1:t] <- DT_subj$nredboxes / 10
prop_chosen[i, 1:t] <- ifelse(DT_subj$redchosen == 1,
prop_red[i, 1:t],
1 - prop_red[i, 1:t])
for (b in 1:n_bets) {
gain[i, 1:t, b] <- with(DT_subj, trialinitialpoints / 100 + trialinitialpoints / 100 * ifelse(gambletype == 1, bets_asc[b], bets_dsc[b]))
loss[i, 1:t, b] <- with(DT_subj, trialinitialpoints / 100 - trialinitialpoints / 100 * ifelse(gambletype == 1, bets_asc[b], bets_dsc[b]))
}
}
data_list <- list(
N = n_subj,
T = t_max,
B = n_bets,
Tsubj = t_subjs,
bet_delay = bet_delay,
gain = gain,
loss = loss,
prop_red = prop_red,
prop_chosen = prop_chosen,
col_chosen = col_chosen,
bet_chosen = bet_chosen
)
return(data_list)
} |
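# Sketch of the bet-index mapping above (illustrative, not package code):
# each percentage staked is replaced by its rank among the ascending unique
# stakes, and for gambletype == 0 the rank is flipped to n_bets + 1 - rank,
# so `bet_chosen` always indexes into the same bet ladder that `bet_delay`
# penalizes. For example, with stakes of 5/25/50/75/95 percent, a 25% stake
# maps to index 2 when gambletype == 1 and to index 4 when gambletype == 0.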
povcalnet_cl <- function(country,
povline,
year,
coverage = NULL,
ppp = NULL,
url = "http://iresearch.worldbank.org",
format = "csv") {
query <- build_query_string_cl(
country = country,
povline = povline,
year = year,
coverage_type = coverage,
ppp = ppp,
format = format
)
url <- httr::modify_url(url, path = api_handle(), query = query)
res <- httr::GET(url = url)
res <- httr::content(res, as = "text", encoding = "UTF-8" )
if (format == "json") {
out <- tibble::as_tibble(jsonlite::fromJSON(res, simplifyDataFrame = TRUE))
out <- out$PovResult
} else {
out <- readr::read_csv(res)
}
out <- format_data(out,
coverage = coverage,
aggregate = FALSE)
return(out)
} |
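# Hedged usage sketch: the country code, poverty line, and year below are
# illustrative only, and the iresearch.worldbank.org PovcalNet endpoint may
# since have been retired in favor of the PIP API, so this shows the
# interface rather than a guaranteed-live call.
# df <- povcalnet_cl(country = "ALB", povline = 1.9, year = 2002)
# head(df)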
threshold_slope <- function( pfit, xfit, thresh = 0.5 ) {
if( missing("pfit") || missing("xfit") ) {
stop("Check input. First 2 arguments are mandatory");
}
if( !is.double( thresh ) || length( thresh ) > 1 ) {
stop( "Threshold level must be scalar" );
}
if( ( thresh < 0 ) || (thresh > 1 )) {
stop( "Threshold level must be betwen 0 and 1" );
}
  if ( length( pfit ) != length( xfit ) ) {
    stop( 'Length of fitted values pfit must be the same as length of xfit' );
  }
value <- NULL;
value$x_th <- xfit[ which( abs( pfit - thresh ) == min( abs( pfit - thresh ) ) ) ];
if( length( value$x_th ) > 1 ) {
value$slope <- 0;
value$x_th <- mean( value$x_th );
}
else {
ind <- which( xfit == value$x_th );
value$slope <- ( pfit[pmin( ind + 1, length( pfit ) )] -
pfit[pmax( ind - 1, 1 )] ) /
( xfit[pmin( ind + 1, length( xfit ) )] -
xfit[pmax( ind - 1, 1 )] );
}
return( value );
} |
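# Usage sketch on a synthetic logistic psychometric function (assumed inputs;
# any monotone fit evaluated on a stimulus grid works the same way):
xfit_demo <- seq(-3, 3, length.out = 201)
pfit_demo <- 1 / (1 + exp(-2 * xfit_demo))
res_demo <- threshold_slope(pfit_demo, xfit_demo, thresh = 0.5)
# res_demo$x_th is ~0 and res_demo$slope approximates the true derivative 0.5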
ivdesc <- function(X,D,Z, variance=FALSE, boot=TRUE, bootn=1000, balance=TRUE, ...){
if(!is.numeric(D)) stop("D has to be numeric with values c(0,1,NA).")
if(!is.numeric(Z)) stop("Z has to be numeric with values c(0,1,NA).")
if( sum(D %in% c(0,1,NA))!=length(D) ) stop("D can only contain values c(0,1,NA).")
if( sum(Z %in% c(0,1,NA))!=length(Z) ) stop("Z can only contain values c(0,1,NA).")
if( length(D)!=length(Z) ) stop("D has to be of the same length of Z.")
if( length(X)!=length(Z) ) stop("X has to be of the same length of Z.")
if( length(X)!=length(D) ) stop("X has to be of the same length of D.")
  if( boot==TRUE & bootn<2 ) stop("bootn has to be larger than 2.")
if (!is.numeric(X)){
X <- as.numeric(X)
warning("X coerced to numeric.")
}
nomiss <- !is.na(X) & !is.na(D) & !is.na(Z)
X <- X[nomiss]
D <- D[nomiss]
Z <- Z[nomiss]
if(boot==FALSE) { boot <- 0 }
else { boot <- bootn }
if( (mean(D[Z==1]==1)-mean(D[Z==0]==1))<0 ) stop("First-stage is negative. Please reverse coding of Z.")
if( sum(D==Z)==length(D) ) stop("There is full compliance with the instrument (D=Z).")
res <- ivdesc_all(X,D,Z,boot=boot,variance=variance,...)
if( balance ){
bal <- t.test(X ~ Z, var.equal=FALSE)
attr(res, "balance_pval") <- bal$p.value
}
class(res) <- c('ivdesc', 'data.frame')
return(res)
}
print.ivdesc <- function(x,...) {
class(x) <- 'data.frame'
print(kable(x))
pvals <- attr(x, 'pvals')
balance_pval <- attr(x, 'balance_pval')
if( !is.null(pvals) ){
cat("\nBootstrapped p-values:")
print(kable(pvals, col.names=c("group","Pr(T<t)", "Pr(T>t)")))
cat("\n\n")
} else {cat("\n")}
if( !is.null(balance_pval) ){
cat("Balance test: H0: E[X|Z=0]=E[X|Z=1]\n")
cat("Pr(|T| > |t|) = ", format(attr(x,'balance_pval'),digits=3), "\n\n")
}
invisible(x)
} |
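# Hedged usage sketch with simulated one-sided noncompliance (values are
# illustrative; ivdesc() also needs the internal ivdesc_all() helper and
# knitr::kable() for printing, which are defined elsewhere in the package):
# set.seed(1)
# Z <- rbinom(500, 1, 0.5)
# D <- ifelse(Z == 1, rbinom(500, 1, 0.7), 0)  # never-takers, no always-takers
# X <- rnorm(500, mean = 50 + 5 * D)
# ivdesc(X, D, Z, boot = FALSE)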
maxpoints <-
function(td)
{
return(apply(td, 1, max))
} |
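# maxpoints() returns the row-wise maximum of a matrix of tallies, e.g.:
# maxpoints(matrix(1:6, nrow = 2))  # => c(5, 6)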
context("multi-objective: mspot")
test_that("multi-objective mspot works", {
learner = makeLearner("regr.km", nugget.estim = TRUE, predict.type = "se")
ctrl = makeMBOControl(n.objectives = 2L)
ctrl = setMBOControlTermination(ctrl, iters = 5L)
ctrl = setMBOControlInfill(ctrl, crit = crit.ei, opt = "nsga2", opt.nsga2.generations = 1L, opt.nsga2.popsize = 12L)
ctrl = setMBOControlMultiObj(ctrl, method = "mspot")
or = mbo(testf.zdt1.2d, testd.zdt1.2d, learner = learner, control = ctrl)
expect_output(print(or), "Optimization path")
op = as.data.frame(or$opt.path)
k = seq_row(testd.zdt1.2d)
expect_true(all(is.na(op$ei.y_1[k])))
expect_true(all(is.na(op$ei.y_2[k])))
expect_numeric(op$ei.y_1[-k], any.missing = FALSE)
expect_numeric(op$ei.y_2[-k], any.missing = FALSE)
expect_matrix(or$pareto.front, mode = "numeric", any.missing = FALSE)
}) |
proxycheck <- function(ip, ..., api_key = proxycheck_api_key()) {
ip <- ip[1]
if (is.na(iptools::ip_classify(ip))) {
stop("`ip` is not a valid IP address", call.=FALSE)
}
params <- list(...)
param_names <- names(params)
cts <- table(param_names)
cts <- names(cts[cts > 1])
if (length(cts)) {
stop(
"Encountered duplicate API parameters: ",
paste0(sprintf('"%s"', cts), collapse = ", "),
call.=FALSE
)
}
if (!all(names(params) %in% .valid_proxycheck_params)) {
params <- params[param_names %in% .valid_proxycheck_params]
bad_params <- unique(param_names[!(param_names %in% .valid_proxycheck_params)])
warning(
"Ignoring invalid API parameters: ",
paste0(sprintf('"%s"', bad_params), collapse = ", "),
call. = FALSE
)
}
if (length(params[["tag"]])) {
if (!is.character(params[["tag"]])) {
stop("`tag` API parameter must be a text string.", call.=FALSE)
}
}
if (length(params[["days"]])) {
if (!is.integer(params[["days"]])) {
stop("`days` API parameter must be an integer.", call.=FALSE)
}
}
bool_params <- params[c("vpn", "asn", "node", "time", "inf", "risk", "port", "seen")]
bool_params <- Filter(Negate(is.null), bool_params)
if (length(bool_params)) {
notlog <- vapply(bool_params, FUN = is.logical, logical(1), USE.NAMES = TRUE)
notlog <- names(notlog[!notlog])
if (length(notlog)) {
stop(
"Invalid values (must be logical) for: ",
paste0(sprintf('"%s"', notlog), collapse = ", "),
call.=FALSE
)
}
}
for (bp in c("vpn", "asn", "node", "time", "inf", "risk", "port", "seen")) {
if (length(params[[bp]])) params[[bp]] <- as.integer(params[[bp]])
}
params[["key"]] <- api_key
httr::GET(
url = sprintf("https://proxycheck.io/v2/%s", ip),
query = params,
.RIP_UA
) -> res
httr::stop_for_status(res)
out <- httr::content(res, as = "text", encoding = "UTF-8")
out <- jsonlite::fromJSON(out)
out
} |
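# Hedged usage sketch: requires a valid proxycheck.io API key (resolved via
# proxycheck_api_key()); the IP and flags below are illustrative only.
# res <- proxycheck("1.1.1.1", vpn = TRUE, asn = TRUE, risk = TRUE)
# str(res)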
context('Add custom configuration')
test_that('Custom configuration is added to config', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
on.exit(clear(), add = TRUE)
expect_warning(load.project(), NA)
expect_error(add.config(new_config = 'a'), NA)
expect_equal(config$new_config, 'a')
})
test_that('Unnamed added configuration raises an error', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
on.exit(clear(), add = TRUE)
expect_warning(load.project(), NA)
expect_error(add.config('a'), 'All options should be named')
})
test_that('Added configuration is displayed correctly by project.config()', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
on.exit(clear(), add = TRUE)
expect_warning(load.project(), NA)
expect_error(add.config(dummy = 999), NA)
expect_message(project.config(), "Additional custom config present")
expect_message(project.config(), "dummy[ ]+999")
}) |
match_parent <- function(taxa, ref_taxa, n_parents = 5, verbose = FALSE){
df <- tibble::tibble(orig = taxa, matching = taxa)
for(i in seq(1, n_parents)) {
df <- dplyr::mutate(df, matching = ifelse(matching %in% ref_taxa,
matching,
twn_parent(matching)))
}
df <- df %>% dplyr::mutate(matching = ifelse(matching %in% ref_taxa, matching, NA_character_))
if(verbose){
not_matched <- dplyr::filter(df, is.na(matching)) %>% .$orig %>% unique()
    if (length(not_matched) > 0) message(paste("No `matching parent` was found for the following taxa:",
                                               paste(crayon::cyan(not_matched), collapse = " - ")))
}
df$matching
} |
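# Hedged usage sketch: the taxon names below are hypothetical, and twn_parent()
# (from the surrounding package) must be available to climb the TWN hierarchy:
# match_parent(c("Baetis", "Gammarus pulex"),
#              ref_taxa = c("Baetidae", "Gammarus"),
#              n_parents = 3, verbose = TRUE)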