stopifnot(require("glogis"))  # glogis attaches zoo, which provides rollapply()
data("hicps", package = "glogis")
hicps <- hicps[, colnames(hicps) != "EU"]  # drop the EU aggregate series
# mean of a fitted generalized logistic distribution (NA if the window has NAs)
glmean <- function(x) {
  if(any(is.na(x))) NA else glogisfit(x, hessian = FALSE)$moments[1]
}
hicp_glmean <- rollapply(hicps, 12, glmean)  # rolling 12-month window
gray_red <- rgb(c(0.2, 0.8), c(0.2, 0), c(0.2, 0), alpha = 0.3)
gray_red1 <- rgb(c(0.2, 0.8), c(0.2, 0), c(0.2, 0))
plot(hicps, plot.type = "single", lwd = 1.5, col = gray_red[1],
xlab = "Time", ylab = "Monthly inflation rates")
for(i in 1:ncol(hicps)) lines(hicp_glmean[,i], lwd = 1.5, col = gray_red[2])
# ---------------------------------------------------------------------------
set.seed(123)
library(Bchron)
library(testthat)
# Swallow console output; nullfile() is the portable null device
# (the original used "NUL", which only works on Windows)
co <- function(expr) capture.output(expr, file = nullfile())
test_that("Sluggan", {
data(Sluggan)
expect_output(print(Sluggan))
co(run <- with(
Sluggan,
Bchronology(
ages = ages,
ageSds = ageSds,
calCurves = calCurves,
positions = position,
positionThicknesses = thickness,
ids = id,
iterations = 1000,
burn = 200,
thin = 1
)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
p <- plot(run)
expect_s3_class(p, "ggplot")
})
test_that("TestChronData", {
data(TestChronData)
expect_output(print(TestChronData))
co(run <- with(
TestChronData,
Bchronology(
ages = ages,
ageSds = ageSds,
calCurves = calCurves,
positions = position,
positionThicknesses = thickness,
ids = id,
iterations = 1000,
burn = 200,
thin = 1
)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
p <- plot(run)
expect_s3_class(p, "ggplot")
})
test_that("Taphocoenose_Jan20", {
skip_on_ci()
skip_on_cran()
chron_df <-
structure(
list(
sim_time = c(
4750L,
4501L,
4001L,
3501L,
3001L,
2501L,
2001L,
1501L,
1001L,
501L,
1L
),
sim_acc_rate = c(
0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1
),
labID = c(
"id_ 4750",
"id_ 4501",
"id_ 4001",
"id_ 3501",
"id_ 3001",
"id_ 2501",
"id_ 2001",
"id_ 1501",
"id_ 1001",
"id_ 501",
"id_ 1"
),
sim_depth = c(
0L,
249L,
749L,
1249L,
1749L,
2249L,
2749L,
3249L,
3749L,
4249L,
4749L
),
sim_age = c(
0,
249, 749, 1249, 1749, 2249, 2749, 3249, 3749, 4249, 4749
),
sim_age_round = c(
0,
249, 749, 1249, 1749, 2249, 2749, 3249, 3749, 4249, 4749
),
error = c(
10,
47, 62, 57, 70, 59, 64, 59, 57, 72, 69
),
calCurves = c(
"normal",
"normal",
"normal",
"normal",
"normal",
"normal",
"normal",
"normal",
"normal",
"normal",
"normal"
)
),
row.names = c(NA, -11L),
class = c(
"tbl_df",
"tbl", "data.frame"
)
)
co(run <- with(
chron_df,
Bchronology(
ages = sim_age_round,
ageSds = error,
calCurves = calCurves,
positions = sim_depth,
ids = labID,
iterations = 1000,
burn = 200,
thin = 1
)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
p <- plot(run)
expect_s3_class(p, "ggplot")
})
test_that("Kemp_Jan21", {
skip_on_ci()
skip_on_cran()
RC_input <- structure(list(
id = c(
"10373", "10374", "10375", "10376", "10517",
"10516", "10515", "10470", "10518", "10519", "10520", "10522",
"10521", "10523", "10524", "10525", "10526", "10527", "10528",
"10529", "10471", "10472", "10473", "10474", "10476", "10475",
"10477", "10478", "10479", "10480", "10481", "10482", "10483",
"10484", "10485", "10486", "10441", "10442", "10502", "10398",
"10399"
), ages = c(
143, 176, 125, 125, 233, 286, 332, 367, 415,
530, 546, 263, 846, 837, 1039, 1012, 1111, 1243, 1323, 1321,
1508, 1643, 1597, 1653, 1684, 1722, 1782, 1842, 1892, 1944, 1909,
2017, 2168, 2234, 2359, 2422, 2492, 2470, 2481, 2578, 2705
),
ageSds = c(
41, 31, 39, 35, 26, 33, 34, 33, 40, 34, 42, 29,
38, 36, 38, 38, 30, 39, 36, 31, 31, 29, 28, 29, 30, 31, 29,
28, 36, 30, 32, 30, 31, 33, 39, 35, 38, 43, 38, 40, 41
),
position = c(
24, 32, 40, 48, 54, 60, 66, 74, 80, 86, 94,
102, 107, 108, 119, 125, 133, 141, 149, 157, 166, 174, 174,
182, 189, 190, 195, 203, 208, 214, 220, 229, 235, 245, 254,
261, 267, 271, 277, 285, 291
), thickness = c(
4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
), calCurves = c(
"intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20"
)
), class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"), row.names = c(NA, -41L), spec = structure(list(cols = list(
id = structure(list(), class = c("collector_character", "collector")), ages = structure(list(), class = c(
"collector_double",
"collector"
)), ageSds = structure(list(), class = c(
"collector_double",
"collector"
)), position = structure(list(), class = c(
"collector_double",
"collector"
)), thickness = structure(list(), class = c(
"collector_double",
"collector"
)), calCurves = structure(list(), class = c(
"collector_character",
"collector"
))
), default = structure(list(), class = c(
"collector_guess",
"collector"
)), skip = 1L), class = "col_spec"))
new_error <- c(
117, 67, 63, 69, 50, 55, 55, 59, 72, 53, 77, 41, 94, 69, 68,
122, 59, 63, 98, 67, 57, 61, 43, 49, 89, 59, 67, 42, 104, 40,
39, 55, 74, 82, 147, 72, 111, 85, 84, 51, 86
)
set.seed(344)
co(run <- with(
RC_input,
Bchronology(
ages = ages,
ageSds = new_error,
calCurves = calCurves,
positions = position,
positionThicknesses = thickness,
ids = id,
extractDate = -49,
iterations = 1000,
burn = 200,
thin = 1
)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
p <- plot(run)
expect_s3_class(p, "ggplot")
})
test_that("Kemp_Jan21_part2", {
skip_on_ci()
skip_on_cran()
RC_input <- structure(list(
id = c(
"10373", "10374", "10375", "10376", "10517",
"10516", "10515", "10470", "10518", "10519", "10520", "10522",
"10521", "10523", "10524", "10525", "10526", "10527", "10528",
"10529", "10471", "10472", "10473", "10474", "10476", "10475",
"10477", "10478", "10479", "10480", "10481", "10482", "10483",
"10484", "10485", "10486", "10441", "10442", "10502", "10398",
"10399"
), ages = c(
143, 176, 125, 125, 233, 286, 332, 367, 415,
530, 546, 263, 846, 837, 1039, 1012, 1111, 1243, 1323, 1321,
1508, 1643, 1597, 1653, 1684, 1722, 1782, 1842, 1892, 1944, 1909,
2017, 2168, 2234, 2359, 2422, 2492, 2470, 2481, 2578, 2705
),
ageSds = c(
41, 31, 39, 35, 26, 33, 34, 33, 40, 34, 42, 29,
38, 36, 38, 38, 30, 39, 36, 31, 31, 29, 28, 29, 30, 31, 29,
28, 36, 30, 32, 30, 31, 33, 39, 35, 38, 43, 38, 40, 41
),
position = c(
24, 32, 40, 48, 54, 60, 66, 74, 80, 86, 94,
102, 107, 108, 119, 125, 133, 141, 149, 157, 166, 174, 174,
182, 189, 190, 195, 203, 208, 214, 220, 229, 235, 245, 254,
261, 267, 271, 277, 285, 291
), thickness = c(
4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
), calCurves = c(
"intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20"
)
), class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"), row.names = c(NA, -41L), spec = structure(list(cols = list(
id = structure(list(), class = c("collector_character", "collector")), ages = structure(list(), class = c(
"collector_double",
"collector"
)), ageSds = structure(list(), class = c(
"collector_double",
"collector"
)), position = structure(list(), class = c(
"collector_double",
"collector"
)), thickness = structure(list(), class = c(
"collector_double",
"collector"
)), calCurves = structure(list(), class = c(
"collector_character",
"collector"
))
), default = structure(list(), class = c(
"collector_guess",
"collector"
)), skip = 1L), class = "col_spec"))
co(run <- with(
RC_input,
Bchronology(
ages = ages,
ageSds = ageSds,
calCurves = calCurves,
positions = position,
positionThicknesses = thickness,
ids = id,
extractDate = -49,
iterations = 1000,
burn = 200,
thin = 1
)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
p <- plot(run)
expect_s3_class(p, "ggplot")
})
test_that("Gregor_Github17_20210408", {
skip_on_ci()
skip_on_cran()
set.seed(210308)
Bchron_Frame <- structure(list(id = c(
"Co1412 0", "Co1412 51.5", "Co1412 98.5",
"Co1412 168.6", "Co1412 253.5", "Co1412 253.5", "Co1412 258.5",
"Co1412 258.5", "Co1412 279.5", "Co1412 286.5", "Co1412 306",
"Co1412 345.5", "Co1412 386.5", "Co1412 416", "Co1412 465", "Co1412 465",
"Co1412 502.5"
), ages = c(
-67L, 4695L, 9269L, 14592L, 19804L,
27720L, 45423L, 25750L, 31375L, 44198L, 45769L, 32400L, 39299L,
48128L, 49559L, 39810L, 46886L
), ageSds = c(
5L, 167L, 285L, 540L,
1026L, 140L, 1480L, 180L, 238L, 442L, 363L, 220L, 321L, 2304L,
2402L, 410L, 1762L
), position = c(
0, 51.5, 98.5, 168.6, 253.5,
253.5, 258.5, 258.5, 279.5, 286.5, 306, 345.5, 386.5, 416, 465,
465, 502.5
), thickness = c(
0L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L
), calCurves = c(
"normal", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20"
)), class = "data.frame", row.names = c(
NA,
-17L
))
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thickness,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
p <- plot(run)
expect_s3_class(p, "ggplot")
})
test_that("Gregor_Github17_20210408_b", {
skip_on_ci()
skip_on_cran()
set.seed(210308)
Bchron_Frame <- structure(list(id = c(
"PG1975 0", "PG1975 0.25", "PG1975 0.25",
"PG1975 44.75", "PG1975 44.75", "PG1975 90.25", "PG1975 90.25",
"PG1975 134.5", "PG1975 134.5"
), ages = c(
-59L, 2980L, 2980L,
7090L, 6190L, 6240L, 5740L, 9580L, 6790L
), ageSds = c(
5L, 35L,
35L, 50L, 40L, 50L, 40L, 35L, 30L
), position = c(
0, 0.25, 0.25,
44.75, 44.75, 90.25, 90.25, 134.5, 134.5
), thickness = c(
0, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 1, 1
), calCurves = c(
"normal", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20"
)), class = "data.frame", row.names = c(NA, -9L))
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thickness,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
})
test_that("Gregor_Github17_20210408_b", {
skip_on_ci()
skip_on_cran()
set.seed(-1673826857L)
Bchron_Frame <- structure(list(id = c(
"PG1755 0", "PG1755 37.5", "PG1755 57.5",
"PG1755 78.75", "PG1755 108.5", "PG1755 128.5", "PG1755 133",
"PG1755 151", "PG1755 155", "PG1755 168.5", "PG1755 181", "PG1755 194.5",
"PG1755 197", "PG1755 199.5", "PG1755 214", "PG1755 214.5", "PG1755 249.25",
"PG1755 287", "PG1755 291", "PG1755 424", "PG1755 532", "PG1755 599",
"PG1755 707", "PG1755 725", "PG1755 750", "PG1755 770", "PG1755 811",
"PG1755 842", "PG1755 854", "PG1755 899.5", "PG1755 915", "PG1755 934"
), ages = c(
-55L, 3500L, 4429L, 5698L, 8296L, 10150L, 9450L,
18150L, 14339L, 16627L, 33688L, 17999L, 18680L, 18172L, 21490L,
18954L, 19267L, 22960L, 20969L, 25207L, 27220L, 30610L, 30400L,
42400L, 43000L, 41632L, 42121L, 52300L, 41436L, 37949L, 47300L,
36140L
), ageSds = c(
5L, 47L, 47L, 48L, 49L, 50L, 40L, 120L, 54L,
57L, 150L, 59L, 120L, 59L, 110L, 63L, 62L, 230L, 68L, 82L, 200L,
119L, 500L, 375L, 900L, 344L, 359L, 3100L, 335L, 236L, 1700L,
197L
), position = c(
0, 37.5, 57.5, 78.75, 108.5, 128.5, 133,
151, 155, 168.5, 181, 194.5, 197, 199.5, 214, 214.5, 249.25,
287, 291, 424, 532, 599, 707, 725, 750, 770, 811, 842, 854, 899.5,
915, 934
), thickness = c(
0, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 0.5,
0.5, 0.5, 0.5, 1, 0.5, 1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0.5, 1, 0.5,
1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0.5
), calCurves = c(
"normal", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "normal", "intcal20", "intcal20", "intcal20", "intcal20"
)), class = "data.frame", row.names = c(NA, -32L))
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thickness,
ids = Bchron_Frame$id,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
})
test_that("Gregor_Github17_20210408_c", {
skip_on_ci()
skip_on_cran()
set.seed(10407L)
Bchron_Frame <- structure(list(id = c(
"PG1238 0", "PG1238 14", "PG1238 20.8",
"PG1238 200.8", "PG1238 261", "PG1238 307.8", "PG1238 361", "PG1238 613",
"PG1238 714.8", "PG1238 773", "PG1238 773", "PG1238 811.8", "PG1238 841.8",
"PG1238 863", "PG1238 885", "PG1238 926", "PG1238 962.8", "PG1238 965",
"PG1238 994.75", "PG1238 996.8", "PG1238 1005.75"
), ages = c(
-46L,
10922L, 4400L, 4030L, 8189L, 5120L, 9253L, 6020L, 12110L, 11377L,
18434L, 19200L, 20500L, 24170L, 22953L, 27400L, 33400L, 25570L,
34000L, 5300L, 84000L
), ageSds = c(
5L, 153L, 380L, 420L, 354L,
680L, 71L, 100L, 680L, 85L, 118L, 1300L, 910L, 160L, 161L, 220L,
2100L, 220L, 4000L, 3000L, 6000L
), position = c(
0, 14, 20.8,
200.8, 261, 307.8, 361, 613, 714.8, 773, 773, 811.8, 841.8, 863,
885, 926, 962.8, 965, 994.75, 996.8, 1005.75
), thickness = c(
0,
6, 1, 1, 6, 1, 2, 6, 1, 6, 6, 1, 1, 2, 6, 4, 1, 6, 9.5, 1, 10.5
), calCurves = c(
"normal", "intcal20", "normal", "normal", "intcal20",
"normal", "intcal20", "intcal20", "normal", "intcal20", "intcal20",
"normal", "normal", "marine20", "intcal20", "marine20", "normal",
"marine20", "normal", "normal", "normal"
)), class = "data.frame", row.names = c(
NA,
-21L
))
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thickness,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
})
test_that("Gregor_Github17_20210510_a", {
skip_on_ci()
skip_on_cran()
set.seed(-1673826857L)
Bchron_Frame <- structure(list(id = c(
"ESM-1 0", "ESM-1 27.75", "ESM-1 51.75",
"ESM-1 83.75", "ESM-1 114.5", "ESM-1 149.5", "ESM-1 184.5", "ESM-1 209.5",
"ESM-1 244.5", "ESM-1 279.5", "ESM-1 304.5", "ESM-1 329.5", "ESM-1 359.5",
"ESM-1 370.5"
), ages = c(
-56L, 520L, 620L, 905L, 1720L, 2145L,
2500L, 2720L, 3380L, 3755L, 4700L, 6810L, 8810L, 9990L
), ageSds = c(
5L,
30L, 30L, 30L, 35L, 35L, 35L, 35L, 40L, 35L, 40L, 50L, 60L, 60L
), position = c(
0, 27.75, 51.75, 83.75, 114.5, 149.5, 184.5,
209.5, 244.5, 279.5, 304.5, 329.5, 359.5, 370.5
), thickness = c(
0,
0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
), calCurves = c(
"normal",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20"
)), class = "data.frame", row.names = c(NA, -14L))
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thickness,
ids = Bchron_Frame$id,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
})
test_that("Gregor_Github17_20210510_b", {
skip_on_ci()
skip_on_cran()
set.seed(10407L)
Bchron_Frame <- structure(list(id = c(
"PG1975 0", "PG1975 0.25", "PG1975 0.25",
"PG1975 44.75", "PG1975 44.75", "PG1975 90.25", "PG1975 90.25",
"PG1975 134.5", "PG1975 134.5"
), ages = c(
-59L, 2980L, 2980L,
7090L, 6190L, 6240L, 5740L, 9580L, 6790L
), ageSds = c(
5L, 35L,
35L, 50L, 40L, 50L, 40L, 35L, 30L
), position = c(
0, 0.25, 0.25,
44.75, 44.75, 90.25, 90.25, 134.5, 134.5
), thickness = c(
0, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 1, 1
), calCurves = c(
"normal", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20"
)), class = "data.frame", row.names = c(NA, -9L))
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thickness,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
set.seed(-769196902L)
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thickness,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
})
test_that("Barton_Github_20210521", {
skip_on_ci()
skip_on_cran()
set.seed(123)
Bchron_Frame <- structure(list(
X = 2:22, id = c(
"TY-7-5", "TY-1-4", "TY-7-8",
"TY-5/6-3", "TY-7-10", "TY-1-5", "TY-7-11", "TY-1-6", "TY-7-12a",
"TY-7-12b", "TY-7-13", "TY-5/6-6a", "TY-5/6-11", "TY-5/6-6b",
"TY-5/6-12", "TY-7-14", "TY-1-7", "TY-5/6-13", "TY-5/6-14", "TY-7-18a",
"TY-7-18b"
), ages = c(
1885L, 1745L, 2020L, 2010L, 1120L, 1170L,
7910L, 9560L, 8960L, 10060L, 8765L, 10359L, 10250L, 8507L, 10215L,
10375L, 10412L, 10255L, 10355L, 13660L, 13845L
), ageSds = c(
20L,
15L, 20L, 20L, 15L, 20L, 20L, 35L, 30L, 25L, 25L, 41L, 30L, 35L,
30L, 25L, 42L, 35L, 35L, 70L, 35L
), position..cmbd. = c(
85L,
92L, 96L, 105L, 107L, 108L, 111L, 116L, 119L, 119L, 120L, 122L,
123L, 123L, 128L, 130L, 133L, 134L, 143L, 151L, 151L
), position = c(
14L,
21L, 25L, 34L, 36L, 37L, 40L, 45L, 48L, 48L, 49L, 51L, 52L, 52L,
57L, 59L, 62L, 63L, 72L, 80L, 80L
), thicknesses = c(
0L, 0L, 0L,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
0L, 0L
), calCurves = c(
"intcal13", "intcal13", "intcal13", "intcal13",
"intcal13", "intcal13", "intcal13", "intcal13", "intcal13", "intcal13",
"intcal13", "intcal13", "intcal13", "intcal13", "intcal13", "intcal13",
"intcal13", "intcal13", "intcal13", "intcal13", "intcal13"
),
calCurves20 = c(
"intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20", "intcal20", "intcal20", "intcal20",
"intcal20", "intcal20"
), Date = c(
"TY-7-5", "TY-1-4", "TY-7-8",
"TY-5/6-3", "TY-7-10", "TY-1-5", "TY-7-11", "TY-1-6", "TY-7-12a",
"TY-7-12b", "TY-7-13", "TY-5/6-6a", "TY-5/6-11", "TY-5/6-6b",
"TY-5/6-12", "TY-7-14", "TY-1-7", "TY-5/6-13", "TY-5/6-14",
"TY-7-18a", "TY-7-18b"
), OutlierProb = c(
1, 0.013, 0.031,
0.487, 1, 1, 0.008, 1, 0.871, 1, 0.154, 1, 0.677, 1, 0.007,
0.014, 0.009, 0.9, 0.945, 0.073, 0.273
)
), class = "data.frame", row.names = c(
NA,
-21L
))
co(run <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thicknesses,
ids = Bchron_Frame$id,
positionNormalise = FALSE,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
co(run2 <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thicknesses,
ids = Bchron_Frame$id,
positionNormalise = TRUE,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run2, "BchronologyRun")
expect_output(summary(run2, type = "quantiles"))
expect_output(summary(run2, type = "convergence"))
expect_output(summary(run2, type = "outliers"))
expect_output(summary(run2, type = "max_var"))
co(run3 <- Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thicknesses,
ids = Bchron_Frame$id,
positionNormalise = TRUE,
iterations = 100,
burn = 10,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
expect_s3_class(run3, "BchronologyRun")
expect_output(summary(run3, type = "quantiles"))
expect_output(summary(run3, type = "convergence"))
expect_output(summary(run3, type = "outliers"))
expect_output(summary(run3, type = "max_var"))
expect_error(Bchronology(
ages = Bchron_Frame$ages,
ageSds = Bchron_Frame$ageSds,
calCurves = Bchron_Frame$calCurves,
positions = Bchron_Frame$position,
positionThicknesses = Bchron_Frame$thicknesses,
artificialThickness = 0,
positionNormalise = TRUE,
iterations = 100,
burn = 10,
thin = 1,
predictPositions = seq(min(Bchron_Frame$position), max(Bchron_Frame$position), by = 1)
))
})
test_that("Barton_Github_202100604", {
skip_on_ci()
skip_on_cran()
set.seed(123)
Bchron_Frame <- structure(list(level = structure(c(
15L, 14L, 13L, 12L, 12L, 11L,
10L, 10L, 9L, 9L, 9L, 8L, 8L, 8L, 7L, 7L, 6L, 5L, 5L, 4L, 3L,
2L, 17L, 17L, 16L, 1L, 1L, 1L
), .Label = c(
"1", "10", "12", "14",
"15", "16", "17", "19", "20", "23", "24", "27 lower", "27 upper",
"29", "29 top", "4", "8"
), class = "factor"), level.num = c(
29L,
29L, 27L, 27L, 27L, 24L, 23L, 23L, 20L, 20L, 20L, 19L, 19L, 19L,
17L, 17L, 16L, 15L, 15L, 14L, 12L, 10L, 8L, 8L, 4L, 1L, 1L, 1L
), thickness = c(
0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.11, 0.03, 0.03, 0.03, 0.03,
0.03, 0.06, 0.06, 0.06, 0.3, 0.3, 0.3
), depth = c(
0.28, 0.28,
0.58, 0.58, 0.58, 0.74, 0.84, 0.84, 1.14, 1.14, 1.14, 1.24, 1.24,
1.24, 1.44, 1.44, 1.55, 1.58, 1.58, 1.61, 1.67, 1.73, 1.82, 1.82,
2.06, 2.62, 2.62, 2.62
), C14mean = c(
6500L, 8650L, 10630L, 12270L,
14760L, 10890L, 10340L, 12620L, 12360L, 9090L, 17160L, 15230L,
15520L, 16420L, 16900L, 17070L, 18200L, 15600L, 17225L, 15690L,
17210L, 19820L, 15860L, 20690L, 20970L, 19620L, 20360L, 20860L
), C14SD = c(
200L, 300L, 120L, 400L, 400L, 430L, 560L, 300L,
670L, 570L, 440L, 300L, 350L, 430L, 200L, 230L, 610L, 570L, 350L,
310L, 350L, 390L, 330L, 810L, 620L, 390L, 450L, 410L
), calib.curve = structure(c(
1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L
), .Label = "intcal13", class = "factor")), class = "data.frame", row.names = c(
NA,
-28L
))
co(run <- Bchronology(
ages = Bchron_Frame$C14mean,
ageSds = Bchron_Frame$C14SD,
calCurves = Bchron_Frame$calib.curve,
positions = Bchron_Frame$depth,
positionThicknesses = Bchron_Frame$thickness,
positionNormalise = FALSE,
iterations = 1500,
burn = 500,
thin = 1,
predictPositions = seq(min(Bchron_Frame$depth), max(Bchron_Frame$depth), length.out = 100)
))
expect_s3_class(run, "BchronologyRun")
expect_output(summary(run, type = "quantiles"))
expect_output(summary(run, type = "convergence"))
expect_output(summary(run, type = "outliers"))
expect_output(summary(run, type = "max_var"))
})
# ---------------------------------------------------------------------------
MoorePenrose <- function(X, tol = sqrt(.Machine$double.eps)) {
  # Moore-Penrose pseudoinverse via base svd() (the original called a
  # non-existent SVD()); singular values below tol * largest are treated as
  # zero so rank-deficient X is handled.
  s <- svd(X)
  keep <- s$d > tol * s$d[1]
  s$v[, keep, drop = FALSE] %*%
    diag(1 / s$d[keep], nrow = sum(keep)) %*%
    t(s$u[, keep, drop = FALSE])
}
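# Quick check (illustrative, not from the original source): a Moore-Penrose
# pseudoinverse should satisfy X %*% Xp %*% X ~= X, even for rank-deficient X.
X <- cbind(1:4, (1:4) * 2)        # rank-1 4x2 matrix
Xp <- MoorePenrose(X)
max(abs(X %*% Xp %*% X - X))      # ~ 0 up to numerical tolerance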
# ---------------------------------------------------------------------------
skip_on_cran()
ds_routes <- c(
"/dataset/{key}",
"/dataset/{key}/assembly",
"/dataset/{key}/source",
"/dataset/{key}/source/{key}/metrics",
"/dataset/{key}/decision",
"/dataset/{key}/estimate/{id}",
"/dataset/{key}/estimate",
"/dataset/{key}/name/orphans",
"/dataset/{key}/name/{id}/relations",
"/dataset/{key}/name/{id}/group",
"/dataset/{key}/name/{id}/synonyms",
"/dataset/{key}/name/{id}/types",
"/dataset/{key}/name/{id}",
"/dataset/{key}/name",
"/dataset/{key}/nameusage/{id}",
"/dataset/{key}/reference/orphans",
"/dataset/{key}/reference",
"/dataset/{key}/reference/{id}",
"/dataset/{key}/sector",
"/dataset/{key}/sector/{id}/sync",
"/dataset/{key}/sector/sync",
"/dataset/{key}/sector/{id}",
"/dataset/{key}/synonym/{id}",
"/dataset/{key}/synonym",
"/dataset/{key}/taxon/{id}/children",
"/dataset/{key}/taxon/{id}/synonyms",
"/dataset/{key}/taxon/{id}/classification",
"/dataset/{key}/taxon/{id}",
"/dataset/{key}/taxon",
"/dataset/{key}/tree/{id}/children",
"/dataset/{key}/tree/{id}",
"/dataset/{key}/tree",
"/dataset/{key}/verbatim"
)
test_that("cp_ds_all_routes", {
skip_on_ci()
estimate <- list(datasetKey=3, key=3, id=68)
namez <- list(datasetKey=3, key=3, id=0)
taxon <- latest <- namez
nameusage <- list(datasetKey=3, key=3, id=0, q="poa")
ref <- list(datasetKey=3, key=3, id='000007c7-d8dc-4c8c-9380-f405e846b07d')
sector <- list(datasetKey=3, key=3, id=1131)
syn <- list(datasetKey=3, key=3, id='0000008d-30e8-489b-b3ed-49f82c98ce27')
params <- list(key=1005, datasetKey=3, key=3, id=230686, attempt=10, q="poa", id='00000486-5f68-499f-bab6-403eaea47339')
for (i in seq_along(ds_routes)) {
route <- sub("/dataset/", "", ds_routes[i])
pars <- if (grepl("latest", route)) latest else params
pars <- if (grepl("estimate", route)) estimate else params
pars <- if (grepl("name/|name$", route)) namez else pars
pars <- if (grepl("nameusage", route)) nameusage else pars
pars <- if (grepl("reference", route)) ref else pars
pars <- if (grepl("sector", route)) sector else pars
pars <- if (grepl("synonym$|synonym/", route)) syn else pars
pars <- if (grepl("taxon$|taxon/|tree", route)) taxon else pars
cp_ds(route, .list = pars)
}
})
test_that("cp_ds", {
vcr::use_cassette("cp_ds", {
x <- cp_ds("{key}/name", key = "1014")
})
expect_is(x, "list")
expect_is(x$offset, "integer")
expect_is(x$limit, "integer")
expect_is(x$total, "integer")
expect_is(x$last, "logical")
expect_is(x$result, "data.frame")
})
test_that("cp_ds fails well", {
expect_error(cp_ds(), class = "error")
expect_error(cp_ds("{key}/namesdiff"), class = "error")
expect_error(cp_ds("{key}/namesdiff", foo = "bar"), class = "error")
expect_error(cp_ds("{key}/logo"), "logo route not supported")
vcr::use_cassette("cp_ds_tree_does_not_exist", {
expect_error(cp_ds("{key}/tree", key = 2), "does not exist")
})
})
# ---------------------------------------------------------------------------
# Helpers for carrying R's `break`/`next` out of translated loops (TensorFlow
# graph mode): build a classed error condition recording the loop state at the
# point of escape. They rely on package internals (control_flow_registries,
# Stack(), drop_empty()) defined elsewhere.
uncaught_loop_control_flow_condition <-
function(type, env) {
registry <- control_flow_registries$peek()
structure(
class = c(type, "uncaught_loop_control_flow", "error", "condition"),
drop_empty(
list(
message = "",
call = sys.call(-1),
is_break = if (registry$can_break)
type == "break",
loop_vars = mget(registry$loop_vars, envir = env, inherits = TRUE),
reduced_conds = reduce_registered_conds(),
env = env
)
)
)
}
ag_break <- function() {
env <- parent.frame()
tryCatch(
eval(quote(.Primitive("break")()), env),
error = function(e) {
try_register_or_signal_error_with_restart(
uncaught_loop_control_flow_condition("break", env))
do_return(env)
}
)
}
ag_next <- function() {
env <- parent.frame()
tryCatch(
eval(quote(.Primitive("next")()), env),
error = function(e) {
try_register_or_signal_error_with_restart(
uncaught_loop_control_flow_condition("next", env))
do_return(env)
}
)
}
compact_lcf <- function(x)
drop_empty(unclass(x)[c("loop_vars", "reduced_conds", "is_break")])
dummy_compact_lcf <- function(env) {
registry <- control_flow_registries$peek()
drop_empty(list(
loop_vars = mget(registry$loop_vars, envir = env, inherits = TRUE),
reduced_conds = FALSE,
is_break = if (registry$can_break) FALSE
))
}
expand_lcf <-
function(lcf, msg = "", call = sys.call(-1), env = NULL, type = NULL) {
lcf$message <- msg
lcf$call <- call
lcf$env <- env
structure(lcf, class = c(type, "uncaught_loop_control_flow",
"error", "condition"))
}
can_register_loop_control_flow <- function(lcf) {
if(tf$executing_eagerly())
return(TRUE)
registry <- control_flow_registries$peek()
for (x in unlist(compact_lcf(lcf))) {
if(!is_tensor(x))
next
if(py_has_attr(x, "numpy"))
next
if(x$graph != registry$graph)
return(FALSE)
}
TRUE
}
register_loop_control_flow <- function(lcf) {
registry <- control_flow_registries$peek()
registry$recorded_conditions$push(compact_lcf(lcf))
}
try_register_or_signal_error_with_restart <- function(lcf) {
if (can_register_loop_control_flow(lcf))
register_loop_control_flow(lcf)
else {
withRestarts(
stop(expand_lcf(lcf)),
continue = function() NULL
)
}
}
new_control_flow_registry <-
function(loop_vars, can_break, graph = tf$compat$v1$get_default_graph()) {
registry <- list2env(
list(
loop_vars = as.character(loop_vars),
can_break = can_break,
graph = graph,
recorded_conditions = Stack()
),
parent = emptyenv()
)
registry
}
do_return <- function(env, value = NULL) {
eval(as.call(list(quote(.Primitive("return")), value)), env)
}
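# Minimal sketch (illustrative, not from the original source): do_return()
# evaluates a `return()` call inside the supplied frame, forcing that frame to
# exit with `value`; the other helpers above also need the internal registry.
f <- function() {
  do_return(environment(), 42)
  stop("never reached")
}
f()  # returns 42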
# ---------------------------------------------------------------------------
bucketsort1 <- function(v) {
  # Counting sort for positive values with at most 4 decimal places:
  # scale to integers, count occurrences, then expand the counts back out.
  v2 <- round(v * 10^4)                # scale to integer keys
  v3 <- max(v2)
  counts <- tabulate(v2, nbins = v3)   # occurrences of each key (was the undefined f())
  sorted <- rep(seq_len(v3), counts)   # keys repeated by their counts, in order
  sorted / 10^4                        # undo the scaling
}
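# Example (illustrative): values are assumed positive with at most 4 decimals.
bucketsort1(c(0.3, 0.1, 0.2, 0.1))  # 0.1 0.1 0.2 0.3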
# ---------------------------------------------------------------------------
# Summarize a measure variable by groups: N, mean, sd, standard error, and
# the half-width of the confidence interval (default 95%).
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)
  # length() that can optionally ignore NAs
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N    = length2(xx[[col]], na.rm=na.rm),
                     mean = mean(xx[[col]], na.rm=na.rm),
                     sd   = sd(xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # rename the "mean" column to the measure variable's own name
  datac <- rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N)  # standard error of the mean
  # t-based multiplier for the requested confidence level (per-group df = N - 1)
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
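# Example (illustrative, using a built-in dataset): per-group mean tooth length
# with standard error and 95% confidence half-width.
summarySE(ToothGrowth, measurevar = "len", groupvars = c("supp", "dose"))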
library('ggplot2')
github.user.data <- read.csv('aggregated-top-Spain.csv', sep = ';')
tgithub.user.data <- summarySE(github.user.data, measurevar = "followers", groupvars = c("contributions"))
ggplot(data = tgithub.user.data, aes(x = contributions, y = followers)) + geom_point()
ggsave('province-stacked-chart.png')
# ---------------------------------------------------------------------------
plot.chisq.diag <-
function(x, main, sub, xlab, ylab, qqline = TRUE, ...)
{
if (missing(main))
main <- if (is.null(nclones(x)))
"" else paste("Number of Clones =", nclones(x))
if (missing(sub))
sub <- paste("MS Error =", round(x$statistics$ms.error, 3),
"\tr-squared =", round(x$statistics$r.squared, 3))
if (missing(xlab))
xlab <- "Theoretical Quantiles"
if (missing(ylab))
ylab <- "Empirical Quantiles"
qq <- x$quantiles
qqplot(qq$theoretical, qq$empirical, plot.it=TRUE,
xlab=xlab, ylab=ylab, main=main, sub=sub, ...)
if (qqline) {
  # use local names: the original overwrote the argument `x` here, so
  # invisible(x) returned the theoretical quantiles instead of the input
  yy <- quantile(qq$empirical, c(0.25, 0.75))
  xx <- quantile(qq$theoretical, c(0.25, 0.75))
  slope <- diff(yy)/diff(xx)
  int <- yy[1L] - slope * xx[1L]
  abline(int, slope, lty = 2)
}
invisible(x)
}
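# Minimal sketch (illustrative, not from the original source): exercising the
# QQ diagnostic with a mock object; assumes dclone is installed for nclones().
library(dclone)
mock <- list(
  statistics = list(ms.error = 0.12, r.squared = 0.98),
  quantiles = list(theoretical = qchisq(ppoints(100), df = 3),
                   empirical = rchisq(100, df = 3))
)
plot.chisq.diag(mock)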
# ---------------------------------------------------------------------------
context("resizeImages")
library(imageseg)
library(magick)
wd_can <- system.file("images/canopy/raw",
package = "imageseg")
wd_out_can <- file.path(tempdir(), "canopy", "resized")
resizeImages(imageDir = wd_can,
type = "canopy",
outDir = wd_out_can)
filename_resized <- list.files(wd_out_can, full.names = TRUE)
img_can <- magick::image_read(filename_resized)
wd_us <- system.file("images/understory/raw",
package = "imageseg")
wd_out_us <- file.path(tempdir(), "understory", "resized")
resizeImages(imageDir = wd_us,
type = "understory",
outDir = wd_out_us)
filename_resized <- list.files(wd_out_us, full.names = TRUE)
img_us <- magick::image_read(filename_resized)
test_that("object classes are correct", {
expect_is(wd_can, "character")
expect_is(wd_out_can, "character")
expect_is(wd_out_us, "character")
expect_is(wd_us, "character")
expect_is(img_can, "magick-image")
expect_is(img_us, "magick-image")
})
test_that("correct number of images was extracted", {
expect_true(length(img_can) == 3)
expect_true(length(img_us) == 1)
})
file_can_resized_tmp <- list.files(wd_out_can, full.names = TRUE)
img_can_resized <- imageseg:::imageRead(file_can_resized_tmp)
file_us_resized_tmp <- list.files(wd_out_us, full.names = TRUE)
img_us_resized <- imageseg:::imageRead(file_us_resized_tmp)
test_that("resized images have correct dimensions", {
expect_equal(unique(image_info(img_can_resized)$width), 256)
expect_equal(unique(image_info(img_can_resized)$height), 256)
expect_equal(unique(image_info(img_us_resized)$width), 160)
expect_equal(unique(image_info(img_us_resized)$height), 256)
})
# ---------------------------------------------------------------------------
# SCT-based integration workflow (Seurat): select shared features, prepare the
# SCT assays, find anchors across objects, then integrate.
integ_features <- SelectIntegrationFeatures(object.list = split_seurat,
nfeatures = 3000)
split_seurat <- PrepSCTIntegration(object.list = split_seurat,
anchor.features = integ_features)
integ_anchors <- FindIntegrationAnchors(object.list = split_seurat,
normalization.method = "SCT",
anchor.features = integ_features)
seurat_integrated <- IntegrateData(anchorset = integ_anchors,
normalization.method = "SCT")
# ---------------------------------------------------------------------------
gridboxvar <- function(var, lonGrid, latGrid, infile, outfile, nc34 = 4, overwrite = FALSE, verbose = FALSE, nc = NULL) {
gridboxx_wrapper(7, var, lonGrid, latGrid, infile, outfile, nc34, overwrite, verbose, nc = nc)
}
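# Hypothetical call (file and variable names are placeholders, not from the
# original source); the hard-coded operator id 7 is assumed to select the
# grid-box variance statistic in gridboxx_wrapper:
# gridboxvar("tas", lonGrid = 10, latGrid = 10,
#            infile = "tas_input.nc", outfile = "tas_gridbox_var.nc")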
# ---------------------------------------------------------------------------
gwr.model.selection<-function(DeVar=NULL,InDeVars=NULL, data=list(),bw=NULL,approach="CV",
adaptive=F,kernel="bisquare",dMat=NULL,p=2, theta=0, longlat=F,
parallel.method=F,parallel.arg=NULL)
{
if (is.null(DeVar) || !is.character(DeVar) || is.null(InDeVars) || !is.character(InDeVars))
stop("Input are not correct, please recheck!")
spdf <- data
if (!is.null(data)) {
if (is(data, "Spatial")) {
p4s <- proj4string(data)
dp.locat <- coordinates(data)
data <- as(data, "data.frame")
} else {
if (!is(data, "data.frame"))
stop("Given regression data must be data.frame or Spatial*DataFrame")
}
}
else stop("No regression data frame is avaiable!")
vars.df <- names(data)
dp.n <- nrow(data)
var.n <- length(InDeVars)
InDeVars.Sub <- InDeVars
model.list <- list()
GWR.df <- c()
if (missing(dMat)) {
DM.given <- F
dMat <- matrix(0, 0, 0)
}
else if (is.null(dMat) || !is.matrix(dMat)) {
DM.given<-F
dMat <- matrix(0, 0, 0)
}
else {
DM.given<-T
dim.dMat<-dim(dMat)
if (dim.dMat[1]!=dp.n||dim.dMat[2]!=dp.n)
stop ("Dimensions of dMat are not correct")
}
varsindx.list <- list()
level.vars <- c()
tag <- 1
adapt <- NULL
if (parallel.method == "cluster") {
if (missing(parallel.arg)) {
parallel.arg.n <- max(detectCores() - 4, 2)
parallel.arg <- makeCluster(parallel.arg.n)
} else parallel.arg.n <- length(parallel.arg)
clusterCall(parallel.arg, function() { library(GWmodel) })
}
for (i in 1:var.n) {
AICcs <- c()
for (j in 1:(var.n - i + 1)) {
vars.j <- c(level.vars, InDeVars.Sub[j])
fml <- Generate.formula(DeVar, vars.j)
cat("Now calibrating the model: \n", fml, "\n")
matL <- extract.mat(fml, data)
y <- matL[[1]]
x <- matL[[2]]
if (is.null(bw)) {
part1 <- paste("bw<-bw.gwr(", fml, sep = "")
part2 <- "data=spdf,kernel=kernel,approach=approach,dMat=dMat, parallel.method=parallel.method,parallel.arg=parallel.arg)"
expression <- paste(part1, part2, sep = ",")
print(expression)
eval(parse(text = expression))
} else {
if (adaptive) {
stopifnot(is.numeric(bw))
stopifnot((bw >= 0))
} else {
stopifnot(is.numeric(bw))
stopifnot((bw > min(dMat)))
}
}
betas <- matrix(0, nrow = dp.n, ncol = ncol(x))
s_hat <- numeric(2)
if (parallel.method == FALSE) {
res <- gw_reg_all(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive)
betas <- res$betas
s_hat <- res$s_hat
} else if (parallel.method == "omp") {
if (missing(parallel.arg)) { threads <- 0 } else {
threads <- ifelse(is(parallel.arg, "numeric"), parallel.arg, 0)
}
res <- gw_reg_all_omp(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive, threads)
betas <- res$betas
s_hat <- res$s_hat
} else if (parallel.method == "cuda") {
if (missing(parallel.arg)) { groupl <- 16 } else {
groupl <- ifelse(is(parallel.arg, "numeric"), parallel.arg, 16)
}
res <- gw_reg_all_cuda(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive, groupl)
betas <- res$betas
s_hat <- res$s_hat
} else if (parallel.method == "cluster") {
parallel.arg.results <- clusterApplyLB(parallel.arg, 1:parallel.arg.n, function(group.i, parallel.arg.n, x, y, dp.locat, DM.given, dMat, p, theta, longlat, bw, kernel, adaptive) {
res <- gw_reg_all(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive, parallel.arg.n, group.i)
return(res)
}, parallel.arg.n, x, y, dp.locat, DM.given, dMat, p, theta, longlat, bw, kernel, adaptive)
for (k in 1:parallel.arg.n) {  # k, not i: do not shadow the outer loop index
  res <- parallel.arg.results[[k]]
  betas = betas + res$betas
  s_hat = s_hat + res$s_hat
}
} else {
  # serial fallback: build the hat matrix row by row; S was previously used
  # without being initialised. s_hat is then assembled as c(tr(S), tr(S'S)),
  # matching what the compiled gw_reg_all path returns (an assumption).
  S <- matrix(0, nrow = dp.n, ncol = dp.n)
  for (k in 1:dp.n)
  {
    dist.vk <- dMat[, k]
    W.k <- gw.weight(dist.vk, bw, kernel, adaptive)
    gw.resk <- gw_reg(x, y, W.k, hatmatrix = T, k)
    betas[k, ] <- as.numeric(gw.resk[[1]])
    S[k, ] <- gw.resk[[2]]
  }
  s_hat <- c(sum(diag(S)), sum(S * S))
}
aic.rss <- as.numeric(AICc_rss1(y, x, betas, s_hat))
model.list[[tag]] <- list(fml, vars.j)
GWR.df <- rbind(GWR.df, c(bw, aic.rss[2], aic.rss[3], aic.rss[1]))
AICcs <- c(AICcs, aic.rss[3])
tag <- tag + 1
}
idx <- which.min(AICcs)[1]
level.vars <- c(level.vars, InDeVars.Sub[idx])
InDeVars.Sub <- InDeVars.Sub[-idx]
}
if (parallel.method == "cluster") {
if (missing(parallel.arg)) stopCluster(parallel.arg)
}
res <- list(model.list, GWR.df)
res
}
model.selection.gwr <-function(DeVar=NULL,InDeVars=NULL, data=list(),bw=NULL,approach="CV",
adaptive=F,kernel="bisquare",dMat=NULL,p=2, theta=0, longlat=F,
parallel.method=F,parallel.arg=NULL)
{
if (is.null(DeVar) || !is.character(DeVar) || is.null(InDeVars) || !is.character(InDeVars))
stop("Input are not correct, please recheck!")
spdf <- data
if (!is.null(data)) {
if (is(data, "Spatial")) {
p4s <- proj4string(data)
dp.locat <- coordinates(data)
data <- as(data, "data.frame")
} else {
if (!is(data, "data.frame"))
stop("Given regression data must be data.frame or Spatial*DataFrame")
}
}
else stop("No regression data frame is avaiable!")
vars.df <- names(data)
dp.n <- nrow(data)
var.n <- length(InDeVars)
InDeVars.Sub <- InDeVars
model.list <- list()
GWR.df <- c()
if (missing(dMat)) {
dMat <- matrix(0, 0, 0)
DM.given <- F
}
else if (is.null(dMat) || !is.matrix(dMat)) {
DM.given<-F
dMat <- matrix(0, 0, 0)
}
else {
DM.given<-T
dim.dMat<-dim(dMat)
if (dim.dMat[1]!=dp.n||dim.dMat[2]!=dp.n)
stop ("Dimensions of dMat are not correct")
}
varsindx.list <- list()
level.vars <- c()
tag <- 1
adapt <- NULL
for (i in 1:var.n) {
AICcs <- c()
for (j in 1:(var.n - i + 1)) {
vars.j <- c(level.vars, InDeVars.Sub[j])
fml <- Generate.formula(DeVar, vars.j)
cat("Now calibrating the model: \n", fml, "\n")
matL <- extract.mat(fml, data)
y <- matL[[1]]
x <- matL[[2]]
if (is.null(bw)) {
part1 <- paste("bw<-bw.gwr(", fml, sep = "")
part2 <- "data=spdf,kernel=kernel,approach=approach,dMat=dMat, parallel.method=parallel.method,parallel.arg=parallel.arg)"
expression <- paste(part1, part2, sep = ",")
print(expression)
eval(parse(text = expression))
} else {
if (adaptive) {
stopifnot(is.numeric(bw))
stopifnot((bw >= 0))
} else {
stopifnot(is.numeric(bw))
}
}
betas <- matrix(0, nrow = dp.n, ncol = ncol(x))  # zero-initialised, one column per model term (was NA-filled with ncol = var.n, which breaks the cluster accumulation)
s_hat <- numeric(2)
if (parallel.method == FALSE) {
res <- gw_reg_all(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive)
betas <- res$betas
s_hat <- res$s_hat
} else if (parallel.method == "omp") {
if (missing(parallel.arg)) { threads <- 0 } else {
threads <- ifelse(is(parallel.arg, "numeric"), parallel.arg, 0)
}
res <- gw_reg_all_omp(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive, threads)
betas <- res$betas
s_hat <- res$s_hat
} else if (parallel.method == "cuda") {
if (missing(parallel.arg)) { groupl <- 0 } else {
groupl <- ifelse(is(parallel.arg, "numeric"), parallel.arg, 0)
}
res <- gw_reg_all_cuda(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive, groupl)
betas <- res$betas
s_hat <- res$s_hat
} else if (parallel.method == "cluster") {
if (missing(parallel.arg)) {
parallel.arg.n <- max(detectCores() - 4, 2)
parallel.arg <- makeCluster(parallel.arg.n)
} else parallel.arg.n <- length(parallel.arg)
clusterCall(parallel.arg, function() { library(GWmodel) })
parallel.arg.results <- clusterApplyLB(parallel.arg, 1:parallel.arg.n, function(group.i, parallel.arg.n, x, y, dp.locat, DM.given, dMat, p, theta, longlat, bw, kernel, adaptive) {
res <- gw_reg_all(x, y, dp.locat, FALSE, dp.locat, DM.given, dMat, TRUE, p, theta, longlat, bw, kernel, adaptive, parallel.arg.n, group.i)
return(res)
}, parallel.arg.n, x, y, dp.locat, DM.given, dMat, p, theta, longlat, bw, kernel, adaptive)
for (i in 1:parallel.arg.n) {
res <- parallel.arg.results[[i]]
betas = betas + res$betas
s_hat = s_hat + res$s_hat
}
if (missing(parallel.arg)) stopCluster(parallel.arg)
} else {
  # serial fallback, mirroring the fix above: initialise S and assemble
  # s_hat as c(tr(S), tr(S'S)) (an assumption matching the compiled path)
  S <- matrix(0, nrow = dp.n, ncol = dp.n)
  for (k in 1:dp.n) {
    dist.vk <- dMat[, k]
    W.k <- gw.weight(dist.vk, bw, kernel, adaptive)
    gw.resk <- gw_reg(x, y, W.k, hatmatrix = T, k)
    betas[k, ] <- as.numeric(gw.resk[[1]])
    S[k, ] <- gw.resk[[2]]
  }
  s_hat <- c(sum(diag(S)), sum(S * S))
}
aic.rss <- as.numeric(AICc_rss1(y, x, betas, s_hat))
model.list[[tag]] <- list(fml, vars.j)
GWR.df <- rbind(GWR.df, c(bw, aic.rss[2], aic.rss[3], aic.rss[1]))
AICcs <- c(AICcs, aic.rss[3])
tag <- tag + 1
}
idx <- which.min(AICcs)[1]
level.vars <- c(level.vars, InDeVars.Sub[idx])
InDeVars.Sub <- InDeVars.Sub[-idx]
}
res <- list(model.list, GWR.df)
res
}
extract.mat <- function(formula, data = list())
{
this.call <- match.call()
if (!is.null(data))
{
if (is(data, "Spatial"))
{
data <- as(data, "data.frame")
}
else
{
if (!is(data, "data.frame"))
stop("Given regression data must be data.frame or Spatial*DataFrame")
}
}
else stop("No regression data frame is avaiable!")
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data"), names(mf), 0)
mf <- mf[c(1, m)]
mf$drop.unused.levels <- TRUE
mf[[1]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
mt <- attr(mf, "terms")
y <- model.extract(mf, "response")
x <- model.matrix(mt, mf)
mat.list <- list(y, x)
mat.list
}
gwr.model.view <- function(DeVar, InDeVars, model.list)
{
n <- length(InDeVars)
if (n > 10)
{
cex <- 10 / n
}
else
{
cex <- 1
}
numModels<-length(model.list)
alpha<-2*pi/numModels
cols<-rainbow(n)
pchs<-rep(c(8,9,10,15,16,17,18,23,24),length.out=n)
plot(x=0,y=0,xlim=c(-3*n/4, n+6),ylim=c(-n-1, n+1), cex=2, axes=F, pch=22,xlab="",ylab="",main="View of GWR model selection with different variables")
for (i in 1:numModels)
{
vars<-model.list[[i]][[2]]
nvar<-length(vars)
p1<-c(0,0)
for (j in 1:nvar)
{
radius<-sqrt(n)*sqrt(j)
var.idx<-which(InDeVars==vars[j])
coord<-c(radius*cos((i-1)*alpha),radius*sin((i-1)*alpha))
lines(x=c(p1[1], coord[1]),y=c(p1[2], coord[2]), col="grey",lwd=cex)
points(x=coord[1], y=coord[2], col=cols[var.idx],pch=pchs[var.idx],cex=(cex*i/numModels+0.3))
p1<-coord
}
text(x=(radius+0.5)*cos((i-1)*alpha),y=(radius+0.5)*sin((i-1)*alpha), as.character(i), cex=cex*0.6)
}
legend(x=n+2, y=n/2, col=c("black",cols),pch=c(22,pchs), c(DeVar, InDeVars),box.col="white")
}
model.view.gwr <-function(DeVar, InDeVars, model.list)
{
n<-length(InDeVars)
if (n>10)
{
cex<-10/n
}
else
{
cex<-1
}
numModels<-length(model.list)
alpha<-2*pi/numModels
cols<-rainbow(n)
pchs<-rep(c(8,9,10,15,16,17,18,23,24),length.out=n)
plot(x=0,y=0,xlim=c(-3*n/4, n+6),ylim=c(-n-1, n+1), cex=2, axes=F, pch=22,xlab="",ylab="",main="View of GWR model selection with different variables")
for (i in 1:numModels)
{
vars<-model.list[[i]][[2]]
nvar<-length(vars)
p1<-c(0,0)
for (j in 1:nvar)
{
radius<-sqrt(n)*sqrt(j)
var.idx<-which(InDeVars==vars[j])
coord<-c(radius*cos((i-1)*alpha),radius*sin((i-1)*alpha))
lines(x=c(p1[1], coord[1]),y=c(p1[2], coord[2]), col="grey",lwd=cex)
points(x=coord[1], y=coord[2], col=cols[var.idx],pch=pchs[var.idx],cex=(cex*i/numModels+0.3))
p1<-coord
}
text(x=(radius+0.5)*cos((i-1)*alpha),y=(radius+0.5)*sin((i-1)*alpha), as.character(i), cex=cex*0.6)
}
legend(x=n+2, y=n/2, col=c("black",cols),pch=c(22,pchs), c(DeVar, InDeVars),box.col="white")
}
gwr.model.sort<-function(Sorting.list , numVars, ruler.vector)
{
n<-length(Sorting.list)
numModels<-length(ruler.vector)
indxs<-c()
tag<-0
for (i in numVars:1)
{
tmpV<-ruler.vector[(tag+1):(tag+i)]
indx<-sort(tmpV, decreasing=TRUE, index.return=TRUE)$ix
indxs<-c(indxs, indx+tag)
tag<-tag+i
}
res<-list()
for (i in 1:n)
{
list.i<-Sorting.list[[i]]
if (is.list(list.i))
{
tmp.L<-list()
for (j in 1:numModels) tmp.L[[j]]<-list.i[[indxs[j]]]
res[[i]]<-tmp.L
}
else
{
tmp.V<-c()
for (j in 1:numModels) tmp.V<-rbind(tmp.V,list.i[indxs[j],])
res[[i]]<-tmp.V
}
}
res
}
model.sort.gwr<-function(Sorting.list , numVars, ruler.vector)
{
n<-length(Sorting.list)
numModels<-length(ruler.vector)
indxs<-c()
tag<-0
for (i in numVars:1)
{
tmpV<-ruler.vector[(tag+1):(tag+i)]
indx<-sort(tmpV, decreasing=TRUE, index.return=TRUE)$ix
indxs<-c(indxs, indx+tag)
tag<-tag+i
}
res<-list()
for (i in 1:n)
{
list.i<-Sorting.list[[i]]
if (is.list(list.i))
{
tmp.L<-list()
for (j in 1:numModels) tmp.L[[j]]<-list.i[[indxs[j]]]
res[[i]]<-tmp.L
}
else
{
tmp.V<-c()
for (j in 1:numModels) tmp.V<-rbind(tmp.V,list.i[indxs[j],])
res[[i]]<-tmp.V
}
}
res
}
Generate.formula<-function(DeVar,InDeVars)
{
fml<-paste(paste(DeVar, "~", sep=""), InDeVars[1],sep="")
var.n<-length(InDeVars)
if (var.n>1)
{
for (i in 2:var.n)
fml<-paste(fml, InDeVars[i], sep="+")
}
fml
}
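# Example (illustrative): Generate.formula assembles the formula string passed
# to bw.gwr()/extract.mat() by the selection loops above.
Generate.formula("y", c("x1", "x2", "x3"))  # "y~x1+x2+x3"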
# ---------------------------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
library(clusteredinterference)
data("toy_data")
suppressWarnings(RNGversion("3.5.0"))
set.seed(1113)
causal_fx <- policyFX(
data = toy_data,
formula = Outcome | Treatment ~ Age + Distance + (1 | Cluster_ID) | Cluster_ID,
alphas = c(.3, .5),
k_samps = 1
)
summary(causal_fx)
my_grid <- makeTargetGrid(alphas = (3:8)/20, small_grid = TRUE)
head(my_grid)
my_grid$estVar <- FALSE
causal_fx2 <- policyFX(
data = toy_data,
formula = Outcome | Treatment ~ Age + Distance + (1 | Cluster_ID) | Cluster_ID,
target_grid = my_grid,
k_samps = 5,
verbose = FALSE,
root_options = list(atol=1e-4)
)
print(causal_fx, nrows = 9)
plotdat <- causal_fx2$estimates[causal_fx2$estimates$estimand_type=="mu",]
plot(x = plotdat$alpha1, y = plotdat$estimate, main = "Estimated Population Means")
causal_fx$formula
causal_fx$model@call
lme4::getME(causal_fx$model, c("beta", "theta"))
# ---------------------------------------------------------------------------
msc.depth <- function(depthstats, groups, HCN = NULL) {
if (sum(file.exists(depthstats))!=length(depthstats)) stop("ERROR: One or more files doesn't exist")
if (!is.null(HCN)) {
if (length(depthstats) != length(HCN)) stop("ERROR: the HCN vector and depthstats vector should be of equal length.")
}
if (!is.factor(groups)) stop("ERROR: groups should be a factor")
if (length(depthstats)!=length(groups)) stop('ERROR: groups factor and depthstats are not of equal length. ')
theme_set(theme_minimal() + theme(axis.title = element_text(face = 'bold')))
depths <- depth_plts <- hists_MEDIAN.DEPTH <- box_MEDIAN.DEPTH <- box_CN <- list()
for (n in 1:length(depthstats)) {
depths[[n]] <- utils::read.csv(depthstats[n], sep=' ', header=T, row.names = 1)[,1:4]
depths[[n]][,"sample"] <- gsub("_contig.*","",rownames(depths[[n]])[1])
depths[[n]][,"group"] <- groups[n]
hists_MEDIAN.DEPTH[[n]] <- ggplot(depths[[n]], aes(x = MEDIAN.DEPTH)) +  # bare column names in aes(), not data$col
geom_histogram(bins = 50, color="black", fill="gray") +
ylab('frequency') + xlab('median depth') + ggtitle(depths[[n]][1,5])
box_MEDIAN.DEPTH[[n]] <- ggplot(depths[[n]], aes(x = MEDIAN.DEPTH)) +
geom_boxplot() + xlab('median depth') +
theme(axis.text.y = element_blank())
if (!is.null(HCN)) {
depths[[n]]$HCN <- HCN[n]
depths[[n]]$CN <- depths[[n]]$MEDIAN.DEPTH/HCN[n]
depths[[n]]$MIN.CN <- depths[[n]]$MIN.DEPTH/HCN[n]
depths[[n]]$MAX.CN <- depths[[n]]$MAX.DEPTH/HCN[n]
box_CN[[n]] <- ggplot(depths[[n]], aes(x = CN)) +
geom_boxplot() + xlab('copy number') +
theme(axis.text.y = element_blank())
depth_plts[[n]] <- ggpubr::ggarrange(hists_MEDIAN.DEPTH[[n]], box_CN[[n]], nrow=2, heights = c(3,1), align = "v")
} else {
depths[[n]]$HCN <- depths[[n]]$CN <- depths[[n]]$MIN.CN <- depths[[n]]$MAX.CN <- NA
depth_plts[[n]] <- ggpubr::ggarrange(hists_MEDIAN.DEPTH[[n]], box_MEDIAN.DEPTH[[n]], nrow=2, heights = c(3,1), align = "v")
}
}
depths <- do.call("rbind", depths)
names(depth_plts) <- unique(depths$sample)
mean.RD <- data.frame(depths %>% dplyr::group_by(sample) %>% dplyr::summarise(mean.depth = mean(MEDIAN.DEPTH), .groups = 'drop'))  # per-sample mean; depths$MEDIAN.DEPTH gave the global mean for every group
box_MEDIAN.DEPTH <- ggplot(depths, aes(y = MEDIAN.DEPTH, x = sample, fill = group, color = group)) +
geom_boxplot(alpha=0.5, show.legend = F) + xlab('') +
theme(axis.text.x = element_text(angle=90, hjust=1)) + ylab('Minicircle median read depth') +
facet_grid(. ~ group, scales="free", space="free")
if (!is.null(HCN)) {
box_CN <- ggplot(depths, aes(y = CN, x = sample, fill = group, color = group)) +
geom_boxplot(alpha=0.5, show.legend = F) + xlab('') +
theme(axis.text.x = element_text(angle=90, hjust=1)) + ylab('Minicircle copy number') +
facet_grid(. ~ group, scales="free", space="free")
} else {box_CN <- NA}
return(list("all" = depths,
"plots" = depth_plts,
"medianRD" = box_MEDIAN.DEPTH,
"CN" = box_CN))
}
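# Hypothetical usage (file names and values are placeholders, not from the
# original source):
# res <- msc.depth(depthstats = c("sampleA_depthstats.csv", "sampleB_depthstats.csv"),
#                  groups = factor(c("groupA", "groupB")), HCN = c(20, 20))
# res$CN          # per-sample copy-number boxplots
# res$plots[[1]]  # histogram plus boxplot for the first sample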
# ---------------------------------------------------------------------------
# Criss-cross search for the sign vector u maximizing the L1 criterion of
# pResidual %*% t(u), alternating sign updates until convergence.
SearchCrissCross <- function(pResidual, iterationMax = 20) {
nRow <- nrow(pResidual)
nCol <- ncol(pResidual)
ii <- 1
iiMax <- -Inf
uMax <- matrix(NA, ncol = nCol, nrow = 1)
colnames(uMax) <- colnames(pResidual)
temp <- svd(pResidual)$v[1, ] > 0
temp <- (-1)^temp
temp <- temp * ((-1)^(temp[1] == -1))
O <- order(apply(abs(pResidual), 1, max), decreasing = T)
us <- rbind(temp, sign(pResidual[O, ]))
us <- unique(us, MARGIN = 1)
us <- matrix((-1)^(us[, 1] == -1), nrow = nrow(us), ncol = ncol(us)) *
us
t1 <- proc.time()["elapsed"]
for (ii in 1:nrow(us)) {
if ((proc.time()["elapsed"] - t1) > 2) {
cat("*")
t1 <- proc.time()["elapsed"]
}
u <- us[ii, , drop = F]
continue <- T
jj <- 0
while (continue & jj < iterationMax) {
jj <- jj + 1
a <- pResidual %*% t(u)
v <- sign(a)
LambdaA <- t(a) %*% v
b <- t(v) %*% pResidual
u <- sign(b)
LambdaB <- u %*% t(b)
continue <- (LambdaB - LambdaA > 10^-6)
}
if (LambdaA > iiMax) {
iiMax <- LambdaA
uMax <- u
}
}
L <- list(L1Max = iiMax, uMax = uMax)
return(L)
}
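# Example (illustrative): search for the sign vector maximizing the L1
# criterion on a small random residual matrix.
set.seed(1)
SearchCrissCross(matrix(rnorm(20), nrow = 4))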
# ---------------------------------------------------------------------------
qat_call_plot_histogram_test <-
function(resultlist_part, measurement_vector=NULL, time=NULL, height= NULL, lat=NULL, lon=NULL, measurement_name="", directoryname="", basename="", plotstyle=NULL) {
filename <- paste(basename, "_", resultlist_part$element, "_histogramtest_", resultlist_part$result$metric, sep = "")
# use the stored bar factor when present, otherwise flag its absence with -1
factorofbar <- if ("factorofbar" %in% names(resultlist_part$result)) resultlist_part$result$factorofbar else -1
qat_plot_histogram_test(resultlist_part$result$field, filename, resultlist_part$result$blocksize,
                        resultlist_part$result$numofbars, factorofbar, resultlist_part$result$metric,
                        runs = resultlist_part$result$runs, measurement_name = measurement_name,
                        directoryname = directoryname, plotstyle = plotstyle)
}
# ---------------------------------------------------------------------------
require(geometa, quietly = TRUE)
require(sf)
require(testthat)
context("GMLGridFunction")
test_that("GMLGridFunction",{
testthat::skip_on_cran()
md <- GMLGridFunction$new()
md$setSequenceRule("Linear")
md$setStartPoint(0,0)
xml <- md$encode()
expect_is(xml, "XMLInternalNode")
md2 <- GMLGridFunction$new(xml = xml)
xml2 <- md2$encode()
expect_true(ISOAbstractObject$compare(md, md2))
})
# ---------------------------------------------------------------------------
.fgarchfit = function(spec, data, out.sample = 0, solver = "solnp", solver.control = list(),
fit.control = list(stationarity = 1, fixed.se = 0, scale = 0, rec.init = 'all', trunclag = 1000),
numderiv.control = list(grad.eps=1e-4, grad.d=0.0001, grad.zero.tol=sqrt(.Machine$double.eps/7e-7),
hess.eps=1e-4, hess.d=0.1, hess.zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2))
{
tic = Sys.time()
if(is.null(solver.control$trace)) trace = 0 else trace = solver.control$trace
if(is.null(fit.control$stationarity)) fit.control$stationarity = TRUE
if(is.null(fit.control$fixed.se)) fit.control$fixed.se = FALSE
if(is.null(fit.control$scale)) fit.control$scale = FALSE
if(is.null(fit.control$rec.init)) fit.control$rec.init = 'all'
if(is.null(fit.control$trunclag)) fit.control$trunclag = 1000
mm = match(names(fit.control), c("stationarity", "fixed.se", "scale", "rec.init", "trunclag"))
if(any(is.na(mm))){
idx = which(is.na(mm))
enx = NULL
for(i in 1:length(idx)) enx = c(enx, names(fit.control)[idx[i]])
warning(paste(c("unidentified option(s) in fit.control:\n", enx), sep="", collapse=" "), call. = FALSE, domain = NULL)
}
if(spec@model$modelinc[15] > 0) fit.control$scale = FALSE
if(spec@model$modelinc[5] > 0) fit.control$scale = FALSE
if(sum(spec@model$pars[,2]) > 0) fit.control$scale = FALSE
xdata = .extractdata(data)
if(!is.numeric(out.sample)) stop("\nugarchfit-->error: out.sample must be numeric\n")
if(as.numeric(out.sample)<0) stop("\nugarchfit-->error: out.sample must be positive\n")
n.start = round(out.sample,0)
n = length(xdata$data)
if((n-n.start)<100) warning("\nugarchfit-->warning: using less than 100 data points for estimation\n")
data = xdata$data[1:(n-n.start)]
index = xdata$index[1:(n-n.start)]
origdata = xdata$data
origindex = xdata$index
period = xdata$period
garchenv = new.env(hash = TRUE)
arglist = list()
arglist$garchenv <- garchenv
arglist$pmode = 0
model = spec@model
modelinc = model$modelinc
pidx = model$pidx
if(modelinc[6] > 0){
mexdata = model$modeldata$mexdata[1:(n-n.start), , drop = FALSE]
} else{
mexdata = NULL
}
if(modelinc[15] > 0){
vexdata = model$modeldata$vexdata[1:(n-n.start), ,drop = FALSE]
} else{
vexdata = NULL
}
arglist$index = index
arglist$trace = trace
m = model$maxOrder
model$modeldata$T = T = length(as.numeric(data))
dist = model$modeldesc$distribution
if(fit.control$scale) dscale = sd(data) else dscale = 1
zdata = data/dscale
recinit = .checkrec(fit.control$rec.init, T)
arglist$data = zdata
arglist$recinit = recinit
arglist$dscale = dscale
arglist$model = model
ipars = model$pars
tmp = .garchstart(ipars, arglist)
ipars = arglist$ipars = tmp$pars
arglist$tmph = tmp$tmph
estidx = as.logical( ipars[,4] )
arglist$estidx = estidx
arglist$fit.control = fit.control
npars = sum(estidx)
if(any(ipars[,2]==1)){
if(npars == 0){
if(fit.control$fixed.se==0) {
warning("\nugarchfit-->warning: all parameters fixed...returning ugarchfilter object instead\n")
return(ugarchfilter(data = xts(origdata, origindex), spec = spec, out.sample = out.sample))
} else{
use.solver = 0
ipars[ipars[,2]==1, 4] = 1
ipars[ipars[,2]==1, 2] = 0
arglist$ipars = ipars
estidx = as.logical( ipars[,4] )
arglist$estidx = estidx
}
} else{
use.solver = 1
}
} else{
use.solver = 1
}
assign("rugarch_llh", 1, envir = garchenv)
if(fit.control$stationarity == 1 && modelinc[15] == 0){
cb = .garchconbounds()
Ifn = .fgarchcon
ILB = cb$LB
IUB = cb$UB
if(solver == "solnp" | solver == "gosolnp" | solver == "hybrid") fit.control$stationarity = 0
} else{
Ifn = NULL
ILB = NULL
IUB = NULL
}
arglist$fit.control = fit.control
if(use.solver){
parscale = rep(1, length = npars)
names(parscale) = rownames(ipars[estidx,])
if(modelinc[1] > 0) parscale["mu"] = abs(mean(zdata))
if(modelinc[7] > 0) parscale["omega"] = var(zdata)
arglist$returnType = "llh"
solution = .garchsolver(solver, pars = ipars[estidx, 1], fun = .fgarchLLH,
Ifn, ILB, IUB, gr = NULL, hessian = NULL, parscale = parscale,
control = solver.control, LB = ipars[estidx, 5], UB = ipars[estidx, 6],
ux = NULL, ci = NULL, mu = NULL, arglist)
sol = solution$sol
hess = solution$hess
timer = Sys.time()-tic
if(!is.null(sol$par)){
ipars[estidx, 1] = sol$par
if(modelinc[7]==0){
tmpx = .fgarchLLH(sol$par, arglist)
ipars[pidx["omega",1], 1] = get("omega", garchenv)
}
if(sum(ipars[,2]) == 0){
if(modelinc[1] > 0) ipars[pidx["mu",1]:pidx["mu",2], 1] = ipars[pidx["mu",1]:pidx["mu",2], 1] * dscale
if(modelinc[6] > 0){
ipars[pidx["mxreg", 1]:pidx["mxreg", 2], 1] = ipars[pidx["mxreg", 1]:pidx["mxreg", 2], 1] * dscale
}
ipars[pidx["omega",1],1] = ipars[pidx["omega",1],1] * dscale^ipars[pidx["lambda",1],1]
}
} else{
ipars[estidx, 1] = NA
}
arglist$ipars = ipars
convergence = sol$convergence
if(convergence != 0) warning("\nugarchfit-->warning: solver failed to converge.")
} else{
solution = NULL
hess = NULL
timer = Sys.time()-tic
convergence = 0
sol = list()
sol$message = "all parameters fixed"
}
fit = list()
ipars2 = ipars
if(convergence==0){
arglist$dscale = 1
if(sum(ipars[,2]) > 0 && fit.control$fixed.se == 1){
ipars[ipars[,2]==1, 4] = 1
ipars[ipars[,2]==1, 2] = 0
arglist$ipars = ipars
estidx = as.logical( ipars[,4] )
arglist$estidx = estidx
}
arglist$data = data
fit = .makefitmodel(garchmodel = "fGARCH", f = .fgarchLLH, T = T, m = m,
timer = timer, convergence = convergence, message = sol$message,
hess, arglist = arglist, numderiv.control = numderiv.control)
model$modelinc[7] = modelinc[7]
model$modeldata$data = origdata
model$modeldata$index = origindex
model$modeldata$period = period
model$pars[, 1] = fit$ipars[,1]
model$pars[, 5:6] = ipars2[,5:6]
fit$ipars[, 4] = ipars2[, 4]
fit$ipars[, 2] = ipars2[, 2]
fit$ipars[, 5:6] = ipars2[,5:6]
fit$ipars["omega", 3] = 1
model$pars["omega", 3] = 1
} else{
fit$message = sol$message
fit$convergence = 1
model$modeldata$data = origdata
model$modeldata$index = origindex
model$modeldata$period = period
}
model$n.start = n.start
fit$solver = solution
ans = new("uGARCHfit",
fit = fit,
model = model)
return(ans)
}
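# Usage sketch (hedged): .fgarchfit() is the internal estimation routine that
# ugarchfit() dispatches to for the fGARCH family; the public entry point,
# shown below with hypothetical simulated data, is the supported way to reach it:
# library(rugarch)
# spec <- ugarchspec(variance.model = list(model = "fGARCH", submodel = "TGARCH",
#                                          garchOrder = c(1, 1)),
#                    mean.model = list(armaOrder = c(1, 0)))
# fit <- ugarchfit(spec, data = rnorm(1000), solver = "solnp")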
.fgarchLLH = function(pars, arglist)
{
data = arglist$data
returnType = arglist$returnType
garchenv = arglist$garchenv
model = arglist$model
estidx = arglist$estidx
idx = model$pidx
ipars = arglist$ipars
ipars[estidx, 1] = pars
trace = arglist$trace
T = length(data)
dscale = arglist$dscale
recinit = arglist$recinit
fit.control = arglist$fit.control
m = model$maxOrder
N = c(m,T)
mexdata = coredata(model$modeldata$mexdata[1:T,, drop = FALSE])
vexdata = coredata(model$modeldata$vexdata[1:T,, drop = FALSE])
distribution = model$modeldesc$distribution
modelinc = model$modelinc
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
dist = model$modeldesc$distno
hm = arglist$tmph
rx = .arfimaxfilter(modelinc[1:21], ipars[,1], idx, mexdata = mexdata, h = hm, data = data, N = N)
res = rx$res
zrf = rx$zrf
res[is.na(res) | !is.finite(res) | is.nan(res)] = 0
sumbeta = sum(ipars[idx["beta",1]:idx["beta",2],1])
kappa = apply(cbind(ipars[idx["eta1", 1]:idx["eta1", 2], 1], ipars[idx["eta2", 1]:idx["eta2", 2], 1]), 1,
FUN = function(x) fgarchKappa(ipars[idx["lambda", 1], 1], ipars[idx["delta", 1], 1], x[1], x[2],
fk, ipars[idx["ghlambda", 1], 1], ipars[idx["shape", 1], 1], ipars[idx["skew", 1], 1],
distribution))
persist = sumbeta + sum(ipars[idx["alpha",1]:idx["alpha",2],1] * kappa)
if(is.na(persist)) persist = 1
mvar = ifelse(recinit$type==1, mean(abs(res[1:recinit$n])^ipars[idx["lambda", 1], 1])^(1/ipars[idx["lambda", 1], 1]),
backcastv(abs(res), T, recinit$n, ipars[idx["lambda", 1], 1])^(1/ipars[idx["lambda", 1], 1]))
if(modelinc[15]>0) {
mv = sum(apply(matrix(vexdata, ncol = modelinc[15]), 2, "mean")*ipars[idx["vxreg",1]:idx["vxreg",2],1])
} else{
mv = 0
}
hEst = mvar
if(modelinc[7]>0){
ipars[idx["omega",1],1] = max(eps, ipars[idx["omega",1],1])
} else{
mvar2 = ifelse(!is.na(modelinc[22]), modelinc[22]/dscale, mvar)
ipars[idx["omega",1],1] = (mvar2^ipars[idx["lambda", 1], 1]) * (1 - persist) - mv
assign("omega", ipars[idx["omega",1],1], garchenv)
}
if(is.na(hEst) | !is.finite(hEst) | is.nan(hEst)) hEst = ipars[idx["omega",1],1]^(1/ipars[idx["lambda", 1], 1])
if(fit.control$stationarity == 1 && modelinc[15] == 0){
if(!is.na(persist) && persist >= 1){
if(arglist$pmode!=1){
return(llh = get("rugarch_llh", garchenv) + 0.1*(abs(get("rugarch_llh", garchenv))))
} else{
return(llh = 1e10)
}
}
}
if(modelinc[6]>0) mexdata = as.double(as.vector(mexdata)) else mexdata = double(1)
if(modelinc[15]>0) vexdata = as.double(as.vector(vexdata)) else vexdata = double(1)
ans = try( .C("fgarchfilterC", model = as.integer(modelinc[1:21]),
pars = as.double(ipars[,1]), idx = as.integer(idx[,1]-1),
hEst = as.double(hEst), kdelta = as.double(kdelta),
x = as.double(data), res = as.double(res), e = double(T),
mexdata = mexdata, vexdata = vexdata, zrf = as.double(zrf),
constm = double(T), condm = double(T), m = as.integer(m),
T = as.integer(T), h = double(T), z = double(T), llh = double(1),
LHT = double(T), PACKAGE = "rugarch"), silent = TRUE )
if(inherits(ans, "try-error")){
if(arglist$pmode!=1){
assign("rugarch_csol", 1, envir = garchenv)
assign("rugarch_filtermessage", ans, envir = garchenv)
if( trace > 0 ) cat(paste("\narfimafit-->warning: ", get("rugarch_filtermessage", garchenv),"\n", sep=""))
return(llh = (get("rugarch_llh", garchenv) + 0.1*(abs(get("rugarch_llh", garchenv)))))
} else{
return(llh = 1e10)
}
} else{
if(arglist$pmode!=1){
assign("rugarch_csol", 0, envir = garchenv)
}
}
z = ans$z
h = ans$h
epsx = ans$res
llh = ans$llh
if(is.finite(llh) && !is.na(llh) && !is.nan(llh)){
if(arglist$pmode!=1) assign("rugarch_llh", llh, envir = garchenv)
} else {
if(arglist$pmode!=1) llh = (get("rugarch_llh", garchenv) + 0.1*(abs(get("rugarch_llh", garchenv)))) else llh = 1e10
}
LHT = -ans$LHT
ans = switch(returnType,
llh = llh,
LHT = LHT,
all = list(llh = llh, h = h, epsx = epsx, z = z, kappa = kappa,
LHT = LHT, persistence = persist))
return(ans)
}
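# The error branches above implement a soft penalty rather than a hard failure:
# when the C filter errors, or the likelihood is non-finite, the optimizer is
# handed the last good value inflated by 10%, which keeps solvers such as solnp
# moving without crashing. A minimal standalone sketch of the same idea
# (hypothetical objective `obj`; not the rugarch implementation itself):
penalized_llh <- function(pars, obj, env) {
  val <- try(obj(pars), silent = TRUE)
  if (inherits(val, "try-error") || !is.finite(val)) {
    last <- get("last_llh", envir = env)
    return(last + 0.1 * abs(last)) # penalized fallback, mirroring the logic above
  }
  assign("last_llh", val, envir = env) # remember the last successful value
  val
}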
.fgarchfilter = function(spec, data, out.sample = 0, n.old = NULL, rec.init = 'all')
{
xdata = .extractdata(data)
data = xdata$data
index = xdata$index
period = xdata$period
origdata = data
origindex = index
T = length(origdata) - out.sample
data = origdata[1:T]
index = origindex[1:T]
if(!is.null(n.old)) Nx = n.old else Nx = length(data)
recinit = .checkrec(rec.init, Nx)
model = spec@model
ipars = model$pars
pars = unlist(model$fixed.pars)
parnames = names(pars)
modelnames = .checkallfixed(spec)
if(is.na(all(match(modelnames, parnames), 1:length(modelnames)))) {
cat("\nugarchfilter-->error: parameters names do not match specification\n")
cat("Expected Parameters are: ")
cat(paste(modelnames))
cat("\n")
stop("Exiting", call. = FALSE)
}
setfixed(spec)<-as.list(pars)
model = spec@model
model$modeldata$T = T
ipars = model$pars
idx = model$pidx
modelinc = model$modelinc
m = model$maxOrder
N = c(m,T)
mexdata = model$modeldata$mexdata[1:T, , drop = FALSE]
vexdata = model$modeldata$vexdata[1:T, , drop = FALSE]
distribution = model$modeldesc$distribution
dist = model$modeldesc$distno
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
sumbeta = sum(ipars[idx["beta",1]:idx["beta",2],1])
kappa = apply(cbind(ipars[idx["eta1", 1]:idx["eta1", 2], 1], ipars[idx["eta2", 1]:idx["eta2", 2], 1]), 1,
FUN = function(x) fgarchKappa(ipars[idx["lambda", 1], 1], ipars[idx["delta", 1], 1], x[1], x[2],
fk, ipars[idx["ghlambda", 1], 1], ipars[idx["shape", 1], 1], ipars[idx["skew", 1], 1],
distribution))
persist = sumbeta + sum(ipars[idx["alpha",1]:idx["alpha",2],1] * kappa)
rx = .arfimaxfilter(modelinc[1:21], ipars[,1], idx, mexdata = mexdata, h = 0, data = data, N = N)
res = rx$res
zrf = rx$zrf
if(!is.null(n.old)){
rx2 = .arfimaxfilter(modelinc[1:21], ipars[,1], idx, mexdata = mexdata[1:Nx, , drop = FALSE], h = 0, data = origdata[1:Nx], N = c(m, Nx))
res2 = rx2$res
mvar = ifelse(recinit$type==1, mean(abs(res2[1:recinit$n])^ipars[idx["lambda", 1], 1])^(1/ipars[idx["lambda", 1], 1]),
backcastv(abs(res2), Nx, recinit$n, ipars[idx["lambda", 1], 1])^(1/ipars[idx["lambda", 1], 1]))
} else{
mvar = ifelse(recinit$type==1, mean(abs(res[1:recinit$n])^ipars[idx["lambda", 1], 1])^(1/ipars[idx["lambda", 1], 1]),
backcastv(abs(res), T, recinit$n, ipars[idx["lambda", 1], 1])^(1/ipars[idx["lambda", 1], 1]))
}
hEst = mvar
if(modelinc[15]>0) {
mv = sum(apply(matrix(vexdata, ncol = modelinc[15]), 2, "mean")*ipars[idx["vxreg",1]:idx["vxreg",2],1])
} else{
mv = 0
}
if(modelinc[7]>0){
ipars[idx["omega",1],1] = max(eps, ipars[idx["omega",1],1])
} else{
mvar2 = ifelse(!is.na(modelinc[22]), modelinc[22], mvar)
ipars[idx["omega",1],1] = (mvar2^ipars[idx["lambda", 1], 1]) * (1 - persist) - mv
}
if(modelinc[6]>0) mexdata = as.double(as.vector(mexdata)) else mexdata = double(1)
if(modelinc[15]>0) vexdata = as.double(as.vector(vexdata)) else vexdata = double(1)
ans = try( .C("fgarchfilterC", model = as.integer(modelinc[1:21]),
pars = as.double(ipars[,1]), idx = as.integer(idx[,1]-1),
hEst = as.double(hEst), kdelta = as.double(kdelta),
x = as.double(data), res = as.double(res), e = as.double(res^2),
mexdata = mexdata, vexdata = vexdata, zrf = as.double(zrf),
constm = double(T), condm = double(T), m = as.integer(m),
T = as.integer(T), h = double(T), z = double(T), llh = double(1),
LHT = double(T), PACKAGE = "rugarch"), silent = TRUE )
filter = list()
filter$z = ans$z
filter$sigma = ans$h
filter$residuals = ans$res
filter$LLH = -ans$llh
filter$log.likelihoods = ans$LHT
filter$persistence = persist
filter$distribution = distribution
filter$ipars = ipars
model$modeldata$data = origdata
model$modeldata$index = origindex
model$modeldata$period = period
model$n.start = out.sample
sol = new("uGARCHfilter",
filter = filter,
model = model)
return(sol)
}
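# Hedged usage note: .fgarchfilter() backs the public ugarchfilter(), which
# applies a fully specified model (every parameter supplied via fixed.pars in
# the spec) to data without re-estimation, e.g. with hypothetical objects:
# flt <- ugarchfilter(spec = fixedspec, data = returns, out.sample = 50)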
.fgarchforecast = function(fitORspec, data = NULL, n.ahead = 10, n.roll = 0, out.sample = 0,
external.forecasts = list(mregfor = NULL, vregfor = NULL), ...)
{
fit = fitORspec
data = fit@model$modeldata$data
Nor = length(as.numeric(data))
index = fit@model$modeldata$index
period = fit@model$modeldata$period
ns = fit@model$n.start
N = Nor - ns
model = fit@model
ipars = fit@fit$ipars
modelinc = model$modelinc
idx = model$pidx
if( n.roll > ns ) stop("\nugarchforecast-->error: n.roll must not be greater than out.sample!")
pars = fit@fit$coef
ipars = fit@fit$ipars
xreg = .forcregressors(model, external.forecasts$mregfor, external.forecasts$vregfor, n.ahead, Nor, out.sample = ns, n.roll)
mxf = xreg$mxf
vxf = xreg$vxf
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
sumbeta = sum(ipars[idx["beta",1]:idx["beta",2],1])
distribution = model$modeldesc$distribution
dkappa = apply(cbind(ipars[idx["eta1", 1]:idx["eta1", 2], 1], ipars[idx["eta2", 1]:idx["eta2", 2], 1]), 1,
FUN = function(x) fgarchKappa(ipars[idx["lambda", 1], 1], ipars[idx["delta", 1], 1], x[1], x[2],
fk, ipars[idx["ghlambda", 1], 1], ipars[idx["shape", 1], 1], ipars[idx["skew", 1], 1],
distribution))
fcreq = ifelse(ns >= (n.ahead+n.roll), n.ahead+n.roll, ns)
fspec = ugarchspec(variance.model = list(model = "fGARCH",
garchOrder = c(modelinc[8], modelinc[9]), submodel = model$modeldesc$vsubmodel,
external.regressors = vxf[1:(N + fcreq), , drop = FALSE]),
mean.model = list(armaOrder = c(modelinc[2], modelinc[3]),
include.mean = modelinc[1],
archm = ifelse(modelinc[5]>0,TRUE,FALSE), archpow = modelinc[5], arfima = modelinc[4],
external.regressors = mxf[1:(N + fcreq), , drop = FALSE], archex = modelinc[20]),
distribution.model = model$modeldesc$distribution, fixed.pars = as.list(pars))
tmp = xts(data[1:(N + fcreq)], index[1:(N + fcreq)])
flt = .fgarchfilter(data = tmp, spec = fspec, n.old = N)
sigmafilter = flt@filter$sigma
resfilter = flt@filter$residuals
zfilter = flt@filter$z
seriesFor = sigmaFor = matrix(NA, ncol = n.roll+1, nrow = n.ahead)
colnames(seriesFor) = colnames(sigmaFor) = as.character(index[N:(N+n.roll)])
rownames(seriesFor) = rownames(sigmaFor) = paste("T+", 1:n.ahead, sep="")
for(i in 1:(n.roll+1)){
np = N + i - 1
if(modelinc[1] > 0){
mu = rep(ipars[idx["mu",1]:idx["mu",2], 1], N+i+n.ahead-1)
} else{
mu = rep(0, N+i+n.ahead-1)
}
omega = rep(ipars[idx["omega",1]:idx["omega",2], 1], N+i+n.ahead-1)
h = c(sigmafilter[1:(N+i-1)], rep(0, n.ahead))
epsx = c(resfilter[1:(N+i-1)], rep(0, n.ahead))
x = c(data[1:(N+i-1)], rep(0, n.ahead))
z = c(zfilter[1:(N+i-1)], rep(0, n.ahead))
mxfi = mxf[1:(N+i-1+n.ahead), , drop = FALSE]
vxfi = vxf[1:(N+i-1+n.ahead), , drop = FALSE]
ans = .nfgarchforecast(ipars, modelinc[1:21], idx, mu, omega, kdelta, dkappa, mxfi, vxfi, h, epsx, z, data = x, N = np, n.ahead)
sigmaFor[,i] = ans$h
seriesFor[,i] = ans$x
}
fcst = list()
fcst$n.ahead = n.ahead
fcst$N = N+ns
fcst$n.start = ns
fcst$n.roll = n.roll
fcst$sigmaFor = sigmaFor
fcst$seriesFor = seriesFor
model$modeldata$sigma = flt@filter$sigma
model$modeldata$residuals = flt@filter$residuals
ans = new("uGARCHforecast",
forecast = fcst,
model = model)
return(ans)
}
.nfgarchforecast = function(ipars, modelinc, idx, mu, omega, kdelta, dkappa, mxfi, vxfi, h, epsx, z, data, N, n.ahead)
{
if(modelinc[15]>0){
omega = omega + vxfi%*%t(matrix(ipars[idx["vxreg",1]:idx["vxreg",2],1], ncol = modelinc[15]))
}
for(i in 1:n.ahead){
if(modelinc[9]>0){
h[N+i] = omega[N+i] + sum(ipars[idx["beta",1]:idx["beta",2],1]*h[N+i-(1:modelinc[9])]^ipars[idx["lambda",1],1])
} else{
h[N+i] = omega[N+i]
}
if(modelinc[8]>0){
for (j in 1:modelinc[8]){
if (i-j > 0){
s = dkappa[j] * (h[N + i - j]^ipars[idx["lambda",1],1])
} else{
s = ((abs(z[N+i-j] - ipars[idx["eta2",1]+j-1,1]) - ipars[idx["eta1",1]+j-1,1]*(z[N+i-j] - ipars[idx["eta2",1]+j-1,1]))^kdelta) * (h[N + i - j]^ipars[idx["lambda",1],1])
}
h[N+i] = h[N+i] + ipars[idx["alpha",1]+j-1,1]*s
}
}
h[N+i] = h[N+i]^(1/ipars[idx["lambda",1],1])
}
if(modelinc[4]>0){
res = arfimaf(ipars, modelinc[1:21], idx, mu, mxfi, h, epsx, z, data, N, n.ahead)
} else{
res = armaf(ipars, modelinc[1:21], idx, mu, mxfi, h, epsx, z, data, N, n.ahead)
}
return(list(h = h[(N+1):(N+n.ahead)], x = res[(N+1):(N+n.ahead)]))
}
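# Intuition behind the recursion above: for lags that fall beyond the forecast
# origin the realized shock term is unavailable, so it is replaced by its
# expectation dkappa[j]; within-sample lags use the filtered z. A plain
# GARCH(1,1) analogue (lambda = 2, hypothetical parameter values):
omega <- 0.05; alpha <- 0.08; beta <- 0.90
h2_last <- 1.20; eps_last <- -0.50
h2_1 <- omega + alpha * eps_last^2 + beta * h2_last # 1-step: shock is observed
h2_2 <- omega + (alpha + beta) * h2_1               # 2-step: E[eps^2] = h^2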
.fgarchforecast2 = function(fitORspec, data = NULL, n.ahead = 10, n.roll = 0, out.sample = 0,
external.forecasts = list(mregfor = NULL, vregfor = NULL), ...)
{
spec = fitORspec
if(is.null(data)) stop("\nugarchforecast-->error: data must not be NULL when using a specification!")
xdata = .extractdata(data)
Nor = length(as.numeric(xdata$data))
data = xdata$data
N = length(as.numeric(data))
index = xdata$index
period = xdata$period
ns = out.sample
if( n.roll > ns ) stop("\nugarchforecast-->error: n.roll must not be greater than out.sample!")
N = Nor - ns
model = spec@model
ipars = model$pars
pars = unlist(model$fixed.pars)
parnames = names(pars)
modelnames = .checkallfixed(spec)
if(is.na(all(match(modelnames, parnames), 1:length(modelnames)))) {
cat("\nugarchforecast-->error: parameters names do not match specification\n")
cat("Expected Parameters are: ")
cat(paste(modelnames))
cat("\n")
stop("Exiting", call. = FALSE)
}
setfixed(spec)<-as.list(pars)
model = spec@model
idx = model$pidx
ipars = model$pars
modelinc = model$modelinc
model$modeldata$data = data
model$modeldata$index = index
model$modeldata$period = period
xreg = .forcregressors(model, external.forecasts$mregfor, external.forecasts$vregfor, n.ahead, Nor, out.sample = ns, n.roll)
mxf = xreg$mxf
vxf = xreg$vxf
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
sumbeta = sum(ipars[idx["beta",1]:idx["beta",2],1])
distribution = model$modeldesc$distribution
dkappa = apply(cbind(ipars[idx["eta1", 1]:idx["eta1", 2], 1], ipars[idx["eta2", 1]:idx["eta2", 2], 1]), 1,
FUN = function(x) fgarchKappa(ipars[idx["lambda", 1], 1], ipars[idx["delta", 1], 1], x[1], x[2],
fk, ipars[idx["ghlambda", 1], 1], ipars[idx["shape", 1], 1], ipars[idx["skew", 1], 1],
distribution))
fcreq = ifelse(ns >= (n.ahead+n.roll), n.ahead+n.roll, ns)
fspec = ugarchspec(variance.model = list(model = "fGARCH",
garchOrder = c(modelinc[8], modelinc[9]), submodel = model$modeldesc$vsubmodel,
external.regressors = vxf[1:(N + fcreq), , drop = FALSE]),
mean.model = list(armaOrder = c(modelinc[2], modelinc[3]),
include.mean = modelinc[1],
archm = ifelse(modelinc[5]>0,TRUE,FALSE), archpow = modelinc[5], arfima = modelinc[4],
external.regressors = mxf[1:(N + fcreq), , drop = FALSE], archex = modelinc[20]),
distribution.model = model$modeldesc$distribution, fixed.pars = as.list(pars))
tmp = xts(data[1:(N + fcreq)], index[1:(N + fcreq)])
flt = .fgarchfilter(data = tmp, spec = fspec, n.old = N)
sigmafilter = flt@filter$sigma
resfilter = flt@filter$residuals
zfilter = flt@filter$z
seriesFor = sigmaFor = matrix(NA, ncol = n.roll+1, nrow = n.ahead)
colnames(seriesFor) = colnames(sigmaFor) = as.character(index[N:(N+n.roll)])
rownames(seriesFor) = rownames(sigmaFor) = paste("T+", 1:n.ahead, sep="")
for(i in 1:(n.roll+1)){
np = N + i - 1
if(modelinc[1] > 0){
mu = rep(ipars[idx["mu",1]:idx["mu",2], 1], N+i+n.ahead-1)
} else{
mu = rep(0, N+i+n.ahead-1)
}
omega = rep(ipars[idx["omega",1]:idx["omega",2], 1], N+i+n.ahead-1)
h = c(sigmafilter[1:(N+i-1)], rep(0, n.ahead))
epsx = c(resfilter[1:(N+i-1)], rep(0, n.ahead))
x = c(data[1:(N+i-1)], rep(0, n.ahead))
z = c(zfilter[1:(N+i-1)], rep(0, n.ahead))
mxfi = mxf[1:(N+i-1+n.ahead), , drop = FALSE]
vxfi = vxf[1:(N+i-1+n.ahead), , drop = FALSE]
ans = .nfgarchforecast(ipars, modelinc[1:21], idx, mu, omega, kdelta, dkappa, mxfi, vxfi, h, epsx, z, data = x, N = np, n.ahead)
sigmaFor[,i] = ans$h
seriesFor[,i] = ans$x
}
fcst = list()
fcst$n.ahead = n.ahead
fcst$N = N+ns
fcst$n.start = ns
fcst$n.roll = n.roll
fcst$sigmaFor = sigmaFor
fcst$seriesFor = seriesFor
model$modeldata$sigma = flt@filter$sigma
model$modeldata$residuals = flt@filter$residuals
ans = new("uGARCHforecast",
forecast = fcst,
model = model)
return(ans)
}
.fgarchsim = function(fit, n.sim = 1000, n.start = 0, m.sim = 1, startMethod =
c("unconditional","sample"), presigma = NA, prereturns = NA,
preresiduals = NA, rseed = NA, custom.dist = list(name = NA, distfit = NA),
mexsimdata = NULL, vexsimdata = NULL, ...)
{
if( (n.sim+n.start) < 1000 && m.sim > 100 ){
ans = .fgarchsim2(fit = fit, n.sim = n.sim, n.start = n.start, m.sim = m.sim,
startMethod = startMethod, presigma = presigma, prereturns = prereturns,
preresiduals = preresiduals, rseed = rseed, custom.dist = custom.dist,
mexsimdata = mexsimdata, vexsimdata = vexsimdata)
} else{
ans = .fgarchsim1(fit = fit, n.sim = n.sim, n.start = n.start, m.sim = m.sim,
startMethod = startMethod, presigma = presigma, prereturns = prereturns,
preresiduals = preresiduals, rseed = rseed, custom.dist = custom.dist,
mexsimdata = mexsimdata, vexsimdata = vexsimdata)
}
return( ans )
}
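# Dispatch rationale (as read from the condition above): .fgarchsim2() pushes
# all m.sim paths through one vectorized C call ("mfgarchsim"), which pays off
# for many short paths; .fgarchsim1() loops over paths in R, calling
# "fgarchsimC" once per path, which suits a few long paths better.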
.fgarchsim1 = function(fit, n.sim = 1000, n.start = 0, m.sim = 1, startMethod =
c("unconditional","sample"), presigma = NA, prereturns = NA,
preresiduals = NA, rseed = NA, custom.dist = list(name = NA, distfit = NA),
mexsimdata = NULL, vexsimdata = NULL)
{
if(fit@model$modelinc[4]>0){
if(n.start<fit@model$modelinc[3]){
warning("\nugarchsim-->warning: n.start>=MA order for arfima model...automatically setting.")
n.start = fit@model$modelinc[3]
}
}
if(is.na(rseed[1])){
sseed = NA
} else{
if(length(rseed) != m.sim) sseed = as.integer(rseed[1]) else sseed = rseed[1:m.sim]
}
n = n.sim + n.start
startMethod = startMethod[1]
data = fit@model$modeldata$data
N = length(as.numeric(data))
data = data[1:(N - fit@model$n.start)]
N = length(as.numeric(data))
m = fit@model$maxOrder
resids = fit@fit$residuals
sigma = fit@fit$sigma
model = fit@model
modelinc = model$modelinc
idx = model$pidx
ipars = fit@fit$ipars
xreg = .simregressors(model, mexsimdata, vexsimdata, N, n, m.sim, m)
mexsim = xreg$mexsimlist
vexsim = xreg$vexsimlist
if(N < n.start){
startmethod[1] = "unconditional"
warning("\nugarchsim-->warning: n.start greater than length of data...using unconditional start method...\n")
}
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
distribution = model$modeldesc$distribution
persist = persistence(fit)
if(length(sseed) == 1){
zmatrix = data.frame(dist = model$modeldesc$distribution, lambda = ipars[idx["ghlambda",1], 1],
skew = ipars[idx["skew",1], 1], shape = ipars[idx["shape",1], 1], n = n * m.sim, seed = sseed[1])
z = .custzdist(custom.dist, zmatrix, m.sim, n)
} else{
zmatrix = data.frame(dist = rep(model$modeldesc$distribution, m.sim), lambda = rep(ipars[idx["ghlambda",1], 1], m.sim),
skew = rep(ipars[idx["skew",1], 1], m.sim), shape = rep(ipars[idx["shape",1], 1], m.sim),
n = rep(n, m.sim), seed = sseed)
z = .custzdist(custom.dist, zmatrix, m.sim, n)
}
if(startMethod == "unconditional"){
z = rbind(matrix(0, nrow = m, ncol = m.sim), z)
} else{
z = rbind(matrix(tail(fit@fit$z, m), nrow = m, ncol = m.sim), z)
}
if(!is.na(presigma[1])){
presigma = as.vector(presigma)
if(length(presigma)<m) stop(paste("\nugarchsim-->error: presigma must be of length ", m, sep=""))
}
if(!is.na(prereturns[1])){
prereturns = as.vector(prereturns)
if(length(prereturns)<m) stop(paste("\nugarchsim-->error: prereturns must be of length ", m, sep=""))
}
if(!is.na(preresiduals[1])){
preresiduals = as.vector(preresiduals)
if(length(preresiduals)<m) stop(paste("\nugarchsim-->error: preresiduals must be of length ", m, sep=""))
preres = matrix(preresiduals, nrow = m)
}
if(is.na(presigma[1])){
if(startMethod[1] == "unconditional"){
hEst = uncvariance(fit)^(1/ipars[idx["lambda",1],1])
presigma = as.numeric(rep(hEst, m))}
else{
presigma = tail(sigma, m)
}
}
if(is.na(prereturns[1])){
if(startMethod[1] == "unconditional"){
prereturns = as.numeric(rep(uncmean(fit), m))
}
else{
prereturns = tail(data, m)
}
}
h = c(presigma, rep(0, n))
x = c(prereturns, rep(0, n))
constm = matrix(ipars[idx["mu",1]:idx["mu",2], 1], ncol = m.sim, nrow = n + m)
z[is.na(z) | is.nan(z) | !is.finite(z)] = 0
sigmaSim = matrix(0, ncol = m.sim, nrow = n.sim)
seriesSim = matrix(0, ncol = m.sim, nrow = n.sim)
residSim = matrix(0, ncol = m.sim, nrow = n.sim)
for(i in 1:m.sim){
if(is.na(preresiduals[1])){
if(startMethod[1] == "unconditional"){
preres = as.numeric(z[1:m, i])*presigma
} else{
preres = tail(resids, m)
}
}
res = c(preres, rep(0, n))
ans1 = try(.C("fgarchsimC", model = as.integer(modelinc[1:21]),
pars = as.double(ipars[,1]), idx = as.integer(idx[,1]-1),
kdelta = as.double(kdelta), h = as.double(h),
z = as.double(z[,i]), res = as.double(res),
vexdata = as.double(vexsim[[i]]), T = as.integer(n+m),
m = as.integer(m), PACKAGE = "rugarch"), silent = TRUE)
if(inherits(ans1, "try-error")) stop("\nugarchsim-->error: error in calling C function....\n")
sigmaSim[,i] = ans1$h[(n.start + m + 1):(n+m)]
residSim[,i] = ans1$res[(n.start + m + 1):(n+m)]
if(modelinc[6]>0){
mxreg = matrix( ipars[idx["mxreg",1]:idx["mxreg",2], 1], ncol = modelinc[6] )
if(modelinc[20]==0){
constm[,i] = constm[,i] + mxreg %*%t( matrix( mexsim[[i]], ncol = modelinc[6] ) )
} else{
if(modelinc[20] == modelinc[6]){
constm[,i] = constm[,i] + mxreg %*%t( matrix( mexsim[[i]]*ans1$h, ncol = modelinc[6] ) )
} else{
constm[,i] = constm[,i] + mxreg[,1:(modelinc[6]-modelinc[20]),drop=FALSE] %*%t( matrix( mexsim[[i]][,1:(modelinc[6]-modelinc[20]),drop=FALSE], ncol = modelinc[6]-modelinc[20] ) )
constm[,i] = constm[,i] + mxreg[,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE] %*%t( matrix( mexsim[[i]][,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE]*ans1$h, ncol = modelinc[20] ) )
}
}
}
if(modelinc[5]>0) constm[,i] = constm[,i] + ipars[idx["archm",1]:idx["archm",2], 1]*(ans1$h^modelinc[5])
if(modelinc[4]>0){
fres = c(ans1$res[(m+1):(n+m)], if(modelinc[3]>0) rep(0, modelinc[3]) else NULL)
ans2 = .arfimaxsim(modelinc[1:21], ipars, idx, constm[1:n, i], fres, T = n)
seriesSim[,i] = head(ans2$series, n.sim)
} else{
ans2 = .armaxsim(modelinc[1:21], ipars, idx, constm[,i], x, ans1$res, T = n + m, m)
seriesSim[,i] = ans2$x[(n.start + m + 1):(n+m)]
}
}
sim = list(sigmaSim = sigmaSim, seriesSim = seriesSim, residSim = residSim)
model$modeldata$sigma = sigma
sol = new("uGARCHsim",
simulation = sim,
model = model,
seed = as.integer(sseed))
return(sol)
}
.fgarchsim2 = function(fit, n.sim = 1000, n.start = 0, m.sim = 1, startMethod =
c("unconditional","sample"), presigma = NA, prereturns = NA,
preresiduals = NA, rseed = NA, custom.dist = list(name = NA, distfit = NA),
mexsimdata = NULL, vexsimdata = NULL)
{
if(fit@model$modelinc[4]>0){
if(n.start<fit@model$modelinc[3]){
warning("\nugarchsim-->warning: n.start>=MA order for arfima model...automatically setting.")
n.start = fit@model$modelinc[3]
}
}
if(is.na(rseed[1])){
sseed = NA
} else{
if(length(rseed) != m.sim) sseed = as.integer(rseed[1]) else sseed = rseed[1:m.sim]
}
n = n.sim + n.start
startMethod = startMethod[1]
data = fit@model$modeldata$data
N = fit@model$modeldata$T
data = data[1:N]
m = fit@model$maxOrder
resids = fit@fit$residuals
sigma = fit@fit$sigma
model = fit@model
modelinc = model$modelinc
idx = model$pidx
ipars = fit@fit$ipars
xreg = .simregressors(model, mexsimdata, vexsimdata, N, n, m.sim, m)
mexsim = xreg$mexsimlist
vexsim = xreg$vexsimlist
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
distribution = model$modeldesc$distribution
persist = persistence(fit)
if(N < n.start){
startMethod = "unconditional"
warning("\nugarchsim-->warning: n.start greater than length of data...using unconditional start method...\n")
}
if(length(sseed) == 1){
zmatrix = data.frame(dist = model$modeldesc$distribution, lambda = ipars[idx["ghlambda",1], 1],
skew = ipars[idx["skew",1], 1], shape = ipars[idx["shape",1], 1], n = n * m.sim, seed = sseed[1])
z = .custzdist(custom.dist, zmatrix, m.sim, n)
} else{
zmatrix = data.frame(dist = rep(model$modeldesc$distribution, m.sim), lambda = rep(ipars[idx["ghlambda",1], 1], m.sim),
skew = rep(ipars[idx["skew",1], 1], m.sim), shape = rep(ipars[idx["shape",1], 1], m.sim),
n = rep(n, m.sim), seed = sseed)
z = .custzdist(custom.dist, zmatrix, m.sim, n)
}
if(startMethod == "unconditional"){
z = rbind(matrix(0, nrow = m, ncol = m.sim), z)
} else{
z = rbind(matrix(tail(fit@fit$z, m), nrow = m, ncol = m.sim), z)
}
if(!is.na(presigma[1])){
presigma = as.vector(presigma)
if(length(presigma)<m) stop(paste("\nugarchsim-->error: presigma must be of length ", m, sep=""))
}
if(!is.na(prereturns[1])){
prereturns = as.vector(prereturns)
if(length(prereturns)<m) stop(paste("\nugarchsim-->error: prereturns must be of length ", m, sep=""))
}
if(!is.na(preresiduals[1])){
preresiduals = as.vector(preresiduals)
if(length(preresiduals)<m) stop(paste("\nugarchsim-->error: preresiduals must be of length ", m, sep=""))
preres = matrix(preresiduals[1:m], nrow = m, ncol = m.sim)
}
if(is.na(presigma[1])){
if(startMethod[1] == "unconditional"){
hEst = uncvariance(fit)^(1/ipars[idx["lambda",1],1])
presigma = as.numeric(rep(hEst, m))}
else{
presigma = tail(sigma, m)
}
}
if(is.na(prereturns[1])){
if(startMethod[1] == "unconditional"){
prereturns = as.numeric(rep(uncmean(fit), m))
}
else{
prereturns = tail(data, m)
}
}
h = matrix(c(presigma, rep(0, n)), nrow = n + m, ncol = m.sim)
x = matrix(c(prereturns, rep(0, n)), nrow = n + m, ncol = m.sim)
constm = matrix(ipars[idx["mu",1]:idx["mu",2],1], nrow = n + m, ncol = m.sim)
sigmaSim = matrix(0, ncol = m.sim, nrow = n.sim)
seriesSim = matrix(0, ncol = m.sim, nrow = n.sim)
residSim = matrix(0, ncol = m.sim, nrow = n.sim)
z[is.na(z) | is.nan(z) | !is.finite(z)] = 0
if(is.na(preresiduals[1])){
if(startMethod[1] == "unconditional"){
preres = matrix( z[1:m, 1:m.sim] * presigma, nrow = m, ncol = m.sim )
} else{
preres = matrix(tail(resids, m), nrow = m, ncol = m.sim)
}
}
res = rbind(preres, matrix(0, nrow = n, ncol = m.sim))
if(modelinc[15]>0){
vxreg = matrix( ipars[idx["vxreg",1]:idx["vxreg",2], 1], ncol = modelinc[15] )
vxs = sapply(vexsim, FUN = function(x) vxreg%*%t(matrix(x, ncol = modelinc[15])))
} else{
vxs = matrix(0, nrow = m + n, ncol = m.sim)
}
ans = .Call("mfgarchsim", model = as.integer(modelinc[1:21]),
pars = as.numeric(ipars[,1]), idx = as.integer(idx[,1]-1),
kdelta = as.numeric(kdelta), h = h, z = z, res = res, vxs = vxs,
N = as.integer( c(m, n) ), PACKAGE = "rugarch")
sigmaSim = matrix(( ans$h[(n.start + m + 1):(n+m), ] ), ncol = m.sim)
residSim = matrix(ans$res[(n.start + m + 1):(n+m), ], ncol = m.sim)
if(modelinc[6]>0){
mxreg = matrix( ipars[idx["mxreg",1]:idx["mxreg",2], 1], ncol = modelinc[6] )
if(modelinc[20]==0){
mxs = sapply(mexsim, FUN = function(x) mxreg%*%t(matrix(x, ncol = modelinc[6])))
} else{
if(modelinc[20] == modelinc[6]){
mxs = sapply(mexsim, FUN = function(x) mxreg%*%t(matrix(x*ans$h, ncol = modelinc[6])))
} else{
mxs = sapply(mexsim, FUN = function(x) mxreg[,1:(modelinc[6]-modelinc[20]),drop=FALSE]%*%t(matrix(x[,1:(modelinc[6]-modelinc[20]),drop=FALSE], ncol = modelinc[6])))
mxs = mxs + sapply(mexsim, FUN = function(x) mxreg[,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE]%*%t(matrix(x[,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE]*ans$h, ncol = modelinc[20])))
}
}
} else{
mxs = 0
}
if(modelinc[5]>0){
imh = ipars[idx["archm",1],1]*(ans$h^modelinc[5])
} else{
imh = 0
}
constm = constm + mxs + imh
if(modelinc[4]>0){
for(i in 1:m.sim){
fres = c(ans$res[(m+1):(n+m), i], if(modelinc[3]>0) rep(0, modelinc[3]) else NULL)
tmp = .arfimaxsim(modelinc[1:21], ipars, idx, constm[1:n, i], fres, T = n)
seriesSim[,i] = head(tmp$series, n.sim)
}
} else{
tmp = .Call("marmaxsim", model = as.integer(modelinc[1:21]),
pars = as.numeric(ipars[,1]), idx = as.integer(idx[,1]-1),
mu = constm, x = x, res = ans$res, N = as.integer( c(m, n) ),
PACKAGE = "rugarch")
seriesSim = matrix(tmp$x[(n.start + m + 1):(n+m), ], ncol = m.sim)
}
sim = list(sigmaSim = sigmaSim, seriesSim = seriesSim, residSim = residSim)
model$modeldata$sigma = sigma
sol = new("uGARCHsim",
simulation = sim,
model = model,
seed = as.integer(sseed))
return(sol)
}
.fgarchpath = function(spec, n.sim = 1000, n.start = 0, m.sim = 1,
presigma = NA, prereturns = NA, preresiduals = NA, rseed = NA,
custom.dist = list(name = NA, distfit = NA), mexsimdata = NULL,
vexsimdata = NULL, ...)
{
if( (n.sim+n.start) < 1000 && m.sim > 100 ){
ans = .fgarchpath2(spec = spec, n.sim = n.sim, n.start = n.start, m.sim = m.sim,
presigma = presigma, prereturns = prereturns, preresiduals = preresiduals,
rseed = rseed, custom.dist = custom.dist, mexsimdata = mexsimdata,
vexsimdata = vexsimdata)
} else{
ans = .fgarchpath1(spec = spec, n.sim = n.sim, n.start = n.start, m.sim = m.sim,
presigma = presigma, prereturns = prereturns, preresiduals = preresiduals,
rseed = rseed, custom.dist = custom.dist, mexsimdata = mexsimdata,
vexsimdata = vexsimdata)
}
return( ans )
}
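# Same dispatch rule as .fgarchsim, but starting from a specification rather
# than a fit: ugarchpath() (which lands here) requires all parameters to be
# fixed in the spec, so no estimation step precedes the simulation.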
.fgarchpath1 = function(spec, n.sim = 1000, n.start = 0, m.sim = 1,
presigma = NA, prereturns = NA, preresiduals = NA, rseed = NA,
custom.dist = list(name = NA, distfit = NA), mexsimdata = NULL,
vexsimdata = NULL)
{
if(spec@model$modelinc[4]>0){
if(n.start<spec@model$modelinc[3]){
warning("\nugarchpath-->warning: n.start>=MA order for arfima model...automatically setting.")
n.start = spec@model$modelinc[3]
}
}
if(is.na(rseed[1])){
sseed = NA
} else{
if(length(rseed) != m.sim) sseed = as.integer(rseed[1]) else sseed = rseed[1:m.sim]
}
model = spec@model
ipars = model$pars
pars = unlist(model$fixed.pars)
parnames = names(pars)
modelnames = .checkallfixed(spec)
if(is.na(all(match(modelnames, parnames), 1:length(modelnames)))) {
cat("\nugarchpath-->error: parameters names do not match specification\n")
cat("Expected Parameters are: ")
cat(paste(modelnames))
cat("\n")
stop("Exiting", call. = FALSE)
}
setfixed(spec)<-as.list(pars)
model = spec@model
ipars = model$pars
idx = model$pidx
modelinc = model$modelinc
n = n.sim + n.start
m = model$maxOrder
N = 0
if(modelinc[6]>0) {
mexdata = matrix(model$modeldata$mexdata, ncol = modelinc[6])
N = dim(mexdata)[1]
} else { mexdata = NULL }
if(modelinc[15]>0) {
vexdata = matrix(model$modeldata$vexdata, ncol = modelinc[15])
N = dim(vexdata)[1]
} else { vexdata = NULL }
distribution = model$modeldesc$distribution
xreg = .simregressors(model, mexsimdata, vexsimdata, N, n, m.sim, m)
mexsim = xreg$mexsimlist
vexsim = xreg$vexsimlist
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
persist = persistence(spec)
if(persist >= 1) warning(paste("\nugarchpath->warning: persitence :", round(persist, 5), sep=""))
if(length(sseed) == 1){
zmatrix = data.frame(dist = distribution, lambda = ipars[idx["ghlambda",1], 1],
skew = ipars[idx["skew",1], 1], shape = ipars[idx["shape",1], 1],
n = n * m.sim, seed = sseed[1])
z = .custzdist(custom.dist, zmatrix, m.sim, n)
} else{
zmatrix = data.frame(dist = rep(distribution, m.sim), lambda = rep(ipars[idx["ghlambda",1], 1], m.sim),
skew = rep(ipars[idx["skew",1], 1], m.sim), shape = rep(ipars[idx["shape",1], 1], m.sim),
n = rep(n, m.sim), seed = sseed)
z = .custzdist(custom.dist, zmatrix, m.sim, n)
}
z = rbind(matrix(0, nrow = m, ncol = m.sim), z)
if(!is.na(presigma[1])){
presigma = as.vector(presigma)
if(length(presigma)<m) stop(paste("\nugarchsim-->error: presigma must be of length ", m, sep=""))
}
if(!is.na(prereturns[1])){
prereturns = as.vector(prereturns)
if(length(prereturns)<m) stop(paste("\nugarchsim-->error: prereturns must be of length ", m, sep=""))
}
if(!is.na(preresiduals[1])){
preresiduals = as.vector(preresiduals)
if(length(preresiduals)<m) stop(paste("\nugarchsim-->error: preresiduals must be of length ", m, sep=""))
preres = matrix(preresiduals, nrow = m)
}
if(is.na(presigma[1])){
hEst = uncvariance(spec)^(1/ipars[idx["lambda",1],1])
presigma = as.numeric(rep(hEst, m))
}
if(is.na(prereturns[1])){
prereturns = as.numeric(rep(uncmean(spec), times = m))
}
h = c(presigma, rep(0, n))
x = c(prereturns, rep(0, n))
constm = matrix(ipars[idx["mu",1]:idx["mu",2],1], ncol = m.sim, nrow = n + m)
sigmaSim = matrix(0, ncol = m.sim, nrow = n.sim)
seriesSim = matrix(0, ncol = m.sim, nrow = n.sim)
residSim = matrix(0, ncol = m.sim, nrow = n.sim)
for(i in 1:m.sim){
if(is.na(preresiduals[1])){
preres = as.numeric(z[1:m,i])*presigma
}
z[1:m, 1:m.sim] = preres[1:m]/presigma[1:m]
z[is.na(z) | is.nan(z) | !is.finite(z)] = 0
res = c(preres, rep(0, n))
ans1 = try(.C("fgarchsimC", model = as.integer(modelinc[1:21]),
pars = as.double(ipars[,1]), idx = as.integer(idx[,1]-1),
kdelta = as.double(kdelta), h = as.double(h),
z = as.double(z[,i]), res = as.double(res),
vexdata = as.double(vexsim[[i]]), T = as.integer(n+m),
m = as.integer(m), PACKAGE = "rugarch"), silent = TRUE)
if(inherits(ans1, "try-error")) stop("\nugarchsim-->error: error in calling C function....\n")
sigmaSim[,i] = ans1$h[(n.start + m + 1):(n+m)]
residSim[,i] = ans1$res[(n.start + m + 1):(n+m)]
if(modelinc[6]>0){
mxreg = matrix( ipars[idx["mxreg",1]:idx["mxreg",2], 1], ncol = modelinc[6] )
if(modelinc[20]==0){
constm[,i] = constm[,i] + as.numeric( mxreg %*%t( matrix( mexsim[[i]], ncol = modelinc[6] ) ) )
} else{
if(modelinc[20] == modelinc[6]){
constm[,i] = constm[,i] + as.numeric( mxreg %*%t( matrix( mexsim[[i]]*ans1$h, ncol = modelinc[6] ) ) )
} else{
constm[,i] = constm[,i] + as.numeric( mxreg[,1:(modelinc[6]-modelinc[20]),drop=FALSE] %*%t( matrix( mexsim[[i]][,1:(modelinc[6]-modelinc[20]),drop=FALSE], ncol = modelinc[6]-modelinc[20] ) ) )
constm[,i] = constm[,i] + as.numeric( mxreg[,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE] %*%t( matrix( mexsim[[i]][,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE]*ans1$h, ncol = modelinc[20] ) ) )
}
}
}
if(modelinc[5]>0) constm[,i] = constm[,i] + ipars[idx["archm",1]:idx["archm",2], 1]*(ans1$h^modelinc[5])
if(modelinc[4]>0){
fres = c(ans1$res[(m+1):(n+m)], if(modelinc[3]>0) rep(0, modelinc[3]) else NULL)
ans2 = .arfimaxsim(modelinc[1:21], ipars, idx, constm[1:n, i], fres, T = n)
seriesSim[,i] = head(ans2$series, n.sim)
} else{
ans2 = .armaxsim(modelinc[1:21], ipars, idx, constm[,i], x, ans1$res, T = n + m, m)
seriesSim[,i] = ans2$x[(n.start + m + 1):(n+m)]
}
}
path = list(sigmaSim = sigmaSim, seriesSim = seriesSim, residSim = residSim)
sol = new("uGARCHpath",
path = path,
model = model,
seed = as.integer(sseed))
return(sol)
}
.fgarchpath2 = function(spec, n.sim = 1000, n.start = 0, m.sim = 1,
presigma = NA, prereturns = NA, preresiduals = NA, rseed = NA,
custom.dist = list(name = NA, distfit = NA), mexsimdata = NULL,
vexsimdata = NULL)
{
if(spec@model$modelinc[4]>0){
if(n.start<spec@model$modelinc[3]){
warning("\nugarchpath-->warning: n.start>=MA order for arfima model...automatically setting.")
n.start = spec@model$modelinc[3]
}
}
if(is.na(rseed[1])){
sseed = NA
} else{
if(length(rseed) != m.sim) sseed = as.integer(rseed[1]) else sseed = rseed[1:m.sim]
}
model = spec@model
ipars = model$pars
pars = unlist(model$fixed.pars)
parnames = names(pars)
modelnames = .checkallfixed(spec)
if(is.na(all(match(modelnames, parnames), 1:length(modelnames)))) {
cat("\nugarchpath-->error: parameters names do not match specification\n")
cat("Expected Parameters are: ")
cat(paste(modelnames))
cat("\n")
stop("Exiting", call. = FALSE)
}
setfixed(spec)<-as.list(pars)
model = spec@model
ipars = model$pars
idx = model$pidx
modelinc = model$modelinc
n = n.sim + n.start
m = spec@model$maxOrder
N = 0
if(modelinc[6]>0) {
mexdata = matrix(model$modeldata$mexdata, ncol = modelinc[6])
N = dim(mexdata)[1]
} else { mexdata = NULL }
if(modelinc[15]>0) {
vexdata = matrix(model$modeldata$vexdata, ncol = modelinc[15])
N = dim(vexdata)[1]
} else { vexdata = NULL }
distribution = model$modeldesc$distribution
xreg = .simregressors(model, mexsimdata, vexsimdata, N, n, m.sim, m)
mexsim = xreg$mexsimlist
vexsim = xreg$vexsimlist
fmodel = model$fmodel
fpars = fmodel$fpars
fk = fpars$fk
kdelta = ipars[idx["delta", 1], 1] + fk*ipars[idx["lambda", 1], 1]
persist = persistence(spec)
if(persist >= 1) warning(paste("\nugarchpath-->warning: persistence: ", round(persist, 5), sep=""))
if(length(sseed) == 1){
zmatrix = data.frame(dist = distribution, lambda = ipars[idx["ghlambda",1], 1],
skew = ipars[idx["skew",1], 1], shape = ipars[idx["shape",1], 1],
n = n * m.sim, seed = sseed[1])
z = .custzdist(custom.dist, zmatrix, m.sim, n)
} else{
zmatrix = data.frame(dist = rep(distribution, m.sim), lambda = rep(ipars[idx["ghlambda",1], 1], m.sim),
skew = rep(ipars[idx["skew",1], 1], m.sim), shape = rep(ipars[idx["shape",1], 1], m.sim),
n = rep(n, m.sim), seed = sseed)
z = .custzdist(custom.dist, zmatrix, m.sim, n)
}
z = rbind(matrix(0, nrow = m, ncol = m.sim), z)
if(!is.na(presigma[1])){
presigma = as.vector(presigma)
if(length(presigma)<m) stop(paste("\nugarchsim-->error: presigma must be of length ", m, sep=""))
}
if(!is.na(prereturns[1])){
prereturns = as.vector(prereturns)
if(length(prereturns)<m) stop(paste("\nugarchsim-->error: prereturns must be of length ", m, sep=""))
}
if(!is.na(preresiduals[1])){
preresiduals = as.vector(preresiduals)
if(length(preresiduals)<m) stop(paste("\nugarchsim-->error: preresiduals must be of length ", m, sep=""))
preres = matrix(preresiduals[1:m], nrow = m, ncol = m.sim)
}
if(is.na(presigma[1])){
hEst = uncvariance(spec)^(1/ipars[idx["lambda",1],1])
presigma = as.numeric(rep(hEst, times = m))
}
if(is.na(prereturns[1])){
prereturns = as.numeric(rep(uncmean(spec), times = m))
}
h = matrix(c(presigma, rep(0, n)), nrow = n + m, ncol = m.sim)
x = matrix(c(prereturns, rep(0, n)), nrow = n + m, ncol = m.sim)
constm = matrix(ipars[idx["mu",1]:idx["mu",2],1], nrow = n + m, ncol = m.sim)
sigmaSim = matrix(0, ncol = m.sim, nrow = n.sim)
seriesSim = matrix(0, ncol = m.sim, nrow = n.sim)
residSim = matrix(0, ncol = m.sim, nrow = n.sim)
if(is.na(preresiduals[1])){
preres = matrix( z[1:m, 1:m.sim] * presigma, nrow = m, ncol = m.sim )
} else{
preres = matrix( preresiduals, nrow = m, ncol = m.sim )
}
z[1:m, 1:m.sim] = preres[1:m, 1:m.sim]/presigma[1:m]
z[is.na(z) | is.nan(z) | !is.finite(z)] = 0
res = rbind(preres, matrix(0, nrow = n, ncol = m.sim))
if(modelinc[15]>0){
vxreg = matrix( ipars[idx["vxreg",1]:idx["vxreg",2], 1], ncol = modelinc[15] )
vxs = sapply(vexsim, FUN = function(x) vxreg%*%t(matrix(x, ncol = modelinc[15])))
} else{
vxs = matrix(0, nrow = m + n, ncol = m.sim)
}
ans = .Call("mfgarchsim", model = as.integer(modelinc[1:21]),
pars = as.numeric(ipars[,1]), idx = as.integer(idx[,1]-1),
kdelta = as.numeric(kdelta), h = h, z = z, res = res, vxs = vxs,
N = as.integer( c(m, n) ), PACKAGE = "rugarch")
sigmaSim = matrix(( ans$h[(n.start + m + 1):(n+m), ] ), ncol = m.sim)
residSim = matrix(ans$res[(n.start + m + 1):(n+m), ], ncol = m.sim)
if(modelinc[6]>0){
mxreg = matrix( ipars[idx["mxreg",1]:idx["mxreg",2], 1], ncol = modelinc[6] )
if(modelinc[20]==0){
mxs = sapply(mexsim, FUN = function(x) mxreg%*%t(matrix(x, ncol = modelinc[6])))
} else{
if(modelinc[20] == modelinc[6]){
mxs = sapply(mexsim, FUN = function(x) mxreg%*%t(matrix(x*ans$h, ncol = modelinc[6])))
} else{
mxs = sapply(mexsim, FUN = function(x) mxreg[,1:(modelinc[6]-modelinc[20]),drop=FALSE]%*%t(matrix(x[,1:(modelinc[6]-modelinc[20]),drop=FALSE], ncol = modelinc[6])))
mxs = mxs + sapply(mexsim, FUN = function(x) mxreg[,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE]%*%t(matrix(x[,(modelinc[6]-modelinc[20]+1):modelinc[6],drop=FALSE]*ans$h, ncol = modelinc[20])))
}
}
} else{
mxs = 0
}
if(modelinc[5]>0){
imh = ipars[idx["archm",1],1]*(ans$h^modelinc[5])
} else{
imh = 0
}
constm = constm + mxs + imh
if(modelinc[4]>0){
for(i in 1:m.sim){
fres = c(ans$res[(m+1):(n+m), i], if(modelinc[3]>0) rep(0, modelinc[3]) else NULL)
tmp = .arfimaxsim(modelinc[1:21], ipars, idx, constm[1:n, i], fres, T = n)
seriesSim[,i] = head(tmp$series, n.sim)
}
} else{
tmp = .Call("marmaxsim", model = as.integer(modelinc[1:21]),
pars = as.numeric(ipars[,1]), idx = as.integer(idx[,1]-1),
mu = constm, x = x, res = ans$res, N = as.integer( c(m, n) ),
PACKAGE = "rugarch")
seriesSim = matrix(tmp$x[(n.start + m + 1):(n+m), ], ncol = m.sim)
}
path = list(sigmaSim = sigmaSim, seriesSim = seriesSim, residSim = residSim)
sol = new("uGARCHpath",
path = path,
model = model,
seed = as.integer(sseed))
return(sol)
}
|
context("task2b")
test_that("Mark even more on task2", {
expect_is(task2(5:10), "integer", info = "task2 does not return an integer.")
expect_equal(length(task2(5:10)), 1, info = "task2 does not return one value.")
expect_equal(task2(vector=5:10), 15, info = "task2(vector=5:10) does not return 15")
expect_equal(task2(vector=1:5), 6, info = "task2(vector=1:5) does not return 6")
})
|
library(ggvis)
x_bar <- "x&
sigma_hat <- "σ&
brushed_summary <- function(items, session, page_loc, ...) {
if (nrow(items) == 0) return()
items$key__ <- NULL
lines <- Map(function(name, vals) {
paste0(name, ": ",
x_bar, " = ", round(mean(vals), 2), " ",
sigma_hat, " = ", round(sd(vals), 2)
)
}, names(items), items)
html <- paste(unlist(lines), collapse = "<br />\n")
show_tooltip(session, page_loc$r + 5, page_loc$t, html)
}
mtcars %>% ggvis(x = ~wt, y = ~mpg) %>%
layer_points(size.brush := 400) %>%
handle_brush(brushed_summary)
pressure %>% ggvis(x = ~temperature, y = ~pressure) %>%
scale_nominal("x", range = "width", padding = 0, points = FALSE) %>%
layer_rects(y2 = 0, width = band(), fill.brush := "red") %>%
handle_brush(brushed_summary)
data("diamonds", package="ggplot2")
d <- diamonds[sample(nrow(diamonds), 10000), ]
d %>% ggvis(x = ~carat, y = ~price) %>%
layer_points(size := 40, fillOpacity := 0.02, fillOpacity.brush := 0.4) %>%
handle_brush(brushed_summary)
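# Note: handle_brush() needs an interactive ggvis session (e.g. inside RStudio
# or a Shiny app); the tooltip reports the mean and standard deviation of the
# brushed points for each mapped variable.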
|
init_mode <- function(y, u, distribution) {
switch(distribution,
poisson = {
y <- y / u
y[y < 0.1 | is.na(y)] <- 0.1
y <- log(y)
},
binomial = {
y <- qlogis((ifelse(is.na(y), 0.5, y) + 0.5) / (u + 1))
},
gamma = {
y <- y / u
y[is.na(y) | y < 1] <- 1
y <- log(y)
},
"negative binomial" = {
y <- y / u
y[is.na(y) | y < 1 / 6] <- 1 / 6
y <- log(y)
},
gaussian = {
},
stop(paste("Argument distribution must be 'poisson', 'binomial', 'gamma',",
"'gaussian', or 'negative binomial'.", sep = " "))
)
y
}
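# Minimal sketch of the assumed calling convention: y holds counts (or
# successes), u the exposures (or trials), and the result is an initial value
# for the latent level on the link scale.
init_mode(c(0, 3, NA, 12), u = rep(1, 4), distribution = "poisson")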
|
"mcnp-dataset: dataset"
|
LKA <- function(level){
x <- NULL
if(level==1){
x1 <- github.cssegisanddata.covid19(country = "Sri Lanka")
x2 <- ourworldindata.org(id = "LKA")
x <- full_join(x1, x2, by = "date")
}
return(x)
}
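# Note: full_join() comes from dplyr, and github.cssegisanddata.covid19() /
# ourworldindata.org() are internal data-source helpers assumed to be defined
# elsewhere in the package; this wrapper merges both country-level sources by
# date for Sri Lanka.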
|
qiita_get_comments <- function(comment_id = NULL, item_id = NULL,
per_page = 100L, page_offset = 0L, page_limit = 1L) {
if(!is.null(comment_id) && !is.null(item_id)) stop("You cannot specify both comment_id and item_id")
if(is.null(comment_id) && is.null(item_id)) stop("Please specify comment_id or item_id")
if(!is.null(comment_id)){
result <- purrr::map(comment_id, qiita_get_single_comment_by_id)
return(result)
}
if(!is.null(item_id)) {
result <- purrr::map(item_id, qiita_get_comments_by_item,
per_page = per_page, page_offset = page_offset, page_limit = page_limit)
return(purrr::flatten(result))
}
}
qiita_get_single_comment_by_id <- function(comment_id) {
path <- sprintf("/api/v2/comments/%s", comment_id)
qiita_api("GET", path = path)
}
qiita_get_comments_by_item <- function(item_id, per_page, page_offset, page_limit) {
path <- sprintf("/api/v2/items/%s/comments", item_id)
qiita_api("GET", path = path,
per_page = per_page, page_offset = page_offset, page_limit = page_limit)
}
qiita_delete_comment <- function(comment_id) {
if(!purrr::is_scalar_character(comment_id)) stop("comment_id must be a scalar character!")
path <- sprintf("/api/v2/comments/%s", comment_id)
qiita_api("DELETE", path = path)
}
qiita_update_comment <- function(comment_id, body) {
if(!purrr::is_scalar_character(comment_id)) stop("comment_id must be a scalar character!")
if(!purrr::is_scalar_character(body)) stop("body must be a scalar character!")
path <- sprintf("/api/v2/comments/%s", comment_id)
qiita_api("PATCH", path = path,
payload = qiita_util_payload(body = body))
}
qiita_post_comment <- function(item_id, body) {
if(!purrr::is_scalar_character(item_id)) stop("item_id must be a scalar character!")
if(!purrr::is_scalar_character(body)) stop("body must be a scalar character!")
path <- sprintf("/api/v2/items/%s/comments", item_id)
qiita_api("POST", path = path,
payload = qiita_util_payload(body = body))
}
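# Hedged usage sketch (hypothetical IDs; qiita_api() is assumed to handle
# authentication via a stored access token):
# qiita_get_comments(item_id = "4bd431809afb1bb99e4f")
# qiita_post_comment(item_id = "4bd431809afb1bb99e4f", body = "Thanks!")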
|
context("Testing ATbounds: simulation_dgp")
test_that("ATE-oracle and the mean difference estimator should be similar when P(D=1|X) = 0.5", {
set.seed(1)
data <- simulation_dgp(10000, ps_spec = "overlap")
y <- data$outcome
d <- data$treat
ate <- data$ate_oracle
mde <- mean(d*y)/mean(d) - mean((1-d)*y)/mean(1-d)
diff <- (abs(ate - mde) <= 1e-2)
expect_true(diff)
})
test_that("ATE-oracle and the mean difference estimator should be different when P(D=1|X) is a function of X", {
set.seed(1)
data <- simulation_dgp(10000, ps_spec = "non-overlap")
y <- data$outcome
d <- data$treat
ate <- data$ate_oracle
mde <- mean(d*y)/mean(d) - mean((1-d)*y)/mean(1-d)
diff <- (abs(ate - mde) <= 1e-2)
expect_false(diff)
})
test_that("Discrete and continuous cases are different", {
set.seed(1)
data_d <- simulation_dgp(100, ps_spec = "non-overlap", x_discrete = TRUE)
set.seed(1)
data_c <- simulation_dgp(100, ps_spec = "non-overlap")
expect_false(data_c$covariate[1] == data_d$covariate[1])
})
|
is.nloptr <- function( x ) {
if( is.null(x) ) { return( FALSE ) }
if( !is.list(x) ) { return( FALSE ) }
if ( !is.function(x$eval_f) ) { stop('eval_f is not a function') }
if ( !is.null(x$eval_g_ineq) ) {
if ( !is.function(x$eval_g_ineq) ) { stop('eval_g_ineq is not a function') }
}
if ( !is.null(x$eval_g_eq) ) {
if ( !is.function(x$eval_g_eq) ) { stop('eval_g_eq is not a function') }
}
if ( any( is.na( x$x0 ) ) ) { stop('x0 contains NA') }
if ( length( x$x0 ) != length( x$lower_bounds ) ) { stop('length(lb) != length(x0)') }
if ( length( x$x0 ) != length( x$upper_bounds ) ) { stop('length(ub) != length(x0)') }
if ( any( x$x0 < x$lower_bounds ) ) { stop('at least one element in x0 < lb') }
if ( any( x$x0 > x$upper_bounds ) ) { stop('at least one element in x0 > ub') }
list_algorithms <- c( "NLOPT_GN_DIRECT", "NLOPT_GN_DIRECT_L", "NLOPT_GN_DIRECT_L_RAND",
"NLOPT_GN_DIRECT_NOSCAL", "NLOPT_GN_DIRECT_L_NOSCAL",
"NLOPT_GN_DIRECT_L_RAND_NOSCAL", "NLOPT_GN_ORIG_DIRECT",
"NLOPT_GN_ORIG_DIRECT_L", "NLOPT_GD_STOGO", "NLOPT_GD_STOGO_RAND",
"NLOPT_LD_SLSQP", "NLOPT_LD_LBFGS_NOCEDAL", "NLOPT_LD_LBFGS", "NLOPT_LN_PRAXIS",
"NLOPT_LD_VAR1", "NLOPT_LD_VAR2", "NLOPT_LD_TNEWTON",
"NLOPT_LD_TNEWTON_RESTART", "NLOPT_LD_TNEWTON_PRECOND",
"NLOPT_LD_TNEWTON_PRECOND_RESTART", "NLOPT_GN_CRS2_LM",
"NLOPT_GN_MLSL", "NLOPT_GD_MLSL", "NLOPT_GN_MLSL_LDS",
"NLOPT_GD_MLSL_LDS", "NLOPT_LD_MMA", "NLOPT_LD_CCSAQ", "NLOPT_LN_COBYLA",
"NLOPT_LN_NEWUOA", "NLOPT_LN_NEWUOA_BOUND", "NLOPT_LN_NELDERMEAD",
"NLOPT_LN_SBPLX", "NLOPT_LN_AUGLAG", "NLOPT_LD_AUGLAG",
"NLOPT_LN_AUGLAG_EQ", "NLOPT_LD_AUGLAG_EQ", "NLOPT_LN_BOBYQA",
"NLOPT_GN_ISRES", "NLOPT_GN_ESCH" )
if ( !( x$options$algorithm %in% list_algorithms ) ) {
stop( paste('Incorrect algorithm supplied. Use one of the following:\n', paste( list_algorithms, collapse='\n' ) ) )
}
list_algorithmsD <- list_algorithms[ grep( "NLOPT_[G,L]D", list_algorithms ) ]
list_algorithmsN <- list_algorithms[ grep( "NLOPT_[G,L]N", list_algorithms ) ]
f0 <- x$eval_f( x$x0 )
if ( is.list( f0 ) ) {
if ( is.na( f0$objective ) ) { stop('objective in x0 returns NA') }
if ( any( is.na( f0$gradient ) ) ) { stop('gradient of objective in x0 returns NA') }
if ( length( f0$gradient ) != length( x$x0 ) ) { stop('wrong number of elements in gradient of objective') }
if ( x$options$algorithm %in% list_algorithmsN ) {
warning( 'a gradient was supplied for the objective function, but algorithm ',
x$options$algorithm, ' does not use gradients.' )
}
} else {
if ( is.na( f0 ) ) { stop('objective in x0 returns NA') }
if ( x$options$algorithm %in% list_algorithmsD ) {
stop( paste( 'A gradient for the objective function is needed by algorithm',
x$options$algorithm, 'but was not supplied.\n' ) )
}
}
if ( !is.null( x$eval_g_ineq ) ) {
g0_ineq <- x$eval_g_ineq( x$x0 )
if ( is.list( g0_ineq ) ) {
if ( any( is.na( g0_ineq$constraints ) ) ) { stop('inequality constraints in x0 return NA') }
if ( any( is.na( g0_ineq$jacobian ) ) ) { stop('jacobian of inequality constraints in x0 returns NA') }
if ( length( g0_ineq$jacobian ) != length( g0_ineq$constraints )*length( x$x0 ) ) {
stop(paste('wrong number of elements in jacobian of inequality constraints (is ',
length( g0_ineq$jacobian ),
', but should be ',
length( g0_ineq$constraints ), ' x ',
length( x$x0 ), ' = ',
length( g0_ineq$constraints )*length( x$x0 ), ')', sep=''))
}
if ( x$options$algorithm %in% list_algorithmsN ) {
warning( 'a gradient was supplied for the inequality constraints, but algorithm ',
x$options$algorithm, ' does not use gradients.' )
}
} else {
if ( any( is.na( g0_ineq ) ) ) { stop('inequality constraints in x0 return NA') }
if ( x$options$algorithm %in% list_algorithmsD ) {
stop( paste( 'A gradient for the inequality constraints is needed by algorithm',
x$options$algorithm, 'but was not supplied.\n' ) )
}
}
}
if ( !is.null( x$eval_g_eq ) ) {
g0_eq <- x$eval_g_eq( x$x0 )
if ( is.list( g0_eq ) ) {
if ( any( is.na( g0_eq$constraints ) ) ) { stop('equality constraints in x0 return NA') }
if ( any( is.na( g0_eq$jacobian ) ) ) { stop('jacobian of equality constraints in x0 returns NA') }
if ( length( g0_eq$jacobian ) != length( g0_eq$constraints )*length( x$x0 ) ) {
stop(paste('wrong number of elements in jacobian of equality constraints (is ',
length( g0_eq$jacobian ),
', but should be ',
length( g0_eq$constraints ), ' x ',
length( x$x0 ), ' = ',
length( g0_eq$constraints )*length( x$x0 ), ')', sep=''))
}
if ( x$options$algorithm %in% list_algorithmsN ) {
warning( 'a gradient was supplied for the equality constraints, but algorithm ',
x$options$algorithm, ' does not use gradients.' )
}
} else {
if ( any( is.na( g0_eq ) ) ) { stop('equality constraints in x0 return NA') }
if ( x$options$algorithm %in% list_algorithmsD ) {
stop( paste( 'A gradient for the equality constraints is needed by algorithm',
x$options$algorithm, 'but was not supplied.\n' ) )
}
}
}
if ( x$num_constraints_eq > 0 ) {
eq_algorithms <- c("NLOPT_LD_AUGLAG",
"NLOPT_LN_AUGLAG",
"NLOPT_LD_AUGLAG_EQ",
"NLOPT_LN_AUGLAG_EQ",
"NLOPT_GN_ISRES",
"NLOPT_LD_SLSQP")
if( !( x$options$algorithm %in% eq_algorithms ) ) {
stop(paste('If you want to use equality constraints, then you should use one of these algorithms', paste(eq_algorithms, collapse=', ')))
}
}
if ( x$options$algorithm %in%
c("NLOPT_LD_AUGLAG",
"NLOPT_LN_AUGLAG",
"NLOPT_LD_AUGLAG_EQ",
"NLOPT_LN_AUGLAG_EQ",
"NLOPT_GN_MLSL",
"NLOPT_GD_MLSL",
"NLOPT_GN_MLSL_LDS",
"NLOPT_GD_MLSL_LDS") ) {
if ( is.null( x$local_options ) ) {
stop(paste('The algorithm', x$options$algorithm, 'needs a local optimizer; specify an algorithm and termination condition in local_opts'))
}
}
if ( x$num_constraints_ineq !=
length( x$options$tol_constraints_ineq ) ) {
stop(paste('The vector tol_constraints_ineq in the options list has size',
length(x$options$tol_constraints_ineq),
'which is unequal to the number of inequality constraints,',
x$num_constraints_ineq, '.'))
}
if ( x$num_constraints_eq !=
length( x$options$tol_constraints_eq ) ) {
stop(paste('The vector tol_constraints_eq in the options list has size',
length(x$options$tol_constraints_eq),
'which is unequal to the number of equality constraints,',
x$num_constraints_eq, '.'))
}
return( TRUE )
}
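# Hedged sketch of the problem structure is.nloptr() validates (field names as
# read from the checks above; this is the internal list layout, not the public
# nloptr() call):
p <- list(
  x0 = c(1, 1),
  eval_f = function(x) list(objective = sum(x^2), gradient = 2 * x),
  lower_bounds = c(-5, -5), upper_bounds = c(5, 5),
  eval_g_ineq = NULL, eval_g_eq = NULL,
  num_constraints_ineq = 0, num_constraints_eq = 0,
  options = list(algorithm = "NLOPT_LD_LBFGS",
                 tol_constraints_ineq = numeric(0),
                 tol_constraints_eq = numeric(0))
)
# is.nloptr(p) # returns TRUE for this well-formed problem object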
|
xpose.plot.histogram <-
function(x,object,
inclZeroWRES = FALSE,
onlyfirst = FALSE,
samp = NULL,
type = "density",
aspect = object@Prefs@Graph.prefs$aspect,
scales = list(),
by = object@Prefs@Graph.prefs$condvar,
force.by.factor = FALSE,
ordby = object@Prefs@Graph.prefs$ordby,
byordfun = object@Prefs@Graph.prefs$byordfun,
shingnum = object@Prefs@Graph.prefs$shingnum,
shingol = object@Prefs@Graph.prefs$shingol,
strip = function(...)
strip.default(...,strip.names=c(TRUE,TRUE)),
subset = xsubset(object),
main = xpose.create.title.hist(x,object,subset,...),
xlb = NULL,
ylb = "Density",
hicol = object@Prefs@Graph.prefs$hicol,
hilty = object@Prefs@Graph.prefs$hilty,
hilwd = object@Prefs@Graph.prefs$hilwd,
hidcol = object@Prefs@Graph.prefs$hidcol,
hidlty = object@Prefs@Graph.prefs$hidlty,
hidlwd = object@Prefs@Graph.prefs$hidlwd,
hiborder = object@Prefs@Graph.prefs$hiborder,
mirror = FALSE,
max.plots.per.page=4,
mirror.aspect="fill",
pass.plot.list=FALSE,
x.cex=NULL,
y.cex=NULL,
main.cex=NULL,
mirror.internal=list(strip.missing=missing(strip)),
...) {
plotTitle <- main
arg.list <- formals(xpose.plot.histogram)
arg.names <- names(arg.list)
new.arg.list <- vector("list",length(arg.names))
names(new.arg.list) <- arg.names
for (argnam in arg.names){
if (argnam=="..."){
next
}
tmp <- get(argnam)
if (is.null(tmp)){
} else {
new.arg.list[[argnam]]=tmp
}
}
if (mirror){
create.mirror(xpose.plot.histogram,
new.arg.list,mirror,plotTitle,...)
} else {
if(!is.null(x)) {
if(length(x)> 1) {
xlb <- NULL
} else {
if(is.null(xlb)) {
xlb <- xlabel(x,object)
}
}
}
if(!is.null(samp)) {
data <- SData(object,inclZeroWRES,onlyfirst=onlyfirst,
subset=subset,samp=samp)
} else {
data <- Data(object,inclZeroWRES,onlyfirst=onlyfirst,subset=subset)
}
if(!is.null(by) && force.by.factor) {
for(b in by) {
data[,b] <- as.factor(data[,b])
}
}
if(length(x) > 1) {
reps <-c(xvardef("id",object),xvardef("idlab",object),
xvardef("wres",object))
if(!is.null(by)) reps <- c(reps,by)
data <- xpose.stack(data,object,x,reps)
object <- new("xpose.data",
Runno=object@Runno,
Data = NULL)
Data(object) <- data
onlyfirst = FALSE
if(is.null(by)) {
by <- "ind"
} else {
by <- c("ind",by)
}
x <- "values"
scales=list(relation="free")
}
data <- subset(data, get(x) != object@Prefs@Miss)
if(any(is.null(data))) return("The subset expression is invalid!\n")
bb <- NULL
if(any(is.null(by))) {
formel <- paste("~",x,sep="")
} else {
for(b in by) {
bb <- c(bb,xlabel(b,object))
if(!is.factor(data[,b])) {
data[,b] <- equal.count(data[,b],number=shingnum,overl=shingol)
} else {
if(any(!is.null(ordby))) {
data[,b] <- reorder(data[,b],data[,ordby],byordfun)
}
if(names(data[,b,drop=F])!="ind") {
levels(data[,b]) <-
paste(xlabel(names(data[,b,drop=F]),object),":",
levels(data[,b]),sep="")
}
}
}
bys <- paste(by,collapse="*")
formel <- paste("~",x,"|",bys,sep="")
}
if(missing(strip)) {
strip <- function(var.name,...)
strip.default(var.name=bb,strip.names=c(F,T),...)
}
xvarnam <- x
if(!is.null(x.cex)) {
if (is.list(xlb)){
xlb$cex=x.cex
} else {
xlb <- list(xlb,cex=x.cex)
}
}
if(!is.null(y.cex)) {
if (is.list(ylb)){
ylb$cex=y.cex
} else {
ylb <- list(ylb,cex=y.cex)
}
}
if(is.null(main)) {
} else {
if(!is.null(main.cex)) {
if (is.list(main)){
main$cex=main.cex
} else {
main <- list(main,cex=main.cex)
}
}
}
if(missing("type")) {
if (length(levs <- unique(data[,x])) <= object@[email protected]) {
type <- "count"
ylb <- "Count"
}
}
xplot <- histogram(formula(formel),data,
...,
obj=object,
prepanel = function(x,bins.per.panel.equal = TRUE,...) {
                         if(length(levs <- unique(x)) <= object@Prefs@Cat.levels) {
xlim <- as.character(sort(levs))
return(list(xlim=xlim))
} else {
xlim <- range(x)
if(!bins.per.panel.equal){
nint <- round(log2(length(x))+1)
endpoints <- range(x[!is.na(x)])
breaks <- do.breaks(endpoints, nint)
hdat <- hist(x, breaks=breaks, plot=F)
ddat <- density(x,na.rm=T)
ylim <- c(0, max(hdat$density,ddat$y))
return(list(xlim=xlim,ylim=ylim))
}
return(list(xlim=xlim))
}
},
panel=xpose.panel.histogram,
aspect=aspect,
ylab=ylb,
xlab=xlb,
type=type,
scales=scales,
main=main,
xvarnam=xvarnam,
hidlty = hidlty,
hidcol = hidcol,
hidlwd = hidlwd,
hiborder = hiborder,
hilty = hilty,
hilwd = hilwd,
strip=strip,
hicol = hicol)
return(xplot)
}
}
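# Usage sketch for xpose.plot.histogram(): the run number, variable names and
# the xpdb object below are illustrative; xpose.data() expects NONMEM output
# tables for that run in the working directory.
if (interactive()) {
  library(xpose4)
  xpdb <- xpose.data(5)
  xpose.plot.histogram("CWRES", xpdb)
  # several variables are stacked into conditioning panels internally
  xpose.plot.histogram(c("CWRES", "PRED"), xpdb)
}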
|
dplcon = function(x, xmin, alpha, log = FALSE) {
if (log) {
pdf = log(alpha - 1) - log(xmin) - alpha * (log(x / xmin))
pdf[x < xmin] = -Inf
} else {
pdf = (alpha - 1) / xmin * (x / xmin) ^ (-alpha)
pdf[x < xmin] = 0
}
pdf
}
pplcon = function(q, xmin, alpha, lower.tail = TRUE) {
cdf = 1 - (q / xmin) ^ (-alpha + 1)
if (!lower.tail)
cdf = 1 - cdf
cdf[q < round(xmin)] = 0
cdf
}
rplcon = function(n, xmin, alpha) {
xmin * (1 - runif(n)) ^ (-1 / (alpha - 1))
}
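# Quick self-check of the continuous power-law trio above: empirical quantiles
# of rplcon() draws should map back to their probabilities under pplcon()
# (the xmin and alpha values are arbitrary).
set.seed(1)
smp <- rplcon(1e4, xmin = 1, alpha = 2.5)
probs <- c(0.5, 0.9, 0.99)
cbind(nominal = probs,
      recovered = pplcon(quantile(smp, probs), xmin = 1, alpha = 2.5))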
|
print.ptwiseELtest = function(x, digits = max(3L, getOption("digits") - 3L), quiet = FALSE, ...){
if(quiet == FALSE){
cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n", sep = "")
cat("\nRange of time_pts is from ", format(min(x$result_dataframe$time_pts), digits = digits)," to ", format(max(x$result_dataframe$time_pts), digits = digits), sep = "")
cat("\n", sum(x$result_dataframe$decision == 1), " out of ", nrow(x$result_dataframe), " decisions are 1, the other ", sum(x$result_dataframe$decision == 0), " decisions are 0", sep = "")
cat("\n-----\nSummary of stat_ptwise:\n")
print(summary(x$result_dataframe$stat_ptwise, digits = digits), digits = digits)
cat("-----\nSummary of critval_ptwise:\n")
print(summary(x$result_dataframe$critval_ptwise, digits = digits), digits = digits)
}
  result = list(call = x$call, time_pts = x$result_dataframe$time_pts, decision = x$result_dataframe$decision, stat_ptwise = x$result_dataframe$stat_ptwise, critval = x$result_dataframe$critval_ptwise)
invisible(result)
}
|
RemoveRowsWithNaNs <- function(x, cols=NULL) {
  p <- dim(x)[2]
  if (is.null(cols))
    cols <- 1:p
  # drop = FALSE keeps the matrix structure when a single column is selected
  sumas <- apply(x[, cols, drop = FALSE], 1, sumisna)
  good <- which(sumas == 0)
  newx <- x[good, , drop = FALSE]
  return(newx)
}
sumisna <- function(x){
return(sum(is.na(x)))
}
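# Example: drop rows with an NA in the first two columns only; the NA in the
# third column is ignored, so rows 1 and 3 are kept.
m <- cbind(a = c(1, NA, 3), b = c(4, 5, 6), d = c(NA, 8, 9))
RemoveRowsWithNaNs(m, cols = 1:2)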
|
or.relimp.lm <- function (model, ui, ci = NULL, index = 2:length(coef(model)),
meq = 0, tol = sqrt(.Machine$double.eps), ...)
{
if (!("lm" %in% class(model)))
stop("ERROR: model must be of class lm.")
if (length(model$xlevels)>0) stop("model must not contain any factors!")
if (max(attr(model$terms,"order")) != 1)
stop ("model must not contain higher order terms")
namen <- names(coef(model))
resp <- attr(model$terms, "response")
xcol <- which(rowSums(attr(model$terms, "factors")) > 0)
DATA <- as.data.frame(model$model[, c(resp, xcol)])
wt <- weights(model)
if (is.null(wt)) wt <- rep(1/nrow(DATA),nrow(DATA))
aus <- Shapley.value(set.func(all.R2(cov.wt(DATA,wt=wt)$cov, ui, ci = ci, index = index,
meq = meq, tol = tol, ...)))
names(aus) <- namen[-1]
aus
}
|
"oildata"
|
genodds <- function(response, group, strata=NULL,alpha=0.05,ties="split",
nnt=FALSE, verbose=FALSE,upper=TRUE, assume_no_effect=FALSE
)
{
if(length(response)==0)
{
stop("Response cannot be empty")
}
if(length(group)==0)
{
stop("Group cannot be empty")
}
if (length(response)!=length(group))
{
stop("Response and Group are different lengths")
}
if(is.null(strata))
{
nMissing <- sum(is.na(response) | is.na(group))
if(sum(is.na(response) | is.na(group)) > 0)
{
warning(sprintf("Dropped %d observations with missing values",nMissing))
keepList <- !(is.na(response) | is.na(group))
response <- response[keepList]
group <- group[keepList]
}
}
else
{
nMissing <- sum(is.na(response) | is.na(group) | is.na(strata))
if(nMissing > 0)
{
warning(sprintf("Dropped %d observations with missing values",nMissing))
keepList <- !(is.na(response) | is.na(group) | is.na(strata) )
response <- response[keepList]
group <- group[keepList]
strata <- strata[keepList]
}
}
if(length(unique(group))!=2)
{
stop("Group must take on exactly 2 values")
}
  if(!is.factor(group))
  {
    group=as.factor(group)
  }
if (is.null(strata)){
strata=rep("All data",length(response))
} else {
if (length(strata)!=length(group))
{
stop("Strata and Response/Group are different lengths")
}
    if(!is.factor(strata))
    {
      strata=as.factor(strata)
    }
}
if(ties=="split")
{
contr_fav=0.5
}
else if (ties=="drop")
{
contr_fav=NA
}
else
{
stop("Invalid ties option specified")
}
genodds_strata=function(response,group)
{
crosstab=as.matrix(table(response,group))
N=sum(crosstab)
p=crosstab/N
Rt=p[,2:1]
Rs=get_Rs(p)
Rd=get_Rd(p)
if(!is.na(contr_fav))
{
Rs=Rs+(1-contr_fav)*Rt
Rd=Rd+contr_fav*Rt
}
Pc=sum(p*Rs)
Pd=sum(p*Rd)
odds=Pc/Pd
SEodds=2/Pd*(sum(p*(odds*Rd-Rs)^2)/N)^0.5
SElnodds=SEodds/odds
conf.int=exp( qnorm(c(alpha/2,1-alpha/2),mean=log(odds),sd=SElnodds) )
p=outer(apply(p,1,sum),apply(p,2,sum))
Rt=p[,2:1]
Rs=get_Rs(p)
Rd=get_Rd(p)
if(!is.na(contr_fav))
{
Rs=Rs+contr_fav*Rt
Rd=Rd+contr_fav*Rt
}
Pc=sum(p*Rs)
Pd=sum(p*Rd)
SEnull=2/Pd*(sum(p*(1*Rd-Rs)^2)/N)^0.5
SElnnull=SEnull/1
SE <- ifelse(assume_no_effect,SElnnull,SElnodds)
p=pnorm(abs(log(odds)),sd=SE,lower.tail=FALSE)*2
out=list(odds=odds,conf.int=conf.int,p=p,SEodds=SEodds,SEnull=SEnull,
xtab=crosstab)
return(out)
}
results=by(data.frame(response,group),
strata,
function(df) genodds_strata(df$response,df$group)
)
pooled_lnodds=do.call("sum",lapply(results,function(x) x$odds^2/x$SEodds^2 * log(x$odds) ))/
do.call("sum",lapply(results,function(x) x$odds^2/x$SEodds^2 ))
pooled_SElnodds=sqrt(1/do.call("sum",lapply(results,function(x) x$odds^2/x$SEodds^2)))
pooled_SElnnull=sqrt(1/do.call("sum",lapply(results,function(x) 1/x$SEnull^2)))
SE <- ifelse(assume_no_effect,pooled_SElnnull,pooled_SElnodds)
pooled_lnconf.int=qnorm(c(alpha/2,1-alpha/2),mean=pooled_lnodds,sd=SE)
pooled_p=pnorm(abs(pooled_lnodds),sd=SE,lower.tail=FALSE)*2
pooled_rel_statistic=do.call("sum",lapply(results,function(x) ( (log(x$odds) - pooled_lnodds)/(x$SEodds/x$odds) )^2 ) )
pooled_rel_p = pchisq(pooled_rel_statistic,length(results)-1,lower.tail = FALSE)
crosslayer=lapply(results, function(x) {
lapply(results, function(y) {
return(data.frame(lnrel=log(x$odds/y$odds),selnrel= sqrt( (x$SEodds/x$odds)^2 + (y$SEodds/y$odds)^2 )) )
})
})
lnrel=matrix(nrow=length(crosslayer),ncol = length(crosslayer),dimnames = list(names(crosslayer),names(crosslayer)))
SElnrel=matrix(nrow=length(crosslayer),ncol = length(crosslayer),dimnames = list(names(crosslayer),names(crosslayer)))
for (i in names(crosslayer) )
{
for (j in names(crosslayer[[i]]))
{
lnrel[i,j]=crosslayer[[i]][[j]]$lnrel
SElnrel[i,j]=crosslayer[[i]][[j]]$selnrel
}
}
rm(crosslayer)
out=list(pooled_lnodds=pooled_lnodds,
pooled_lnconf.int=pooled_lnconf.int,
pooled_SElnodds=pooled_SElnodds,
pooled_SElnnull=pooled_SElnnull,
pooled_p=pooled_p,
relative_lnodds=lnrel,
relative_selnodds=SElnrel,
pooled_rel_statistic=pooled_rel_statistic,
pooled_rel_p=pooled_rel_p,
results=results,
           param.record=list(response=response, group=group, strata=strata,
                             alpha=alpha, ties=ties,
                             nnt=nnt, verbose=verbose, upper=upper,
                             # print.Genodds reads param.record$groupnames
                             groupnames=levels(group))
)
class(out)=c("Genodds",class(out))
return(out)
}
print.Genodds<-function(x,...){
args <- list(...)
nnt <- x$param.record$nnt
verbose <- x$param.record$verbose
upper <- x$param.record$upper
if("nnt" %in% names(args)){
nnt <- args$nnt
}
if("verbose" %in% names(args)){
verbose <- args$verbose
}
if("upper" %in% names(args)){
upper <- args$upper
}
print_triangle <- function(mat,upper=TRUE,diag=TRUE){
blank <- sprintf("%s",paste(rep(" ",10),collapse=""))
cat(blank)
sapply(colnames(mat),function(x){cat(sprintf("%10s",x))})
cat("\n")
sapply(1:nrow(mat),function(i){
cat(sprintf("%10s",rownames(mat)[i]))
sapply(1:nrow(mat),function(j,i){
if( (i<=j & upper & diag) | (i>=j & !upper & diag) |
(i<j & upper & !diag) | (i>j & !upper & !diag)
){
cat(sprintf("%10f",mat[i,j]))
} else {
cat(sprintf("%s",blank))
}
},i=i)
cat("\n")
})
}
cat("\t Agresti's Generalized odds ratios\n\n")
cat(sprintf("Odds that a random observation in group %s \nwill have a higher response score than a random\nobservation in group %s:\n\n", x$param.record$groupnames[2], x$param.record$groupnames[1]))
for (i in names(x$results))
{
if (length(x$results)>1)
{
cat(paste(" ",substr(paste(i," ",sep=""),1,10),sep=""))
}
else{
cat(" ")
}
cat(sprintf(" Odds: %2.3f (%2.3f, %2.3f) p=%1.4f",
x$results[[i]]$odds,
x$results[[i]]$conf.int[1],
x$results[[i]]$conf.int[2],
x$results[[i]]$p))
if(nnt)
{
cat("\n")
if (length(x$results)>1)
{
cat(" ")
}
else{
cat(" ")
}
nntVals <- 1+2/(c(x$results[[i]]$odds,x$results[[i]]$conf.int)-1)
if(sum(nntVals[2:3]>0)==1){
if(nntVals[1]>0){nntVals[2:3] <- nntVals[3:2]}
cat(sprintf(" NNT: %2.3f (-Inf, %2.3f)U(%2.3f,Inf)",
nntVals[1],
min(nntVals[2:3]),
max(nntVals[2:3])
))
} else {
if(nntVals[1]>0){nntVals[2:3] <- nntVals[3:2]}
cat(sprintf(" NNT: %2.3f (%2.3f, %2.3f)",
nntVals[1],
nntVals[2],
nntVals[3]
))
}
cat("\n")
}
if (length(x$results)>1)
{
cat("\n")
}
}
if (length(x$results)>1)
{
cat("\n")
cat("Test of H0: odds ratios are equal among strata:\n")
cat(sprintf(" X-squared = %2.2f, df= %d \t p=%1.4f", x$pooled_rel_statistic, length(x$results)-1, x$pooled_rel_p))
cat("\n\n")
if (x$pooled_rel_p>x$param.record$alpha | verbose==TRUE)
{
cat("Test of H0: pooled odds = 1:\n")
cat(sprintf(" Pooled odds: %2.3f (%2.3f,%2.3f)", exp(x$pooled_lnodds), exp(x$pooled_lnconf.int[1]), exp(x$pooled_lnconf.int[2])))
cat(sprintf(" p=%0.4f", x$pooled_p))
if(nnt)
{
cat("\n")
nntVals <- 1+2/(c(exp(x$pooled_lnodds),exp(x$pooled_lnconf.int))-1)
if(sum(nntVals[2:3]>0)==1){
if(nntVals[1]>0){nntVals[2:3] <- nntVals[3:2]}
cat(sprintf(" NNT: %2.3f (-Inf, %2.3f)U(%2.3f,Inf)",
nntVals[1],
min(nntVals[2:3]),
max(nntVals[2:3])
))
} else {
if(nntVals[1]>0){nntVals[2:3] <- nntVals[3:2]}
cat(sprintf(" NNT: %2.3f (%2.3f, %2.3f)",
nntVals[1],
nntVals[2],
nntVals[3]
))
}
}
}
if (x$pooled_rel_p<=x$param.record$alpha | verbose==TRUE)
{
cat("\n\nGeneralised relative risk ratios among strata:\n\n")
print_triangle(exp(x$relative_lnodds),
upper=upper,diag=TRUE)
cat("\n\nZ scores for relative risk ratios among strata:\n\n")
print_triangle(x$relative_lnodds/x$relative_selnodds,
upper=upper,diag=TRUE)
}
}
cat("\n")
return(invisible(x))
}
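# Minimal illustration of genodds() on simulated ordinal scores (all values
# made up; the function also relies on the package's get_Rs()/get_Rd()
# helpers, which are defined elsewhere and not shown in this file).
if (interactive()) {
  set.seed(42)
  resp <- sample(1:5, 200, replace = TRUE)
  grp <- rep(c("control", "treated"), each = 100)
  strat <- rep(c("site A", "site B"), times = 100)
  # with strata, print() adds the homogeneity test and pooled estimate
  print(genodds(resp, grp, strata = strat))
}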
|
.updateVariable <- function(variable, value, filter = NULL) {
if (is.Expr(value)) {
castFuncs <- c(
"cast", "format_datetime", "parse_datetime",
"numeric_to_datetime", "datetime_to_numeric"
)
if (has.function(value@expression, castFuncs)) {
halt(
"A variable cannot be updated with a derivation that changes ",
"its type. Are you trying to overwrite a variable with a ",
"derivation of itself to change the type? If so, you might ",
"want to use `type(ds$variable)<-` instead."
)
}
}
payload <- list(
command = "update",
variables = .updatePayload(variable, value)
)
payload[["filter"]] <- zcl(filter)
dref <- datasetReference(variable)
update_url <- paste0(dref, "table/")
out <- crPOST(update_url, body = toJSON(payload))
dropCache(dref)
invisible(out)
}
.updatePayload <- function(variable, value) {
if (is.Array(variable)) {
out <- sapply(urls(subvariables(variable)), function(x) {
zcl(value)
}, simplify = FALSE)
} else {
out <- structure(list(zcl(value)),
.Names = self(variable)
)
}
out <- lapply(out, function(x) {
if ("column" %in% names(x)) {
missings <- is.na(x$column)
if (any(missings)) {
x$column <- as.list(x$column)
x$column[missings] <- rep(
list(.no.data.value(type(variable))), sum(missings)
)
}
}
return(x)
})
return(out)
}
.dispatchFilter <- function(f) {
if (is.logical(f)) {
f <- which(f)
}
if (is.numeric(f)) {
f <- .seqCrunch(zfunc("row"), f - 1)
}
return(f)
}
setMethod("[<-", c("CrunchVariable", "ANY", "missing", "ANY"), .backstopUpdate)
setMethod(
"[<-", c("CrunchVariable", "ANY", "missing", "NULL"),
function(x, i, j, value) return(NULL)
)
.var.updater <- function(x, i, j, value) {
if (missing(i)) i <- NULL
.updateVariable(x, value, filter = .dispatchFilter(i))
return(x)
}
setMethod(
"[<-", c("TextVariable", "ANY", "missing", "character"),
.var.updater
)
setMethod(
"[<-", c("NumericVariable", "ANY", "missing", "numeric"),
.var.updater
)
setMethod(
"[<-", c("DatetimeVariable", "ANY", "missing", "Date"),
.var.updater
)
setMethod(
"[<-", c("DatetimeVariable", "ANY", "missing", "POSIXt"),
.var.updater
)
.var.updater.crunchobj <- function(x, i, j, value) {
if (missing(i)) i <- NULL
if (!identical(zcl(i), zcl(activeFilter(value)))) {
halt("Cannot update a variable with an expression that has a different filter")
} else {
.var.updater(x, i, j, value)
}
}
setMethod(
"[<-", c("CrunchVariable", "ANY", "missing", "CrunchExpr"),
.var.updater.crunchobj
)
setMethod(
"[<-", c("CrunchVariable", "ANY", "missing", "CrunchVariable"),
.var.updater.crunchobj
)
setMethod(
"[<-", c("CrunchVariable", "ANY", "missing", "CrunchLogicalExpr"),
.backstopUpdate
)
.categorical.update <- list(
numeric = function(x, i, j, value) {
if (missing(i)) i <- NULL
if (all(c(NA, -1) %in% value)) {
halt("Cannot have both NA and -1 when specifying category ids")
}
value[is.na(value)] <- -1
invalids <- setdiff(value, ids(categories(x)))
add.no.data <- -1 %in% invalids
invalids <- setdiff(invalids, -1)
if (length(invalids)) {
plural <- length(invalids) > 1
halt(paste0(
"Input value", ifelse(plural, "s ", " "),
serialPaste(invalids), ifelse(plural, " are ", " is "),
"not present in the category ids of variable ", dQuote(name(x))
))
}
if (add.no.data) {
x <- addNoDataCategory(x)
}
out <- .updateVariable(x, value, filter = .dispatchFilter(i))
return(x)
},
character = function(x, i, j, value) {
if (missing(i)) i <- NULL
value[is.na(value)] <- "No Data"
invalids <- setdiff(value, names(categories(x)))
add.no.data <- "No Data" %in% invalids
invalids <- setdiff(invalids, "No Data")
if (length(invalids)) {
plural <- length(invalids) > 1
halt(paste0(
"Input value", ifelse(plural, "s ", " "),
serialPaste(invalids), ifelse(plural, " are ", " is "),
"not present in the category names of variable ",
dQuote(name(x))
))
}
if (add.no.data) {
x <- addNoDataCategory(x)
}
value <- n2i(value, categories(x))
out <- .updateVariable(x, value, filter = .dispatchFilter(i))
return(x)
},
factor = function(x, i, j, value) {
if (missing(i)) i <- NULL
x[i] <- as.character(value)
return(x)
}
)
setMethod(
"[<-", c("CategoricalVariable", "ANY", "missing", "numeric"),
.categorical.update[["numeric"]]
)
setMethod(
"[<-", c("CategoricalVariable", "ANY", "missing", "character"),
.categorical.update[["character"]]
)
setMethod(
"[<-", c("CategoricalVariable", "ANY", "missing", "factor"),
.categorical.update[["factor"]]
)
setMethod(
"[<-", c("CategoricalArrayVariable", "ANY", "missing", "numeric"),
.categorical.update[["numeric"]]
)
setMethod(
"[<-", c("CategoricalArrayVariable", "ANY", "missing", "character"),
.categorical.update[["character"]]
)
setMethod(
"[<-", c("CategoricalArrayVariable", "ANY", "missing", "factor"),
.categorical.update[["factor"]]
)
setMethod(
"[<-", c("CrunchVariable", "ANY", "missing", "logical"),
function(x, i, j, value) {
if (all(is.na(value))) {
value <- .no.data.value(type(x), add.type = TRUE)
if (has.categories(x)) {
return(.categorical.update[["numeric"]](x, i, j, value))
}
if (missing(i)) i <- NULL
out <- .updateVariable(x, value, filter = .dispatchFilter(i))
return(x)
} else if (has.categories(x) &&
all(names(categories(x)) %in% c("True", "False", "No Data"))) {
value <- c("True", "False")[2L - as.integer(value)]
return(.categorical.update[["character"]](x, i, j, value))
} else {
.backstopUpdate(x, i, j, value)
}
}
)
.no.data.value <- function(x, add.type = FALSE) {
if (has.categories(x)) {
return(NA)
} else {
out <- NA
if (add.type) {
out <- list(value = NA, type = list(class = x))
}
return(out)
}
}
setMethod("is.na<-", "CrunchVariable", function(x, value) {
x[value] <- NA
})
|
multiple_csv_reader <- function(file_path, rows_to_skip = 0,
                                col_names = TRUE) {
  # requires readr::read_csv() and janitor::clean_names()
  list_of_files <-
    list.files(path = file_path,
               full.names = TRUE,
               pattern = "\\.csv$")  # anchored regex; "*.csv" is a glob, not a regex
all <-
lapply(list_of_files,
read_csv,
skip = rows_to_skip,
col_names = col_names
)
all_cleaned <- lapply(all, clean_names)
final_table <- do.call(rbind, all_cleaned)
return(final_table)
}
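# Usage sketch: combine every CSV under a folder into one cleaned table
# ("data/raw" is a placeholder path; readr and janitor must be installed).
if (interactive()) {
  library(readr)
  library(janitor)
  combined <- multiple_csv_reader("data/raw", rows_to_skip = 1)
  head(combined)
}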
|
MCMC <- function(p, n, init, scale=rep(1, length(init)),
adapt=!is.null(acc.rate), acc.rate=NULL, gamma=2/3, list=TRUE,
showProgressBar=interactive(), n.start=0, ...) {
if(adapt & !is.numeric(acc.rate)) stop('Argument "acc.rate" is missing!')
if(gamma<=0.5 | gamma>1) stop('Argument "gamma" must be in (0.5, 1]!')
if(is.numeric(adapt)) n.adapt <- adapt
if(adapt==TRUE) n.adapt <- Inf
if(adapt==FALSE) n.adapt <- 0
d <- length(init)
X <- matrix(NA, ncol=d, nrow=n)
colnames(X) <- names(init)
X[1,] <- init
p.val <- rep(NA, n)
val <- p(X[1,], ...)
if(is.list(val)) {
returns.list <- TRUE
extras <- list()
if(!"log.density" %in% names(val)) {
stop("The list returned by 'p' must contain an element named 'log.density!'")
}
if(length(val$log.density)>1) stop("The list element 'log.density' must be a scalar value!")
p.val[1] <- val$log.density
extras[[1]] <- val["log.density" != names(val)]
} else {
returns.list <- FALSE
if(length(val)>1) stop("The function 'p' must return a scalar value or a named list! See ?MCMC.!")
p.val[1] <- val
}
if(d>1) {
if(length(scale)==d) {
M <- diag(scale)
} else {
M <- scale
}
} else {
M <- matrix(scale)
}
if(ncol(M) != length(init)) stop("Length or dimension of 'init' and 'scale' do not match!")
S <- t(chol(M))
cat(' generate', n, 'samples \n')
if(showProgressBar){
pb <- txtProgressBar(min=0, max=n, style=3)
}
update.step <- max(5, floor(n/100))
k <- 0
for(i in 2:n){
if(showProgressBar && i %% update.step == 0) {
setTxtProgressBar(pb, i)
}
U <- rt(d, df=d)
X.prop <- c( X[i-1,] + S %*% U )
names(X.prop) <- names(init)
val <- p(X.prop, ...)
if(returns.list) {
p.val.prop <- val$log.density
extras.prop <- val["log.density" != names(val)]
} else {
p.val.prop <- val
}
alpha <- min(1, exp( p.val.prop - p.val[i-1] ))
if(!is.finite(alpha)) alpha <- 0
if(runif(1)<alpha) {
X[i,] <- X.prop
p.val[i] <- p.val.prop
if(returns.list) {
extras[[i]] <- extras.prop
}
k <- k+1
} else {
X[i,] <- X[i-1,]
p.val[i] <- p.val[i-1]
if(returns.list) {
extras[[i]] <- extras[[i-1]]
}
}
ii <- i+n.start
if(ii < n.adapt) {
S <- ramcmc::adapt_S(S, U, alpha, ii, acc.rate, gamma)
}
}
if(showProgressBar){
close(pb)
}
acceptance.rate <- round(k/(n-1), 3)
if(list) {
res <- list(samples=X,
log.p=p.val,
cov.jump=S %*% t(S),
n.sample=n,
acceptance.rate=acceptance.rate,
adaption=adapt,
sampling.parameters=list(sample.density=p,
acc.rate=acc.rate,
gamma=gamma)
)
if(returns.list) {
res$extra.values = extras
}
return(res)
} else {
cat("Acceptance rate:", acceptance.rate, "\n")
return(X)
}
}
MCMC.add.samples <- function(MCMC.object, n.update, ...) {
if(!is.null(names(MCMC.object))) {
if(is.matrix(MCMC.object)) stop("Only MCMC objects generated with option 'list=TRUE' can be updated!")
p <- MCMC.object$sampling.parameters$sample.density
init <- MCMC.object$samples[nrow(MCMC.object$samples),]
scale <- MCMC.object$cov.jump
acc.rate <- MCMC.object$sampling.parameters$acc.rate
gamma <- MCMC.object$sampling.parameters$gamma
n.before <- MCMC.object$n.sample
adapt <- MCMC.object$adaption
samp.update <- MCMC(p=p, n=n.update, init=init, scale=scale, adapt=adapt, acc.rate=acc.rate,
gamma=gamma, list=TRUE, n.start=n.before, ...)
MCMC.object$cov.jump <- samp.update$cov.jump
m <- c(MCMC.object$n.sample, samp.update$n.sample)
MCMC.object$acceptance.rate <- 1/sum(m)*(m[1]*MCMC.object$acceptance.rate + m[2]*samp.update$acceptance.rate)
MCMC.object$n.sample <- MCMC.object$n.sample + n.update
MCMC.object$samples <- rbind(MCMC.object$samples, samp.update$samples)
MCMC.object$log.p <- c(MCMC.object$log.p, samp.update$log.p)
if("extra.values" %in% names(MCMC.object)) {
MCMC.object$extra.values <- c(MCMC.object$extra.values, samp.update$extra.values)
}
return(MCMC.object)
}
if(is.null(names(MCMC.object))) {
MCMC.object <- lapply(MCMC.object, function(x) MCMC.add.samples(x, n.update=n.update, ...))
return(MCMC.object)
}
}
MCMC.parallel <- function(p, n, init, n.chain=4, n.cpu, packages=NULL, dyn.libs=NULL,
scale=rep(1, length(init)), adapt=!is.null(acc.rate),
acc.rate=NULL, gamma=2/3, list=TRUE, ...) {
cl <- makeCluster(min(n.cpu, detectCores()))
on.exit({ stopCluster(cl); print("Cluster stopped.")})
varlist <- unique(c(ls(), ls(envir=.GlobalEnv), ls(envir=parent.env(environment()))))
clusterExport(cl, varlist=varlist, envir=environment())
clusterSetRNGStream(cl)
wd <- getwd()
clusterExport(cl, varlist=c("packages", "dyn.libs", "wd"), envir=environment())
MCMC.wrap <- function(x, ...) {
if(!is.null(packages)) sapply(packages, function(x) require(x, character.only=TRUE))
if (!is.null(dyn.libs)) {
sapply(dyn.libs, function(x) dyn.load(paste(wd, x, sep = "/")))
on.exit( sapply(dyn.libs, function(x) dyn.unload(paste(wd, x, sep = "/"))) )
}
MCMC(...)
}
result <- clusterApply(cl, 1:n.chain, MCMC.wrap, p=p, n=n, init=init,
scale=scale, adapt=adapt, acc.rate=acc.rate,
gamma=gamma, list=list, ...)
return(result)
}
convert.to.coda <- function(sample) {
if(!is.null(names(sample))) {
if(is.matrix(sample)) {
obj <- coda::mcmc(sample)
}
if(is.list(sample)) {
obj <- coda::mcmc(sample$samples)
}
return(obj)
} else {
if(is.matrix(sample[[1]])) {
obj <- as.mcmc.list(lapply(sample, coda::mcmc))
}
if(is.list(sample[[1]])) {
obj <- as.mcmc.list(lapply(sample, function(x) {coda::mcmc(x$samples)}))
}
return(obj)
}
}
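# Short demonstration run of the adaptive sampler on a standard bivariate
# normal log-density; acc.rate = 0.234 is the classic random-walk target.
# The adaptation step calls ramcmc::adapt_S, and convert.to.coda() needs coda.
if (interactive()) {
  p.log <- function(x) -0.5 * sum(x^2)
  fit <- MCMC(p.log, n = 2000, init = c(0, 0), scale = c(1, 1),
              adapt = TRUE, acc.rate = 0.234)
  fit$acceptance.rate
  str(convert.to.coda(fit))
}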
|
add1.BTm <- function(object, scope, scale = 0, test = c("none", "Chisq", "F"),
x = NULL, ...) {
old.form <- formula(object)
new.form <- update.formula(old.form, scope)
if (!is.character(scope)){
orandom <- findbars(old.form[[2]])
srandom <- findbars(new.form[[2]])
if (length(srandom) && !identical(orandom, srandom))
stop("Random effects structure of object and scope must be ",
"identical.")
scope <- add.scope(old.form, new.form)
}
if (!length(scope))
stop("no terms in scope for adding to object")
if (is.null(x)) {
model <- Diff(object$player1, object$player2, new.form, object$id,
object$data, object$separate.ability, object$refcat)
if (sum(model$offset) > 0)
warning("ignoring offset terms in scope")
x <- model$X
asgn <- attr(x, "assign")
oTerms <- c("sep"[0 %in% asgn], object$term.labels)
object$terms <- terms(reformulate(oTerms))
y <- object$y
dummy <- y ~ x - 1
if (!is.null(model$random)) {
dummy <- update(dummy, .~ . + Z)
Z <- model$random
}
argPos <- match(c("weights", "subset", "na.action"),
names(object$call), 0)
mf <- as.call(c(model.frame, as.list(object$call)[argPos],
list(formula = dummy, offset = object$offset)))
mf <- eval(mf, parent.frame())
x <- mf$x
y <- model.response(mf)
Z <- mf$Z
wt <- model.weights(mf)
if (is.null(wt)) wt <- rep.int(1, length(y))
offset <- model.offset(mf)
}
else {
asgn <- attr(x, "assign")
y <- object$y
wt <- object$prior.weights
offset <- object$offset
Z <- object$random
}
if (is.null(object$random)){
attr(x, "assign") <- asgn + 1
object$formula <- formula(object$terms)
object$x <- x
object$y <- y
object$random <- Z
object$prior.weights <- wt
object$offset <- offset
stat.table <- NextMethod(x = x)
attr(stat.table, "heading")[3] <- deparse(old.form)
if (newsep <- sum(asgn == 0) - sum(object$assign ==0))
attr(stat.table, "heading") <- c(attr(stat.table, "heading"),
paste("\n", newsep,
" separate effects added\n",
sep = ""))
attr(stat.table, "separate.abilities") <- colnames(x)[asgn == 0]
return(stat.table)
}
oTerms <- attr(terms(nobars(old.form)), "term.labels")
Terms <- attr(terms(nobars(new.form)), "term.labels")
ousex <- asgn %in% c(0, which(Terms %in% oTerms))
sTerms <- vapply(strsplit(Terms, ":", fixed = TRUE),
function(x) paste(sort(x), collapse = ":"),
character(1))
method <- switch(object$method,
glmmPQL.fit)
control <- object$control
control$trace <- FALSE
if (scale == 0) dispersion <- 1
else dispersion <- scale
ns <- length(scope)
stat <- df <- numeric(ns)
names(stat) <- names(df) <- as.character(scope)
tryerror <- FALSE
for (i in seq(scope)) {
stt <- paste(sort(strsplit(scope[i], ":")[[1]]), collapse = ":")
usex <- match(asgn, match(stt, sTerms), 0) > 0 | ousex
fit <- method(X = x[, usex, drop = FALSE], y = y, Z = Z, weights = wt,
offset = offset, family = object$family,
control = control,
sigma = object$call$sigma,
sigma.fixed = object$sigma.fixed)
class(fit) <- oldClass(object)
ind <- (usex & !ousex)[usex]
trystat <- try(t(coef(fit)[ind]) %*%
chol2inv(chol(vcov(fit, dispersion = dispersion)[ind, ind])) %*%
coef(fit)[ind], silent = TRUE)
if (inherits(trystat, "try-error")) {
stat[i] <- df[i] <- NA
tryerror <- TRUE
}
else {
stat[i] <- trystat
df[i] <- sum(ind)
}
}
table <- data.frame(stat, df)
dimnames(table) <- list(names(df), c("Statistic", "Df"))
title <- "Single term additions\n"
topnote <- paste("Model: ", deparse(as.vector(formula(object))),
if (scale > 0) paste("\nscale: ", format(scale), "\n"),
if (tryerror)
"\n\nTest statistic unestimable for at least one term")
test <- match.arg(test)
if (test == "Chisq") {
dfs <- table[, "Df"]
vals <- table[, "Statistic"]
vals[dfs %in% 0] <- NA
table <- cbind(table, `P(>|Chi|)` = pchisq(vals, abs(dfs),
lower.tail = FALSE))
}
else if (test == "F") {
df.dispersion <- Inf
if (df.dispersion == Inf) {
fam <- object[[1]]$family$family
if (fam == "binomial" || fam == "poisson")
warning(gettextf(
"using F test with a '%s' family is inappropriate",
fam), domain = NA, call. = FALSE)
else {
warning("using F test with a fixed dispersion is inappropriate")
}
}
dfs <- table[, "Df"]
Fvalue <- table[, "Statistic"]/abs(dfs)
Fvalue[dfs %in% 0] <- NA
table <- cbind(table, F = Fvalue, `Pr(>F)` =
pf(Fvalue, abs(dfs), df.dispersion,
lower.tail = FALSE))
}
    heading <- c(title, topnote)
    if (newsep <- sum(asgn == 0) - sum(object$assign == 0))
        heading <- c(heading, paste("\n", newsep,
                                    " separate effects added\n",
                                    sep = ""))
    structure(table, heading = heading,
              class = c("anova", "data.frame"),
              separate.abilities = colnames(x)[asgn == 0])
}
|
PipeOpTaskSurvRegr = R6Class("PipeOpTaskSurvRegr",
inherit = PipeOpTaskTransformer,
public = list(
initialize = function(id = "trafotask_survregr", param_vals = list()) {
ps = ParamSet$new(list(
ParamFct$new("method", default = "ipcw",
levels = c("ipcw", "mrl", "bj", "delete", "omit", "reorder"),
tags = "train"),
ParamFct$new("estimator", default = "kaplan", levels = c("kaplan", "akritas", "cox"),
tags = "train"),
ParamDbl$new("alpha", default = 1, lower = 0, upper = 1, tags = "train"),
ParamDbl$new("lambda", default = 0.5, lower = 0, upper = 1, tags = "train"),
ParamDbl$new("eps", default = 1e-15, lower = 0, upper = 1, tags = "train"),
ParamUty$new("features", tags = "train"),
ParamUty$new("target", tags = "train"),
ParamFct$new("learner", default = "linear.regression",
levels = c("linear.regression", "mars", "pspline", "tree", "acosso",
"enet", "enet2", "mnet", "snet"), tags = c("train", "bj")),
ParamLgl$new("center", default = TRUE, tags = c("train", "bj")),
ParamLgl$new("mimpu", default = NULL, special_vals = list(NULL), tags = c("train", "bj")),
ParamInt$new("iter.bj", default = 20, lower = 2, tags = c("train", "bj")),
ParamInt$new("max.cycle", default = 5, lower = 1, tags = c("train", "bj")),
ParamInt$new("mstop", default = 50, lower = 1, tags = c("train", "bj")),
ParamDbl$new("nu", default = 0.1, lower = 0, tags = c("train", "bj"))
))
ps$add_dep("alpha", "method", CondEqual$new("ipcw"))
ps$add_dep("eps", "method", CondEqual$new("ipcw"))
ps$add_dep("estimator", "method", CondAnyOf$new(c("ipcw", "mrl")))
ps$add_dep("lambda", "estimator", CondEqual$new("akritas"))
ps$add_dep("features", "method", CondEqual$new("reorder"))
ps$add_dep("target", "method", CondEqual$new("reorder"))
ps$add_dep("center", "method", CondEqual$new("bj"))
ps$add_dep("mimpu", "method", CondEqual$new("bj"))
ps$add_dep("iter.bj", "method", CondEqual$new("bj"))
ps$add_dep("center", "method", CondEqual$new("bj"))
ps$add_dep("mstop", "method", CondEqual$new("bj"))
ps$add_dep("nu", "method", CondEqual$new("bj"))
ps$add_dep("learner", "method", CondEqual$new("bj"))
super$initialize(id = id,
param_set = ps,
param_vals = param_vals,
input = data.table(name = c("input", "input_features"),
train = c("TaskSurv", "*"),
predict = c("TaskSurv", "*")),
output = data.table(name = "output", train = "TaskRegr", predict = "TaskRegr")
)
}
),
private = list(
.predict = function(inputs) {
pv = self$param_set$values
target = pv$target
if (is.null(target)) {
target = inputs[[1]]$target_names[1L]
}
backend = private$.reorder(copy(inputs[[1]]$data()), pv$features, target, inputs[[2]])
return(list(TaskRegr$new(id = inputs[[1]]$id, backend = backend, target = target)))
},
.transform = function(inputs) {
input = inputs[[1]]
backend = copy(input$data())
time = input$target_names[1L]
status = input$target_names[2L]
pv = self$param_set$values
method = pv$method
if (is.null(method)) {
method = "ipcw"
}
estimator = pv$estimator
if (is.null(estimator)) {
estimator = "kaplan"
}
eps = pv$eps
if (is.null(eps)) {
eps = 1e-15
}
backend = switch(method,
ipcw = private$.ipcw(backend, status, time, estimator, eps),
mrl = private$.mrl(backend, status, time, input, estimator),
bj = private$.bj(backend, status, time),
delete = private$.delete(backend, status),
omit = private$.omit(backend, status),
reorder = private$.reorder(backend, pv$features, pv$target, inputs[[2]])
)
target = ifelse(method == "reorder", pv$target, time)
new_task = TaskRegr$new(id = input$id, backend = backend, target = target)
if (method == "ipcw") {
new_task$col_roles$weight = "ipc_weights"
}
return(new_task)
},
.ipcw = function(backend, status, time, estimator, eps) {
cens = backend[[status]] == 0
new_backend = copy(backend)
new_backend[[status]] = 1 - new_backend[[status]]
task = TaskSurv$new("ipcw", new_backend, time, status)
est = switch(estimator,
kaplan = LearnerSurvKaplan,
cox = LearnerSurvCoxPH,
akritas = get_akritas_learner()
)$new()
if (estimator == "akritas") {
est$param_set$values$lambda = self$param_set$values$lambda
}
est = est$train(task)$predict(task)$distr
weights = as.numeric(est$survival(data = matrix(task$truth()[, 1], nrow = 1)))
weights[weights == 0] = eps
weights = 1 / weights
alpha = self$param_set$values$alpha
if (!is.null(alpha)) {
if (alpha == 0) {
weights[cens] = 0
} else {
weights[cens] = weights[cens] * alpha
}
}
backend$ipc_weights = weights
return(subset(backend, select = -status))
},
.mrl = function(backend, status, time, input, estimator) {
cens = backend[[status]] == 0
upper = max(backend[[time]])
unique_times = sort(unique(backend[[time]]))
if (estimator == "kaplan") {
est = LearnerSurvKaplan$new()$train(input)$predict(input, row_ids = 1)$distr[1]
den = est$survival(backend[[time]][cens])
num = sapply(backend[[time]][cens], function(x) {
est$survivalAntiDeriv(x)
})
mrl = num / den
} else {
if (estimator == "cox") {
est = LearnerSurvCoxPH$new()$train(input)$predict(input)$distr
} else {
est = get_akritas_learner()$new()
est$param_set$values$lambda = self$param_set$values$lambda
est = est$train(input)$predict(input)$distr
}
den = as.numeric(est$survival(data = matrix(backend[[time]], nrow = 1)))[cens]
mrl = numeric(sum(cens))
for (i in seq_along(mrl)) {
x = backend[cens, ][[time]][i]
int_range = unique_times[x <= unique_times & upper >= unique_times]
num = (sum(est[i]$survival(int_range)) * (diff(range(int_range)) / length(int_range)))
mrl[i] = num / den[i]
}
}
backend[[time]][cens] = backend[[time]][cens] + mrl
return(subset(backend, select = -status))
},
.bj = function(backend, status, time) {
mlr3misc::require_namespaces("bujar")
x = data.frame(backend)[, colnames(backend) %nin% c(time, status), drop = FALSE]
x = model.matrix(~., x)[, -1]
bj = mlr3misc::invoke(bujar::bujar,
y = backend[[time]],
cens = backend[[status]],
x = x,
tuning = FALSE,
vimpint = FALSE,
.args = self$param_set$get_values(tags = "bj")
)
backend[[time]] = bj$ynew
return(backend)
},
.delete = function(backend, status) {
subset(backend, status == 1, select = -status)
},
.omit = function(backend, status) {
subset(backend, select = -status)
},
.reorder = function(backend, features, target, task) {
if (is.null(task)) {
if (is.null(features)) {
stop("One of 'features' or 'task' must be provided.")
} else {
features = subset(backend, select = features)
}
} else {
assertClass(task, "TaskSurv")
features = copy(task$data(cols = task$feature_names))
}
if (target %nin% colnames(features)) {
target = subset(backend, select = target)
return(cbind(features, target))
} else {
return(features)
}
}
)
)
|
autocov_VAR1 = function(A, Sigma, lag_max)
{
A = as.matrix(A)
m = nrow(Sigma)
  Gamma = array(NA, dim = c(m, m, lag_max+1))
  eig = eigen(A)
  V = eig$vectors
  lambda = eig$values
  # stationarity requires all eigenvalue moduli to be strictly below one;
  # Mod() also catches complex and negative roots that Re() would miss
  if (any(Mod(lambda) > 1 - 1e-20)) {
    warning("Unit root detected in autocov_VAR1")
  }
  rm(eig)
  # when A is symmetric its eigenvectors are orthonormal, so t(V) inverts V
  if (isSymmetric(A)) {
    V_inv = t(V)
  } else {
    V_inv = solve(V)
  }
C = V_inv %*% as.matrix(Sigma) %*% t(V_inv)
rm(V_inv)
e = matrix(1 / (1 - lambda %x% lambda), m^2, 1) * matrix(C, m^2, 1)
rm(C)
E = matrix(e, m, m)
rm(e)
Gamma[,,1] = Re(V %*% E %*% t(V))
rm(E, V)
for (h in seq_len(lag_max)) {
Gamma[,,h+1] = A %*% Gamma[,,h]
}
return(Gamma)
}
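# Sanity check against the scalar AR(1) formula: for x_t = a x_{t-1} + e_t,
# Gamma(h) = a^h * sigma^2 / (1 - a^2).
G <- autocov_VAR1(A = matrix(0.5), Sigma = matrix(1), lag_max = 3)
all.equal(as.vector(G), 0.5^(0:3) / (1 - 0.5^2))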
|
setClass("EDAResults",
contains = "list")
summaryEDAResults <- function (object) {
numGens <- sapply(object, function (r) r@numGens)
fEvals <- sapply(object, function (r) r@fEvals)
bestEval <- sapply(object, function (r) r@bestEval)
cpuTime <- sapply(object, function (r) r@cpuTime)
f <- function (x) c(min(x), median(x), max(x), mean(x), sd(x))
data <- cbind(f(numGens), f(fEvals), f(bestEval), f(cpuTime))
colnames(data) <- c("Generations", "Evaluations",
"Best Evaluation", "CPU Time")
rownames(data) <- c("Minimum", "Median", "Maximum",
"Mean", "Std. Dev.")
data
}
setMethod("summary", "EDAResults", summaryEDAResults)
showEDAResults <- function (object) {
numGens <- sapply(object, function (r) r@numGens)
fEvals <- sapply(object, function (r) r@fEvals)
bestEval <- sapply(object, function (r) r@bestEval)
cpuTime <- sapply(object, function (r) r@cpuTime)
data <- cbind(numGens, fEvals, bestEval, cpuTime)
colnames(data) <- c("Generations", "Evaluations",
"Best Evaluation", "CPU Time")
rownames(data) <- paste("Run", as.character(seq(length = nrow(data))))
cat("\n"); print(data); cat("\n")
}
setMethod("show", "EDAResults", showEDAResults)
edaIndepRuns <- function (eda, f, lower, upper, runs, verbose = FALSE) {
results <- new("EDAResults")
for (run in seq(length = runs)) {
result <- edaRun(eda, f, lower, upper)
results <- as(c(results, result), "EDAResults")
if (verbose) {
if (run == 1) {
cat("\n")
w <- max(getOption("digits") + 5, 15)
h <- c("Run", "Generations", "Evaluations",
"Best Evaluation", "CPU Time")
cat(format(h, justify = "right", width = w), "\n")
}
cat(format(c(run, result@numGens, result@fEvals), width = w),
format(result@bestEval, scientific = TRUE, width = w),
format(result@cpuTime, width = w),
"\n")
}
}
if (verbose && runs > 1) {
numGens <- sapply(results, function (r) r@numGens)
fEvals <- sapply(results, function (r) r@fEvals)
bestEval <- sapply(results, function (r) r@bestEval)
cpuTime <- sapply(results, function (r) r@cpuTime)
cat("\n")
h <- c("", "Generations", "Evaluations",
"Best Evaluation", "CPU Time")
cat(format(h, justify = "right", width = w), "\n")
functions <- list(min, median, max, mean, sd)
rowNames <- c("Minimum", "Median", "Maximum", "Mean", "Std. Dev.")
for (i in seq(along = functions)) {
f <- functions[[i]]
cat(format(rowNames[i], justify = "right", width = w),
format(c(f(numGens), f(fEvals)), width = w),
format(f(bestEval), scientific = TRUE, width = w),
format(f(cpuTime), width = w),
"\n")
}
}
as(results, "EDAResults")
}
|
has_endmark <- function(x, endmarks = c('?', '.', '!'), ...){
!is.na(x) & grepl(
sprintf('[%s]\\s*$', paste(endmarks, collapse = "")),
x, perl = TRUE,
...
)
}
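# Example: TRUE when the string ends in an end mark (trailing whitespace is
# allowed); NA inputs give FALSE.
has_endmark(c("Done.", "Really?", "no mark", NA))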
|
fixdmat <- function(v)
{
n <- (1 + sqrt(1 + 8 * length(v)))/2
dist.m <- matrix(0, n, n)
dist.m[row(dist.m) < col(dist.m)] <- v
dist.m <- dist.m + t(dist.m)
dist.m
}
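# Small example: for three objects the condensed vector (d12, d13, d23)
# expands to the full symmetric matrix with a zero diagonal. Note that the
# input is consumed in column-major order over the upper triangle.
fixdmat(c(1, 2, 3))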
|
get_bin_info <- function(dat) {
dat <- dat[!vapply(dat, is.null, FUN.VALUE = logical(1L))]
dummy_dat_list <- do.call("rbind",
lapply(seq_along(dat), function(type) {
do.call("rbind",
lapply(seq_along(dat[[type]]$fleets), function(fleet) {
data.frame(
"fleet" = dat[[type]]$fleets[fleet],
"year" = dat[[type]]$years[[fleet]],
"type" = names(dat)[type],
stringsAsFactors = FALSE)}
))
}))
invisible(dummy_dat_list)
}
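# Illustration with a made-up ss3sim-style sampling list: one entry per data
# type, each holding a fleet vector and per-fleet year vectors.
dat <- list(lencomp = list(fleets = c(1, 2),
                           years = list(1990:1992, 1995:1996)))
print(get_bin_info(dat))  # returned invisibly, hence the explicit print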
|
airdist <- function(ann=FALSE) {
usr <- diff(par("usr"))[c(1,3)]
plt <- diff(par("plt"))[c(1,3)]
if (abs(diff(plt/usr)) > 0.005)
warning("plot x and y scales may differ: use plot(..., asp=1)")
coords <- locator(2)
res <- sqrt(diff(coords$x)^2 + diff(coords$y)^2)
if (ann) {
lines(coords)
text(mean(coords$x), mean(coords$y), format(res, digits=3),
pos=4, offset=0.2, cex=0.7)
}
if (.Platform$OS.type == "windows") bringToTop(-1)
list(dist=res, coords=coords)
}
|
add_zoom <- function(func, scale_low, scale_high) {
function(X, ...) {
if (is.matrix(X)) {
X_scaled <- sweep(X, 2, scale_high-scale_low,'*')
X_scaled <- sweep(X_scaled, 2, scale_low, '+')
} else {
X_scaled <- X * (scale_high - scale_low) + scale_low
}
func(X_scaled)
}
}
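# Example: evaluate a function defined on [-5, 5]^2 using unit-cube inputs.
# The sphere function here is just a stand-in test objective.
sphere <- function(X) if (is.matrix(X)) rowSums(X^2) else sum(X^2)
zoomed <- add_zoom(sphere, scale_low = c(-5, -5), scale_high = c(5, 5))
zoomed(c(0.5, 0.5))  # the cube centre maps to c(0, 0), so the value is 0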
|
lets.iucn.ha <- function(input, count = FALSE) {
stop(paste("This function is no longer supported by the letsR package.",
"Users can look for the package rredlist for similar functions.",
"See the note on our github page for more details."))
input <- .getnames(input)
names <- c("Species", "Forest", "Savanna", "Shrubland", "Grassland",
"Wetlands", "Rocky areas", "Caves and Subterranean Habitats",
"Desert", "Marine Neritic", "Marine Oceanic", "Marine Deep Ocean Floor",
"Marine Intertidal", "Marine Coastal/Supratidal", "Artificial/Terrestrial",
"Artificial/Aquatic", "Introduced Vegetation", "Other", "Unknown")
habitat <- matrix(0, nrow = length(input), ncol = length(names))
colnames(habitat) <- names
n <- length(input)
if (count) {
if (!"tools:rstudio" %in% search()) {
dev.new(width = 2, height = 2, pointsize = 12)
par(mar = c(0, 0, 0, 0))
}
for(i in 1:n){
plot.new()
text(0.5, 0.5, paste(paste("Total:", n, "\n",
"Species to go: ",
(n - i))))
ncolumns <- ncol(habitat)
habitat[i, 2:(ncolumns - 1)] <- .Habitat(input, i,
habitat,
names)
if (sum(habitat[i, ]) == 0) {
habitat[i, ncolumns] <- 1
}
}
dev.off()
}
if (!count) {
for(i in 1:n){
ncolumns <- ncol(habitat)
habitat[i, 2:(ncolumns - 1)] <- .Habitat(input, i,
habitat,
names)
if (sum(habitat[i, ]) == 0) {
habitat[i, ncolumns] <- 1
}
}
}
habitat[, 1] <- input
return(as.data.frame(habitat))
}
.Habitat <- function(input, i, habitat, names) {
c <- .getcode(input[i])
httpclas <- "https://www.iucnredlist.org/details/classify/"
h2 <- try(htmlParse(paste(httpclas, c, "/0", sep = "")),
silent = TRUE)
b2 <- try(xpathSApply(h2, '//html', xmlValue), silent = TRUE)
Nnames <- length(names)
habitatParcial <- numeric(Nnames - 2)
for(t in 2:(Nnames - 1)) {
if (sum(grep(names[t], b2)) > 0) {
habitatParcial[(t - 1)] <- 1
}
}
return(habitatParcial)
}
|
NULL
eval_target_coverage_summary <- function(x, solution, include_zone, include_sense)
UseMethod("eval_target_coverage_summary")
eval_target_coverage_summary.default <- function(
x, solution, include_zone, include_sense) {
stop("argument to x must be a ConservationProblem object")
}
eval_target_coverage_summary.ConservationProblem <- function(
x, solution,
include_zone = number_of_zones(x) > 1,
include_sense = number_of_zones(x) > 1) {
assertthat::assert_that(
inherits(x, "ConservationProblem"),
assertthat::is.flag(include_zone), assertthat::noNA(include_zone),
assertthat::is.flag(include_sense), assertthat::noNA(include_sense))
if (is.Waiver(x$targets))
stop("argument to x does not have targets, ",
"please use the eval_feature_representation() function to evaluate ",
"problems without targets.")
targets <- x$feature_targets()
abundances <- x$feature_abundances_in_total_units()
solution <- planning_unit_solution_status(x, solution)
solution[is.na(solution)] <- 0
d <- targets[, c("feature", "zone", "sense"), drop = FALSE]
attr(d, "out.attrs") <- NULL
d$total_amount <- vapply(
seq_len(nrow(targets)), FUN.VALUE = numeric(1), function(i) {
z <- targets$zone[[i]]
f <- targets$feature[[i]]
idx <- as.matrix(data.frame(f, z))
sum(abundances[idx])
})
d$absolute_held <- rcpp_absolute_amount_held_by_solution(
x$get_data("rij_matrix"), as.list(targets), solution)
d$feature <- x$feature_names()[d$feature]
zn <- x$zone_names()
d$zone <- lapply(d$zone, function(z) zn[z])
d$absolute_target <- targets$value
d$absolute_shortfall <- abs(d$absolute_target - d$absolute_held)
d$absolute_shortfall <- ifelse(
(targets$sense == ">=") &
(d$absolute_held >= d$absolute_target),
rep(0, nrow(d)),
d$absolute_shortfall)
d$absolute_shortfall <- ifelse(
(targets$sense == "=") &
((d$absolute_held - d$absolute_target) < 1e-10),
rep(0, nrow(d)),
d$absolute_shortfall)
d$absolute_shortfall <- ifelse(
(targets$sense == "<=") &
(d$absolute_held <= d$absolute_target),
rep(0, nrow(d)),
d$absolute_shortfall)
d$relative_target <- d$absolute_target / d$total_amount
d$relative_held <- d$absolute_held / d$total_amount
d$relative_shortfall <- d$absolute_shortfall / d$total_amount
d$relative_target[!is.finite(d$relative_target)] <- 0
d$relative_held[!is.finite(d$relative_held)] <- 0
d$relative_shortfall[!is.finite(d$relative_shortfall)] <- 0
d$met <- d$absolute_shortfall < 1e-10
cn <- c(
"feature", "zone", "sense", "met", "total_amount",
"absolute_target", "absolute_held", "absolute_shortfall",
"relative_target", "relative_held", "relative_shortfall")
if (!isTRUE(include_zone)) cn <- setdiff(cn, "zone")
if (!isTRUE(include_sense)) cn <- setdiff(cn, "sense")
d[, cn, drop = FALSE]
}
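# Usage sketch with the simulated single-zone data shipped with prioritizr
# (data object names follow older releases; a solver supported by
# add_default_solver() must be installed for solve() to work):
if (interactive()) {
  library(prioritizr)
  data(sim_pu_raster, sim_features)
  p <- problem(sim_pu_raster, sim_features) %>%
    add_min_set_objective() %>%
    add_relative_targets(0.1) %>%
    add_binary_decisions()
  s <- solve(p)
  # single zone, so the zone and sense columns are dropped by default
  eval_target_coverage_summary(p, s)
}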
|
if(interactive()) {
w <- gwindow("Box container examples", visible=FALSE)
g <- ggroup(cont=w, horizontal=FALSE)
g1 <- ggroup(cont=g)
sapply(c("one", "two", "three"), function(i) gbutton(i, cont=g1))
g2 <- gframe("Framed box", cont=g)
sapply(c("one", "two", "three"), function(i) gbutton(i, cont=g2))
g3 <- gexpandgroup("expanding box", cont=g)
sapply(c("one", "two", "three"), function(i) gbutton(i, cont=g3))
visible(g3) <- TRUE
g5 <- ggroup(cont=g)
l <- glabel("click button to hide/show label", cont=g5)
gbutton("hide/show label", cont=g5, handler=function(h,...) {
l <- g5[1]
visible(l) <- !visible(l)
})
g6 <- ggroup(cont=g)
l <- glabel("click button to delete label", cont=g6)
gbutton("delete label", cont=g6, handler=function(h,...) {
delete(g6, l)
enabled(h$obj) <- FALSE
})
identical(l$parent[1], l)
visible(w) <- TRUE
w <- gwindow("expand, anchor, fill", visible=FALSE)
g <- ggroup(cont=w, horizontal=FALSE)
g1 <- ggroup(cont=g, expand=TRUE)
glabel("no expand", cont=g1)
g1 <- ggroup(cont=g, expand=TRUE)
g1 <- ggroup(cont=g, expand=TRUE)
b1 <- glabel("expand, c(-1,1)", cont=g1, expand=TRUE, anchor=c(-1,1), fill=FALSE)
g1 <- ggroup(cont=g, expand=TRUE)
b1 <- glabel("expand=fill=TRUE", cont=g1, expand=TRUE, fill=TRUE)
g1 <- ggroup(cont=g, expand=TRUE)
b1 <- glabel("expand=TRUE, fill='x'", cont=g1, expand=TRUE, fill="x")
g1 <- ggroup(cont=g, expand=TRUE)
b1 <- glabel("expand=TRUE, fill='y'", cont=g1, expand=TRUE, fill="y")
visible(w) <- TRUE
}
|
mediate_tsls <- function(model.m, model.y, treat = "treat.name",
conf.level = .95,
robustSE = FALSE, cluster = NULL,
boot = FALSE, sims = 1000, est_se = TRUE,
...){
if (!inherits(model.m, "lm") | !inherits(model.y, "lm"))
stop("both mediator and outcome models must be of class `lm'.")
m_var <- all.vars(formula(model.m)[[2]])
y_var <- all.vars(formula(model.y)[[2]])
t_var <- treat
if (length(y_var) > 1L || length(m_var) > 1L)
stop("Left-hand side of model must only have one variable.")
n_y <- nobs(model.y)
n_m <- nobs(model.m)
if (n_y != n_m)
stop("number of observations in both models must be identical.")
if (!is.null(cluster)) {
if (NROW(cluster) != n_y)
stop("length of `cluster' must be equal to number of observations in models.")
} else {
cluster <- seq(n_y)
}
.dat <- eval(getCall(model.y)$data)
.dat <- .dat[names(model.m$fitted.values), ]
.dat[[m_var]] <- predict(model.m)
mod.y <- my_update(model.y, data = .dat)
d <- coef(mod.y)[m_var] * coef(model.m)[t_var]
z <- coef(mod.y)[t_var]
tau.coef <- d + z
nu <- d / tau.coef
if (!est_se) {
se_d <- se_z <- se_tau <- se_n <- NA
d.ci <- z.ci <- tau.ci <- n.ci <- NA
d.p <- z.p <- tau.p <- n.p <- NA
} else {
if (!boot) {
sims <- NA
if (!is.null(cluster)) {
vcv_y <- sandwich::vcovCL(mod.y, cluster = cluster, ...)
vcv_m <- sandwich::vcovCL(model.m, cluster = cluster, ...)
} else if (robustSE) {
vcv_y <- sandwich::vcovHC(mod.y, ...)
vcv_m <- sandwich::vcovHC(model.m, ...)
} else {
vcv_y <- vcov(mod.y)
vcv_m <- vcov(model.m)
}
se_d <- sqrt(
coef(mod.y)[m_var]^2 * vcv_m[t_var, t_var] +
coef(model.m)[t_var]^2 * vcv_y[m_var, m_var] +
vcv_m[t_var, t_var] * vcv_y[m_var, m_var]
)
se_z <- sqrt(vcv_y[t_var, t_var])
se_tau <- sqrt(
vcv_y[t_var, t_var] +
(se_d)^2 +
2 * vcv_y[t_var, m_var] * coef(model.m)[t_var]
)
delta <- function(f, B, Sigma) {
ff <- deriv(f, names(B), func = TRUE)
x <- do.call(ff, as.list(B))
grad <- as.matrix(attr(x, "gradient"), nr = 1)
sqrt(grad %*% Sigma %*% t(grad))
}
Coefs <- c(coef(model.m)[t_var], coef(mod.y)[t_var], coef(mod.y)[m_var])
Coefs <- setNames(Coefs, c("b2", "b3", "gamma"))
Sigma <- diag(c(vcv_m[t_var, t_var], diag(vcv_y)[c(t_var, m_var)]))
Sigma[3,2] <- Sigma[2,3] <- vcv_y[t_var, m_var]
f <- ~b2 * gamma / (b2 * gamma + b3)
se_n <- as.vector(delta(f, Coefs, Sigma))
qq <- (1 - conf.level) / 2
qq <- setNames(c(qq, 1 - qq), c("low", "high"))
d.ci <- d + qnorm(qq) * se_d
z.ci <- z + qnorm(qq) * se_z
tau.ci <- tau.coef + qnorm(qq) * se_tau
n.ci <- nu + qnorm(qq) * se_n
      # two-sided p-values from the normal approximation, matching the
      # two-sided confidence intervals above
      d.p <- 2 * pnorm(-abs(d), sd = se_d)
      z.p <- 2 * pnorm(-abs(z), sd = se_z)
      tau.p <- 2 * pnorm(-abs(tau.coef), sd = se_tau)
      n.p <- 2 * pnorm(-abs(nu), sd = se_n)
} else {
cl <- split(seq_along(cluster), cluster)
cf <- matrix(rep.int(0, 4 * sims), ncol = 4,
dimnames = list(NULL, c("delta", "zeta", "tau", "nu")))
for(i in 1:sims) {
.subset <- unlist(cl[sample(names(cl), length(cl), replace = TRUE)])
.dat_y <- eval(getCall(model.y)$data)[.subset, ]
.dat_m <- eval(getCall(model.m)$data)[.subset, ]
out <- tryCatch({
up_y <- my_update(model.y, data = .dat_y)
up_m <- my_update(model.m, data = .dat_m)
mediate_tsls(up_m, up_y, treat = treat, cluster = NULL, est_se = FALSE)[c("d1", "z0", "tau.coef", "n0")]
}, error = function(e) {
setNames(rep(list(NA), 4), c("d1", "z0", "tau.coef", "n0"))
})
cf[i, ] <- unlist(out)
}
se_d <- sd(cf[, "delta"], na.rm = TRUE)
se_z <- sd(cf[, "zeta"], na.rm = TRUE)
se_tau <- sd(cf[, "tau"], na.rm = TRUE)
se_n <- sd(cf[, "nu"], na.rm = TRUE)
qq <- (1 - conf.level) / 2
qq <- setNames(c(qq, 1 - qq), c("low", "high"))
d.ci <- quantile(cf[, "delta"], qq, na.rm = TRUE)
z.ci <- quantile(cf[, "zeta"], qq, na.rm = TRUE)
tau.ci <- quantile(cf[, "tau"], qq, na.rm = TRUE)
n.ci <- quantile(cf[, "nu"], qq, na.rm = TRUE)
d.p <- pval(cf[, "delta"], d)
z.p <- pval(cf[, "zeta"], z)
tau.p <- pval(cf[, "tau"], tau.coef)
n.p <- pval(cf[, "nu"], nu)
}
}
out <- list(d1 = unname(d), d1.se = se_d, d1.p = d.p, d1.ci = d.ci,
d0 = unname(d), d0.se = se_d, d0.p = d.p, d0.ci = d.ci,
z1 = unname(z), z1.se = se_z, z1.p = z.p, z1.ci = z.ci,
z0 = unname(z), z0.se = se_z, z0.p = z.p, z0.ci = z.ci,
tau.coef = unname(tau.coef), tau.se = se_tau,
tau.ci = tau.ci, tau.p = tau.p,
n0 = unname(nu), n0.se = se_n, n0.ci = n.ci, n0.p = n.p,
boot = boot, boot.ci.type = "perc",
treat = treat, mediator = m_var,
nobs = nobs(model.y), sims = sims,
INT = FALSE, conf.level = conf.level,
model.y = model.y, model.m = model.m
)
class(out) <- c("mediate", "mediate.tsls")
return(out)
}
my_update <- function(mod, formula = NULL, data = NULL) {
call <- getCall(mod)
if (is.null(call)) {
stop("Model object does not support updating (no call)", call. = FALSE)
}
term <- terms(mod)
if (is.null(term)) {
stop("Model object does not support updating (no terms)", call. = FALSE)
}
if (!is.null(data)) call$data <- data
if (!is.null(formula)) call$formula <- update.formula(call$formula, formula)
env <- attr(term, ".Environment")
eval(call, env, parent.frame())
}
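# Hedged sketch of the 2SLS mediation workflow: model.m regresses the mediator
# on the treatment, model.y regresses the outcome on mediator and treatment.
# All data below are simulated, and the sandwich package is required for the
# clustered standard errors used internally.
if (interactive()) {
  set.seed(7)
  n <- 500
  dat <- data.frame(ttt = rbinom(n, 1, 0.5))
  dat$med <- 0.5 * dat$ttt + rnorm(n)
  dat$out <- 0.3 * dat$med + 0.2 * dat$ttt + rnorm(n)
  fit <- mediate_tsls(lm(med ~ ttt, data = dat),
                      lm(out ~ med + ttt, data = dat),
                      treat = "ttt")
  c(indirect = fit$d1, direct = fit$z0, total = fit$tau.coef)
}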
|
test_that("agg_png 8bit generates file", {
file <- tempfile(fileext = '.png')
agg_png(file)
plot(1:10, 1:10)
dev.off()
expect_gt(file.info(file)$size, 0)
unlink(file)
})
test_that("agg_png 16bit generates file", {
file <- tempfile(fileext = '.png')
agg_png(file, bitsize = 16)
plot(1:10, 1:10)
dev.off()
expect_gt(file.info(file)$size, 0)
unlink(file)
})
|
i_pca <- function(data1, data2 = NULL, current_rank, nchunk = 2, disk=FALSE) {
  if (anyNA(data1) || anyNA(data2)) {stop("The data set should not contain missing data")}
  if (!is.null(data2)) {
    if (dim(data1)[2] != dim(data2)[2]) {stop("The data sets must have the same number of columns/variables")}
  }
if (missing("current_rank")) {
current_rank = dim(data1)[2]
}
tdata2 = FALSE
if(is.null(data2)==TRUE){
tdata2 = TRUE
n=nrow(data1)
n1s=1:ceiling(n/2)
data2=data1[-n1s,]
data1=data1[n1s,]
}
if(disk==TRUE){
suppressWarnings(dir.create("./PCstory"))
}else{
allCoords=list()
allCoordsU=list()
allctr=list()
allcor=list()
allctrU=list()
allcorU=list()
}
collabs = colnames(data1)
nrows = nrow(rbind(data1,data2))
ncols = ncol(data1)
nrows1 = nrow(data1)
dims = current_rank
if ((length(nchunk) > 1 ) & (sum(nchunk) != nrow(data2))) {
stop("\nchunk blocks do not match the size of 'data2'")
}
n1=nrow(data1)
data1=scale(data1,center=F,scale=F)
eg1 = do_es(data1)
PC1 = (1/sqrt(n1))*eg1$v[,1:current_rank]%*%diag(eg1$d[1:current_rank])
PCu1 = eg1$u[,1:current_rank]%*%diag(eg1$d[1:current_rank])
eig=(eg1$d[1:current_rank] * (1/sqrt(n1)))^2
cen_data1=scale(data1,center=T,scale=F)
dist2.ind <- apply(cen_data1^2,1,sum)
sum_sq_d1 = apply(data1^2,2,sum)
m1=apply(data1,2,mean)
dist2.var = (1/n1)*(sum_sq_d1-nrow(data1)* m1^2)
PC1.ctr = (PC1^2)%*% pseudoinverse(diag(1/eig))
PC1.cor= PC1/sqrt(dist2.var)
PCu1.cor <- PCu1^2/dist2.ind
PCu1.ctr <- t(t(PCu1^2 * (1/n1))/eig)
if(disk==TRUE){
fnameA=paste("./PCstory/PCstart",1,".txt",sep="")
fnameB=paste("./PCstory/PCEnd",1,".txt",sep="")
fnameC=paste("./PCstory/PCstartUnit",1,".txt",sep="")
fnameD=paste("./PCstory/PCendUnit",1,".txt",sep="")
fnameE=paste("./PCstory/PCctrUnit",1,".txt",sep="")
fnameF=paste("./PCstory/PCcorUnit",1,".txt",sep="")
write.table(file=fnameA, PC1[,1:dims])
write.table(file=fnameB, PC1[,1:dims])
write.table(file=fnameC,PCu1[,1:dims])
write.table(file=fnameD, PCu1[,1:dims])
write.table(file=fnameE, PCu1.ctr[,1:dims])
write.table(file=fnameF, PCu1.cor[,1:dims])
}
if(disk==FALSE){
allCoordsU[[1]]=PCu1[,c(1:dims)]
allCoords[[1]]=PC1[,c(1:dims)]
allctrU[[1]] = PCu1.ctr[,c(1:dims)]
allcorU[[1]] = PCu1.cor[,c(1:dims)]
allctr[[1]] = PC1.ctr[,c(1:dims)]
allcor[[1]] = PC1.cor[,c(1:dims)]
}
if (length(nchunk) == 1)
{
if(floor(nrow(data2)/nchunk) < 2){stop("There are blocks with zero rows. Please reduce the number of chunks.")}
} else {
if(any(nchunk < 1) == TRUE){stop("Block size should be greater than 1.")}
}
out.split = mat_split(data2, (nchunk))
mat.story = out.split$splitMat
if (length(nchunk) > 1) {
nchunk = length(nchunk)
}
for (q in 1:length(mat.story)) {
mat.chu = data.matrix(mat.story[[q]])
    nchu = nrow(mat.chu)
mat.chu = scale(mat.chu,center=F,scale=F)
sum_sq_dchu = apply(mat.chu^2,2,sum)
eg2 = do_es(mat.chu)
eg12 = add_es(eg1, eg2, method="esm", current_rank)
n12=eg12$m
m12=eg12$orgn
n2=(nrow(mat.chu))
PCall = (1/sqrt(n12))* eg12$v%*%diag(eg12$d)
PCuall = eg12$u%*%diag(eg12$d)
eig=(eg12$d * (1/sqrt(n12)))^2
dist2_12.ind = apply(PCuall^2,1,sum)
sum_sq_d12=sum_sq_d1+sum_sq_dchu
dist2_12.var = as.vector((1/n12)*((sum_sq_d12)-n12* m12^2))
PCall.ctr = t(t(PCall^2)/eig)*100
PCall.cor= (PCall / sqrt(dist2_12.var))^2
PCuall.cor <- (PCuall^2)/dist2_12.ind
PCuall.ctr <- t(t(PCuall^2 * (1/n12))/eig)*100
PCall = sign_match(PC1, PCall)
PCuall = sign_match(PCu1, PCuall)
if(disk==FALSE){
allCoords[[q+1]]=PCall[,c(1:dims)]
allCoordsU[[q+1]]=PCuall[,c(1:dims)]
allctrU[[q+1]] = PCuall.ctr[,c(1:dims)]
allcorU[[q+1]] = PCuall.cor[,c(1:dims)]
allctr[[q+1]] = PCall.ctr[,c(1:dims)]
allcor[[q+1]] = PCall.cor[,c(1:dims)]
}
eg1 = eg12
dist2.ind=dist2_12.ind
sum_sq_d1=sum_sq_d12
n1=n12
if(disk==TRUE){
fnameA=paste("./PCstory/PCstart",q,".txt",sep="")
fnameB=paste("./PCstory/PCEnd",q+1,".txt",sep="")
fnameC=paste("./PCstory/PCstartUnit",q,".txt",sep="")
fnameD=paste("./PCstory/PCendUnit",q+1,".txt",sep="")
fnameE=paste("./PCstory/PCctrUnit",q+1,".txt",sep="")
fnameF=paste("./PCstory/PCcorUnit",q+1,".txt",sep="")
write.table(file=fnameA, PC1[,1:dims])
write.table(file=fnameB, PCall[,1:dims])
write.table(file=fnameC, PCu1[,1:dims])
write.table(file=fnameD, PCuall[,1:dims])
write.table(file=fnameE, PCuall.ctr[,1:dims])
write.table(file=fnameF, PCuall.cor[,1:dims])
}
}
out = list()
out$rowpcoord = PCuall[,c(1:dims)]
out$colpcoord = PCall[,c(1:dims)]
out$eg=eg12
sv = eg12$d/sqrt(nrows)
out$sv = sv[c(1:dims)]
if (current_rank == ncols) {
out$inertia.e= eg12$d^2/(sum(eg12$d^2))
} else {
out$inertia.e= sv^2/ncols
}
out$levelnames = collabs
out$rowctr=PCuall.ctr[,c(1:dims)]
out$colctr=PCall.ctr[,c(1:dims)]
out$rowcor=PCuall.cor[,c(1:dims)]
out$colcor=PCall.cor[,c(1:dims)]
out$nchunk = nchunk
out$disk = disk
if((disk==FALSE) & (tdata2==FALSE)) {
out$allrowcoord=allCoordsU
out$allcolcoord=allCoords
out$allrowctr=allctrU
out$allcolctr=allctr
out$allrowcor=allcorU
out$allcolcor=allcor
}
class(out)="i_pca"
return(out)
}
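# Incremental PCA on a familiar dataset: start from the first half of the
# numeric iris columns and feed the second half in three chunks. Note that
# i_pca() depends on the package's do_es(), add_es(), mat_split() and
# sign_match() helpers plus pseudoinverse(), none of which are shown here.
if (interactive()) {
  X <- as.matrix(iris[, 1:4])
  fit <- i_pca(X[1:75, ], X[76:150, ], nchunk = 3)
  fit$inertia.e  # explained inertia of the retained dimensions
}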
|
phageselected<-function(FUN,phage_names,Result){
if(FUN=="ExhaustivePhi"| FUN=="ClusteringPhi"){
return(c(phagelist(phage_names,c(Result[-2]))))
}
if(FUN=="ExhaustiveSearch" | FUN=="ClusteringSearch"){
l<-length(Result)
list_pn<-phagelist(phage_names,c(1,Result[l-1]))
Size1<-list_pn
if (l==4){return(Size1) }
list_pn<-phagelist(phage_names,c(2,Result[l-4],Result[l-3]))
Size2<-list_pn
if (l==7){return(c(Size1,Size2))}
list_pn<-phagelist(phage_names,c(3,Result[l-6],Result[l-7],Result[l-8]))
Size3<-list_pn
if(l==11){return(c(Size1,Size2,Size3))}
list_pn<-phagelist(phage_names,c(4,Result[l-10],Result[l-11],Result[l-12],Result[l-13]))
Size4<-list_pn
if(l==16){return(c(Size1,Size2,Size3,Size4))}
list_pn<-phagelist(phage_names,c(5,Result[l-15],Result[l-16],Result[l-17],Result[l-18],Result[l-19]))
Size5<-list_pn
if(l==22){return(c(Size1,Size2,Size3,Size4,Size5))}
list_pn<-phagelist(phage_names,c(6,Result[l-21],Result[l-22],Result[l-23],Result[l-24],Result[l-25],Result[l-26]))
Size6<-list_pn
if(l==29){return(c(Size1,Size2,Size3,Size4,Size5,Size6))}
list_pn<-phagelist(phage_names,c(7,Result[l-28],Result[l-29],Result[l-30],Result[l-31],Result[l-32],Result[l-33],Result[l-34]))
Size7<-list_pn
return(c(Size1,Size2,Size3,Size4,Size5,Size6,Size7))
}
}
|
rNeymanScott <-
function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), ...,
lmax=NULL, nsim=1, drop=TRUE, nonempty=TRUE, saveparents=TRUE)
{
if(missing(expand) && !is.null(rmax <- list(...)$rmax))
expand <- rmax
if(is.function(rcluster))
return(rPoissonCluster(kappa, expand, rcluster, win, ...,
lmax=lmax, nsim=nsim, drop=drop,
saveparents=saveparents))
if(!(is.list(rcluster) && length(rcluster) == 2))
stop("rcluster should be either a function, or a list of two elements")
win <- as.owin(win)
mu <- rcluster[[1]]
rdisplace <- rcluster[[2]]
if(is.numeric(mu)) {
if(!(length(mu) == 1 && mu >= 0))
stop("rcluster[[1]] should be a single nonnegative number")
mumax <- mu
} else if (is.im(mu) || is.function(mu)) {
if(is.function(mu)) mu <- as.im(mu, W=win, ..., strict=TRUE)
mumax <- max(mu)
} else stop("rcluster[[1]] should be a number, a function or a pixel image")
if(!is.function(rdisplace))
stop("rcluster[[2]] should be a function")
frame <- boundingbox(win)
dilated <- grow.rectangle(frame, expand)
if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa)))
stop(paste("The window in which the image",
sQuote("kappa"),
"is defined\n",
"is not large enough to contain the dilation of the window",
sQuote("win")))
if(nonempty) {
if(is.function(kappa)) {
kappa <- as.im(kappa, W=dilated, ..., strict=TRUE)
lmax <- NULL
}
kappa <- kappa * (1 - exp(-mumax))
}
parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim,
drop=FALSE, warnwin=FALSE)
resultlist <- vector(mode="list", length=nsim)
for(i in 1:nsim) {
parents <- parentlist[[i]]
np <- npoints(parents)
if(np == 0) {
result <- ppp(numeric(0), numeric(0), window=win)
parentid <- integer(0)
noff <- 0
} else {
if(!nonempty) {
csize <- rpois(np, mumax)
} else {
csize <- qpois(runif(np, min=dpois(0, mumax)), mumax)
}
noff <- sum(csize)
xparent <- parents$x
yparent <- parents$y
x0 <- rep.int(xparent, csize)
y0 <- rep.int(yparent, csize)
dd <- rdisplace(noff, ...)
mm <- if(is.ppp(dd)) marks(dd) else NULL
xy <- xy.coords(dd)
dx <- xy$x
dy <- xy$y
if(!(length(dx) == noff))
stop("rcluster returned the wrong number of points")
xoff <- x0 + dx
yoff <- y0 + dy
parentid <- rep.int(1:np, csize)
retain <- inside.owin(xoff, yoff, win)
if(is.im(mu))
retain[retain] <- inside.owin(xoff[retain], yoff[retain], as.owin(mu))
xoff <- xoff[retain]
yoff <- yoff[retain]
parentid <- parentid[retain]
if(!is.null(mm)) mm <- marksubset(mm, retain)
result <- ppp(xoff, yoff, window=win, check=FALSE, marks=mm)
}
if(is.im(mu)) {
P <- eval.im(mu/mumax)
result <- rthin(result, P)
}
if(saveparents) {
attr(result, "parents") <- parents
attr(result, "parentid") <- parentid
attr(result, "expand") <- expand
attr(result, "cost") <- np + noff
}
resultlist[[i]] <- result
}
result <- simulationresult(resultlist, nsim, drop)
return(result)
}
fakeNeyScot <- function(Y, lambda, win, saveLambda, saveparents) {
if(saveLambda || saveparents) {
if(saveLambda && !is.im(lambda)) lambda <- as.im(lambda, W=win)
if(saveparents) emptyparents <- ppp(window=win)
if(isSingle <- is.ppp(Y)) Y <- solist(Y)
for(i in seq_along(Y)) {
Yi <- Y[[i]]
if(saveLambda) attr(Yi, "lambda") <- lambda
if(saveparents) {
attr(Yi, "parents") <- emptyparents
attr(Yi, "parentid") <- integer(0)
attr(Yi, "cost") <- npoints(Yi)
}
Y[[i]] <- Yi
}
if(isSingle) Y <- Y[[1L]]
}
return(Y)
}
rMatClust <- local({
rundisk <- function(n, radius) {
R <- radius * sqrt(runif(n, min=0, max=1))
Theta <- runif(n, min=0, max=2*pi)
cbind(R * cos(Theta), R * sin(Theta))
}
rMatClust <-
function(kappa, scale, mu, win = owin(c(0,1),c(0,1)),
nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale, ...,
poisthresh=1e-6, nonempty=TRUE, saveparents=TRUE) {
if(missing(scale)) scale <- list(...)$r
check.1.real(scale)
stopifnot(scale > 0)
kok <- is.numeric(kappa) || is.im(kappa)
if(kok) {
kappamax <- max(kappa)
} else {
kim <- as.im(kappa, W=win, ..., strict=TRUE)
kra <- range(kim)
kappamax <- kra[2] + 0.05 * diff(kra)
}
if(1/(pi * kappamax * scale^2) < poisthresh) {
kapmu <- mu * (if(kok) kappa else kim)
result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents)
return(result)
}
result <- rNeymanScott(kappa, scale, list(mu, rundisk), win, radius=scale,
nsim=nsim, drop=FALSE,
nonempty=nonempty,
saveparents = saveparents || saveLambda)
if(saveLambda){
for(i in 1:nsim) {
parents <- attr(result[[i]], "parents")
Lambda <- clusterfield("MatClust", parents, scale=scale, mu=mu, ...)
attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
}
}
return(if(nsim == 1 && drop) result[[1]] else result)
}
rMatClust
})
rThomas <- local({
gaus <- function(n, sigma) {
matrix(rnorm(2 * n, mean=0, sd=sigma), ncol=2)
}
rThomas <-
function(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE,
saveLambda=FALSE, expand = 4*scale, ...,
poisthresh=1e-6, nonempty=TRUE, saveparents=TRUE) {
if(missing(scale)) scale <- list(...)$sigma
check.1.real(scale)
stopifnot(scale > 0)
kok <- is.numeric(kappa) || is.im(kappa)
if(kok) {
kappamax <- max(kappa)
} else {
kim <- as.im(kappa, W=win, ..., strict=TRUE)
kra <- range(kim)
kappamax <- kra[2] + 0.05 * diff(kra)
}
if(1/(4*pi * kappamax * scale^2) < poisthresh) {
kapmu <- mu * (if(kok) kappa else kim)
result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents)
return(result)
}
if(missing(expand))
expand <- clusterradius("Thomas", scale = scale, ...)
result <- rNeymanScott(kappa, expand, list(mu, gaus),
win, sigma=scale,
nsim=nsim, drop=FALSE,
nonempty=nonempty,
saveparents = saveparents || saveLambda)
if(saveLambda){
for(i in 1:nsim) {
parents <- attr(result[[i]], "parents")
Lambda <- clusterfield("Thomas", parents, scale=scale, mu=mu, ...)
attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
}
}
return(if(nsim == 1 && drop) result[[1]] else result)
}
rThomas
})
rCauchy <- local({
rnmix.invgam <- function(n = 1, rate) {
V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2)
s <- 1/rgamma(n, shape=1/2, rate=rate)
return(sqrt(s) * V)
}
rCauchy <- function (kappa, scale, mu, win = owin(), thresh = 0.001,
nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL,
..., poisthresh=1e-6, nonempty=TRUE, saveparents=TRUE) {
dots <- list(...)
if(missing(scale)) scale <- dots$omega
if(missing(thresh))
thresh <- dots$eps %orifnull% 0.001
kok <- is.numeric(kappa) || is.im(kappa)
if(kok) {
kappamax <- max(kappa)
} else {
kim <- as.im(kappa, W=win, ..., strict=TRUE)
kra <- range(kim)
kappamax <- kra[2] + 0.05 * diff(kra)
}
if(1/(pi * kappamax * scale^2) < poisthresh) {
kapmu <- mu * (if(kok) kappa else kim)
result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents)
return(result)
}
if(missing(expand)){
expand <- clusterradius("Cauchy", scale = scale, thresh = thresh, ...)
} else if(!missing(thresh)){
warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given")
}
result <- rNeymanScott(kappa, expand,
list(mu, rnmix.invgam),
win, rate = scale^2/2, nsim=nsim, drop=FALSE,
nonempty=nonempty,
saveparents = saveparents || saveLambda)
if(saveLambda){
for(i in 1:nsim) {
parents <- attr(result[[i]], "parents")
Lambda <- clusterfield("Cauchy", parents, scale=scale, mu=mu, ...)
attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
}
}
return(if(nsim == 1 && drop) result[[1]] else result)
}
rCauchy })
rVarGamma <- local({
rnmix.gamma <- function(n = 1, shape, rate) {
V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2)
s <- rgamma(n, shape=shape, rate=rate)
return(sqrt(s) * V)
}
rVarGamma <- function(kappa, nu, scale, mu, win = owin(),
thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE,
expand = NULL, ..., poisthresh=1e-6,
nonempty = TRUE,
saveparents=TRUE) {
dots <- list(...)
if(missing(nu)){
nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker
} else{
check.1.real(nu)
stopifnot(nu > -1/2)
}
if(missing(scale)) scale <- dots$omega
if(missthresh <- missing(thresh))
thresh <- dots$eps %orifnull% 0.001
kok <- is.numeric(kappa) || is.im(kappa)
if(kok) {
kappamax <- max(kappa)
} else {
kim <- as.im(kappa, W=win, ..., strict=TRUE)
kra <- range(kim)
kappamax <- kra[2] + 0.05 * diff(kra)
}
if(1/(4 * pi * kappamax * scale^2) < poisthresh) {
kapmu <- mu * (if(kok) kappa else kim)
result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
result <- fakeNeyScot(result, kapmu, win, saveLambda, saveparents)
return(result)
}
if(missing(expand)){
expand <- clusterradius("VarGamma", scale = scale, nu = nu,
thresh = thresh, ...)
} else if(!missthresh){
warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given")
}
result <- rNeymanScott(kappa, expand,
list(mu, rnmix.gamma), win,
shape = nu + 1,
rate = 1/(2 * scale^2),
nsim=nsim, drop=FALSE,
nonempty = nonempty,
saveparents = saveparents || saveLambda)
if(saveLambda){
for(i in 1:nsim) {
parents <- attr(result[[i]], "parents")
Lambda <- clusterfield("VarGamma", parents, scale=scale,
nu=nu, mu=mu, ...)
attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
}
}
return(if(nsim == 1 && drop) result[[1]] else result)
}
rVarGamma
})
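## A minimal usage sketch for the cluster generators above (assumes the
## spatstat dependencies providing owin(), rpoispp(), clusterfield(), etc.
## are loaded):
##   X <- rThomas(kappa = 10, scale = 0.2, mu = 5)
##   plot(X)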
|
set.seed(8675309)
library(bsts)
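## Simulate data for a dynamic regression: each of the xdim coefficients
## follows an independent Gaussian random walk over time.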
time.dimension <- 100
xdim <- 3
beta.sd <- c(.1, .2, .05)
residual.sd <- .7
beta <- matrix(nrow = xdim, ncol = time.dimension)
beta[, 1] <- rnorm(xdim)
for (i in 2:time.dimension) {
beta[, i] <- rnorm(xdim, beta[, i - 1], beta.sd)
}
predictors <- matrix(rnorm(time.dimension * xdim),
nrow = time.dimension, ncol = xdim)
yhat <- rowSums(predictors * t(beta))
y <- rnorm(time.dimension, yhat, residual.sd)
test_that("model runs with default prior", {
ss <- AddDynamicRegression(list(), y ~ predictors)
model <- bsts(y, state.specification = ss, niter = 100)
ss <- AddDynamicRegression(list(), y ~ predictors,
model.options = DynamicRegressionRandomWalkOptions(
sigma.prior = list(
SdPrior(beta.sd[1], 10),
SdPrior(beta.sd[2], 10),
SdPrior(beta.sd[3], 10))))
model <- bsts(y, state.specification = ss, niter = 1000, seed = 8675309)
burn <- SuggestBurn(.1, model)
CheckMcmcMatrix(model$dynamic.regression.coefficients[, 1, ],
beta[1, ], burn = burn)
CheckMcmcMatrix(model$dynamic.regression.coefficients[, 2, ],
beta[2, ], burn = burn)
CheckMcmcMatrix(model$dynamic.regression.coefficients[, 3, ],
beta[3, ], burn = burn)
ss <- AddDynamicRegression(list(), y ~ predictors,
model.options = DynamicRegressionRandomWalkOptions(
sigma.prior = SdPrior(beta.sd[1], 1)))
model <- bsts(y, state.specification = ss, niter = 100)
})
test_that("predict method runs without crashing for DLM's", {
library(bsts)
data(iclaims)
train <- window(initial.claims, start = "2004-01-04", end="2010-01-01")
test <- window(initial.claims, start="2010-01-02")
ss <- AddLocalLinearTrend(list(), train$iclaimsNSA)
ss <- AddSeasonal(ss, train$iclaimsNSA, nseasons = 52)
ss <- AddDynamicRegression(ss, formula = iclaimsNSA ~ unemployment.office,
data = train)
model <- bsts(train$iclaimsNSA, state.specification = ss, niter = 1000)
test_subset <- cbind(
"department.of.unemployment" = test$department.of.unemployment,
"unemployment.office" = test$unemployment.office)
pred <- predict(model, newdata = test_subset)
})
test_that("predict method runs without crashing for DLM's with static regressors", {
library(bsts)
data(iclaims)
train <- window(initial.claims, start = "2004-01-04", end="2010-01-01")
test <- window(initial.claims, start="2010-01-02")
ss <- AddLocalLinearTrend(list(), train$iclaimsNSA)
ss <- AddSeasonal(ss, train$iclaimsNSA, nseasons = 52)
ss <- AddDynamicRegression(ss,
formula = iclaimsNSA ~ unemployment.office,
data = train)
model <- bsts(iclaimsNSA ~ idaho.unemployment,
state.specification = ss,
niter = 100,
data = train)
test.subset <- cbind(test,
"department.of.unemployment" = test$department.of.unemployment)
  pred <- predict(model, newdata = test.subset)
})
test_that("dynamic regression fails gracefully with non-trivial time stamps", {
library(bsts)
set.seed(8675309)
n <- 1000
x <- matrix(rnorm(n))
beta <- cumsum(rnorm(n, 0, .1)) - 12
level <- cumsum(rnorm(n)) + 18
error <- rnorm(n, 0, .1)
y <- level + x * beta + error
ss <- list()
ss <- AddLocalLevel(ss, y)
ss <- AddDynamicRegression(ss, y ~ x)
model <- bsts(y, state.specification = ss, niter = 100, seed = 8675309)
new_timestamps <- sort(sample(1:2000, 1000))
expect_error(
model <- bsts(y, state.specification = ss, niter = 100, seed = 8675309,
timestamps = new_timestamps),
"Dynamic regression models are only supported with trivial time stamps.")
})
|
tasks <-
  function(list){
    if(length(list$tasks) > 0){
      tasks <- plyr::ldply(lapply(list$tasks, function(x){t(unlist(x))}))
      return(tasks)
    }
    else{
      message("Warning: This type of data is missing or incomplete for this occupation.")
    }
  }
|
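## The two helpers below first calibrate presence/absence data via
## ensemble.calibrate.models(), then build spatial (respectively
## environmental) cross-validation folds with the blockCV package.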
`ensemble.spatialBlock` <- function(
x=NULL, p=NULL,
a=NULL, an=1000, EPSG=NULL,
excludep=FALSE, target.groups=FALSE, k=4,
factors=NULL,
theRange=NULL, return.object=FALSE, ...
)
{
ensemble.data <- ensemble.calibrate.models(x=x, p=p, a=a, an=an,
SSB.reduce=FALSE,
excludep=excludep, target.groups=target.groups, k=0,
        ENSEMBLE.tune=FALSE,
MAXENT=0, MAXNET=0, MAXLIKE=0, GBM=0, GBMSTEP=0, RF=0, CF=0,
GLM=0, GLMSTEP=0, GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0,
EARTH=0, RPART=0, NNET=0, FDA=0, SVM=0, SVME=0, GLMNET=0,
BIOCLIM.O=0, BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0,
factors=factors,
evaluations.keep=TRUE)
p.new <- as.data.frame(ensemble.data$evaluations$p)
a.new <- as.data.frame(ensemble.data$evaluations$a)
names(a.new) <- names(p.new)
PA.input <- data.frame(pb=c(rep(1, nrow(p.new)), rep(0, nrow(a.new))), rbind(p.new, a.new))
PA.Spatial <- sf::st_as_sf(PA.input, coords=names(a.new), crs=raster::crs(x))
if (is.null(EPSG) == FALSE) {sf::st_crs(PA.Spatial) <- EPSG}
sb1 <- blockCV::spatialBlock(speciesData=PA.Spatial, species="pb", theRange=theRange, k=k, ...)
k <- list(p=p.new, a=a.new, groupp=sb1$foldID[PA.input$pb == 1], groupa=sb1$foldID[PA.input$pb == 0])
    if (!return.object) {
return(k)
}else{
results <- list(k=k, block.object=sb1, speciesData=PA.Spatial)
return(results)
}
}
`ensemble.envBlock` <- function(
x=NULL, p=NULL,
a=NULL, an=1000, EPSG=NULL,
excludep=FALSE, target.groups=FALSE, k=4,
factors=NULL,
return.object=FALSE, ...
)
{
ensemble.data <- ensemble.calibrate.models(x=x, p=p, a=a, an=an,
SSB.reduce=FALSE,
excludep=excludep, target.groups=target.groups, k=0,
        ENSEMBLE.tune=FALSE,
MAXENT=0, MAXNET=0, MAXLIKE=0, GBM=0, GBMSTEP=0, RF=0, CF=0,
GLM=0, GLMSTEP=0, GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0,
EARTH=0, RPART=0, NNET=0, FDA=0, SVM=0, SVME=0, GLMNET=0,
BIOCLIM.O=0, BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0,
factors=factors,
evaluations.keep=TRUE)
p.new <- as.data.frame(ensemble.data$evaluations$p)
a.new <- as.data.frame(ensemble.data$evaluations$a)
names(a.new) <- names(p.new)
PA.input <- data.frame(pb=c(rep(1, nrow(p.new)), rep(0, nrow(a.new))), rbind(p.new, a.new))
PA.Spatial <- sp::SpatialPointsDataFrame(PA.input[, c(2:3)], data=PA.input, proj4string=raster::crs(x))
if (is.null(EPSG) == FALSE) {sf::st_crs(PA.Spatial) <- EPSG}
eb1 <- blockCV::envBlock(rasterLayer=x, speciesData=PA.Spatial, species="pb", k=k, ...)
k <- list(p=p.new, a=a.new, groupp=eb1$foldID[PA.input$pb == 1], groupa=eb1$foldID[PA.input$pb == 0])
    if (!return.object) {
return(k)
}else{
results <- list(k=k, block.object=eb1, speciesData=PA.Spatial)
return(results)
}
}
|
mat.prod <-
function(A,B){
matrices <- matrix(0, ncol=ncol(A), nrow=nrow(A))
for (i in 1:ncol(A)) {
matrices[,i] <- A[,i]*B
}
return(matrices)
}
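## Usage sketch: scales each column of A elementwise by the vector B, which
## (when length(B) == nrow(A)) is equivalent to A * B.
##   A <- matrix(1:6, nrow = 3)
##   B <- c(1, 10, 100)
##   mat.prod(A, B)   # same result as A * B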
|
matched_set <- function(matchedsets, id, t, L, t.var, id.var, treatment.var)
{
if(length(matchedsets) != length(id) | length(matchedsets) != length(t) | length(id) != length(t))
{
stop("Number of matched sets, length of t, length of id specifications do not match up")
}
names(matchedsets) <- paste0(id, ".", t)
class(matchedsets) <- c("matched.set", "list")
attr(matchedsets, 'refinement.method') <- NULL
attr(matchedsets, "lag") <- L
attr(matchedsets, "t.var") <- t.var
attr(matchedsets, "id.var" ) <- id.var
attr(matchedsets, "treatment.var") <- treatment.var
return(matchedsets)
}
summary.matched.set <- function(object, ..., verbose = TRUE)
{
set <- object
Lengthcol <- sapply(set, length)
ts <- as.integer(sub(".*\\.", "", names(set)))
ids <- as.integer(sub("\\..*", "", names(set)))
df <- data.frame(i = ids, t = ts, matched.set.size = Lengthcol)
colnames(df)[1:2] <- c(attr(set, "id.var"), attr(set, "t.var"))
rownames(df) <- NULL
if(verbose)
{
summary.result <- list()
summary.result$overview <- df
summary.result$set.size.summary <- summary(Lengthcol, ...)
summary.result$number.of.treated.units <- length(set)
summary.result$num.units.empty.set <- sum(Lengthcol == 0)
summary.result$lag <- attr(set, "lag")
return(summary.result)
}
else
{
return(df)
}
}
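## Usage sketch with a toy object: two treated unit-times, one with two
## matched controls and one with an empty matched set.
##   msets <- matched_set(matchedsets = list(c("2", "3"), character(0)),
##                        id = c(1, 4), t = c(5, 5), L = 4,
##                        t.var = "year", id.var = "unit", treatment.var = "treat")
##   summary(msets)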
plot.matched.set <- function(x, ..., border = NA, col = "grey", ylab = "Frequency of Size",
xlab ="Matched Set Size" , lwd = NULL,
main = "Distribution of Matched Set Sizes",
freq = TRUE, include.empty.sets = FALSE)
{
set <- x
lvec <- sapply(set, length)
if(include.empty.sets)
{
graphics::hist(x = lvec, freq = freq, border = border, col = col, ylab = ylab, xlab = xlab, main = main, ...)
}
else
{
lvec.nonempty <- lvec[lvec > 0]
if(sum(lvec == 0) > 0)
{
      num.empties <- sum(lvec == 0)
      graphics::hist(x = lvec.nonempty, freq = freq, border = border, col = col, ylab = ylab,
                     xlab = xlab, main = main, ...)
      graphics::lines(x = c(0, 0),
                      y = c(0, num.empties),
                      lwd = 4,
                      col = "red") # original color string was truncated in the source; "red" is a placeholder
}
else
{
graphics::hist(x = lvec.nonempty, freq = freq, border = border, col = col, ylab = ylab,
xlab = xlab, main = main, ...)
}
}
}
print.matched.set <- function(x, ..., verbose = FALSE)
{
set <- x
if(verbose)
{
class(set) <- "list"
print(set, ...)
}
else {
print(summary(set, verbose = F), ...)
}
}
`[.matched.set` <- function(x, i, j = NULL, drop = NULL)
{
if(!is.null(j)) stop("matched.set object is a list.")
class(x) <- "list"
temp <- x[i]
attr(temp, "lag") <- attr(x, "lag")
attr(temp, "refinement.method") <- attr(x, "refinement.method")
attr(temp, "t.var") <- attr(x, "t.var")
attr(temp, "id.var" ) <- attr(x, "id.var" )
attr(temp, "treatment.var") <- attr(x, "treatment.var")
attr(temp, "distances") <- attr(x, "distances")
attr(temp, "max.match.size") <- attr(x, "max.match.size")
attr(temp, "covs.formula") <- attr(x, "covs.formula")
attr(temp, "match.missing") <- attr(x, "match.missing")
class(temp) <- "matched.set"
return(temp)
}
build_balance_mats <- function(idx, ordered_expanded_data, msets)
{
subset.per.matchedset <- function(sub.idx, set)
{
wts <- attr(set, "weights")[which(set == ordered_expanded_data[sub.idx[1:(length(sub.idx) - 1)], attr(msets, "id.var")])]
return(cbind(ordered_expanded_data[sub.idx,], data.frame("weights" = c(wts, Inf))))
}
unnest <- function(mset.idx, mset)
{
lapply(mset.idx, subset.per.matchedset, set = mset)
}
result <- mapply(FUN = unnest, mset.idx = idx, mset = msets, SIMPLIFY = FALSE)
return(result)
}
|
lambda3<-function(x, item.stats.max=12, missing="complete"){
n <- dim(x)[1]
p <- dim(x)[2]
sigma <- impute.cov(x, missing)
sigma.cor<-cov2cor(sigma)
Obs<-colSums(!is.na(x))
Mean<-colMeans(x, na.rm=TRUE)
SD<-sapply(x, sd, na.rm=TRUE)
onerow<-rep(1,p)
onerow<-t(onerow)
onevector<-t(onerow)
Unstandardized<-(p/(p-1))*(1-(onerow%*%diag(sigma)/(onerow%*%sigma%*%onevector)))
Standardized<-(p/(p-1))*(1-(onerow%*%diag(sigma.cor)/(onerow%*%sigma.cor%*%onevector)))
items<-p
lambda3<-data.frame(Unstandardized, Standardized)
If.Dropped<-rep(NA,p)
for(i in 1:p){
onerow.d<-rep(1,(p-1))
onerow.d<-t(onerow.d)
onevector.d<-t(onerow.d)
sigma.d<-sigma[-i,-i]
If.Dropped[i]<-(p/(p-1))*(1-(onerow.d%*%diag(sigma.d)/(onerow.d%*%sigma.d%*%onevector.d)))
}
if(n != p) {
item.stats<-data.frame(Mean,SD,Obs,If.Dropped, row.names=(colnames(x)))
}
else{
item.stats=data.frame(If.Dropped, row.names=(colnames(x)))
}
rownames(item.stats)=colnames(x)
result<-list(lambda3=lambda3, item.stats=item.stats, items=items, item.stats.max=item.stats.max)
class(result) <- c("lambda3")
return(result)
}
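## Usage sketch (assumes the package-internal impute.cov() helper is
## available; 'items' stands for any data frame of numeric item responses).
## The unstandardized value is Guttman's lambda-3, equivalent to
## Cronbach's alpha.
##   lambda3(items)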
|
trailing_whitespace_linter <- function() {
Linter(function(source_file) {
res <- re_matches(
source_file$lines,
rex(capture(name = "space", some_of(" ", regex("\\t"))), or(newline, end)),
global = TRUE,
locations = TRUE
)
lapply(seq_along(source_file$lines), function(itr) {
mapply(
FUN = function(start, end) {
if (is.na(start)) {
return()
}
line_number <- names(source_file$lines)[itr]
Lint(
filename = source_file$filename,
line_number = line_number,
column_number = start,
type = "style",
message = "Trailing whitespace is superfluous.",
line = source_file$lines[as.character(line_number)],
ranges = list(c(start, end))
)
},
start = res[[itr]]$space.start,
end = res[[itr]]$space.end,
SIMPLIFY = FALSE
)
})
})
}
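## Usage sketch (lintr-style API; "script.R" is a placeholder filename):
##   lintr::lint("script.R", linters = list(trailing_whitespace_linter()))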
|
setMethod("summary",
signature=c("sda"),
function(object) return(object@backend$summary()))
setMethod("pretrain",
signature=c("sda"),
function(object) return(object@backend$pretrain()))
setMethod("finetune",
signature=c("sda"),
function(object) return(object@backend$finetune()))
setMethod("PretrainLearningRate",
signature=c("sda"),
function(object) {
info <- summary(object)
return(info$PretrainLearningRate)
})
setMethod("CorruptionLevel",
signature=c("sda"),
function(object) {
info <- summary(object)
return(info$CorruptionLevel)
})
setMethod("PretrainingEpochs",
signature=c("sda"),
function(object) {
info <- summary(object)
return(info$PretrainingEpochs)
})
setMethod("FinetuneLearningRate",
signature=c("sda"),
function(object) {
info <- summary(object)
return(info$FinetuneLearningRate)
})
setMethod("FinetuneEpochs",
signature=c("sda"),
function(object) {
info <- summary(object)
return(info$FinetuneEpochs)
})
setMethod("predict",
signature=c("sda","matrix"),
function(object,test) {
return(object@backend$predict(test))
})
setMethod("setPretrainLearningRate",
signature=c("sda","numeric"),
function(object, x) {
return(object@backend$setPretrainLearningRate(x))
})
setMethod("setPretrainEpochs",
signature=c("sda","numeric"),
function(object, x) {
return(object@backend$setPretrainEpochs(x))
})
setMethod("setFinetuneLearningRate",
signature=c("sda","numeric"),
function(object, x) {
return(object@backend$setFinetuneLearningRate(x))
})
setMethod("setFinetuneEpochs",
signature=c("sda","numeric"),
function(object, x) {
return(object@backend$setFinetuneEpochs(x))
})
setMethod("setCorruptionLevel",
signature=c("sda","numeric"),
function(object, x) {
return(object@backend$setCorruptionLevel(x))
})
setMethod("LearningRate",
signature=c("sda"),
function(object) {
info <- summary(object)
ll <- list()
ll$'PretrainLearningRate' <- info$PretrainLearningRate
ll$'FinetuneLearningRate' <- info$FinetuneLearningRate
return(ll)
})
Rsda <- function(x, y, hidden){
sdaModule <- new(Sda)
sdaModule$init(x, y, hidden)
return(new("sda",
backend=sdaModule))
}
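## Usage sketch (assumes the Rcpp module class 'Sda' is loaded; x is a
## numeric training matrix, y a label matrix, and 'test' a matrix of new
## observations -- all placeholders):
##   sda <- Rsda(x, y, hidden = c(10, 10))
##   pretrain(sda)
##   finetune(sda)
##   predict(sda, test)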
|
effectscan <-
function(cross, pheno.col=1, chr, get.se=FALSE, draw=TRUE,
gap=25, ylim, mtick=c("line","triangle"),
add.legend=TRUE, alternate.chrid=FALSE, ...)
{
type <- crosstype(cross)
mtick <- match.arg(mtick)
if(type == "4way")
stop("effect scan not working for 4-way cross yet.")
if(length(pheno.col) > 1) {
pheno.col <- pheno.col[1]
warning("effectscan can take just one phenotype; only the first will be used")
}
if(is.character(pheno.col)) {
num <- find.pheno(cross, pheno.col)
if(is.na(num))
stop("Couldn't identify phenotype \"", pheno.col, "\"")
pheno.col <- num
}
if(pheno.col < 1 | pheno.col > nphe(cross))
stop("pheno.col values should be between 1 and the no. phenotypes")
if(!is.numeric(cross$pheno[,pheno.col]))
stop("phenotype \"", colnames(cross$pheno)[pheno.col], "\" is not numeric.")
pheno <- cross$pheno[,pheno.col]
wh <- is.na(pheno)
if(any(wh)) {
pheno <- pheno[!wh]
cross <- subset.cross(cross, ind=(!wh))
}
if(!missing(chr)) cross <- subset.cross(cross, chr=chr)
chr_type <- sapply(cross$geno, chrtype)
n.ind <- length(pheno)
results <- NULL
for(i in 1:nchr(cross)) {
if(!("draws" %in% names(cross$geno[[i]])))
stop("You must first run sim.geno.")
draws <- cross$geno[[i]]$draws
if("map" %in% names(attributes(cross$geno[[i]]$draws)))
map <- attr(cross$geno[[i]]$draws,"map")
else {
stp <- attr(cross$geno[[i]]$draws, "step")
oe <- attr(cross$geno[[i]]$draws, "off.end")
if("stepwidth" %in% names(attributes(cross$geno[[i]]$draws)))
stpw <- attr(cross$geno[[i]]$draws, "stepwidth")
else stpw <- "fixed"
map <- create.map(cross$geno[[i]]$map,stp,oe,stpw)
}
if(is.matrix(map)) {
marnam <- colnames(map)
map <- map[1,]
}
else marnam <- names(map)
if(type == "risib" || type=="riself" || type=="dh" || type=="haploid") {
mapping <- rbind(c(+1, -1),
c(+1, +1))
colnames(mapping) <- c("intercept","a")
dropcol <- 1
}
else if(type=="bc") {
if(chr_type[i] == "X") {
sexpgm <- getsex(cross)
draws <- reviseXdata(type, "full", sexpgm, draws=draws,
cross.attr=attributes(cross))
if(is.null(sexpgm$sex) || all(sexpgm$sex==0) || all(sexpgm$sex==1)) {
mapping <- rbind(c(+1, -0.5),
c(+1, +0.5))
colnames(mapping) <- c("intercept", "a")
dropcol <- 1
}
else {
mapping <- rbind(c(+1, 0,-0.5, 0),
c(+1, 0,+0.5, 0),
c(+1,+1, 0, -0.5),
c(+1,+1, 0, +0.5))
colnames(mapping) <- c("intercept", "sex", "a.female", "a.male")
dropcol <- 1:2
}
}
else {
mapping <- rbind(c(+1, -0.5),
c(+1, +0.5))
colnames(mapping) <- c("intercept", "a")
dropcol <- 1
}
}
else {
if(chr_type[i] == "X") {
sexpgm <- getsex(cross)
draws <- reviseXdata(type, "full", sexpgm, draws=draws,
cross.attr=attributes(cross))
if(is.null(sexpgm$pgm) || all(sexpgm$pgm==0) || all(sexpgm$pgm==1)) {
if(is.null(sexpgm$sex) || all(sexpgm$sex==0) || all(sexpgm$sex==1)) {
mapping <- rbind(c(+1, -0.5),
c(+1, +0.5))
colnames(mapping) <- c("intercept", "a")
dropcol <- 1
}
else {
mapping <- rbind(c(+1, 0,-0.5, 0),
c(+1, 0,+0.5, 0),
c(+1,+1, 0, -0.5),
c(+1,+1, 0, +0.5))
colnames(mapping) <- c("intercept", "sex", "a.female", "a.male")
dropcol <- 1:2
}
}
else {
if(is.null(sexpgm$sex) || all(sexpgm$sex==0)) {
mapping <- rbind(c(+1, 0,-0.5, 0),
c(+1, 0,+0.5, 0),
c(+1,+1, 0,-0.5),
c(+1,+1, 0,+0.5))
colnames(mapping) <- c("intercept","dir","a.forw","a.rev")
dropcol <- 1:2
}
else if(all(sexpgm$sex==1)) {
mapping <- rbind(c(+1, -0.5),
c(+1, +0.5))
colnames(mapping) <- c("intercept", "a")
dropcol <- 1
}
else {
mapping <- rbind(c(+1, 0, 0, -0.5, 0, 0),
c(+1, 0, 0, +0.5, 0, 0),
c(+1,+1, 0, 0,-0.5, 0),
c(+1,+1, 0, 0,+0.5, 0),
c(+1, 0,+1, 0, 0,-0.5),
c(+1, 0,+1, 0, 0,+0.5))
colnames(mapping) <- c("intercept","dir","sex","a.femaleforw","a.femalerev","a.male")
dropcol <- 1:3
}
}
}
else {
mapping <- rbind(c(+1, -1, 0),
c(+1, 0, +1),
c(+1, +1, 0))
colnames(mapping) <- c("intercept","a","d")
dropcol <- 1
}
}
n.gen <- ncol(mapping)
n.pos <- ncol(draws)
n.imp <- dim(draws)[3]
z <- .C("R_effectscan",
as.integer(n.ind),
as.integer(n.gen),
as.integer(n.imp),
as.integer(n.pos),
as.integer(draws-1),
as.double(pheno),
as.double(mapping),
beta=as.double(rep(0,n.pos*n.gen)),
se=as.double(rep(0,n.pos*n.gen)),
as.integer(get.se),
PACKAGE="qtl")
beta <- t(matrix(z$beta, ncol=n.pos))
colnames(beta) <- colnames(mapping)
if(get.se) {
se <- t(matrix(z$se, ncol=n.pos))
colnames(se) <- paste("se", colnames(mapping), sep=".")
beta <- cbind(beta, se[,-dropcol,drop=FALSE])
}
z <- beta[,-dropcol,drop=FALSE]
w <- marnam
o <- grep("^loc-*[0-9]+",w)
if(length(o) > 0)
w[o] <- paste("c",names(cross$geno)[i],".",w[o],sep="")
rownames(z) <- w
z <- as.data.frame(z, stringsAsFactors=TRUE)
z <- cbind(chr=factor(rep(names(cross$geno)[i],length(map)),levels=names(cross$geno)),
pos=as.numeric(map), z)
rownames(z) <- w
if(i==1) results <- z
else {
w <- match(colnames(z), colnames(results))
if(any(is.na(w))) {
curnam <- colnames(results)
for(j in which(is.na(w)))
results <- cbind(results, rep(NA, nrow(results)))
colnames(results) <- c(curnam, colnames(z)[is.na(w)])
}
w <- match(colnames(results), colnames(z))
if(any(is.na(w))) {
curnam <- colnames(z)
for(j in which(is.na(w)))
z <- cbind(z, rep(NA, nrow(z)))
colnames(z) <- c(curnam, colnames(results)[is.na(w)])
}
results <- rbind(results, z)
}
}
class(results) <- c("effectscan", "scanone", "data.frame")
if(draw) {
if(missing(ylim))
plot.effectscan(results, gap=gap, mtick=mtick, add.legend=add.legend,
alternate.chrid=alternate.chrid, ...)
else
plot.effectscan(results, gap=gap, mtick=mtick, add.legend=add.legend,
ylim=ylim, alternate.chrid=alternate.chrid, ...)
}
invisible(results)
}
plot.effectscan <-
function(x, gap=25, ylim, mtick=c("line","triangle"),
add.legend=TRUE, alternate.chrid=FALSE, ...)
{
col <- c("blue","red","darkorange","darkgreen","purple")
lightcol <- c("lightblue", "pink", "peachpuff1", "palegreen1", "thistle1")
results <- x
eff <- 3:ncol(results)
if(length(grep("^se", colnames(results)))>0) get.se <- TRUE
else get.se <- FALSE
if(get.se) {
se <- grep("^se", colnames(results)[eff])
eff <- eff[-se]
se <- se + 2
lo <- as.matrix(results[,eff]) - as.matrix(results[,se])
hi <- as.matrix(results[,eff]) + as.matrix(results[,se])
yl <- range(c(lo,hi), na.rm=TRUE)
}
else yl <- range(results[,eff], na.rm=TRUE)
if(!missing(ylim)) yl <- ylim
plot.scanone(results, lodcolumn=1, ylim=yl, gap=gap, mtick=mtick, alternate.chrid=alternate.chrid,
col=col[1], ...)
if(get.se) {
begend <- matrix(unlist(tapply(results[,2],results[,1],range)),ncol=2,byrow=TRUE)
rownames(begend) <- unique(results[,1])
chr <- unique(as.character(results[,1]))
begend <- begend[as.character(chr),,drop=FALSE]
len <- begend[,2]-begend[,1]
if(length(len)>1) start <- c(0,cumsum(len+gap))-c(begend[,1],0)
else start <- 0
x <- results[,2]
for(i in seq(along=chr))
x[results[,1]==chr[i]] <- results[results[,1]==chr[i],2]+start[i]
for(i in seq(along=chr)) {
wh <- results[,1]==chr[i]
for(j in 1:ncol(lo)) {
if(any(!is.na(lo[wh,j]))) {
xx <- c(x[wh], rev(x[wh]))
yy <- c(lo[wh,j], rev(hi[wh,j]))
polygon(xx, yy, col=lightcol[j], border=lightcol[j])
}
}
}
for(i in seq(along=chr)) {
wh <- results[,1]==chr[i]
for(j in 1:ncol(lo)) {
if(any(!is.na(lo[wh,j]))) {
xx <- c(x[wh], rev(x[wh]))
yy <- c(lo[wh,j], rev(hi[wh,j]))
lines(xx, yy, col=lightcol[j])
}
}
}
plot.scanone(results, lodcolumn=1, add=TRUE, col=col[1])
}
if(length(eff) > 1) {
for(i in seq(along=eff)[-1])
plot.scanone(results, lodcolumn=eff[i]-2, gap=gap, add=TRUE, col=col[i])
}
if(add.legend)
legend("top", legend=names(results)[eff], col=col[1:length(eff)], lwd=2)
abline(h=0, lty=2)
}
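## Usage sketch (qtl workflow; effectscan() requires imputed genotypes from
## sim.geno()):
##   data(fake.f2)
##   fake.f2 <- sim.geno(fake.f2, step = 2.5, n.draws = 16)
##   effectscan(fake.f2, pheno.col = 1)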
|
test_that("cli_progress_builtin_handlers", {
expect_true(is.character(cli_progress_builtin_handlers()))
expect_true(all(
c("cli", "shiny", "rstudio") %in% cli_progress_builtin_handlers()
))
})
test_that("cli_progress_select_handlers
withr::local_options(
"cli.progress_handlers" = c("foo", "bar"),
"cli.progress_handlers_force" = c("forced"),
"cli.progress_handlers_only" = "logger"
)
expect_equal(
names(cli_progress_select_handlers(list(), environment())),
"logger"
)
})
test_that("cli_progress_select_handlers
withr::local_options(
"cli.progress_handlers" = c("foo", "bar", "baz"),
"cli.progress_handlers_force" = NULL,
"cli.progress_handlers_only" = NULL
)
fake <- list(
foo = list(able = function(...) FALSE),
bar = list(),
baz = list(),
forced = list()
)
mockery::stub(cli_progress_select_handlers, "builtin_handlers", fake)
expect_equal(cli_progress_select_handlers(), fake["bar"])
})
test_that("cli_progress_select_handlers
withr::local_options(
"cli.progress_handlers" = c("foo", "bar", "baz"),
"cli.progress_handlers_force" = c("forced"),
"cli.progress_handlers_only" = NULL
)
fake <- list(
foo = list(able = function(...) FALSE),
bar = list(able = function(...) TRUE),
baz = list(),
forced = list()
)
mockery::stub(cli_progress_select_handlers, "builtin_handlers", fake)
expect_equal(cli_progress_select_handlers(), fake[c("bar", "forced")])
})
test_that("builtin_handlers", {
expect_true(is.list(builtin_handlers()))
expect_true(all(c("cli", "shiny", "rstudio") %in% names(builtin_handlers())))
})
|
print.rv <- function(x, digits = rvpar("print.digits"), ...) {
if (length(x)==0) {
return(cat("rv(0)\n"))
}
print(summary(x, ...), digits=digits, ...)
}
|
EndPointSeries <- "https://www.banxico.org.mx/SieAPIRest/service/v1/series/"
localEnv <- new.env(parent = emptyenv())
localEnv$BmxToken <- "NA"
validaToken <- function() {
  if(localEnv$BmxToken=="NA") stop("A token must be set before querying series. Use setToken(token)")
}
setTokenUtil <- function(token) {
if(!is.na(token) && !is.null(token))
localEnv$BmxToken <- token
}
consultaSeries <- function(seriesArr, startDate = NULL, endDate = NULL) {
  if(length(seriesArr)>20) stop("Too many series; at most 20 can be requested")
url <- EndPointSeries
path <- "/datos/"
locale <- "?locale=en"
fechas <- ""
series <- paste(seriesArr, sep = ",", collapse = ",")
if(!is.null(startDate) && !is.null(endDate))
fechas <- paste(startDate, endDate, sep = "/")
request <- paste(url, series, path, fechas, locale, sep = "")
token <- localEnv$BmxToken
headers <- httr::add_headers("Bmx-Token" = token, "Accept" = "application/json")
response <- httr::GET(request, headers)
if(response$status_code==200) {
jSonResponse=httr::content(response, as = "text")
bmxObject <- jsonlite::fromJSON(jSonResponse)
series <- bmxObject$bmx$series
seriesResponse <- vector("list", nrow(series))
for (row in 1:nrow(series)) {
idSerieTmp <- series[row, "idSerie"]
datos <- series[row, "datos"]
if(is.null(datos) || is.null(datos[[1]])) next
datosConvertidos <- suppressWarnings(as.numeric(gsub(",", "", datos[[1]][ ,"dato"], fixed = TRUE)))
fechasConvertidas <- as.Date(datos[[1]][ ,"fecha"], "%d/%m/%Y")
datosDataFrame <- data.frame(date=fechasConvertidas, value=datosConvertidos)
seriesResponse[[row]] <- c(datosDataFrame)
names(seriesResponse)[row] <- idSerieTmp
}
suppressWarnings(seriesResponse[!is.na(names(seriesResponse))])
} else if(response$status_code==404) {
warning(paste("Serie not found: ", series))
} else {
jSonResponse=httr::content(response, as = "text")
bmxObject <- jsonlite::fromJSON(jSonResponse)
mensaje <- bmxObject$error$mensaje
detalle <- bmxObject$error$detalle
stop(paste(mensaje,": ",detalle))
}
}
consultaMetadatosSeries <- function(seriesArr, localeCode="en") {
  if(length(seriesArr)>20) stop("Too many series; at most 20 can be requested")
url <- EndPointSeries
locale <- "?locale="
series <- paste(seriesArr, sep = ",", collapse = ",")
request <- paste(url, series, locale, localeCode, sep = "")
token <- localEnv$BmxToken
headers <- httr::add_headers("Bmx-Token" = token, "Accept" = "application/json")
response <- httr::GET(request, headers)
if(response$status_code==200) {
jSonResponse=httr::content(response, as = "text")
bmxObject <- jsonlite::fromJSON(jSonResponse)
series <- bmxObject$bmx$series
seriesResponse <- NULL
formatoFecha <- "%m/%d/%Y"
if(localeCode == "es") formatoFecha <- "%d/%m/%Y"
for (row in 1:nrow(series)) {
idSerieTmp <- series[row, "idSerie"]
tituloSerieTmp <- series[row, "titulo"]
fechaInicioTmp <- series[row, "fechaInicio"]
fechaFinTmp <- series[row, "fechaFin"]
periodicidadTmp <- series[row, "periodicidad"]
cifraTmp <- series[row, "cifra"]
unidadTmp <- series[row, "unidad"]
serieDataFrame <- data.frame(idSerie=idSerieTmp,
title=tituloSerieTmp,
startDate=as.Date(fechaInicioTmp, formatoFecha),
endDate=as.Date(fechaFinTmp, formatoFecha),
frequency=periodicidadTmp,
dataType=cifraTmp,
unit=unidadTmp
)
if(is.null(seriesResponse)) seriesResponse=serieDataFrame
else seriesResponse <- rbind(seriesResponse, serieDataFrame)
}
seriesResponse
} else if(response$status_code==404) {
warning(paste("Serie not found: ", series))
} else {
jSonResponse=httr::content(response, as = "text")
bmxObject <- jsonlite::fromJSON(jSonResponse)
mensaje <- bmxObject$error$mensaje
detalle <- bmxObject$error$detalle
stop(paste(mensaje,": ",detalle))
}
}
consultaUltimoDato <- function(seriesArr) {
  if(length(seriesArr)>20) stop("Too many series; at most 20 can be requested")
url <- EndPointSeries
path <- "/datos/oportuno"
locale <- "?locale=en"
series <- paste(seriesArr, sep = ",", collapse = ",")
request <- paste(url, series, path, locale, sep = "")
token <- localEnv$BmxToken
headers <- httr::add_headers("Bmx-Token" = token, "Accept" = "application/json")
response <- httr::GET(request, headers)
if(response$status_code==200) {
jSonResponse=httr::content(response, as = "text")
bmxObject <- jsonlite::fromJSON(jSonResponse)
series <- bmxObject$bmx$series
seriesResponse <- NULL
for (row in 1:nrow(series)) {
idSerieTmp <- series[row, "idSerie"]
datos <- series[row, "datos"]
if(is.null(datos) || is.null(datos[[1]])) next
datosConvertidos <- suppressWarnings(as.numeric(gsub(",", "", datos[[1]][ ,"dato"], fixed = TRUE)))
fechasConvertidas <- as.Date(datos[[1]][ ,"fecha"], "%d/%m/%Y")
serieDataFrame <- data.frame(idSerie=idSerieTmp, date=fechasConvertidas[[1]], value=datosConvertidos[[1]])
if(is.null(seriesResponse)) seriesResponse=serieDataFrame
else seriesResponse <- rbind(seriesResponse, serieDataFrame)
}
suppressWarnings(seriesResponse[!is.na(names(seriesResponse))])
} else if(response$status_code==404) {
warning(paste("Serie not found: ", series))
} else {
jSonResponse=httr::content(response, as = "text")
bmxObject <- jsonlite::fromJSON(jSonResponse)
mensaje <- bmxObject$error$mensaje
detalle <- bmxObject$error$detalle
stop(paste(mensaje,": ",detalle))
}
}
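## Usage sketch (the token and series id are placeholders):
##   setTokenUtil("your-Banxico-API-token")
##   consultaSeries(c("SF43718"), startDate = "2020-01-01", endDate = "2020-12-31")
##   consultaUltimoDato(c("SF43718"))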
|
`cIndexKM` <-
function (y, x, index = "all")
{
kmeans_res <- y
gss <- function(x, clsize, withins)
{
n <- sum(clsize)
k <- length(clsize)
allmean <- colMeans(x)
dmean <- sweep(x, 2, allmean, "-")
allmeandist <- sum(dmean^2)
wgss <- sum(withins)
bgss <- allmeandist - wgss
list(wgss = wgss, bgss = bgss)
}
calinski <- function(zgss, clsize)
{
n <- sum(clsize)
k <- length(clsize)
if (k <= 1)
NA
else
zgss$bgss/(k - 1)/(zgss$wgss/(n - k))
}
ssi <- function(centers, clsize)
{
ncl <- dim(centers)[1]
nvar <- dim(centers)[2]
n <- sum(clsize)
cmax <- apply(centers, 2, max)
cmin <- apply(centers, 2, min)
cord <- apply(centers, 2, order)
cmaxi <- cord[ncl, ]
cmini <- cord[1, ]
meanmean <- mean(centers)
absmdif <- abs(apply(centers, 2, mean) - meanmean)
span <- cmax - cmin
csizemax <- clsize[cmaxi]
csizemin <- clsize[cmini]
hiest <- nvar
hiestw <- hiest * max(max(csizemax), max(csizemin)) *
exp(-min(absmdif))
sist <- sum(span)/hiest
sistw <- (span * exp(-absmdif)) %*% sqrt(csizemax * csizemin)/hiestw
list(ssi = sist, ssiw = sistw)
}
zgss <- gss(x, kmeans_res$size, kmeans_res$withinss)
index <- pmatch(index, c("calinski", "ssi", "all"))
if (is.na(index))
stop("invalid clustering index")
if (index == -1)
stop("ambiguous index")
vecallindex <- numeric(3)
if (any(index == 1) || (index == 3))
vecallindex[1] <- calinski(zgss, kmeans_res$size)
if (any(index == 2) || (index == 3))
vecallindex[2] <- ssi(kmeans_res$centers, kmeans_res$size)$ssiw
names(vecallindex) <- c("calinski", "ssi")
if (index < 3)
vecallindex <- vecallindex[index]
vecallindex
}
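## Usage sketch: y is a kmeans() fit and x the data it was fitted to.
##   km <- kmeans(iris[, 1:4], centers = 3)
##   cIndexKM(km, iris[, 1:4], index = "calinski")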
|
mvkurtosis <- function(x) {
n <- dim(x)[1]
m <- Rfast::colmeans(x)
s <- (crossprod(x) - n * tcrossprod(m))/(n - 1)
sum( Rfast::mahala(x, m, s)^2 ) / n
}
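## Usage sketch (Mardia's multivariate kurtosis; for p-dimensional normal
## data the expected value is approximately p * (p + 2)):
##   x <- matrix(rnorm(1000 * 3), ncol = 3)
##   mvkurtosis(x)   # close to 15 for 3-dimensional normal data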
|
horvitzThompson <- function(y, pi = NULL, N = NULL, pi2 = NULL, var_est =FALSE, var_method="LinHB", B = 1000) {
if(!is.element(var_method, c("LinHB", "LinHH", "LinHTSRS", "LinHT", "bootstrapSRS"))){
message("Variance method input incorrect. It has to be \"LinHB\", \"LinHH\", \"LinHT\", \"LinHTSRS\", or \"bootstrapSRS\".")
return(NULL)
}
if(!(typeof(y) %in% c("numeric", "integer", "double"))){
stop("Must supply numeric y. For binary variable, convert to 0/1's.")
}
  if(is.null(pi) && is.null(N)){
    stop("Must supply either ", sQuote("pi"), " or ", sQuote("N"))
  }
  if(is.null(pi)){
    message("Assuming simple random sampling")
    pi <- rep(length(y)/N, length(y))
  }
  weight <- 1/pi
n <- length(y)
total <- as.vector(t(y) %*% weight)
if (is.null(N)) {
N <- sum(weight)
mu <- as.vector(total * (1/N))
} else {
mu <- as.vector((total/N))
}
if(var_est==TRUE){
if(var_method!="bootstrapSRS"){
varEst <- varMase(y = y,pi = pi,pi2 = pi2,method = var_method, N = N)
}else{
dat <- cbind(y,pi)
t_boot <- boot(data = dat, statistic = htt, R = B)
varEst <- var(t_boot$t)*n/(n-1)*(N-n)/(N-1)
}
varEstMu <- varEst*N^(-2)
return(list(pop_total = total, pop_mean = mu, pop_total_var=varEst, pop_mean_var = varEstMu))
}else{
return(list(pop_total = total, pop_mean = mu))
}
}
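## Usage sketch: with equal inclusion probabilities of 0.1 the estimated
## population total is sum(y) / 0.1.
##   y <- rnorm(50)
##   horvitzThompson(y, pi = rep(0.1, 50))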
|
epsilonSquared = function (x, g=NULL, group="row", ci=FALSE, conf=0.95,
type="perc",
R=1000, histogram=FALSE, digits=3,
reportIncomplete=FALSE,
... ){
if(is.matrix(x)){x=as.table(x)}
if(is.table(x)){
Counts = as.data.frame(x)
Long = Counts[rep(row.names(Counts), Counts$Freq), c(1, 2)]
    rownames(Long) = seq_len(nrow(Long))
if(group=="row"){
g=factor(Long[,1])
x=as.numeric(Long[,2])}
if(group=="column"){
g=factor(Long[,2])
x=as.numeric(Long[,1])}
}
g = factor(g)
g = droplevels(g)
n = length(g)
KW = kruskal.test(x, g, ...)
e2 = KW$statistic / (n-1)
E2 = signif(e2, digits=digits)
if(ci==TRUE){
Data = data.frame(x,g)
Function = function(input, index){
Input = input[index,]
n = length(Input$g)
if(length(unique(droplevels(Input$g)))==1){
FLAG=1
return(c(NA,FLAG))}
if(length(unique(droplevels(Input$g)))>1){
KW = kruskal.test(Input$x, Input$g, ...)
e2 = KW$statistic / (n-1)
FLAG=0
return(c(e2, FLAG))
}}
Boot = boot(Data, Function, R=R)
BCI = boot.ci(Boot, conf=conf, type=type)
if(type=="norm") {CI1=BCI$normal[2]; CI2=BCI$normal[3]}
if(type=="basic"){CI1=BCI$basic[4]; CI2=BCI$basic[5]}
if(type=="perc") {CI1=BCI$percent[4]; CI2=BCI$percent[5]}
if(type=="bca") {CI1=BCI$bca[4]; CI2=BCI$bca[5]}
if(sum(Boot$t[,2])>0 & reportIncomplete==FALSE) {CI1=NA; CI2=NA}
CI1=signif(CI1, digits=digits)
CI2=signif(CI2, digits=digits)
if(histogram==TRUE){hist(Boot$t[,1], col = "darkgray", xlab="epsilon-squared",
main="")}
}
if(ci==FALSE){names(E2) = "epsilon.squared"; return(E2)}
if(ci==TRUE){names(E2) = ""
return(data.frame(epsilon.squared=E2, lower.ci=CI1, upper.ci=CI2))}
}
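## Usage sketch (epsilon-squared effect size for a Kruskal-Wallis test):
##   epsilonSquared(x = mtcars$mpg, g = mtcars$cyl)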
|
source("test.prolog.R")
source("check.models.equal.R")
options(warn=1)
library(earth)
show.earth.mod <- function(mod, modname, nresponses, caption, trace, ...)
{
set.seed(2019)
cat("\nsummary:", modname, "\n\n")
print(summary(mod))
cat("\nevimp:", modname, "\n\n")
evimp <- evimp(mod)
print(evimp)
cat("\n")
nrow <- 1 + max(1, ceiling(nresponses * nrow(evimp(mod)) / 2))
par(mfrow=c(nrow, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
if(nresponses == 1)
plot(mod, which=c(1,3), do.par=0, caption=caption, trace=trace)
else {
plot(mod, nresponse=1, which=3, do.par=0,
caption=caption, trace=trace,
main="Response 1: Residuals vs Fitted")
plot(mod, nresponse=max(nresponses), which=3, do.par=0,
caption=caption, trace=trace,
main=sprint("Response %d: Residuals vs Fitted", max(nresponses)))
}
cat("\nplotmo:", modname, "\n\n")
if(nresponses == 1)
plotmo(mod, do.par=0, pt.col="red", trace=trace)
else for(iresponse in 1:nresponses)
plotmo(mod, nresponse=iresponse, do.par=0, pt.col=iresponse+1, trace=trace)
par(org.par)
cat("-------------------------------------------------------------------------------\n\n")
}
show.earth.formula <- function(formula, data=trees, subset=NULL, nresponses=1,
show=TRUE, caption=modname, trace=0, ...)
{
modname <- sprint("formula=%s (nresponses=%d)",
deparse(substitute(formula)), nresponses)
printf("%s\n", modname)
mod <- earth(formula=formula, data=data, subset=subset, trace=1, keepxy=TRUE)
global.mod <<- mod
n <- if(is.null(subset)) nrow(data) else nrow(data[subset,])
if(!(all(dim(mod$fitted.values) == c(n, nresponses)))) {
cat("dim(mod$fitted.values)", dim(mod$fitted.values), "\n")
stop("show.earth.formula: wrong response dimensions (see above)")
}
if(show)
show.earth.mod(mod=mod, modname=modname, nresponses=nresponses,
caption=caption, trace=trace, ...)
mod
}
show.earth.Formula <- function(formula, data=trees, subset=NULL, nresponses=1,
show=TRUE, caption=modname, trace=0, ...)
{
modname <- sprint("Formula=%s (nresponses=%d)",
deparse(substitute(formula)), nresponses)
printf("%s\n", modname)
mod <- earth(formula=formula, data=data, subset=subset, trace=1, keepxy=TRUE)
global.mod <<- mod
if(!(all(dim(mod$fitted.values) == c(31, nresponses)))) {
cat("dim(mod$fitted.values)", dim(mod$fitted.values), "\n")
stop("show.earth.Formula: wrong response dimensions (see above)")
}
show.earth.mod(mod=mod, modname=modname, nresponses=nresponses,
caption=caption, trace=trace, ...)
mod
}
VolNeg <- -sqrt(trees$Volume)
SinVol <- sin(pi * trees$Volume / max(trees$Volume))
global.mod <- NULL
show.earth.formula(Volume/VolNeg ~., show=FALSE)
show.earth.formula(Volume/99 ~., show=FALSE)
show.earth.formula(Volume*99 ~., show=FALSE)
show.earth.formula(Volume-99 ~., show=FALSE)
show.earth.formula(Volume ~., show=FALSE)
show.earth.formula(cbind(Volume+VolNeg)~., show=FALSE)
show.earth.formula((Volume+VolNeg) ~., show=FALSE)
show.earth.formula(I(Volume+VolNeg) ~., show=FALSE)
show.earth.formula(VolNeg~Girth+Height, show=FALSE)
show.earth.formula(cbind(VolNeg, SinVol)~., nresponses=2, show=FALSE)
show.earth.formula(cbind(VolNeg, SinVol)~., nresponses=2, show=FALSE)
show.earth.formula(cbind(VolNeg/33, SinVol)~., nresponses=2, show=FALSE)
show.earth.formula(cbind(VolNeg+33, SinVol)~., nresponses=2, show=FALSE)
show.earth.formula(cbind(VolNeg, SinVol)~Girth, nresponses=2, show=FALSE)
randx <- c(0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0)
show.earth.formula(VolNeg~randx, nresponses=1)
VolNeg.randx <- earth(VolNeg~randx, trace=1)
plotmo(VolNeg.randx)
VolVolNeg <- show.earth.formula(cbind(Volume, VolNeg)~Girth+Height, nresponses=2, trace=0)
trees1 <- trees
Volume <- trees1$Volume
trees1$Volume <- NULL
cbind.Volume.VolNeg <- cbind(Volume, VolNeg)
VolGlobalVolNeg <- show.earth.formula(cbind(Volume, VolNeg)~Girth+Height, data=trees1, nresponses=2, trace=0,
caption="VolGlobalVolNeg: This page should be the same as the previous page")
check.models.equal(VolVolNeg, VolGlobalVolNeg, msg="VolVolNeg, VolGlobalVolNeg", newdata=trees[3,])
VolVolNega <- show.earth.Formula(Volume+VolNeg~Girth+Height, nresponses=2,
caption="VolVolNega: This page should be the same as the previous page")
check.models.equal(VolVolNega, VolVolNeg, msg="VolVolNega, VolVolNeg", newdata=trees[3,])
Vol.VolNeg.dot <- show.earth.Formula(Volume+VolNeg~., nresponses=2,
caption="Vol.VolNeg.dot: This page should be the same as the previous page")
check.models.equal(Vol.VolNeg.dot, VolVolNega, msg="Vol.VolNeg.dot, VolVolNega", newdata=trees[3,])
trees1 <- trees
trees1$VolNeg <- VolNeg
VolVolNegc <- show.earth.Formula(Volume+VolNeg~., data=trees1, nresponses=2,
caption="Vol.VolNeg.trees1: This page should be the same as the previous page")
check.models.equal(VolVolNegc, VolVolNega, msg="VolVolNegc, VolVolNega", newdata=trees1[2:3,])
VolVolNega.nokeepxy <- earth(Volume+VolNeg~Girth+Height, data=trees, trace=1)
check.models.equal(VolVolNega.nokeepxy, VolVolNega, msg="VolVolNega.nokeepxy, VolVolNega", newdata=trees1[2:3,])
caption <- "VolVolNega.nokeepxy This page should be the same as the previous page"
par(mfrow=c(3, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
plot(VolVolNega.nokeepxy, nresponse=1, which=3, do.par=0,
caption=caption, trace=0,
main="Response 1: Residuals vs Fitted")
plot(VolVolNega.nokeepxy, nresponse=2, which=3, do.par=0,
caption=caption, trace=0,
main="Response 2: Residuals vs Fitted")
plotmo(VolVolNega.nokeepxy, nresponse=1, do.par=0, pt.col=2)
plotmo(VolVolNega.nokeepxy, nresponse=2, do.par=0, pt.col=3)
par(org.par)
plot(VolVolNega.nokeepxy)
subset2 <- seq(from=1, to=nrow(trees1), by=2)
Vol.formula.subset.nokeepxy <- earth(Volume~Girth+Height, data=trees1, subset=subset2, trace=1)
plot(Vol.formula.subset.nokeepxy, caption="Vol.formula.subset.nokeepxy")
plotmo(Vol.formula.subset.nokeepxy, nresponse=1, trace=1, pt.col=2, caption="Vol.formula.subset.nokeepxy")
Vol.formula.subset.keepxy <- earth(Volume~Girth+Height, data=trees1, subset=subset2, trace=1, keepxy=TRUE)
plotmo(Vol.formula.subset.keepxy, nresponse=1, trace=1, pt.col=2, caption="Vol.formula.subset.keepxy")
VolVolNega.formula.subset.nokeepxy <- earth(cbind.Volume.VolNeg~Girth+Height, data=trees1, subset=subset2, trace=1)
VolVolNega.Formula.subset.nokeepxy <- earth(Volume+VolNeg ~Girth+Height, data=trees1, subset=subset2, trace=1)
check.models.equal(VolVolNega.formula.subset.nokeepxy, VolVolNega.Formula.subset.nokeepxy, "VolVolNega.formula.subset.nokeepxy, VolVolNega.Formula.subset.nokeepxy", newdata=trees[3,])
caption <- "VolVolNega.formula.subset.nokeepxy"
par(mfrow=c(3, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
plot(VolVolNega.formula.subset.nokeepxy, nresponse=1, which=3, do.par=0,
caption=caption, trace=1,
main="Response 1: Residuals vs Fitted")
plot(VolVolNega.formula.subset.nokeepxy, nresponse=2, which=3, do.par=0,
caption=caption, trace=1,
main="Response 2: Residuals vs Fitted")
plotmo(VolVolNega.formula.subset.nokeepxy, nresponse=1, do.par=0, pt.col=2)
plotmo(VolVolNega.formula.subset.nokeepxy, nresponse=2, do.par=0, pt.col=3)
par(org.par)
caption <- "VolVolNega.Formula.subset.nokeepxy"
par(mfrow=c(3, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
plot(VolVolNega.Formula.subset.nokeepxy, nresponse=1, which=3, do.par=0,
caption=caption, trace=1,
main="Response 1: Residuals vs Fitted")
plot(VolVolNega.Formula.subset.nokeepxy, nresponse=2, which=3, do.par=0,
caption=caption, trace=1,
main="Response 2: Residuals vs Fitted")
plotmo(VolVolNega.Formula.subset.nokeepxy, nresponse=1, do.par=0, pt.col=2)
plotmo(VolVolNega.Formula.subset.nokeepxy, nresponse=2, do.par=0, pt.col=3)
par(org.par)
subset2 <- seq(from=1, to=nrow(trees1), by=2)
VolVolNega.formula.subset.keepxy <- earth(cbind.Volume.VolNeg~Girth+Height, data=trees1, subset=subset2, trace=1, keepxy=TRUE)
VolVolNega.Formula.subset.keepxy <- earth(Volume+VolNeg ~Girth+Height, data=trees1, subset=subset2, trace=1, keepxy=TRUE)
check.models.equal(VolVolNega.formula.subset.nokeepxy, VolVolNega.formula.subset.keepxy, msg="VolVolNega.formula.subset.nokeepxy, VolVolNega.formula.subset.keepxy", newdata=trees1[2:3,])
check.models.equal(VolVolNega.Formula.subset.nokeepxy, VolVolNega.Formula.subset.keepxy, msg="VolVolNega.Formula.subset.nokeepxy, VolVolNega.Formula.subset.keepxy", newdata=trees1[2:3,])
check.models.equal(VolVolNega.formula.subset.keepxy, VolVolNega.Formula.subset.keepxy, msg="VolVolNega.formula.subset.keepxy, VolVolNega.Formula.subset.keepxy", newdata=trees1[2:3,])
caption <- "VolVolNega.formula.subset.keepxy"
par(mfrow=c(3, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
plot(VolVolNega.formula.subset.keepxy, nresponse=1, which=3, do.par=0,
caption=caption, trace=1,
main="Response 1: Residuals vs Fitted")
plot(VolVolNega.formula.subset.keepxy, nresponse=2, which=3, do.par=0,
caption=caption, trace=1,
main="Response 2: Residuals vs Fitted")
try(plotmo(VolVolNega.formula.subset.keepxy, nresponse=1, do.par=0, pt.col=2))
try(plotmo(VolVolNega.formula.subset.keepxy, nresponse=2, do.par=0, pt.col=3))
par(org.par)
caption <- "VolVolNega.Formula.subset.keepxy"
par(mfrow=c(3, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
plot(VolVolNega.Formula.subset.keepxy, nresponse=1, which=3, do.par=0,
caption=caption, trace=1,
main="Response 1: Residuals vs Fitted")
plot(VolVolNega.Formula.subset.keepxy, nresponse=2, which=3, do.par=0,
caption=caption, trace=1,
main="Response 2: Residuals vs Fitted")
try(plotmo(VolVolNega.Formula.subset.keepxy, nresponse=1, do.par=0, pt.col=2))
try(plotmo(VolVolNega.Formula.subset.keepxy, nresponse=2, do.par=0, pt.col=3))
par(org.par)
weights2 <- sqrt(1:nrow(trees1))
VolVolNega.formula.weights.subset.nokeepxy <- earth(cbind.Volume.VolNeg~Girth+Height, data=trees1, weights=weights2, subset=subset2, trace=1)
VolVolNega.Formula.weights.subset.nokeepxy <- earth(Volume+VolNeg ~Girth+Height, data=trees1, weights=weights2, subset=subset2, trace=1)
check.models.equal(VolVolNega.formula.weights.subset.nokeepxy, VolVolNega.Formula.weights.subset.nokeepxy, "VolVolNega.formula.weights.subset.nokeepxy, VolVolNega.Formula.weights.subset.nokeepxy")
caption <- "VolVolNega.formula.weights.subset.nokeepxy"
par(mfrow=c(3, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
plot(VolVolNega.formula.weights.subset.nokeepxy, nresponse=1, which=3, do.par=0,
caption=caption, trace=1,
main="Response 1: Residuals vs Fitted")
plot(VolVolNega.formula.weights.subset.nokeepxy, nresponse=2, which=3, do.par=0,
caption=caption, trace=1,
main="Response 2: Residuals vs Fitted")
plotmo(VolVolNega.formula.weights.subset.nokeepxy, nresponse=1, do.par=0, pt.col=2)
plotmo(VolVolNega.formula.weights.subset.nokeepxy, nresponse=2, do.par=0, pt.col=3)
par(org.par)
caption <- "VolVolNega.Formula.weights.subset.nokeepxy"
par(mfrow=c(3, 2), mar=c(3, 3, 3, 1), mgp=c(1.5, 0.5, 0), oma=c(0, 0, 5, 0))
plot(VolVolNega.Formula.weights.subset.nokeepxy, nresponse=1, which=3, do.par=0,
caption=caption, trace=1,
main="Response 1: Residuals vs Fitted")
plot(VolVolNega.Formula.weights.subset.nokeepxy, nresponse=2, which=3, do.par=0,
caption=caption, trace=1,
main="Response 2: Residuals vs Fitted")
plotmo(VolVolNega.Formula.weights.subset.nokeepxy, nresponse=1, do.par=0, pt.col=2)
plotmo(VolVolNega.Formula.weights.subset.nokeepxy, nresponse=2, do.par=0, pt.col=3)
par(org.par)
data(ozone1)
mul1 <- earth(cbind(O3,wind) ~ ., data=ozone1)
mul2 <- earth(O3 + wind ~ ., data=ozone1)
check.models.equal(mul2, mul1, "mul2, mul1", newdata=ozone1[1:3,])
mul3 <- earth(ozone1[,-c(1,3)], ozone1[,c(1,3)])
check.models.equal(mul3, mul1, "mul3, mul1", newdata=ozone1[1:3,])
mul4 <- earth(cbind(log.O3=log(O3),wind) ~ ., data=ozone1)
print(summary(mul4))
x1 <- ozone1$O3
x2 <- ozone1$wind
x3 <- ozone1$O3
y1 <- ozone1$temp
y2 <- ozone1$doy
mul5 <- earth(x=data.frame(x1, x2, log.x3=log(x3)), y=data.frame(y1, y2), trace=1)
print(summary(mul5))
log.x3 <- log(x3)
mul6 <- earth(y1 + y2 ~ x1 + x2 + log.x3, trace=1)
stopifnot(all.equal(as.vector(mul5$coefficients), as.vector(mul6$coefficients)))
stopifnot(all.equal(as.vector(mul5$dirs), as.vector(mul6$dirs)))
mul7 <- earth(y1 + y2 ~ x1 + x2 + log(x3), trace=1)
stopifnot(all.equal(as.vector(mul5$coefficients), as.vector(mul7$coefficients)))
stopifnot(all.equal(as.vector(mul5$dirs), as.vector(mul7$dirs)))
expect.err(try(earth(log(O3 + wind) + ibt ~ temp, data=ozone1, trace=1)),
"terms like 'log(O3 + wind)' are not allowed on the LHS of a multiple-response formula")
expect.err(try(show.earth.Formula(VolNeg+Volume~1, nresponses=2)), "'x' has no columns")
expect.err(try(show.earth.Formula(VolNeg+Volume~Volume, nresponses=2)), "x is empty")
options(warn=2)
expect.err(try(show.earth.formula(Volume~Volume)), "(converted from warning) the response appeared on the right-hand side and was dropped")
options(warn=1)
show.earth.Formula(VolNeg+Volume~Girth, nresponses=2, subset=)
show.earth.Formula(Volume+VolNeg+SinVol~., nresponses=3)
show.earth.formula(VolNeg+SinVol~randx, nresponses=2)
VolNeg.SinVol.randx <- earth(VolNeg+SinVol~randx, trace=1)
plotmo(VolNeg.SinVol.randx, nresponse=2)
expect.err(try(earth(Volume+VolNeg|99~Girth+Height, data=trees, trace=1)), "multiple parts on left side of formula (because \"|\" was used)")
expect.err(try(earth(Volume+VolNeg~Girth+Height|Volume, data=trees, trace=1)), "multiple parts on right side of formula (because \"|\" was used)")
a1 <- earth(Volume+VolNeg~Girth+(Height|Volume), data=trees, trace=1)
stopifnot(NCOL(a1$coefficients) == 2)
a2 <- earth(Volume+VolNeg~Girth+I(Height|Volume), data=trees, trace=1)
stopifnot(NCOL(a2$coefficients) == 2)
a3 <- earth((Volume+VolNeg)~Girth+Height, data=trees, trace=1)
stopifnot(NCOL(a3$coefficients) == 1)
expect.err(try(earth(Volume+VolNeg*999~., data=trees, trace=1)), "invalid model formula in ExtractVars")
expect.err(try(earth(Volume+VolNeg/99+SinVol~., data=trees, trace=1)), "invalid model formula in ExtractVars")
library(earth)
data(ozone1)
expect.err(try(earth(log(O3) + wind ~ ., data=ozone1, trace=1)),
"terms like 'log(O3)' are not allowed on the LHS of a multiple-response formula")
a1 <- earth(cbind(log.O3=log(O3),wind) ~ humidity+temp, data=ozone1)
options(warn=2)
expect.err(try(coef(a1)), "coef.earth: multiple response model: returning coefficients for just the first response")
options(warn=1)
a2 <- earth(cbind(log(O3),wind) ~ humidity+temp, data=ozone1)
stopifnot(all.equal(as.vector(a2$coefficients), as.vector(a1$coefficients)))
log.O3 <- log(ozone1$O3)
a3 <- earth(cbind(log.O3,wind) ~ humidity+temp, data=ozone1)
stopifnot(all.equal(as.vector(a3$coefficients), as.vector(a1$coefficients)))
a4 <- earth(log.O3+wind ~ humidity+temp, data=ozone1)
stopifnot(all.equal(as.vector(a4$coefficients), as.vector(a1$coefficients)))
expect.err(try(earth(log(O3)+wind ~ humidity+temp, data=ozone1)),
"terms like 'log(O3)' are not allowed on the LHS of a multiple-response formula")
data(etitanic)
pclass.age <- earth(pclass+age~sibsp, data=etitanic)
plot(pclass.age, nresponse=4)
par(mfrow=c(2,2))
cat("plotmo(pclass.age, nresponse=1):\n")
plotmo(pclass.age, nresponse=1, main="nresponse=1, pclass1st", do.par=FALSE)
cat("plotmo(pclass.age, nresponse=2):\n")
plotmo(pclass.age, nresponse=2, main="nresponse=2, pclass2nd", do.par=FALSE)
cat("plotmo(pclass.age, nresponse=3):\n")
plotmo(pclass.age, nresponse=3, main="nresponse=3, pclass3rd", do.par=FALSE)
cat("plotmo(pclass.age, nresponse=4):\n")
plotmo(pclass.age, nresponse=4, main="nresponse=4, age", do.par=FALSE)
cat("plotmo(pclass.age, nresponse=5):\n")
options(warn=2)
expect.err(try(plotmo(pclass.age, nresponse=5, main="nresponse=5", do.par=FALSE)), "nresponse is 5 but the number of columns is only 4")
options(warn=1)
age.pclass <- earth(age+pclass~sibsp, data=etitanic)
par(mfrow=c(2,2))
cat("plotmo(age.pclass, nresponse=1):\n")
plotmo(age.pclass, nresponse=1, main="nresponse=1, age", do.par=FALSE, trace=1)
cat("plotmo(age.pclass, nresponse=2):\n")
plotmo(age.pclass, nresponse=2, main="nresponse=2, pclass1st", do.par=FALSE)
cat("plotmo(age.pclass, nresponse=3):\n")
plotmo(age.pclass, nresponse=3, main="nresponse=3, pclass2nd", do.par=FALSE)
cat("plotmo(age.pclass, nresponse=4):\n")
plotmo(age.pclass, nresponse=4, main="nresponse=4, pclass3rd", do.par=FALSE)
cat("plotmo(age.pclass, nresponse=5):\n")
options(warn=2)
expect.err(try(plotmo(age.pclass, nresponse=5, main="nresponse=5", do.par=FALSE)), "nresponse is 5 but the number of columns is only 4")
options(warn=1)
pclass.sex <- earth(pclass+sex~sibsp, data=etitanic)
par(mfrow=c(2,2))
cat("plotmo(pclass.sex, nresponse=1):\n")
plotmo(pclass.sex, nresponse=1, main="nresponse=1, pclass1st", do.par=FALSE, trace=1)
cat("plotmo(pclass.sex, nresponse=2):\n")
plotmo(pclass.sex, nresponse=2, main="nresponse=2, pclass2nd", do.par=FALSE)
cat("plotmo(pclass.sex, nresponse=3):\n")
plotmo(pclass.sex, nresponse=3, main="nresponse=3, pclass3rd", do.par=FALSE)
cat("plotmo(pclass.sex, nresponse=4):\n")
plotmo(pclass.sex, nresponse=4, main="nresponse=4, age", do.par=FALSE)
cat("plotmo(pclass.sex, nresponse=5):\n")
options(warn=2)
expect.err(try(plotmo(pclass.sex, nresponse=5, main="nresponse=5", do.par=FALSE)), "nresponse is 5 but the number of columns is only 4")
options(warn=1)
options(warn=2)
expect.err(try(earth(pclass+sex~.-survived, data=etitanic)), "'varlist' has changed (from nvar=4) to new 5 after EncodeVars() -- should no longer happen!")
options(warn=1)
expect.err(try(earth(pclass+sex~.-survived, data=etitanic)), "model frame and formula mismatch in model.matrix()")
options(warn=2)
expect.err(try(earth(pclass+sex~.-nonesuch, data=etitanic)), "'varlist' has changed (from nvar=5) to new 6 after EncodeVars() -- should no longer happen!")
options(warn=1)
expect.err(try(earth(pclass+sex~.-nonesuch, data=etitanic)), "model frame and formula mismatch in model.matrix()")
source("test.epilog.R")
|
library(SEMID)
library(igraph)
context("Testing known examples on graphID.ancestralID.")
source("graphExamples.R")
test_that("graphID.ancestralID returns correct value for known examples.", {
  for (i in seq_along(graphExamples)) {
graphExample <- graphExamples[[i]]
L <- graphExample$L
O <- graphExample$O
htcId <- graphExample$htcId
ancId <- graphExample$ancId
m <- nrow(L)
if (!is.dag(graph.adjacency(L))) {
next
}
result <- graphID.ancestralID(L, O)
if (!is.null(ancId)) {
if (ancId == 1) {
expect_equal(sort(result), 1:m)
} else {
expect_true(all(result %in% 1:m) && length(result) < m)
}
}
if (htcId == 0) {
expect_true(all(result %in% 1:m) && length(result) < m)
} else if (htcId == 1) {
expect_equal(sort(result), 1:m)
} else {
expect_true(all(graphID.htcID(L, O) %in% result))
}
}
})
|
"summary.Pedig"<-function(object, keep.only=NULL, maxd=50, d=4, ...){
PedigAsDataTable <- "data.table" %in% class(object)
object <- as.data.frame(object)
if(is.logical(keep.only)){keep.only <- object[keep.only,1]}
ids <- as.character(object[[1]])
if(!is.null(keep.only)){
keep.only <- as.character(keep.only)
keep.only <- setdiff(keep.only, c(NA))
}
Pedig <- prePed(object, keep=keep.only, addNum=TRUE)
ids <- ids[ids %in% Pedig$Indiv]
Pedig$Inbreeding <- pedInbreeding(Pedig)$Inbr
compl <- completeness(Pedig, maxd=maxd, by="Indiv")
setDT(compl)
setDT(Pedig)
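  # dummy assignments to silence R CMD check NOTEs about data.table NSE variables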
Completeness <- NULL
equiGen <- NULL
fullGen <- NULL
Indiv <- NULL
maxGen <- NULL
meanCom <- NULL
numSire <- NULL
numDam <- NULL
Inbreeding <- NULL
Res <- compl[, list(maxGen=length(Completeness)-1, equiGen=sum(Completeness)-1), by="Indiv"]
sRes <- compl[compl$Completeness==1, list(fullGen=length(Completeness)-1), by="Indiv"]
Res <- sRes[Res, list(Indiv, equiGen, fullGen, maxGen), on="Indiv"]
sRes <- compl[compl$Generation %in% (0:(d-1)), list(meanCom=sum(Completeness)/(1*d)), by="Indiv"]
Res <- sRes[Res, list(Indiv, equiGen, fullGen, maxGen, meanCom), on="Indiv"]
Res$meanCom[is.na(Res$meanCom)]<-0
Pedig <- Res[Pedig,list(Indiv, numSire, numDam, equiGen, fullGen, maxGen, meanCom, Inbreeding), on="Indiv"]
  Pedig <- as.data.frame(Pedig)
Pedig$patIndex <- 0
Pedig[Pedig$numSire!=0,"patIndex"]<-Pedig[Pedig$numSire[Pedig$numSire!=0],"meanCom"]
Pedig$matIndex <- 0
Pedig[Pedig$numDam!=0,"matIndex"]<-Pedig[Pedig$numDam[Pedig$numDam!=0],"meanCom"]
Pedig$PCI <- 2*Pedig$matIndex*Pedig$patIndex/(Pedig$matIndex+Pedig$patIndex)
Pedig[is.na(Pedig$PCI),"PCI"]<-0
Pedig <- Pedig[,c("Indiv","equiGen", "fullGen", "maxGen", "PCI","Inbreeding")]
if(!is.null(keep.only)){
ids<-ids[ids %in% keep.only]
}
rownames(Pedig)<-Pedig$Indiv
Pedig <- Pedig[ids,]
if(PedigAsDataTable){setDT(Pedig)}
Pedig
}
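# Illustrative usage (a hedged sketch; assumes the pedigree helpers prePed(),
# pedInbreeding(), and completeness() used above are available, e.g. from optiSel):
# ped <- data.frame(Indiv = c("A", "B", "C"),
#                   Sire  = c(NA,  NA,  "A"),
#                   Dam   = c(NA,  NA,  "B"))
# summary.Pedig(ped)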
|
library(R2WinBUGS)
library(BRugs)
dat = read.csv("C3_Species.csv", header = TRUE)
# Farquhar-type C3 photosynthesis model: net assimilation is the minimum of the
# light-limited (al) and Rubisco-limited (ae) rates, minus respiration r
my.model = function(){
  Jmax ~ dlnorm(4.7, 2.7)   # maximum electron transport rate
  alpha ~ dnorm(0.25, 100)  # quantum yield
  vmax ~ dlnorm(4.6, 2.7)   # maximum carboxylation rate
  r ~ dlnorm(0.75, 1.56)    # leaf respiration
  cp ~ dlnorm(1.9, 2.7)     # CO2 compensation point
  tau ~ dgamma(0.1, 0.1)    # residual precision
  for(i in 1:n){
    al[i] <- (alpha*q[i]/(sqrt(1+(alpha*alpha*q[i]*q[i])/(Jmax*Jmax))))*(pi[i]-cp)/(4*pi[i]+8*cp)
    ae[i] <- vmax*(pi[i]-cp)/(pi[i]+Kc*(1+po/Ko))
    prean[i] <- min(al[i], ae[i]) - r
    an[i] ~ dnorm(prean[i], tau)  # likelihood of observed assimilation
    pA[i] ~ dnorm(prean[i], tau)  # posterior predictive replicate
  }
}
write.model(my.model,"c3model.txt")
init<-list()
init[[1]]<-list(r=1.2, vmax=39,alpha=0.25, tau=10, cp=6, Jmax=80)
init[[2]]<-list(r=1, vmax=100, alpha=0.20, tau=20, cp=4, Jmax=150)
init[[3]]<-list(r=2, vmax=60, alpha=0.28, tau=20, cp=5,Jmax=60)
spp.list = unique(as.character(dat$id))
c3mcmc <- list()
for(s in spp.list){
sel = which(dat$id == s)
an=dat$Photo[sel]
pi=dat$Ci_Pa[sel]
q=dat$PARi[sel]
mydat<-list(an=an, pi=pi, q=q,n=length(an),Kc=46,Ko=22000,po=21000)
mc3 <- openbugs(mydat,
init,
model.file="c3model.txt",
n.chains=3,
n.iter=1000,
n.burnin=500,
n.thin =25,
parameters.to.save=c("r","vmax","alpha","Jmax", "cp","tau", "prean", "pA")
)
c3mcmc[[s]]=mc3
}
for(s in spp.list){
pdf(paste(s, " model trace.pdf",sep=""))
plot(as.mcmc.list(c3mcmc[[s]]))
dev.off()
}
# Plot predicted vs measured An for the last species fitted in the loop above
sel1 = which(dat$id == s)
an = dat$Photo[sel1]
plot(an, c3mcmc[[s]]$mean$prean, ylim=c(-5,40), xlim=c(-5,40), pch=19,
     main="Predicted photosynthesis vs measured photosynthesis", xlab="Measured An (umol m-2 s-1)",
     ylab="Predicted An (umol m-2 s-1)", cex.main=1.6, cex.lab=1.4)
abline(0,1, col="dark green",lwd=3)
|
.dt_styles_key <- "_styles"
dt_styles_get <- function(data) {
dt__get(data, .dt_styles_key)
}
dt_styles_set <- function(data, styles) {
dt__set(data, .dt_styles_key, styles)
}
dt_styles_init <- function(data) {
dplyr::tibble(
locname = character(0),
grpname = character(0),
colname = character(0),
locnum = numeric(0),
rownum = integer(0),
colnum = integer(0),
styles = list()
) %>%
dt_styles_set(styles = ., data = data)
}
dt_styles_add <- function(data,
locname,
grpname,
colname,
locnum,
rownum,
styles) {
data %>%
dt_styles_get() %>%
dplyr::bind_rows(
dplyr::tibble(
locname = locname,
grpname = grpname,
colname = colname,
locnum = locnum,
rownum = rownum,
colnum = NA_integer_,
styles = list(styles)
)
) %>%
dt_styles_set(styles = ., data = data)
}
dt_styles_pluck <- function(styles_tbl,
locname = missing_arg(),
grpname = missing_arg(),
colname = missing_arg(),
locnum = missing_arg(),
rownum = missing_arg(),
grprow = missing_arg()) {
if (is.null(styles_tbl$html_style)) {
stop("dt_styles_pluck must be passed a built styles table")
}
idx <- rep_len(TRUE, nrow(styles_tbl))
if (!is_missing(locname)) {
idx <- idx & styles_tbl$locname %in% locname
}
if (!is_missing(grpname)) {
idx <- idx & styles_tbl$grpname %in% grpname
}
if (!is_missing(colname)) {
idx <- idx & styles_tbl$colname %in% colname
}
if (!is_missing(locnum)) {
idx <- idx & styles_tbl$locnum %in% locnum
}
if (!is_missing(rownum)) {
idx <- idx & styles_tbl$rownum %in% rownum
}
if (!is_missing(grprow)) {
idx <- idx & round((styles_tbl$rownum %% 1) * 100) %in% grprow
}
styles_tbl[idx, ]
}
|
calculateNLL <-
function(resp, des, fixEf, reVar, resVar){
  # Gaussian log-likelihood of a linear mixed model whose marginal covariance is
  # resVar^2 * (des diag(reVar)^2 des' + I); dmvnorm() is from the mvtnorm package
  n = nrow(des)
  V = (resVar^2) * (des %*% diag(reVar) %*% diag(reVar) %*% t(des) + diag(n))
  return(mvtnorm::dmvnorm(as.vector(resp), mean = des %*% fixEf, sigma = V, log = TRUE))
}
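# Illustrative usage (a hedged sketch; assumes the mvtnorm package is installed):
# des  <- cbind(1, rnorm(6))                     # design for two random effects
# resp <- des %*% c(1, 0.5) + rnorm(6)
# calculateNLL(resp, des, fixEf = c(1, 0.5), reVar = c(0.3, 0.3), resVar = 1)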
|
intermittent_pumping <- function(t, starts, stops, rates, method = "glover", d, S, Tr, ...) {
  starts.all <- base::matrix(starts, nrow = length(t), ncol = length(starts), byrow = TRUE)
  stops.all <- base::matrix(stops, nrow = length(t), ncol = length(starts), byrow = TRUE)
  rates.all <- base::matrix(rates, nrow = length(t), ncol = length(starts), byrow = TRUE)
t.all <- base::matrix(t, nrow = length(t), ncol = length(starts))
t.starts <- t.all - starts.all
t.starts[t.starts < 0] <- 0
t.stops <- t.all - stops.all
t.stops[t.stops < 0] <- 0
t.starts.vec <- c(t.starts)
t.stops.vec <- c(t.stops)
rates.all.vec <- c(rates.all)
Qs.all.vec <- rep(0, length(t.starts.vec))
if (method == "glover") {
Qs.all.vec[t.starts.vec > 0] <-
rates.all.vec[t.starts.vec > 0] *
(glover(t = t.starts.vec[t.starts.vec > 0], d = d, S = S, Tr = Tr) -
glover(t = t.stops.vec[t.starts.vec > 0], d = d, S = S, Tr = Tr))
} else if (method == "hunt") {
if (!exists("lmda", where = list(...))) stop("Need to supply lmda value for Hunt model")
lmda <- list(...)$lmda
if (exists("lmda_max", where = list(...))) {
lmda_max <- list(...)$lmda_max
} else {
lmda_max <- Inf
}
Qs.all.vec[t.starts.vec > 0] <-
rates.all.vec[t.starts.vec > 0] *
(hunt(t = t.starts.vec[t.starts.vec > 0], d = d, S = S, Tr = Tr, lmda = lmda, lmda_max = lmda_max) -
hunt(t = t.stops.vec[t.starts.vec > 0], d = d, S = S, Tr = Tr, lmda = lmda, lmda_max = lmda_max))
}
  Qs.all <- matrix(Qs.all.vec, nrow = length(t), ncol = length(starts))
  Q.out <- rowSums(Qs.all)   # superpose depletion from all pumping intervals
  return(Q.out)
}
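# Illustrative usage (a hedged sketch; glover() is the analytical stream
# depletion function assumed to be defined alongside this helper):
# Qs <- intermittent_pumping(t = 1:365,
#                            starts = c(0, 180), stops = c(90, 270),
#                            rates = c(500, 500), method = "glover",
#                            d = 100, S = 0.1, Tr = 100)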
|
sgPLSda <-
function(X,
Y,
ncomp = 2,
keepX = rep(ncol(X), ncomp),
max.iter = 500,
tol = 1e-06,
           ind.block.x, alpha.x, upper.lambda = 10^5)
{
if (is.null(dim(Y)))
{
Y = as.factor(Y)
ind.mat = unmap(as.numeric(Y))
}else {
stop("'Y' should be a factor or a class vector.")
}
result = sgPLS(X, ind.mat, ncomp = ncomp, mode = "regression", keepX = keepX,
max.iter = max.iter, tol = tol,ind.block.x = ind.block.x,alpha.x = alpha.x,upper.lambda = upper.lambda)
cl = match.call()
cl[[1]] = as.name('sgPLSda')
result$call = cl
result$ind.mat = ind.mat
result$names$Y = levels(Y)
class(result) = c("sgPLSda","gPLSda","splsda","plsda")
return(invisible(result))
}
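# Illustrative usage (a hedged sketch; sgPLS() and unmap() are assumed to come
# from the sparse group PLS package this wrapper belongs to):
# X <- matrix(rnorm(40 * 10), 40, 10)
# Y <- rep(c("a", "b"), each = 20)
# fit <- sgPLSda(X, Y, ncomp = 2, keepX = c(2, 2),
#                ind.block.x = 5, alpha.x = c(0.5, 0.5))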
|
`drop1.cca` <-
function(object, scope, test = c("none", "permutation"),
permutations = how(nperm = 199), ...)
{
if (inherits(object, "prc"))
stop("'step'/'drop1' cannot be used for 'prc' objects")
if (is.null(object$terms))
stop("ordination model must be fitted using formula")
test <- match.arg(test)
out <- NextMethod("drop1", object, test="none", ...)
cl <- class(out)
if (test == "permutation") {
rn <- rownames(out)[-1]
if (missing(scope))
scope <- rn
else if (!is.character(scope))
scope <- drop.scope(scope)
adds <- anova(object, by = "margin", scope = scope,
permutations = permutations, ...)
nr <- nrow(adds)
out <- cbind(out, rbind(NA, adds[rn,3:4]))
class(out) <- cl
}
out
}
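# Illustrative usage (a hedged sketch with vegan's bundled example data):
# library(vegan)
# data(dune, dune.env)
# mod <- cca(dune ~ A1 + Management, data = dune.env)
# drop1(mod, test = "permutation", permutations = how(nperm = 199))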
|
KWDual <- function(A, d, w, ...){
n <- nrow(A)
m <- ncol(A)
A <- t(A)
A <- Matrix::Matrix(A, sparse = TRUE)
dots <- list(...)
rtol <- ifelse(length(dots$rtol), dots$rtol, 1e-6)
verb <- ifelse(length(dots$verb), dots$verb, 0)
if(length(dots$control)) control <- dots$control
else control <- NULL
if(utils::packageVersion("Rmosek") < 9){
P <- list(sense = "min")
P$c <- rep(0, n)
P$A <- A
P$bc <- rbind(rep(0,m),d)
P$bx <- rbind(rep(0,n),rep(Inf,n))
opro <- matrix ( list (), nrow =5, ncol = n)
rownames ( opro ) <- c(" type ","j","f","g","h")
opro[1,] <- as.list(rep('log',n))
opro[2,] <- as.list(1:n)
opro[3,] <- as.list(-w)
opro[4,] <- as.list(rep(1,n))
opro[5,] <- as.list(rep(0,n))
P$scopt<- list(opro = opro)
P$dparam$intpnt_nl_tol_rel_gap <- rtol
}
else {
P <- list(sense = "min")
A0 <- Matrix::Matrix(0, m, n)
P$c <- c(rep(0,n), -w)
P$A <- cbind(A, A0)
P$bc <- rbind(rep(0, m), d)
P$bx <- rbind(c(rep(0, n), rep(-Inf,n)), rep(Inf, 2*n))
    P$F <- Matrix::sparseMatrix(c(seq(1, 3*n, by = 3), seq(3, 3*n, by = 3)),
                                c(1:n, (n+1):(2*n)), x = rep(1, 2*n))
P$g <- rep(c(0,1,0), n)
P$cones <- matrix(list("PEXP", 3, NULL), nrow = 3, ncol = n)
rownames(P$cones) <- c("type", "dim", "conepar")
P$dparam$intpnt_co_tol_rel_gap <- rtol
}
if(length(control)){
P$iparam <- control$iparam
P$dparam <- control$dparam
P$sparam <- control$sparam
}
z <- Rmosek::mosek(P, opts = list(verbose = verb))
status <- z$sol$itr$solsta
if (status != "OPTIMAL")
warning(paste("Solution status = ", status))
f <- z$sol$itr$suc
if(min(f) < -rtol)
warning("estimated mixing distribution has some negative values: consider reducing rtol")
else f[f < 0] <- 0
g <- as.vector(t(A) %*% (f * d))
list(f = f, g = g, status = status)
}
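# Note (an interpretive comment, not from the original source): both branches
# set up the same Kiefer-Wolfowitz NPMLE dual problem; Mosek < 9 handles the
# log terms through the separable convex (scopt) interface, while Mosek >= 9
# expresses them with primal exponential cones ("PEXP").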
|
helper_makepredictcall <- function(var, call, fun, key_attr)
{
fun_symbol <- substitute(fun)
fun_name <- as.character(fun_symbol)
if (as.character(call)[1L] == fun_name ||
(is.call(call) && identical(eval(call[[1L]]), eval(fun_symbol)))) {
at <- attributes(var)[key_attr]
call <- call[1L:2L]
call[names(at)] <- at
}
call
}
makepredictcall.bSpline2 <- function(var, call)
{
helper_makepredictcall(
var, call, fun = bSpline,
key_attr = c("degree", "knots", "Boundary.knots", "intercept",
"derivs", "integral")
)
}
makepredictcall.naturalSpline <- function(var, call)
{
helper_makepredictcall(
var, call, fun = naturalSpline,
key_attr = c("knots", "Boundary.knots", "intercept",
"derivs", "integral")
)
}
makepredictcall.dbs <- function(var, call)
{
helper_makepredictcall(
var, call, fun = dbs,
key_attr = c("degree", "knots", "Boundary.knots", "intercept", "derivs")
)
}
makepredictcall.ibs <- function(var, call)
{
helper_makepredictcall(
var, call, fun = ibs,
key_attr = c("degree", "knots", "Boundary.knots", "intercept")
)
}
makepredictcall.mSpline <- function(var, call)
{
helper_makepredictcall(
var, call, fun = mSpline,
key_attr = c("degree", "knots", "Boundary.knots", "intercept",
"periodic", "derivs", "integral")
)
}
makepredictcall.iSpline <- function(var, call)
{
helper_makepredictcall(
var, call, fun = iSpline,
key_attr = c("degree", "knots", "Boundary.knots", "intercept", "derivs")
)
}
makepredictcall.cSpline <- function(var, call)
{
helper_makepredictcall(
var, call, fun = cSpline,
key_attr = c("degree", "knots", "Boundary.knots", "intercept",
"derivs", "scale")
)
}
makepredictcall.bernsteinPoly <- function(var, call)
{
helper_makepredictcall(
var, call, fun = bernsteinPoly,
key_attr = c("degree", "Boundary.knots", "intercept",
"derivs", "integral")
)
}
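# Illustrative check (a hedged sketch): these methods let predict() rebuild the
# spline basis from the knots recorded at fit time rather than re-deriving them
# from newdata, e.g.
# fit <- lm(weight ~ bSpline(height, df = 5), data = women)
# predict(fit, newdata = data.frame(height = c(60, 70)))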
|
stan_jm <- function(formulaLong, dataLong, formulaEvent, dataEvent, time_var,
id_var, family = gaussian, assoc = "etavalue",
lag_assoc = 0, grp_assoc, epsilon = 1E-5,
basehaz = c("bs", "weibull", "piecewise"), basehaz_ops,
qnodes = 15, init = "prefit", weights,
priorLong = normal(autoscale=TRUE), priorLong_intercept = normal(autoscale=TRUE),
priorLong_aux = cauchy(0, 5, autoscale=TRUE), priorEvent = normal(autoscale=TRUE),
priorEvent_intercept = normal(autoscale=TRUE), priorEvent_aux = cauchy(autoscale=TRUE),
priorEvent_assoc = normal(autoscale=TRUE), prior_covariance = lkj(autoscale=TRUE),
prior_PD = FALSE, algorithm = c("sampling", "meanfield", "fullrank"),
adapt_delta = NULL, max_treedepth = 10L, QR = FALSE,
sparse = FALSE, ...) {
dots <- list(...)
if ("seed" %in% names(dots))
set.seed(dots$seed)
algorithm <- match.arg(algorithm)
basehaz <- match.arg(basehaz)
if (missing(basehaz_ops)) basehaz_ops <- NULL
if (missing(weights)) weights <- NULL
if (missing(id_var)) id_var <- NULL
if (missing(time_var)) time_var <- NULL
if (missing(grp_assoc)) grp_assoc <- NULL
if (!is.null(weights))
stop("'weights' are not yet implemented.")
if (QR)
stop("'QR' decomposition is not yet implemented.")
if (sparse)
stop("'sparse' option is not yet implemented.")
if (is.null(time_var))
stop("'time_var' must be specified.")
formulaLong <- validate_arg(formulaLong, "formula"); M <- length(formulaLong)
if (M > 3L)
stop("'stan_jm' is currently limited to a maximum of 3 longitudinal outcomes.")
dataLong <- validate_arg(dataLong, "data.frame", validate_length = M)
dataEvent <- as.data.frame(dataEvent)
ok_family_classes <- c("function", "family", "character")
ok_families <- c("binomial", "gaussian", "Gamma",
"inverse.gaussian", "poisson", "neg_binomial_2")
family <- validate_arg(family, ok_family_classes, validate_length = M)
family <- lapply(family, validate_famlink, ok_families)
ok_assoc_classes <- c("NULL", "character")
assoc <- validate_arg(assoc, ok_assoc_classes, validate_length = M)
priorLong <- broadcast_prior(priorLong, M)
priorLong_intercept <- broadcast_prior(priorLong_intercept, M)
priorLong_aux <- broadcast_prior(priorLong_aux, M)
stanfit <- stan_jm.fit(formulaLong = formulaLong, dataLong = dataLong,
formulaEvent = formulaEvent, dataEvent = dataEvent,
time_var = time_var, id_var = id_var, family = family,
assoc = assoc, lag_assoc = lag_assoc, grp_assoc = grp_assoc,
epsilon = epsilon, basehaz = basehaz, basehaz_ops = basehaz_ops,
qnodes = qnodes, init = init, weights = weights,
priorLong = priorLong,
priorLong_intercept = priorLong_intercept,
priorLong_aux = priorLong_aux,
priorEvent = priorEvent,
priorEvent_intercept = priorEvent_intercept,
priorEvent_aux = priorEvent_aux,
priorEvent_assoc = priorEvent_assoc,
prior_covariance = prior_covariance, prior_PD = prior_PD,
algorithm = algorithm, adapt_delta = adapt_delta,
max_treedepth = max_treedepth, QR = QR, sparse = sparse, ...)
if (algorithm != "optimizing" && !is(stanfit, "stanfit")) return(stanfit)
y_mod <- attr(stanfit, "y_mod")
e_mod <- attr(stanfit, "e_mod")
a_mod <- attr(stanfit, "a_mod")
cnms <- attr(stanfit, "cnms")
flevels <- attr(stanfit, "flevels")
assoc <- attr(stanfit, "assoc")
id_var <- attr(stanfit, "id_var")
basehaz <- attr(stanfit, "basehaz")
grp_stuff <- attr(stanfit, "grp_stuff")
prior_info <- attr(stanfit, "prior_info")
stanfit <- drop_attributes(stanfit, "y_mod", "e_mod", "a_mod", "cnms",
"flevels", "assoc", "id_var", "basehaz",
"grp_stuff", "prior_info")
terms <- c(fetch(y_mod, "terms"), list(terms(e_mod$mod)))
n_yobs <- fetch_(y_mod, "x", "N")
n_grps <- sapply(flevels, n_distinct)
n_subjects <- e_mod$Npat
fit <- nlist(stanfit, formula = c(formulaLong, formulaEvent), family,
id_var, time_var, weights, qnodes, basehaz, assoc,
M, cnms, flevels, n_grps, n_subjects, n_yobs, epsilon,
algorithm, terms, glmod = y_mod, survmod = e_mod,
assocmod = a_mod, grp_stuff, dataLong, dataEvent,
prior.info = prior_info, stan_function = "stan_jm",
call = match.call(expand.dots = TRUE))
out <- stanmvreg(fit)
return(out)
}
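# Illustrative usage (a hedged sketch following the rstanarm documentation,
# which ships the pbcLong/pbcSurv example data):
# f1 <- stan_jm(formulaLong = logBili ~ year + (1 | id), dataLong = pbcLong,
#               formulaEvent = survival::Surv(futimeYears, death) ~ sex + trt,
#               dataEvent = pbcSurv, time_var = "year",
#               chains = 1, iter = 1000, seed = 12345)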
|
adjectives <- c(
'Afrikaner', 'Aristotelian', 'Arthurian', 'Australian', 'Bahraini',
'Bohemian', 'Brethren', 'Ethiopian', 'Franciscan', 'Honduran',
'Judeo-Christian', 'Manchurian', 'Mexican', 'Mosaic', 'Namibian',
'Oceanic', 'Ottoman', 'Proctor', 'Qatari', 'Salvadoran',
'Swiss', 'Terran', 'Tudor', 'Venezuelan', 'abroad',
'absorbing', 'abstract', 'academic', 'accelerated', 'accented',
'accountant', 'acquainted', 'acute', 'addicting', 'addictive',
'adjustable', 'admired', 'adult', 'adverse', 'advised',
'aerosol', 'afraid', 'aggravated', 'aggressive', 'agreeable',
'alienate', 'aligned', 'all-round', 'alleged', 'almond',
'alright', 'altruistic', 'ambient', 'ambivalent', 'amiable',
'amino', 'amorphous', 'amused', 'anatomical', 'ancestral',
'angelic', 'angrier', 'answerable', 'anti-gay', 'antiquarian',
'antiretroviral', 'appellate', 'applicable', 'apportioned', 'approachable',
'appropriated', 'arabic', 'archer', 'aroused', 'arrested',
'assertive', 'assigned', 'athletic', 'atrocious', 'attained',
'authoritarian', 'autobiographical', 'avaricious', 'avocado', 'awake',
  'awesome', 'backstage', 'backwoods', 'balding', 'bandaged',
'banded', 'banned', 'barreled', 'battle', 'beaten',
'begotten', 'beguiled', 'bellied', 'belted', 'beneficent',
'besieged', 'betting', 'big-money', 'biggest', 'biochemical',
'bipolar', 'blackened', 'blame', 'blessed', 'blindfolded',
'bloat', 'blocked', 'blooded', 'blue-collar', 'blushing',
'boastful', 'bolder', 'bolstered', 'bonnie', 'bored',
'boundary', 'bounded', 'bounding', 'branched', 'brawling',
'brazen', 'breeding', 'bridged', 'brimming', 'brimstone',
'broadest', 'broiled', 'broker', 'bronze', 'bruising',
'buffy', 'bullied', 'bungling', 'burial', 'buttery',
'cancerous', 'candied', 'canonical', 'cantankerous', 'cardinal',
'carefree', 'caretaker', 'casual', 'cathartic', 'causal',
'chapel', 'characterized', 'charcoal', 'cheeky', 'cherished',
'chipotle', 'chirping', 'chivalrous', 'circumstantial', 'civic',
'civil', 'civilised', 'clanking', 'clapping', 'claptrap',
'classless', 'cleansed', 'cleric', 'cloistered', 'codified',
'colloquial', 'colour', 'combat', 'combined', 'comely',
'commissioned', 'commonplace', 'commuter', 'commuting', 'comparable',
'complementary', 'compromising', 'conceding', 'concentrated', 'conceptual',
'conditioned', 'confederate', 'confident', 'confidential', 'confining',
'confuse', 'congressional', 'consequential', 'conservative', 'constituent',
'contaminated', 'contemporaneous', 'contraceptive', 'convertible', 'convex',
'cooked', 'coronary', 'corporatist', 'correlated', 'corroborated',
'cosmic', 'cover', 'crash', 'crazier', 'craziest',
'crypto', 'culminate', 'cushioned', 'dandy', 'dashing',
'dazzled', 'decreased', 'decrepit', 'dedicated', 'defaced',
'defective', 'defenseless', 'deluded', 'deodorant', 'departed',
'depress', 'designing', 'despairing', 'destitute', 'detective',
'determined', 'devastating', 'deviant', 'devilish', 'devoted',
'diabetic', 'diagonal', 'dictated', 'didactic', 'differentiated',
'diffused', 'dirtier', 'disabling', 'disconnected', 'discovered',
'disdainful', 'diseased', 'disfigured', 'disheartened', 'disheveled',
'disillusioned', 'disparate', 'dissident', 'doable', 'doctrinal',
'doing', 'dotted', 'double-blind', 'downbeat', 'dozen',
'draining', 'draught', 'dread', 'dried', 'dropped',
'drowned', 'dulled', 'duplicate', 'eaten', 'echoing',
'economical', 'elaborated', 'elastic', 'elective', 'electoral',
'elven', 'embryo', 'emerald', 'emergency', 'emissary',
'emotional', 'employed', 'enamel', 'encased', 'encrusted',
'endangered', 'engraved', 'engrossing', 'enlarged', 'enlisted',
'enlivened', 'ensconced', 'entangled', 'enthralling', 'entire',
'envious', 'eradicated', 'eroded', 'esoteric', 'essential',
'evaporated', 'ever-present', 'evergreen', 'everlasting', 'exacting',
'exasperated', 'excess', 'exciting', 'executable', 'existent',
'exonerated', 'exorbitant', 'exotic', 'exponential', 'export',
'extraordinary', 'exultant', 'exulting', 'facsimile', 'fading',
'fainter', 'faith-based', 'fallacious', 'faltering', 'famous',
'fancier', 'fast-growing', 'fated', 'favourable', 'fearless',
'feathered', 'fellow', 'fermented', 'ferocious', 'fiddling',
'filling', 'firmer', 'fitted', 'flammable', 'flawed',
'fledgling', 'fleshy', 'flexible', 'flickering', 'floral',
'flowering', 'flowing', 'foggy', 'folic', 'foolhardy',
'foolish', 'footy', 'forehand', 'forked', 'formative',
'formulaic', 'foul-mouthed', 'fractional', 'fragrant', 'fraudulent',
'freakish', 'freckled', 'freelance', 'freight', 'fresh',
'fretted', 'frugal', 'fulfilling', 'fuming', 'funded',
'funny', 'furry', 'garbled', 'gathered', 'gendered',
'geologic', 'geometric', 'gibberish', 'gilded', 'ginger',
'glare', 'glaring', 'gleaming', 'glorified', 'glorious',
'goalless', 'gold-plated', 'goody', 'grammatical', 'grande',
'grateful', 'gratuitous', 'graven', 'greener', 'grinding',
'grizzly', 'groaning', 'grudging', 'guaranteed', 'gusty',
'gypsy', 'half-breed', 'hand-held', 'handheld', 'hands-off',
'hard-pressed', 'harlot', 'healing', 'healthier', 'healthiest',
'heart', 'heart-shaped', 'heathen', 'hedonistic', 'heralded',
'herbal', 'high-density', 'high-performance', 'high-res', 'high-yield',
'hissy', 'hitless', 'holiness', 'homesick', 'homosexual',
'honorable', 'hooded', 'hopeless', 'horrendous', 'horrible',
'hot-button', 'huddled', 'human', 'humbling', 'humid',
'humiliating', 'hypnotized', 'idealistic', 'idiosyncratic', 'ignited',
'illustrated', 'illustrative', 'imitated', 'immense', 'immersive',
'immigrant', 'immoral', 'impassive', 'impressionable', 'improbable',
'impulsive', 'in-between', 'in-flight', 'inattentive', 'inbound',
'inbounds', 'incalculable', 'incomprehensible', 'indefatigable', 'indigenous',
'indigo', 'indiscriminate', 'indomitable', 'inert', 'inflate',
'inform', 'inheriting', 'injured', 'injurious', 'inking',
'inoffensive', 'insane', 'insensible', 'insidious', 'insincere',
'insistent', 'insolent', 'insufferable', 'intemperate', 'interdependent',
'interesting', 'interfering', 'intern', 'interpreted', 'intersecting',
'intolerable', 'intolerant', 'intuitive', 'irresolute', 'irritate',
'japan', 'jealous', 'jerking', 'joining', 'joint',
'journalistic', 'joyful', 'keyed', 'knowing', 'lacklustre',
'laden', 'lagging', 'lamented', 'laughable', 'layered',
'leather', 'leathern', 'leery', 'left-footed', 'legible',
'leisure', 'lessening', 'liberating', 'life-size', 'lifted',
'lightest', 'limitless', 'listening', 'literary', 'liver',
'livid', 'lobster', 'locked', 'long-held', 'long-lasting',
'long-running', 'long-suffering', 'loudest', 'loveliest', 'low-budget',
'low-carb', 'lowering', 'lucid', 'luckless', 'lusty',
'luxurious', 'magazine', 'maniac', 'manmade', 'maroon',
'mastered', 'mated', 'material', 'materialistic', 'meaningful',
'measuring', 'mediaeval', 'medical', 'meditated', 'medley',
'melodic', 'memorable', 'memorial', 'metabolic', 'metallic',
'metallurgical', 'metering', 'midair', 'midterm', 'midway',
'mighty', 'migrating', 'mind-blowing', 'mind-boggling', 'minor',
'mirrored', 'misguided', 'misshapen', 'mitigated', 'mixed',
'modernized', 'molecular', 'monarch', 'monastic', 'morbid',
'motley', 'motorized', 'mounted', 'multi-million', 'multidisciplinary',
'muscled', 'muscular', 'muted', 'mysterious', 'mythic',
'nail-biting', 'natural', 'nauseous', 'negative', 'networked',
'neurological', 'neutered', 'newest', 'night', 'nitrous',
'no-fly', 'noncommercial', 'nonsense', 'north', 'nuanced',
'occurring', 'offensive', 'oldest', 'oncoming', 'one-eyed',
'one-year', 'onstage', 'onward', 'opaque', 'open-ended',
'operating', 'opportunist', 'opposing', 'opt-in', 'ordinate',
'outdone', 'outlaw', 'outsized', 'overboard', 'overheated',
'oversize', 'overworked', 'oyster', 'paced', 'panting',
'paralyzed', 'paramount', 'parental', 'parted', 'partisan',
'passive', 'pastel', 'patriot', 'peacekeeping', 'pedestrian',
'peevish', 'penal', 'penned', 'pensive', 'perceptual',
'perky', 'permissible', 'pernicious', 'perpetuate', 'perplexed',
'pervasive', 'perverted', 'petrochemical', 'philosophical', 'picturesque',
'pillaged', 'piped', 'piquant', 'pitching', 'plausible',
'pliable', 'plumb', 'politician', 'polygamous', 'poorest',
'pornographic', 'portmanteau', 'posed', 'positive', 'possible',
'postpartum', 'prank', 'pre-emptive', 'precocious', 'predicted',
'premium', 'preparatory', 'prerequisite', 'prescient', 'preserved',
'presidential', 'pressed', 'pressurized', 'presumed', 'prewar',
'priced', 'pricier', 'primal', 'primer', 'primetime',
'printed', 'private', 'pro-abortion', 'problem', 'procedural',
'process', 'prodigious', 'professional', 'programmed', 'progressive',
'prolific', 'promising', 'promulgated', 'pronged', 'proportionate',
'protracted', 'pulled', 'pulsed', 'purgatory', 'quick',
'raped', 'rapid-fire', 'raunchy', 'razed', 'reactive',
'readable', 'realizing', 'recognised', 'recovering', 'recurrent',
'recycled', 'redeemable', 'reflecting', 'regal', 'registering',
'reliable', 'reminiscent', 'remorseless', 'removable', 'renewable',
'repeating', 'repellent', 'reserve', 'resigned', 'respectful',
'rested', 'restrict', 'resultant', 'retaliatory', 'retiring',
'revelatory', 'reverend', 'reversing', 'revolving', 'ridiculous',
'right-hand', 'ringed', 'risque', 'robust', 'roomful',
'rotating', 'roused', 'rubber', 'run-down', 'running',
'runtime', 'rustling', 'safest', 'salient', 'sanctioned',
'saute', 'saved', 'scandalized', 'scarlet', 'scattering',
'sceptical', 'scheming', 'schizophrenic', 'scoundrel', 'scratched',
'scratchy', 'scrolled', 'seated', 'second-best', 'segregated',
'self-taught', 'semiautomatic', 'senior', 'sensed', 'sentient',
'sexier', 'shadowy', 'shaken', 'shaker', 'shameless',
'shaped', 'shiny', 'shipped', 'shivering', 'shoestring',
'short', 'short-lived', 'signed', 'simplest', 'simplistic',
'sizable', 'skeleton', 'skinny', 'skirting', 'skyrocketed',
'slamming', 'slanting', 'slapstick', 'sleek', 'sleepless',
'sleepy', 'slender', 'slimmer', 'smacking', 'smokeless',
'smothered', 'smouldering', 'snuff', 'socialized', 'solid-state',
'sometime', 'sought', 'spanking', 'sparing', 'spattered',
'specialized', 'specific', 'speedy', 'spherical', 'spiky',
'spineless', 'sprung', 'squint', 'stainless', 'standing',
'starlight', 'startled', 'stately', 'statewide', 'stereoscopic',
'sticky', 'stimulant', 'stinky', 'stoked', 'stolen',
'storied', 'strained', 'strapping', 'strengthened', 'stubborn',
'stylized', 'suave', 'subjective', 'subjugated', 'subordinate',
'succeeding', 'suffering', 'summary', 'sunset', 'sunshine',
'supernatural', 'supervisory', 'supply-side', 'surrogate', 'suspended',
'suspenseful', 'swarthy', 'sweating', 'sweeping', 'swinging',
'swooning', 'sympathize', 'synchronized', 'synonymous', 'synthetic',
'tailed', 'tallest', 'tangible', 'tanked', 'tarry',
'technical', 'tectonic', 'telepathic', 'tenderest', 'territorial',
'testimonial', 'theistic', 'thicker', 'third-world', 'threatening',
'tight-lipped', 'timed', 'timely', 'timid', 'torrent',
'totalled', 'tougher', 'traditional', 'transformed', 'transgendered',
'trapped', 'traumatic', 'traveled', 'traverse', 'treated',
'trial', 'trunk', 'trusting', 'trying', 'twisted',
'two-lane', 'tyrannical', 'unaided', 'unassisted', 'unassuming',
'unattractive', 'uncapped', 'uncomfortable', 'uncontrolled', 'uncooked',
'uncooperative', 'underground', 'undersea', 'undisturbed', 'unearthly',
'uneasy', 'unequal', 'unfazed', 'unfinished', 'unforeseen',
'unforgivable', 'unidentified', 'unimaginative', 'uninspired', 'unintended',
'uninvited', 'universal', 'unmasked', 'unorthodox', 'unparalleled',
'unpleasant', 'unprincipled', 'unread', 'unreasonable', 'unregulated',
'unreliable', 'unremitting', 'unsafe', 'unsanitary', 'unsealed',
'unsuccessful', 'unsupervised', 'untimely', 'unwary', 'unwrapped',
'uppity', 'upstart', 'useless', 'utter', 'valiant',
'valid', 'valued', 'vanilla', 'vaulting', 'vaunted',
'veering', 'vegetative', 'vented', 'verbal', 'verifying',
'veritable', 'versed', 'vinyl', 'virgin', 'visceral',
'visual', 'voluptuous', 'walk-on', 'wanton', 'warlike',
'washed', 'waterproof', 'waved', 'weakest', 'well-bred',
'well-chosen', 'well-informed', 'wetting', 'wheeled', 'whirlwind',
'widen', 'widening', 'widow', 'willful', 'willing',
'winnable', 'winningest', 'wireless', 'wistful', 'woeful',
'wooded', 'woodland', 'wordless', 'workable', 'worldly',
'worldwide', 'worst-case', 'worsted', 'worthless'
)
words <- c(
'exemplificative', 'extracorpuscular', 'unsultry', 'semaphorist', 'retrospect',
'cibarian', 'observantly', 'unbusiness', 'singlehanded', 'regrinder',
'carapacic', 'presecure', 'pored', 'channelbill', 'nondiabolic',
'autoconvection', 'vicianin', 'featureful', 'xiphosurous', 'gundy',
'tumefaction', 'autoexcitation', 'sterigmatic', 'nonedible', 'codpitchings',
'valhall', 'vale', 'mestiza', 'membral', 'vapographic',
'greekless', 'goldbeating', 'oralization', 'uncertitude', 'irrationally',
'unagile', 'xyloquinone', 'solidism', 'propleural', 'shielding',
'laicity', 'clockmaking', 'tartar', 'spongeous', 'vocalness',
'rancor', 'bootlessness', 'holluschick', 'aethionema', 'precooler',
'rhizocarp', 'son', 'horrisonant', 'tubik', 'machairodontidae',
'euphon', 'chiromantical', 'proforeign', 'compacted', 'unpartialness',
'gastroatrophia', 'condylura', 'unverifiable', 'hesione', 'gula',
'preacquaintance', 'antiepiscopal', 'orbific', 'untrustworthy', 'tinampipi',
'flyball', 'hazelnut', 'unbendable', 'luciferidae', 'jeanne',
'aggrandizable', 'alogically', 'cerulein', 'gelotoscopy', 'aggregant',
'restaur', 'paurometabolic', 'oversweeten', 'sheathy', 'postischial',
'luminaire', 'trip', 'hysterogen', 'angelina', 'counterattestation',
'supercensure', 'planting', 'invector', 'mitrate', 'chaogenous',
'becarpet', 'passiflora', 'awikiwiki', 'apiarist', 'predivide',
'sidebone', 'harpooner', 'unrun', 'curiology', 'padcloth',
'aglaozonia', 'poulticewise', 'condyloid', 'zoophile', 'anasarcous',
'blotless', 'monogenetic', 'subalternity', 'nephrocystitis', 'sitiology',
'outwit', 'bloodwood', 'potamogetonaceae', 'cheeter', 'vaccina',
'trust', 'sornari', 'scoke', 'outroar', 'smashup',
'hyalopsite', 'downbeat', 'umbrous', 'decimation', 'rondo',
'gastropneumatic', 'noninquiring', 'hymenopterologist', 'overdelightedly', 'strickenly',
'cowherb', 'chromatoscope', 'deprivation', 'sproutling', 'canonicals',
'alveolate', 'anhysteretic', 'pane', 'olivil', 'balaenidae',
'untalented', 'odorator', 'rebear', 'reannoyance', 'vampirism',
'theftproof', 'immarcibleness', 'propenyl', 'fitting', 'isobathytherm',
'tariffism', 'leptometer', 'feminist', 'riddling', 'phantasmatic',
'radiotelephone', 'pitapat', 'linenette', 'ricksha', 'cosectional',
'nonchalky', 'misact', 'misencourage', 'maidling', 'iodotherapy',
'polytopical', 'berake', 'microbial', 'treehair', 'carpocapsa',
'rhapsodism', 'woolworker', 'numberer', 'nonparallel', 'autoantibody',
'motif', 'intramatrical', 'ditheism', 'susu', 'jungle',
'scolytoid', 'overspeed', 'unsuspiciousness', 'staffed', 'gibber',
'prestore', 'moistish', 'iconophilism', 'sanjay', 'supracoralline',
'amidophenol', 'quadricentennial', 'randem', 'counterblast', 'emetically',
'godling', 'weenong', 'unscrewed', 'transcurrent', 'vaginolabial',
'myelospongium', 'conservatorship', 'scatteredly', 'excursory', 'unaccredited',
'slingstone', 'pterygophore', 'palluites', 'rink', 'fungin',
'publicanism', 'torbanitic', 'jubbah', 'snottiness', 'berginize',
'undutiful', 'floatless', 'overfavor', 'sleer', 'feria',
'helminthosporoid', 'unstation', 'adnoun', 'hydatopyrogenic', 'glossoscopy',
'dragbar', 'solidaric', 'bewilderment', 'freshmanic', 'ramal',
'infusoriform', 'bohor', 'strophosis', 'bisaccate', 'brunfelsia',
'opisthognathidae', 'luxation', 'revisitant', 'willowish', 'stercorous',
'geomorphy', 'defiguration', 'syncephalus', 'troublesomely', 'podophthalma',
'malaxation', 'aromatites', 'semita', 'sextennial', 'mart',
'paleozoology', 'microsporic', 'sickly', 'retransformation', 'agrostography',
'urination', 'itza', 'mucose', 'hydrosulphite', 'araucano',
'unillustrative', 'yellows', 'glucosine', 'lina', 'yearday',
'overcompensatory', 'bacteriologically', 'thermometric', 'broadbrim', 'jabberingly',
'sociative', 'quintadene', 'heinous', 'hygrograph', 'extrality',
'frightless', 'leanly', 'submuscular', 'lymphotomy', 'subvaluation',
'subimago', 'elaeagnaceae', 'overtalker', 'tittupy', 'melania',
'refinement', 'dissyllabify', 'pogonologist', 'ophiology', 'bronchotomist',
'unmotivatedly', 'lask', 'undersitter', 'antichoromanic', 'haggardness',
'consociative', 'palaeogenetic', 'uncurl', 'parabulic', 'cottus',
'admonitionist', 'stroking', 'tributariness', 'entresol', 'oversubscriber',
'humanhood', 'necromantically', 'intramyocardial', 'havanese', 'arboriculture',
'comicotragedy', 'chronogrammatical', 'skulled', 'cremator', 'osmosis',
'unspecious', 'morus', 'continentalize', 'auricle', 'janitorship',
'semibay', 'semigenuflection', 'aphid', 'hyperphenomena', 'hydroximic',
'preguilty', 'disinflation', 'evertile', 'flintily', 'benzothiazoline',
'unmomentary', 'phylonepionic', 'disregardfulness', 'heterochromy', 'nonmythological',
'centriffed', 'metasome', 'ethyl', 'noncallable', 'deadly',
'alala', 'teri', 'timeful', 'peasantize', 'dactylis',
'overcomer', 'beatster', 'crossly', 'pigflower', 'lech',
'takar', 'jobless', 'unibivalent', 'unticketed', 'favorless',
'lumachel', 'fastidiosity', 'platyrrhina', 'nonexculpation', 'medicotopographic',
'spruer', 'spheroidicity', 'coagulable', 'copiapite', 'multimotored',
'unijugous', 'trophodynamic', 'unchaired', 'orthopath', 'propylitic',
'unlevied', 'counterquery', 'thermopalpation', 'museography', 'overcondense',
'trigonodont', 'overcaring', 'baseboard', 'accentual', 'sulphotungstic',
'unchallenged', 'inspirer', 'floatsman', 'epitasis', 'unsentimentality',
'otocranial', 'gumflower', 'nondecadent', 'incunabulist', 'mistful',
'plethoretic', 'bismarine', 'titillative', 'counterscoff', 'cradle',
'coppy', 'etna', 'pneumonectomy', 'vowmaker', 'flinchingly',
'outoven', 'labidophorous', 'sugarplum', 'hypocrital', 'criminalness',
'pseudalveolar', 'negroism', 'autobiographer', 'eutomous', 'hypochloric',
'snaringly', 'guileless', 'acromiohyoid', 'pathopsychology', 'unthwacked',
'latchman', 'inthronize', 'hydronephrosis', 'gelong', 'autonomy',
'indusial', 'levis', 'unbloomed', 'abstentionist', 'evade',
'redemise', 'pounder', 'unappraised', 'hyperdelicate', 'teapoy',
'phytographist', 'coinstantaneousness', 'promote', 'unintrenchable', 'carvol',
'cystopteris', 'ailuro', 'strifemaking', 'annelidous', 'witherite',
'hemianopia', 'phantasia', 'postwise', 'lienitis', 'kathleen',
'parallelopipedon', 'oroanal', 'acroparalysis', 'unfelted', 'demirobe',
'lapidarian', 'faveolate', 'propositus', 'conning', 'gobo',
'effeminate', 'njave', 'nectrioidaceae', 'snapbag', 'semihiant',
'jumpy', 'epiploon', 'stumblingly', 'spiceland', 'jocosely',
'epiphytical', 'outvoyage', 'kasha', 'otidiphaps', 'mike',
'standpipe', 'basifugal', 'externum', 'tealery', 'seminarian',
'podatus', 'hageen', 'synectic', 'imbitter', 'pteraspis',
'shavester', 'reseda', 'lounging', 'milleporine', 'teutophile',
'subsultorily', 'phthartolatrae', 'windwardness', 'pappy', 'uncondensing',
'parasyphilis', 'allagophyllous', 'agrestic', 'binh', 'fightwite',
'blennocele', 'superficies', 'parallelwise', 'underchancellor', 'undeteriorated',
'supposableness', 'uranospinite', 'preobtainable', 'opportuneless', 'unstecked',
'analcite', 'untrinitarian', 'bartsia', 'physiosophic', 'boatsman',
'pruriousness', 'profit', 'transpierce', 'lotment', 'semipreservation',
'fallectomy', 'macrophysics', 'antisquama', 'culicidae', 'cycadophyta',
'preinterest', 'trithionic', 'coagulator', 'danian', 'acrux',
'nonremembrance', 'stockfather', 'amphibolitic', 'sabaoth', 'phytotopography',
'tetragonally', 'knowledgement', 'unmirthful', 'genicular', 'diuranate',
'violaceous', 'micropathology', 'polypragmacy', 'gossipred', 'clamcracker',
'bankrider', 'unnavigated', 'silicone', 'chamberleted', 'cellulomonas',
'concubitous', 'conjugatae', 'branchiogenous', 'monogenesis', 'tabefy',
'recommendably', 'portail', 'grue', 'enwallow', 'hexastichon',
'millihenry', 'illegitimatize', 'preapprise', 'fantasticism', 'sherryvallies',
'achromatinic', 'hemeralope', 'vermouth', 'exeat', 'proviso',
'gowl', 'cissampelos', 'endemism', 'discommendation', 'deratization',
'meliceric', 'underclearer', 'equidistantly', 'precapture', 'nyxis',
'duckhouse', 'jointedly', 'chortler', 'reconquer', 'leewardly',
'macronucleus', 'dashed', 'pau', 'unspoused', 'unbarreled',
'presuppress', 'pyovesiculosis', 'nesotragus', 'roger', 'epicoele',
'intraoctave', 'quisquilious', 'coachman', 'proteose', 'dolesomely',
'catasetum', 'steatopathic', 'detoxication', 'nagmaal', 'subbrachycephalic',
'conjugative', 'desolative', 'chiseler', 'cleidoscapular', 'thrombocyte',
'unpaired', 'albertist', 'endostracum', 'overflowingly', 'oxyaenidae',
'gerate', 'moriche', 'ophioglossum', 'retransportation', 'chondromucoid',
'uncancellable', 'uncanned', 'unpredestined', 'xyst', 'scena',
'undealable', 'introspection', 'forgetfulness', 'autopathic', 'psychichthys',
'untasked', 'flummydiddle', 'autoproteolysis', 'moslemin', 'conformal',
'porphyrogenitism', 'misleading', 'marshfire', 'unwinning', 'unpaintedly',
'headed', 'unstone', 'nonsanctity', 'palaeophytological', 'bregmata',
'cloveroot', 'nonrepresentative', 'peristyle', 'taurocol', 'florisugent',
'noncoagulation', 'pneumonopleuritis', 'loppard', 'united', 'nickelization',
'vaporiform', 'pedioecetes', 'heterochromosome', 'enrolment', 'nonembezzlement',
'cliffside', 'transmutive', 'arthroendoscopy', 'bromoaurate', 'pleurovisceral',
'cyperaceous', 'trogoniformes', 'companionably', 'bartramiaceae', 'gastroatrophia',
'pegmatite', 'initiator', 'disdiaclast', 'thrombocyst', 'admonitor',
'spinomuscular', 'bewhig', 'microerg', 'mucorioid', 'cerebration',
'nandow', 'vaporescent', 'landscape', 'hippological', 'dispositively',
'communalizer', 'argillite', 'platopic', 'chough', 'pesteringly',
'cocytean', 'copable', 'oysterish', 'becap', 'retracted',
'polydenominational', 'retromaxillary', 'phantasmagorical', 'cowhiding', 'blockpate',
'fluobromide', 'isokontae', 'shellum', 'glyphographic', 'pyohemothorax',
'nonbending', 'unintimate', 'troutiness', 'arbitrariness', 'collegiate',
'rebelly', 'hariolate', 'etesian', 'foreshoulder', 'trivariant',
'trimonthly', 'hollandaise', 'streptotrichal', 'uncriticised', 'barrabkie',
'gurgeon', 'anthropophagous', 'monotonically', 'enchair', 'pherecratean',
'homoeotopy', 'tautologism', 'shielded', 'girlishly', 'nonsuctorial',
'obolaria', 'copatriot', 'noemi', 'protozoic', 'gratia',
'allotheria', 'pontify', 'potdar', 'motettist', 'reaffirm',
'caesarship', 'hyperlipoidemia', 'accuracy', 'ortive', 'omnivoracious',
'vitiligoidea', 'papyr', 'overargue', 'unamiably', 'casuality',
'sarcode', 'herpetography', 'parapsychosis', 'successional', 'bivalved',
'slash', 'stimulator', 'underdrawers', 'angelique', 'thimblerigging',
'redelay', 'signary', 'tauromorphous', 'oligolactia', 'provincially',
'knighthead', 'prodigiously', 'quadrilobed', 'procivilian', 'neencephalon',
'potamological', 'rhodanic', 'noncommunicable', 'renovate', 'cometlike',
'mutual', 'frontless', 'unapt', 'contortional', 'glauconiidae',
'gustativeness', 'chaitya', 'biogenetic', 'tenonian', 'product',
'peripetasma', 'thlinget', 'thoughtlessness', 'machineful', 'lined',
'spirity', 'calyceraceous', 'homologoumena', 'tremble', 'berrier',
'equidistance', 'commissive', 'cicatricula', 'firebird', 'gyroceracone',
'meresman', 'anonol', 'parasuchia', 'microgamy', 'dominie',
'idiot', 'copolymerize', 'lustfully', 'micropodiformes', 'tractory',
'unreleasable', 'ribble', 'uncorruptible', 'macadamia', 'idleheaded',
'tetrabasic', 'tribromoethanol', 'poetdom', 'crusily', 'mackenboy',
'poriness', 'stool', 'pantheonize', 'ethnotechnography', 'nataka',
'boltstrake', 'subobtuse', 'belonging', 'felonweed', 'otocephaly',
'pseudoclassicism', 'inexpertness', 'interolivary', 'coleopterological', 'sauropodous',
'spontaneousness', 'canephoros', 'ennui', 'mollifyingness', 'coregnant',
'neurotherapist', 'electrometrically', 'umbelwort', 'uloid', 'campoo',
'seedsman', 'peripety', 'decaesarize', 'unhammered', 'theatrophonic',
'unaccostable', 'pathosocial', 'conestoga', 'isopleuran', 'grudgeful',
'spiritualization', 'obreptitious', 'gnathostoma', 'supplementation', 'silipan',
'underbasal', 'trisyllabism', 'reconquer', 'pennopluma', 'procollegiate',
'unreadiness', 'send', 'knowledge', 'immigratory', 'remix',
'unsurmountably', 'trochophore', 'salvarsan', 'boarstaff', 'pulmocardiac',
'zonure', 'chytridiose', 'postembryonic', 'dockhouse', 'noology',
'trihedral', 'stigmatize', 'constupration', 'nongravitation', 'aponeurology',
'choler', 'arthrosis', 'myrmecophagine', 'hydrosarcocele', 'prebronchial',
'antieducational', 'equidurable', 'unsubscribing', 'unsophistically', 'recessively',
'madreporacea', 'excruciating', 'bacteriopsonin', 'theanthropy', 'photomacrograph',
'aphidozer', 'laurite', 'bump', 'dogmatics', 'myosurus',
'misstay', 'platyrrhine', 'unsolar', 'borehole', 'jinny',
'vangueria', 'ponerid', 'kibitzer', 'tar', 'inhabitant',
'phaeophyceous', 'allotropical', 'anaesthesia', 'unaffronted', 'pyrogenation',
'incorrupted', 'tumultuary', 'unattaining', 'tibiotarsus', 'grasper',
'myiarchus', 'clotho', 'homonymy', 'dystrophy', 'overharden',
'outblush', 'abacinate', 'unconditioned', 'ultradignified', 'crocin',
'moralize', 'subdecuple', 'scratchwork', 'scrollery', 'knowingness',
'siderology', 'beduchess', 'cadmia', 'unbay', 'unportioned',
'berkeleyite', 'ctenophorous', 'undersluice', 'recordative', 'unsubsidiary',
'semiopaque', 'pseudoarchaist', 'carpocephalum', 'opisthognathidae', 'multiplier',
'dressy', 'figent', 'avunculate', 'alloisomer', 'overdistantly',
'precipitately', 'unharmoniously', 'myelospongium', 'galravage', 'penniveined',
'intrusionism', 'redepreciation', 'functionary', 'basipterygium', 'remissibility',
'chaus', 'opobalsam', 'introsentient', 'inconclusiveness', 'sekhwan',
'craber', 'bulgarophil', 'sherrymoor', 'celliferous', 'homological',
'malling', 'unkoshered', 'unseafaring', 'overleap', 'unshapen',
'unfostered', 'promodernistic', 'comfortroot', 'national', 'palaeophytic',
'pseudofilaria', 'timeous', 'pseudoglanders', 'turnplow', 'alvine',
'cocainomania', 'itonididae', 'electrotelegraphy', 'hypnotizability', 'monophylite'
)
nouns <- c(
'Armour', 'Barrymore', 'Cabot', 'Catholicism', 'Chihuahua',
'Christianity', 'Easter', 'Frenchman', 'Lowry', 'Mayer',
'Orientalism', 'Pharaoh', 'Pueblo', 'Pullman', 'Rodeo',
'Saturday', 'Sister', 'Snead', 'Syrah', 'Tuesday',
'Woodward', 'abbey', 'abduction', 'absence', 'absorption',
'abstinence', 'absurdity', 'abundance', 'abuser', 'acceptance',
'accessibility', 'accommodation', 'accomplice', 'accountability', 'accounting',
'accreditation', 'accuracy', 'acquiescence', 'acreage', 'actress',
'actuality', 'adage', 'adaptation', 'adherence', 'adjustment',
'adoption', 'adultery', 'advancement', 'advert', 'advertisement',
'advertising', 'advice', 'aesthetics', 'affinity', 'aggression',
'agriculture', 'aircraft', 'airtime', 'allegation', 'allegiance',
'allegory', 'allergy', 'allies', 'alligator', 'allocation',
'allotment', 'altercation', 'ambulance', 'ammonia', 'anatomy',
'anemia', 'ankle', 'announcement', 'annoyance', 'annuity',
'anomaly', 'anthropology', 'anxiety', 'apartheid', 'apologise',
'apostle', 'apparatus', 'appeasement', 'appellation', 'appendix',
'applause', 'appointment', 'appraisal', 'archery', 'archipelago',
'architecture', 'ardor', 'arrears', 'arrow', 'artisan',
'artistry', 'ascent', 'assembly', 'assignment', 'association',
'asthma', 'atheism', 'attacker', 'attraction', 'attractiveness',
'auspices', 'authority', 'avarice', 'aversion', 'aviation',
'babbling', 'backlash', 'baker', 'ballet', 'balls',
'banjo', 'baron', 'barrier', 'barrister', 'bases',
'basin', 'basis', 'battery', 'battling', 'bedtime',
'beginner', 'begun', 'bending', 'bicycle', 'billing',
'bingo', 'biography', 'biology', 'birthplace', 'blackberry',
'blather', 'blossom', 'boardroom', 'boasting', 'bodyguard',
'boldness', 'bomber', 'bondage', 'bonding', 'bones',
'bonus', 'bookmark', 'boomer', 'booty', 'bounds',
'bowling', 'brainstorming', 'breadth', 'breaker', 'brewer',
'brightness', 'broccoli', 'broth', 'brotherhood', 'browsing',
'brunch', 'brunt', 'building', 'bullion', 'bureaucracy',
'burglary', 'buyout', 'by-election', 'cabal', 'cabbage',
'calamity', 'campaign', 'canonization', 'captaincy', 'carcass',
'carrier', 'cartridge', 'cassette', 'catfish', 'caught',
'celebrity', 'cemetery', 'certainty', 'certification', 'charade',
'chasm', 'check-in', 'cheerleader', 'cheesecake', 'chemotherapy',
'chili', 'china', 'chivalry', 'cholera', 'cilantro',
'circus', 'civilisation', 'civility', 'clearance', 'clearing',
'clerk', 'climber', 'closeness', 'clothing', 'clutches',
'coaster', 'coconut', 'coding', 'collaborator', 'colleague',
'college', 'collision', 'colors', 'combustion', 'comedian',
'comer', 'commander', 'commemoration', 'commenter', 'commissioner',
'commune', 'competition', 'completeness', 'complexity', 'computing',
'comrade', 'concur', 'condominium', 'conduit', 'confidant',
'configuration', 'confiscation', 'conflagration', 'conflict', 'consist',
'consistency', 'consolidation', 'conspiracy', 'constable', 'consul',
'consultancy', 'contentment', 'contents', 'contractor', 'conversation',
'cornerstone', 'corpus', 'correlation', 'councilman', 'counselor',
'countdown', 'countryman', 'coverage', 'covering', 'coyote',
'cracker', 'creator', 'criminality', 'crocodile', 'cropping',
'cross-examination', 'crossover', 'crossroads', 'culprit', 'cumin',
'curator', 'curfew', 'cursor', 'custard', 'cutter',
'cyclist', 'cyclone', 'cylinder', 'cynicism', 'daddy',
'damsel', 'darkness', 'dawning', 'daybreak', 'dealing',
'dedication', 'deduction', 'defection', 'deference', 'deficiency',
'definition', 'deflation', 'degeneration', 'delegation', 'delicacy',
'delirium', 'deliverance', 'demeanor', 'demon', 'demonstration',
'denomination', 'dentist', 'departure', 'depletion', 'depression',
'designation', 'despotism', 'detention', 'developer', 'devolution',
'dexterity', 'diagnosis', 'dialect', 'differentiation', 'digger',
'digress', 'dioxide', 'diploma', 'disability', 'disarmament',
'discord', 'discovery', 'dishonesty', 'dismissal', 'disobedience',
'dispatcher', 'disservice', 'distribution', 'distributor', 'diver',
'diversity', 'docking', 'dollar', 'dominance', 'domination',
'dominion', 'donkey', 'doorstep', 'doorway', 'dossier',
'downside', 'drafting', 'drank', 'drilling', 'driver',
'drumming', 'drunkenness', 'duchess', 'ducking', 'dugout',
'dumps', 'dwelling', 'dynamics', 'eagerness', 'earnestness',
'earnings', 'eater', 'editor', 'effectiveness', 'electricity',
'elements', 'eloquence', 'emancipation', 'embodiment', 'embroidery',
'emperor', 'employment', 'encampment', 'enclosure', 'encouragement',
'endangerment', 'enlightenment', 'enthusiasm', 'environment', 'environs',
'envoy', 'epilepsy', 'equation', 'equator', 'error',
'espionage', 'estimation', 'euthanasia', 'evacuation', 'exaggeration',
'examination', 'exclamation', 'expediency', 'exploitation', 'extinction',
'eyewitness', 'falls', 'fascism', 'fastball', 'feces',
'feedback', 'ferocity', 'fertilization', 'fetish', 'finale',
'firing', 'fixing', 'flashing', 'flask', 'flora',
'fluke', 'folklore', 'follower', 'foothold', 'footing',
'forefinger', 'forefront', 'forgiveness', 'formality', 'formation',
'formula', 'foyer', 'fragmentation', 'framework', 'fraud',
'freestyle', 'frequency', 'friendliness', 'fries', 'frigate',
'fulfillment', 'function', 'functionality', 'fundraiser', 'fusion',
'futility', 'gallantry', 'gallery', 'genesis', 'genitals',
'girlfriend', 'glamour', 'glitter', 'glucose', 'google',
'grandeur', 'grappling', 'greens', 'gridlock', 'grocer',
'groundwork', 'grouping', 'gunman', 'gusto', 'habitation',
'hacker', 'hallway', 'hamburger', 'hammock', 'handling',
'hands', 'handshake', 'happiness', 'hardship', 'headcount',
'header', 'headquarters', 'heads', 'headset', 'hearth',
'hearts', 'heath', 'hegemony', 'height', 'hello',
'helper', 'helping', 'helplessness', 'hierarchy', 'hoarding',
'hockey', 'homeland', 'homer', 'honesty', 'horror',
'horseman', 'hostility', 'housing', 'humility', 'hurricane',
'iceberg', 'idiot', 'ignition', 'illness', 'illustration',
'illustrator', 'immunity', 'immunization', 'imperialism', 'imprisonment',
'inaccuracy', 'inaction', 'inactivity', 'inauguration', 'indecency',
'indicator', 'inevitability', 'infamy', 'infiltration', 'influx',
'iniquity', 'innocence', 'innovation', 'insanity', 'inspiration',
'instruction', 'instructor', 'insurer', 'interact', 'intercession',
'intercourse', 'intermission', 'interpretation', 'intersection', 'interval',
'intolerance', 'intruder', 'invasion', 'investment', 'involvement',
'irrigation', 'iteration', 'jenny', 'jihad', 'jogging',
'jones', 'joseph', 'juggernaut', 'juncture', 'jurisprudence',
'juror', 'kangaroo', 'kingdom', 'knocking', 'laborer',
'larceny', 'laurels', 'layout', 'leadership', 'leasing',
'legislation', 'leopard', 'liberation', 'licence', 'lifeblood',
'lifeline', 'ligament', 'lighting', 'likeness', 'line-up',
'lineage', 'liner', 'lineup', 'liquidation', 'listener',
'literature', 'litigation', 'litre', 'loathing', 'locality',
'lodging', 'logic', 'longevity', 'lookout', 'lordship',
'lustre', 'ma"am', 'machinery', 'madness', 'magnificence',
'mahogany', 'mailing', 'mainframe', 'maintenance', 'majority',
'manga', 'mango', 'manifesto', 'mantra', 'manufacturer',
'maple', 'martin', 'martyrdom', 'mathematician', 'matrix',
'matron', 'mayhem', 'mayor', 'means', 'meantime',
'measurement', 'mechanics', 'mediator', 'medics', 'melodrama',
'memory', 'mentality', 'metaphysics', 'method', 'metre',
'miner', 'mirth', 'misconception', 'misery', 'mishap',
'misunderstanding', 'mobility', 'molasses', 'momentum', 'monarchy',
'monument', 'morale', 'mortality', 'motto', 'mouthful',
'mouthpiece', 'mover', 'movie', 'mowing', 'murderer',
'musician', 'mutation', 'mythology', 'narration', 'narrator',
'nationality', 'negligence', 'neighborhood', 'neighbour', 'nervousness',
'networking', 'nexus', 'nightmare', 'nobility', 'nobody',
'noodle', 'normalcy', 'notification', 'nourishment', 'novella',
'nucleus', 'nuisance', 'nursery', 'nutrition', 'nylon',
'oasis', 'obscenity', 'obscurity', 'observer', 'offense',
'onslaught', 'operation', 'opportunity', 'opposition', 'oracle',
'orchestra', 'organisation', 'organizer', 'orientation', 'originality',
'ounce', 'outage', 'outcome', 'outdoors', 'outfield',
'outing', 'outpost', 'outset', 'overseer', 'owner',
'oxygen', 'pairing', 'panther', 'paradox', 'parliament',
'parsley', 'parson', 'passenger', 'pasta', 'patchwork',
'pathos', 'patriotism', 'pendulum', 'penguin', 'permission',
'persona', 'perusal', 'pessimism', 'peter', 'philosopher',
'phosphorus', 'phrasing', 'physique', 'piles', 'plateau',
'playing', 'plaza', 'plethora', 'plurality', 'pneumonia',
'pointer', 'poker', 'policeman', 'polling', 'polygamy',
'poster', 'posterity', 'posting', 'postponement', 'potassium',
'pottery', 'poultry', 'pounding', 'pragmatism', 'precedence',
'precinct', 'preoccupation', 'pretense', 'priesthood', 'prisoner',
'privacy', 'probation', 'proceeding', 'proceedings', 'processing',
'processor', 'progression', 'projection', 'prominence', 'propensity',
'prophecy', 'prorogation', 'prospectus', 'protein', 'prototype',
'providence', 'provider', 'provocation', 'proximity', 'puberty',
'publicist', 'publicity', 'publisher', 'pundit', 'putting',
'quantity', 'quart', 'quilting', 'quorum', 'racism',
'radiance', 'ralph', 'rancher', 'ranger', 'rapidity',
'rapport', 'ratification', 'rationality', 'reaction', 'reader',
'reassurance', 'rebirth', 'receptor', 'recipe', 'recognition',
'recourse', 'recreation', 'rector', 'recurrence', 'redemption',
'redistribution', 'redundancy', 'refinery', 'reformer', 'refrigerator',
'regularity', 'regulator', 'reinforcement', 'reins', 'reinstatement',
'relativism', 'relaxation', 'rendition', 'repayment', 'repentance',
'repertoire', 'repository', 'republic', 'reputation', 'resentment',
'residency', 'resignation', 'restaurant', 'resurgence', 'retailer',
'retention', 'retirement', 'reviewer', 'riches', 'righteousness',
'roadblock', 'robber', 'rocks', 'rubbing', 'runoff',
'saloon', 'salvation', 'sarcasm', 'saucer', 'savior',
'scarcity', 'scenario', 'scenery', 'schism', 'scholarship',
'schoolboy', 'schooner', 'scissors', 'scolding', 'scooter',
'scouring', 'scrimmage', 'scrum', 'seating', 'sediment',
'seduction', 'seeder', 'seizure', 'self-confidence', 'self-control',
'self-respect', 'semicolon', 'semiconductor', 'semifinal', 'senator',
'sending', 'serenity', 'seriousness', 'servitude', 'sesame',
'setup', 'sewing', 'sharpness', 'shaving', 'shoplifting',
'shopping', 'siding', 'simplicity', 'simulation', 'sinking',
'skate', 'sloth', 'slugger', 'snack', 'snail',
'snapshot', 'snark', 'soccer', 'solemnity', 'solicitation',
'solitude', 'somewhere', 'sophistication', 'sorcery', 'souvenir',
'spaghetti', 'specification', 'specimen', 'specs', 'spectacle',
'spectre', 'speculation', 'sperm', 'spoiler', 'squad',
'squid', 'staging', 'stagnation', 'staircase', 'stairway',
'stamina', 'standpoint', 'standstill', 'stanza', 'statement',
'stillness', 'stimulus', 'stocks', 'stole', 'stoppage',
'storey', 'storyteller', 'stylus', 'subcommittee', 'subscription',
'subsidy', 'suburb', 'success', 'sufferer', 'supposition',
'suspension', 'sweater', 'sweepstakes', 'swimmer', 'syndrome',
'synopsis', 'syntax', 'system', 'tablespoon', 'taker',
'tavern', 'technology', 'telephony', 'template', 'tempo',
'tendency', 'tendon', 'terrier', 'terror', 'terry',
'theater', 'theology', 'therapy', 'thicket', 'thoroughfare',
'threshold', 'thriller', 'thunderstorm', 'ticker', 'tiger',
'tights', 'to-day', 'tossing', 'touchdown', 'tourist',
'tourney', 'toxicity', 'tracing', 'tractor', 'translation',
'transmission', 'transmitter', 'trauma', 'traveler', 'treadmill',
'trilogy', 'trout', 'tumor', 'tuning', 'twenties',
'tycoon', 'tyrant', 'ultimatum', 'underdog', 'underwear',
'unhappiness', 'unification', 'university', 'uprising', 'vaccination',
'validity', 'vampire', 'vanguard', 'variation', 'vegetation',
'verification', 'viability', 'vicinity', 'victory', 'viewpoint',
'villa', 'vindication', 'violation', 'vista', 'vocalist',
'vogue', 'volcano', 'voltage', 'vomiting', 'vulnerability',
'waistcoat', 'waitress', 'wardrobe', 'warmth', 'watchdog',
'wealth', 'weariness', 'whereabouts', 'whisky', 'whiteness',
'widget', 'width', 'windfall', 'wiring', 'witchcraft',
'withholding', 'womanhood', 'words', 'workman', 'youngster'
)
|
dag.plot<-function(prec1and2=matrix(0),prec3and4=matrix(0),critical.activities=NULL){
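  # Plot an activity-on-node project network: entries of prec1and2 code
  # finish-to-start (1) and start-to-start (2) precedences, entries of
  # prec3and4 code finish-to-finish (3) and start-to-finish (4) ones.
  # Artificial start ("S") and end ("E") nodes are appended, and the
  # activities in critical.activities (if any) are drawn in red.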
A<-prec1and2
A0<-prec1and2
A0[A0%in%2]<-0
if(dim(prec3and4)[1]==1){prec3and4=matrix(0,nrow=dim(A)[1],ncol=dim(A)[2])}
B<-prec3and4
ii <- as.logical(colSums(A0))
iii <- as.logical(rowSums(A0))
A[as.logical(A)]<-1
B[as.logical(B)]<-1
A<-A+B
colnames(A) <- 1:nrow(A)
rownames(A) <- 1:nrow(A)
B<-cbind(A,rep(0,dim(A)[1]))
B<-rbind(B,rep(0,dim(B)[2]))
B<-cbind(B,rep(0,dim(B)[1]))
B<-rbind(B,rep(0,dim(B)[2]))
colnames(B) <- 1:nrow(B)
rownames(B) <- 1:nrow(B)
B[dim(A)[1]+1,colnames(A)[!ii]]<-1
B[rownames(A)[!iii],dim(A)[1]+2]<-1
colnames(B)[dim(A)[1]+1]<-"S"
rownames(B)[dim(A)[1]+1]<-"S"
colnames(B)[dim(A)[1]+2]<-"E"
rownames(B)[dim(A)[1]+2]<-"E"
i0<-which(t(B) !=0)
A0<-B
A0[1:(dim(A0)[1]-2),1:(dim(A0)[2]-2)]<-prec1and2
B0<-B
B0[1:(dim(B0)[1]-2),1:(dim(B0)[2]-2)]<-prec3and4
i2<-which(t(A0) ==2)
i3<-which(t(B0) ==3)
i4<-which(t(B0) ==4)
label<-rep("",length(i0))
label[match(i2,i0)]<-"SS"
label[match(i3,i0)]<-"FF"
label[match(i4,i0)]<-"SF"
network <- graph_from_adjacency_matrix(B)
if(is.null(critical.activities)){
plot(network,edge.label=label,vertex.color = "green",vertex.shape="square",vertex.label.color=c(rep("black",dim(A)[1]),"blue","blue")
,layout=layout.kamada.kawai)
}
else{
color<-c(rep("green",dim(prec1and2)[1]),"red","red")
color[critical.activities]<-"red"
plot(network,edge.label=label,vertex.color = color,vertex.shape="square",vertex.label.color=c(rep("black",dim(A)[1]),"blue","blue")
,layout=layout.kamada.kawai)
}
}
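# A minimal usage sketch (assumes igraph is attached): three activities where
# activity 1 precedes 2 finish-to-start (coded 1) and 2 precedes 3
# start-to-start (coded 2); activities 1 and 2 are marked as critical.
# prec <- matrix(0, 3, 3)
# prec[1, 2] <- 1
# prec[2, 3] <- 2
# dag.plot(prec1and2 = prec, critical.activities = c(1, 2))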
|
detailed_itineraries <- function(r5r_core,
origins,
destinations,
mode = "WALK",
mode_egress = "WALK",
departure_datetime = Sys.time(),
max_walk_dist = Inf,
max_bike_dist = Inf,
max_trip_duration = 120L,
walk_speed = 3.6,
bike_speed = 12,
max_rides = 3,
max_lts = 2,
shortest_path = TRUE,
n_threads = Inf,
verbose = TRUE,
progress = TRUE,
drop_geometry = FALSE) {
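# save user options and the data.table thread count, restoring both on exit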
old_options <- options()
old_dt_threads <- data.table::getDTthreads()
on.exit({
options(old_options)
data.table::setDTthreads(old_dt_threads)
})
options(datatable.optimize = Inf)
checkmate::assert_class(r5r_core, "jobjRef")
mode_list <- select_mode(mode, mode_egress)
departure <- posix_to_string(departure_datetime)
checkmate::assert_numeric(max_trip_duration)
max_trip_duration <- as.integer(max_trip_duration)
max_walk_time <- set_max_street_time(max_walk_dist,
walk_speed,
max_trip_duration)
max_bike_time <- set_max_street_time(max_bike_dist,
bike_speed,
max_trip_duration)
checkmate::assert_logical(shortest_path)
checkmate::assert_logical(drop_geometry)
origins <- assert_points_input(origins, "origins")
destinations <- assert_points_input(destinations, "destinations")
n_origs <- nrow(origins)
n_dests <- nrow(destinations)
if (n_origs != n_dests) {
if ((n_origs > 1) && (n_dests > 1)) {
stop(paste("Origins and destinations dataframes must either have the",
"same size or one of them must have only one entry."))
} else {
if (n_origs > n_dests) {
destinations <- destinations[rep(1, n_origs), ]
message("Destinations dataframe expanded to match the number of origins.")
} else {
origins <- origins[rep(1, n_dests), ]
message("Origins dataframe expanded to match the number of destinations.")
}
}
}
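# pass routing parameters (speeds, max rides, LTS, threads, verbosity) to the Java core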
set_speed(r5r_core, walk_speed, "walk")
set_speed(r5r_core, bike_speed, "bike")
set_max_rides(r5r_core, max_rides)
set_max_lts(r5r_core, max_lts)
if (shortest_path) {
set_suboptimal_minutes(r5r_core, 0L)
} else {
set_suboptimal_minutes(r5r_core, 5L)
}
set_n_threads(r5r_core, n_threads)
set_verbose(r5r_core, verbose)
set_progress(r5r_core, progress)
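# call the Java core; the result has one row per segment of each itinerary option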
path_options <- r5r_core$detailedItineraries(origins$id,
origins$lat,
origins$lon,
destinations$id,
destinations$lat,
destinations$lon,
mode_list$direct_modes,
mode_list$transit_mode,
mode_list$access_mode,
mode_list$egress_mode,
departure$date,
departure$time,
max_walk_time,
max_bike_time,
max_trip_duration,
drop_geometry)
if (is.null(path_options)) {
return(data.table::data.table(path_options))
} else {
path_options <- java_to_dt(path_options)
if (!is.data.frame(path_options)) {
path_options <- data.table::rbindlist(path_options)
if (nrow(path_options) == 0) return(path_options)
}
}
if (nrow(path_options) == 0) return(path_options)
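# total duration of each itinerary option = sum of its segment durations and waits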
path_options[, total_duration := sum(segment_duration, wait), by = .(fromId, toId, option)]
if (shortest_path) {
path_options <- path_options[path_options[, .I[total_duration == min(total_duration)], by = .(fromId, toId)]$V1]
path_options <- path_options[path_options[, .I[option == min(option)], by = .(fromId, toId)]$V1]
} else {
path_options[, temp_route := fifelse(route == "", mode, route)]
path_options[, temp_sign := paste(temp_route, collapse = "_"), by = .(fromId, toId, option)]
path_options <- path_options[path_options[, .I[total_duration == min(total_duration)],by = .(fromId, toId, temp_sign)]$V1]
path_options <- path_options[path_options[, .I[option == min(option)], by = .(fromId, toId, temp_sign)]$V1]
path_options[, grep("temp_", names(path_options), value = TRUE) := NULL]
}
path_options[, option := data.table::rleid(option), by = .(fromId, toId)]
if (!drop_geometry) {
path_options[, geometry := sf::st_as_sfc(geometry)]
path_options <- sf::st_sf(path_options, crs = 4326)
}
return(path_options)
}
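# A minimal usage sketch (hypothetical paths; assumes an r5r routing core has
# been built, e.g. with r5r::setup_r5()):
# core <- setup_r5(data_path = "path/to/network")
# pts <- data.frame(id = c("a", "b"), lat = c(-30.03, -30.05), lon = c(-51.23, -51.21))
# det <- detailed_itineraries(core, origins = pts[1, ], destinations = pts[2, ],
#                             mode = c("WALK", "TRANSIT"),
#                             departure_datetime = as.POSIXct("2019-05-20 14:00:00"))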
|
# Choose the alpha-transformation parameter, the number of clusters G and the
# mixture model by BIC (alfa() is assumed to come from the Compositional package).
bic.alfamixnorm <- function(x, G, a = seq(-1, 1, by = 0.1), veo = FALSE, graph = TRUE) {
n <- dim(x)[1] ; p <- dim(x)[2]
if ( min(x) == 0 ) a <- a[a > 0]
names <- paste("alfa=", a)
abic <- sapply(names, function(x) NULL)
for ( i in 1:length(a) ) {
z <- alfa(x, a[i])
y <- z$aff
sa <- z$sa
mod <- mixture::gpcm(y, G = G, mnames = NULL, start = 0, mmax = 100, veo = veo)
abic[[ i ]] <- mod$BIC[, , 3] - 2 * sa - ( abs(a[i]) > 1e-12 ) * log(n)
pou <- which( is.na(abic[[ i ]] ) )
if ( length(pou) > 0 ) abic[[ i ]][pou] <- -Inf
}
bica <- matrix(nrow = length(G), ncol = length(a) )
for ( i in 1:length(a) ) bica[, i] <- Rfast::rowMaxs( abic[[ i ]], value = TRUE )
  if ( graph ) {
    plot( a, bica[1, ], type = "b", pch = 9, xlab = expression( paste(alpha, " values") ),
         ylab = "BIC values", ylim = c( min(bica, na.rm = TRUE), max(bica, na.rm = TRUE) ),
         cex.lab = 1.2, cex.axis = 1.2, xaxt = "n" )
    ## the x-axis was suppressed above; draw it at the alpha values used
    axis(1, at = a, labels = a)
    abline(v = a, col = "lightgrey", lty = 2)
    abline(h = seq( min(bica), max(bica), length = 10 ), col = "lightgrey", lty = 2)
    for ( i in 2:length(G) ) lines(a, bica[i, ], type = "b", pch = 9, col = i)
  }
poio <- which( bica == max(bica), arr.ind = TRUE )[1, 2]
optalpha <- a[poio]
pou <- which( abic[[ poio ]] == max(abic[[ poio ]]), arr.ind = TRUE )
optG <- G[ pou[1, 1] ]
optmodel <- colnames(abic[[ poio ]])[ pou[1, 2] ]
list(abic = abic, optalpha = optalpha, optG = optG, optmodel = optmodel)
}
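# A minimal usage sketch (assumes the Compositional, Rfast and mixture packages;
# rows of x must be compositions, i.e. positive and summing to one):
# x <- as.matrix(iris[, 1:4])
# x <- x / rowSums(x)
# mod <- bic.alfamixnorm(x, G = 2:3, a = seq(-0.5, 1, by = 0.5), graph = FALSE)
# mod$optalpha; mod$optG; mod$optmodel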
|
# Convert a genotype file into packed binary files M.bin and Mt.bin in tempdir(),
# via the Rcpp helpers createM_BIN_rcpp() and createMt_BIN_rcpp().
create.bin <- function(file_genotype=NULL, type="text", AA=NULL, AB=NULL, BB=NULL,
                       availmemGb=8, dim_of_M=NULL, quiet=TRUE, missing=NULL){
if(.Platform$OS.type == "unix") {
binMfile <- paste(tempdir() , "/", "M.bin", sep="")
binMtfile <- paste(tempdir() , "/", "Mt.bin", sep="")
} else {
binMfile <- paste(tempdir() , "\\", "M.bin", sep="")
binMtfile <- paste(tempdir() , "\\", "Mt.bin", sep="")
}
## rough memory requirement in gigabytes (computed but currently not checked against availmemGb)
neededMem <- ( dim_of_M[1] * dim_of_M[1] * 8 + 2.0 * ( dim_of_M[1] * dim_of_M[2] * 8 ) ) / 1000000000.0
if (type=="text"){
if(!is.null(missing)) {
missing <- as.character(missing)
} else {
missing <- "NA"
}
it_worked <- createM_BIN_rcpp(f_name = file_genotype, type=type , f_name_bin = binMfile, AA = AA, AB = AB, BB = BB,
max_memory_in_Gbytes=availmemGb, dims = dim_of_M ,
quiet = quiet, message=message , missing=missing)
if(!it_worked)
return(FALSE)
message(" \n Taking transpose of marker data and writing untransposed and transposed data to disc ... \n")
createMt_BIN_rcpp(f_name_in = binMfile, f_name_out = binMtfile, type=type,
max_memory_in_Gbytes=availmemGb, dims = dim_of_M, quiet = quiet, message=message )
message("\n Writing of marker data to disc is complete ... \n")
} else {
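## non-text input: each row presumably carries 6 leading columns plus two
## allele columns per marker, hence the temporary 2*p + 6 column count below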
ncol <- dim_of_M[2]
dim_of_M[2] <- 2*dim_of_M[2] + 6
it_worked <- createM_BIN_rcpp(f_name = file_genotype, type=type, f_name_bin = binMfile, AA ="-9", AB = "-9", BB = "-9",
max_memory_in_Gbytes=availmemGb, dims = dim_of_M , quiet = quiet,
message=message , missing="NA")
if(!it_worked)
return(FALSE)
dim_of_M[2] <- ncol
message(" \n Taking transpose of marker data and writing untransposed and transposed data to disc ... \n")
createMt_BIN_rcpp(f_name_in = binMfile, f_name_out = binMtfile, type=type,
max_memory_in_Gbytes=availmemGb, dims = dim_of_M, quiet = quiet, message=message )
message(" \n Writing of marker data to disc is complete ... \n")
}
return(TRUE)
}
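# A minimal usage sketch (hypothetical file name; 'geno.txt' would hold the
# marker matrix with genotypes coded AA/AB/BB):
# create.bin(file_genotype = "geno.txt", type = "text",
#            AA = "AA", AB = "AB", BB = "BB",
#            availmemGb = 8, dim_of_M = c(100, 5000))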
|
plot_metric_density <- function(results = NULL,
baseline = NULL,
metric = "",
fill = c("darkblue", "lightblue"),
alpha = 0.6,
theme_fn = ggplot2::theme_minimal,
xlim = NULL) {
assert_collection <- checkmate::makeAssertCollection()
if (is.null(results) && is.null(baseline)){
assert_collection$push(
"Either 'results' or 'baseline' must be a data frame. Both were 'NULL'.")
}
checkmate::assert_data_frame(x = results, col.names = "unique", null.ok = TRUE,
add = assert_collection)
checkmate::assert_data_frame(x = baseline, col.names = "unique", null.ok = TRUE,
add = assert_collection)
checkmate::assert_string(x = metric, min.chars = 1,
add = assert_collection)
checkmate::assert_character(x = fill, null.ok = TRUE,
add = assert_collection)
checkmate::assert_number(x = alpha, lower = 0, upper = 1,
add = assert_collection)
checkmate::assert_function(x = theme_fn,
add = assert_collection)
checkmate::assert_numeric(x = xlim, null.ok = TRUE,
add = assert_collection)
checkmate::reportAssertions(assert_collection)
if (!is.null(results))
checkmate::assert_names(x = names(results), must.include = metric,
add = assert_collection)
if (!is.null(baseline))
checkmate::assert_names(x = names(baseline), must.include = metric,
add = assert_collection)
checkmate::reportAssertions(assert_collection)
if (!is.null(results)){
results <- results %>%
base_select(cols = metric) %>%
dplyr::mutate(dataset = "Results")
}
if (!is.null(baseline)) {
baseline <- baseline %>%
base_select(cols = metric) %>%
dplyr::mutate(dataset = "Baseline")
} else {
fill <- fill[[2]]
}
data_to_plot <- results %>%
dplyr::bind_rows(baseline)
if (substr(metric, 1, 1) != "`") {
metric <- paste0("`", metric, "`")
}
data_to_plot %>%
ggplot2::ggplot(ggplot2::aes_string(x = metric, fill = "dataset")) +
ggplot2::geom_density(alpha = alpha) +
ggplot2::scale_fill_manual(values = fill) +
ggplot2::coord_cartesian(xlim = xlim) +
theme_fn() +
ggplot2::labs(y = "Density") +
ggplot2::theme(
axis.title.y = ggplot2::element_text(margin = ggplot2::margin(0, 6, 0, 0)),
axis.title.x.bottom = ggplot2::element_text(margin = ggplot2::margin(6, 0, 0, 0))
)
}
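# A minimal usage sketch with simulated metric values:
# res <- data.frame(`Balanced Accuracy` = rbeta(100, 8, 2), check.names = FALSE)
# base <- data.frame(`Balanced Accuracy` = rbeta(100, 5, 5), check.names = FALSE)
# plot_metric_density(results = res, baseline = base, metric = "Balanced Accuracy")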
|
setGeneric("Watershed.Order", function(x,...) standardGeneric("Watershed.Order"))
setMethod("Watershed.Order", signature="Watershed",
function(x,...){
station = slot(x,"station")
subbasin = slot(x,"subbasin")
zhyd = slot(x,"zhyd")
river = slot(x,"river")
c1 = slot(x,"c1")
node = slot(x,"node")
id = list(gIntersects(station, subbasin, byid=T))
sb1 = SpDF_Subset(id, subbasin)
id = list(gIntersects(sb1, zhyd, byid=T))
zhyd_sb1 = SpDF_Subset(id, zhyd)
if(identical(c1, subbasin)){
id = list(gIntersects(zhyd_sb1, station, byid=T))
c1 = SpDF_Subset(id, zhyd_sb1)
slot(c1, "data")["order"] = 1
}
id = list(gCrosses(c1, river, byid=T))
if(length(id[[1]])==length(which(id[[1]]==FALSE))){
c1_outlet = 0
c1_inlet = 0
riverIO = 0
}else{
riverIO = SpDF_Subset(id, river)
id = list(gIntersects(c1, node, byid=T))
c1_node = SpDF_Subset(id, node)
id = list(gIntersects(c1, river, byid=T))
c1_river = SpDF_Subset(id, river)
buffer = gBuffer(riverIO, width=100)
id = list(gIntersects(buffer, node, byid=T))
nodeIO = SpDF_Subset(id, node)
boundary = gBoundary(c1)
dist = gDistance(nodeIO, boundary, byid =T); dist
dist
}
if (class(riverIO)=="SpatialLinesDataFrame"){
if (length(riverIO) == 1){
a = Watershed.IOR1(x=nodeIO, dist=dist)
print("length(riverIO) == 1")
}
if (length(riverIO) == 2){
a = Watershed.IOR2(x=nodeIO, dist=dist, node=c1_node)
print("length(riverIO) == 2")
}
if(length(riverIO) == 3){
a = Watershed.IOR3(x=nodeIO, y=riverIO, dist=dist)
print("length(riverIO) == 3")
}
if(length(riverIO) == 4){
a = Watershed.IOR4(x=nodeIO, y=riverIO, dist=dist)
print("length(riverIO) == 4")
}
c1_inlet = a[[1]]
c1_outlet= a[[2]]
}else{
riverIO = 0
c1_inlet = 0
c1_outlet = 0
}
if(class(c1_inlet)=="SpatialPointsDataFrame"){
a = Watershed.Tributary(x=c1_inlet,xo= c1_outlet,y=riverIO,z=nodeIO,zhyd=zhyd_sb1, c1=c1)
c2c3 = a[[1]]
c2 = a[[2]]
c3 = a[[3]]
node_trib = a[[4]]
}else{
c2c3 = 0
c2 = 0
c3= 0
node_trib = 0
}
return(list(c1=c1,c1_inlet=c1_inlet,c1_outlet=c1_outlet,c2=c2,c3=c3,node_trib=node_trib,sb1=sb1,riverIO=riverIO,nodeIO=nodeIO,c1_river=c1_river,c1_node=c1_node))
}
)
|
"df_crc"
|
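# Warping definitions for input transformations: each list bundles a warping
# function (returning values with a "gradient" attribute) with its parameter
# names, defaults, bounds, and flags for gradient availability and CDF form.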
warpSpline2 <- list(
fun = function(z, par, L = 2, knots) {
if (!requireNamespace("DiceKriging", quietly = TRUE)) {
stop("DiceKriging required")
}
y <- DiceKriging::scalingFun1d(z, knots = knots, eta = par)
attr(y, "gradient") <- DiceKriging:::scalingGrad1d(z, knots = knots)
return(y)
},
  parNames = "eta",
parDefault = c("eta" = 1),
parLower = c("eta" = 1e-10),
parUpper = c("eta" = Inf),
hasGrad = TRUE,
isCdf = FALSE
)
warpSpline1 <- list(
fun = function(z, par, L = 2, knots) {
yknots <- c(0, cumsum(par))
y <- approx(x = knots, y = yknots, xout = z)$y
attr(y, "gradient") <- outer(z, knots[-1], function(z, t){z >= t}) * 1
return(y)
},
  parNames = "eta",
parDefault = c("eta" = 1),
parLower = c("eta" = 1e-10),
parUpper = c("eta" = Inf),
hasGrad = TRUE,
isCdf = FALSE
)
warpPower <- list(
fun = function(z, par, L, knots = NULL) {
y <- pbeta(q = z, shape1 = par[1], shape2 = 1)
ind <- (z > 0) & (z < 1)
grad <- rep(0, length(z))
if (any(ind)) grad[ind] <- y[ind] * log(z[ind])
attr(y, "gradient") <- matrix(grad, ncol = 1)
return(y)
},
parNames = "pow",
parDefault = c("pow" = 1),
parLower = c("pow" = 1e-10),
parUpper = c("pow" = Inf),
hasGrad = TRUE,
isCdf = TRUE
)
eps <- 1e-10
warpNorm <- list(
fun = function(z, par, L, knots = NULL) {
Az <- pnorm(z, mean = par[1], sd = par[2])
A1 <- pnorm(1, mean = par[1], sd = par[2])
A0 <- pnorm(0, mean = par[1], sd = par[2])
az <- dnorm(z, mean = par[1], sd = par[2])
a1 <- dnorm(1, mean = par[1], sd = par[2])
a0 <- dnorm(0, mean = par[1], sd = par[2])
N <- Az - A0
D <- A1 - A0
y <- (Az - A0)/D
grad <- matrix(0, nrow = length(z), ncol = 2)
colnames(grad) <- c("mean", "sd")
grad[, 1] <- (az - a0) - N * (a1 - a0) / D
grad[, 1] <- - grad[, 1] / D
grad[, 2] <- (az * (z - par[1]) - a0 * (0 - par[1])) -
N * (a1 * (1 - par[1]) - a0 * (0 - par[1])) / D
grad[, 2] <- - grad[, 2] / par[2] / D
attr(y, "gradient") <- grad
return(y)
},
parNames = c("mean", "sd"),
parDefault = c(mean = 0.5, sd = 3),
parLower = c(mean = eps, sd = eps),
parUpper = c(mean = 1 - eps, sd = Inf),
hasGrad = TRUE,
isCdf = TRUE
)
warpUnorm <- list(
fun = function(z, par, L, knots = NULL) {
y <- pnorm(z, mean = par[1], sd = par[2])
grad <- matrix(0, nrow = length(z), ncol = 2)
colnames(grad) <- c("mean", "sd")
phi <- dnorm(z, mean = par[1], sd = par[2])
grad[, 1] <- - phi
grad[, 2] <- - phi * (z - par[1]) / par[2]
attr(y, "gradient") <- grad
return(y)
},
parNames = c("mean", "sd"),
parDefault = c(mean = 0.5, sd = 3),
parLower = c(mean = eps, sd = eps),
parUpper = c(mean = 1 - eps, sd = Inf),
hasGrad = TRUE,
isCdf = FALSE
)
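# A minimal usage sketch: evaluate the normal-CDF warping on a grid and
# inspect its gradient with respect to (mean, sd).
# z <- seq(0, 1, length.out = 5)
# y <- warpNorm$fun(z, par = warpNorm$parDefault, L = 2)
# attr(y, "gradient")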
|
data(World)
# tmap_style() switches the style and returns the name of the previously
# active style, so it can be restored afterwards
current.style <- tmap_style("classic")
qtm(World, fill = "life_exp", fill.title = "Life expectancy")
tmap_style("cobalt")
qtm(World, fill = "life_exp", fill.title = "Life expectancy")
# restore the style that was active before
tmap_style(current.style)
|
# Generate a 2D exponential-falloff kernel, normalised to sum to one.
generate_2d_exponential = function(falloff = 1, dim = c(11,11), width = 3) {
if(length(dim) == 1) {
dim = c(dim, dim)
}
mindim = min(dim)
xy_ratio = dim[1]/dim[2]
  # stretch the sampling grid so the kernel stays isotropic for non-square dims
  if(xy_ratio > 1) {
x = seq(-width*xy_ratio,width*xy_ratio,length.out = dim[1])
y = seq(-width,width,length.out = dim[2])
} else {
x = seq(-width,width,length.out = dim[1])
y = seq(-width/xy_ratio,width/xy_ratio,length.out = dim[2])
}
testmat = matrix(0,dim[1],dim[2])
for(i in 1:length(x)) {
for(j in 1:length(y)) {
testmat[i,j] = (dexp(sqrt(x[i]^2+y[j]^2),rate=1/falloff))
}
}
testmat/sum(testmat)
}
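# A minimal usage sketch: a 31x31 kernel with falloff 2, shown as an image.
# kernel <- generate_2d_exponential(falloff = 2, dim = 31, width = 3)
# image(kernel, useRaster = TRUE)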
|
crsra_import_as_course = function(x) {
if (inherits(x, "coursera_import")) {
return(x)
}
if (inherits(x, "coursera_course_import")) {
course_name = attributes(x)$course_name
partner_user_id = attributes(x)$partner_user_id
x = list(x)
names(x) = course_name
attr(x, "partner_user_id") = partner_user_id
class(x) = "coursera_import"
return(x)
}
stop("Object class not of coursera_course_import or coursera_import")
}
|
make_intervals = function(start, end) {
if (missing(end) && is.matrix(start) && ncol(start) == 2) {
end = start[,2]
start = start[,1]
}
stopifnot(length(start) > 0, length(start) == length(end))
structure(list(start = start, end = end), class = "intervals")
}
as_intervals = function(x, add_last = FALSE) {
stopifnot(is.atomic(x))
if (add_last)
x = c(x, tail(x, 1) + diff(tail(x, 2)))
make_intervals(start = head(x, -1), end = tail(x, -1))
}
length.intervals = function(x) length(x$start)
head.intervals = function(x, n, ...) make_intervals(head(x$start, n), head(x$end, n))
tail.intervals = function(x, n, ...) make_intervals(tail(x$start, n), tail(x$end, n))
c.intervals = function(...) {
dots = list(...)
start = do.call(c, lapply(dots, function(x) x$start))
end = do.call(c, lapply(dots, function(x) x$end))
make_intervals(start, end)
}
`[.intervals` = function(x, i, ...) {
make_intervals(x$start[i], x$end[i])
}
format.intervals = function(x, ...) {
mformat = function(x, ..., digits = getOption("digits")) {
if (inherits(x, "PCICt"))
format(x, ...)
else
format(x, digits = digits, ...)
}
if (inherits(x$start, "units")) {
stopifnot(units(x$start) == units(x$end))
paste0("[", format(as.numeric(x$start), ...), ",", format(as.numeric(x$end), ...), ") ",
"[", as.character(units(x$start)), "]")
} else
paste0("[", mformat(x$start, ...), ",", mformat(x$end, ...), ")")
}
find_interval = function(x, intervals) {
if (inherits(intervals$start, "Date") && inherits(x, "POSIXct"))
x = as.Date(x)
if (inherits(x, "Date") && inherits(intervals$start, "POSIXct"))
x = as.POSIXct(x)
if (all(intervals$start > intervals$end)) {
start = intervals$end
intervals$end = intervals$start
intervals$start = start
}
if (getRversion() < "4.1.0")
stop("R >= 4.1.0 required for handling intervals")
w = apply(outer(x, intervals$start, ">=") & outer(x, intervals$end, "<"), 1, which, simplify = FALSE)
w[lengths(w) == 0] = NA_integer_
unlist(w)
}
as.list.intervals = function(x, ...) {
structure(mapply(make_intervals, x$start, x$end, SIMPLIFY = FALSE),
class = "intervals_list")
}
format.intervals_list = function(x, ...) {
sapply(x, format, ...)
}
`[.intervals_list` = function(x, i, ...) {
ret = NextMethod()
is_null = sapply(ret, is.null)
ret[is_null] = list(make_intervals(NA_real_, NA_real_))
structure(ret, class = "intervals_list")
}
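# A minimal usage sketch (find_interval() requires R >= 4.1.0):
# i <- as_intervals(c(0, 1, 2, 4))   # three intervals: [0,1), [1,2), [2,4)
# format(i)
# find_interval(c(0.5, 3, 5), i)     # -> 1 3 NA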
|
selectizeGroupUI <- function(id, params, label = NULL, btn_label = "Reset filters", inline = TRUE) {
ns <- NS(id)
if (inline) {
selectizeGroupTag <- tagList(
tags$b(label),
tags$div(
class="btn-group-justified selectize-group",
role="group", `data-toggle`="buttons",
lapply(
X = seq_along(params),
FUN = function(x) {
input <- params[[x]]
tagSelect <- tags$div(
class = "btn-group",
id = ns(paste0("container-", input$inputId)),
selectizeInput(
inputId = ns(input$inputId),
label = input$label %||% input$title,
choices = input$choices,
selected = input$selected,
multiple = ifelse(is.null(input$multiple), TRUE, input$multiple),
width = "100%",
options = list(
placeholder = input$placeholder,
plugins = list("remove_button"),
onInitialize = I('function() { this.setValue(""); }')
)
)
)
return(tagSelect)
}
)
),
actionLink(
inputId = ns("reset_all"),
label = btn_label,
icon = icon("times"),
style = "float: right;"
)
)
} else {
selectizeGroupTag <- tagList(
tags$b(label),
lapply(
X = seq_along(params),
FUN = function(x) {
input <- params[[x]]
tagSelect <- tags$div(
id = ns(paste0("container-", input$inputId)),
selectizeInput(
inputId = ns(input$inputId),
label = input$label %||% input$title,
choices = input$choices,
selected = input$selected,
multiple = ifelse(is.null(input$multiple), TRUE, input$multiple),
width = "100%",
options = list(
placeholder = input$placeholder,
plugins = list("remove_button"),
onInitialize = I('function() { this.setValue(""); }')
)
)
)
return(tagSelect)
}
),
actionLink(
inputId = ns("reset_all"),
label = btn_label,
icon = icon("times"),
style = "float: right;"
)
)
}
tagList(
singleton(
tagList(
tags$link(
rel="stylesheet",
type="text/css",
href="shinyWidgets/modules/styles-modules.css"
), toggleDisplayUi()
)
),
selectizeGroupTag
)
}
selectizeGroupServer <- function(input, output, session, data, vars, inline = TRUE) {
ns <- session$ns
toggleDisplayServer(
session = session, id = ns("reset_all"), display = "none"
)
rv <- reactiveValues(data = NULL, vars = NULL)
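  # resolve (possibly reactive) data and vars, and show only the input
  # containers that correspond to the filtering variables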
observe({
if (is.reactive(data)) {
rv$data <- data()
} else {
rv$data <- as.data.frame(data)
}
if (is.reactive(vars)) {
rv$vars <- vars()
} else {
rv$vars <- vars
}
for (var in names(rv$data)) {
if (var %in% rv$vars) {
toggleDisplayServer(
session = session, id = ns(paste0("container-", var)), display = ifelse(inline, "table-cell", "block")
)
} else {
toggleDisplayServer(
session = session, id = ns(paste0("container-", var)), display = "none"
)
}
}
})
observe({
lapply(
X = rv$vars,
FUN = function(x) {
vals <- sort(unique(rv$data[[x]]))
updateSelectizeInput(
session = session,
inputId = x,
choices = vals,
selected = isolate(input[[x]]),
server = TRUE
)
}
)
})
observeEvent(input$reset_all, {
lapply(
X = rv$vars,
FUN = function(x) {
vals <- sort(unique(rv$data[[x]]))
updateSelectizeInput(
session = session,
inputId = x,
choices = vals,
server = TRUE
)
}
)
})
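  # cross-filtering: when one input changes, restrict the remaining inputs'
  # choices to the rows that still match all active selections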
observe({
vars <- rv$vars
lapply(
X = vars,
FUN = function(x) {
ovars <- vars[vars != x]
observeEvent(input[[x]], {
data <- rv$data
indicator <- lapply(
X = vars,
FUN = function(x) {
data[[x]] %inT% input[[x]]
}
)
indicator <- Reduce(f = `&`, x = indicator)
data <- data[indicator, ]
if (all(indicator)) {
toggleDisplayServer(session = session, id = ns("reset_all"), display = "none")
} else {
toggleDisplayServer(session = session, id = ns("reset_all"), display = "block")
}
for (i in ovars) {
if (is.null(input[[i]])) {
updateSelectizeInput(
session = session,
inputId = i,
choices = sort(unique(data[[i]])),
server = TRUE
)
}
}
if (is.null(input[[x]])) {
updateSelectizeInput(
session = session,
inputId = x,
choices = sort(unique(data[[x]])),
server = TRUE
)
}
}, ignoreNULL = FALSE, ignoreInit = TRUE)
}
)
})
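  # the module returns the data filtered by all active selections, as a reactive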
return(reactive({
data <- rv$data
vars <- rv$vars
indicator <- lapply(
X = vars,
FUN = function(x) {
data[[x]] %inT% input[[x]]
}
)
indicator <- Reduce(f = `&`, x = indicator)
data <- data[indicator, ]
return(data)
}))
}
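# A minimal usage sketch (assumes shiny, shinyWidgets and ggplot2 are available):
# ui <- fluidPage(
#   selectizeGroupUI("my-filters", params = list(
#     manufacturer = list(inputId = "manufacturer", title = "Manufacturer"),
#     model = list(inputId = "model", title = "Model")
#   )),
#   tableOutput("table")
# )
# server <- function(input, output, session) {
#   res <- callModule(selectizeGroupServer, "my-filters",
#                     data = ggplot2::mpg, vars = c("manufacturer", "model"))
#   output$table <- renderTable(res())
# }
# shinyApp(ui, server)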
|
intsvy.config <- function( variables.pvlabelpref,
variables.pvlabelsuff,
variables.weight,
variables.jackknifeZone,
variables.jackknifeRep,
parameters.cutoffs,
parameters.cutoffs2,
parameters.percentiles,
parameters.weights,
parameters.PVreps,
parameters.varpv1,
input.type,
input.prefixes,
input.student,
input.student_colnames1,
input.student_colnames2,
input.student_pattern,
input.homeinput,
input.home_colnames,
input.school,
input.school_colnames,
input.teacher,
input.teacher_colnames,
input.student_ids,
input.school_ids,
input.type_part,
input.cnt_part, base.config = pirls_conf) {
config <- base.config
  if (!missing(variables.pvlabelpref))   config$variables$pvlabelpref   <- variables.pvlabelpref
  if (!missing(variables.pvlabelsuff))   config$variables$pvlabelsuff   <- variables.pvlabelsuff
  if (!missing(variables.weight))        config$variables$weight        <- variables.weight
  if (!missing(variables.jackknifeZone)) config$variables$jackknifeZone <- variables.jackknifeZone
  if (!missing(variables.jackknifeRep))  config$variables$jackknifeRep  <- variables.jackknifeRep
  if (!missing(parameters.cutoffs))      config$parameters$cutoffs      <- parameters.cutoffs
  if (!missing(parameters.cutoffs2))     config$parameters$cutoffs2     <- parameters.cutoffs2
  if (!missing(parameters.percentiles))  config$parameters$percentiles  <- parameters.percentiles
  if (!missing(parameters.weights))      config$parameters$weights      <- parameters.weights
  if (!missing(parameters.PVreps))       config$parameters$PVreps       <- parameters.PVreps
  if (!missing(parameters.varpv1))       config$parameters$varpv1       <- parameters.varpv1
  if (!missing(input.type))              config$input$type              <- input.type
  if (!missing(input.prefixes))          config$input$prefixes          <- input.prefixes
  if (!missing(input.student))           config$input$student           <- input.student
  if (!missing(input.student_colnames1)) config$input$student_colnames1 <- input.student_colnames1
  if (!missing(input.student_colnames2)) config$input$student_colnames2 <- input.student_colnames2
  if (!missing(input.student_pattern))   config$input$student_pattern   <- input.student_pattern
  if (!missing(input.homeinput))         config$input$homeinput         <- input.homeinput
  if (!missing(input.home_colnames))     config$input$home_colnames     <- input.home_colnames
  if (!missing(input.school))            config$input$school            <- input.school
  if (!missing(input.school_colnames))   config$input$school_colnames   <- input.school_colnames
  if (!missing(input.teacher))           config$input$teacher           <- input.teacher
  if (!missing(input.teacher_colnames))  config$input$teacher_colnames  <- input.teacher_colnames
  if (!missing(input.student_ids))       config$input$student_ids       <- input.student_ids
  if (!missing(input.school_ids))        config$input$school_ids        <- input.school_ids
  if (!missing(input.type_part))         config$input$type_part         <- input.type_part
  if (!missing(input.cnt_part))          config$input$cnt_part          <- input.cnt_part
config
}
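# A minimal usage sketch: override a single setting while keeping the defaults
# of the base configuration (pirls_conf, as in the function's default argument).
# my_conf <- intsvy.config(parameters.PVreps = 5, base.config = pirls_conf)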
|
library(azuremlsdk)

# The run is presumably launched with flag/value pairs such as
# "--number_1 3 --number_2 4", so the values sit at positions 2 and 4.
args <- commandArgs(trailingOnly = TRUE)
number_1 <- args[2]
log_metric_to_run("First Number", number_1)
number_2 <- args[4]
log_metric_to_run("Second Number", number_2)

# log the sum of the two inputs to the current run
total <- as.numeric(number_1) + as.numeric(number_2)
log_metric_to_run("Sum", total)
|
# Parse L1000 profile names of the form <compound>_<cellLine>_<concentration>_<time>
# (the compound name itself may contain underscores) into a metadata data frame.
getProfilesMetadata <- function(colNames){
l1000_names <- as.vector(colNames)
l1000_compounds <- unlist(lapply(strsplit(l1000_names, '_'), function(X){paste0(X[1:(length(X)-3)], collapse = '_')}))
l1000_cellLine <- unlist(lapply(strsplit(l1000_names, '_'), function(X){X[(length(X)-2)]}))
l1000_concentration <- unlist(lapply(strsplit(l1000_names, '_'), function(X){X[(length(X)-1)]}))
l1000_time <- unlist(lapply(strsplit(l1000_names, '_'), function(X){X[length(X)]}))
l1000_metadata <- data.frame(compounds = l1000_compounds,
cellLine = l1000_cellLine,
concentration = l1000_concentration,
time = l1000_time,
names = l1000_names)
return(l1000_metadata)
}
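# A minimal usage sketch with a made-up profile name:
# getProfilesMetadata(c("BRD-K12345_MCF7_10um_24h"))
# # -> compounds = "BRD-K12345", cellLine = "MCF7",
# #    concentration = "10um", time = "24h"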
|
PipeOpHist_B = R6::R6Class("Hist_B_imputation",
inherit = PipeOpImpute,
public = list(
initialize = function(id = "impute_hist_B", param_vals = list()) {
super$initialize(id, param_vals = param_vals, packages = "graphics", feature_types = c("integer", "numeric"))
}
),
private = list(
.train_imputer = function(feature, type, context) {
NULL
},
    .impute = function(feature, type, model, context) {
      # the histogram is estimated at impute time from the observed values
      model <- graphics::hist(feature, plot = FALSE)[c("counts", "breaks")]
      if (is.atomic(model)) {
        return(super$.impute(feature, type, model, context))
      }
      # draw a bin with probability proportional to its count, then sample
      # uniformly within that bin
      which.bins = sample.int(length(model$counts), sum(is.na(feature)), replace = TRUE, prob = model$counts)
      sampled = stats::runif(length(which.bins), model$breaks[which.bins], model$breaks[which.bins + 1L])
      if (type == "integer") {
        sampled = as.integer(round(sampled))
      }
      feature[is.na(feature)] = sampled
      feature
}
)
)
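# A minimal usage sketch (assumes mlr3 and mlr3pipelines are attached):
# po_imp <- PipeOpHist_B$new()
# task <- tsk("pima")                        # task with missing values
# imputed_task <- po_imp$train(list(task))[[1]]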
|
`denoiselabels` <-
function(d, eigvec, kpc, regression = TRUE)
{
	## validate arguments
	if(length(d) != 1 || d < 0)
		stop("d must be a single value: the dimension to project the labels to, or 0 if the labels should be projected to each dimension and a matrix with all projections returned")
	if(!is.matrix(eigvec))
		stop("eigvec must be a matrix containing the eigenvectors of the kernel matrix")
	if(nrow(eigvec) != ncol(eigvec))
		stop("eigvec must be a square matrix containing the eigenvectors of the kernel matrix")
	if(!is.matrix(kpc))
		stop("kpc must be a column vector containing the kernel pca coefficients")
	if(nrow(kpc) != nrow(eigvec) || ncol(kpc) != 1)
		stop("kpc must be a column vector of the same dimension as eigvec containing the kernel pca coefficients")

	n <- nrow(eigvec)
	if(d != 0)
	{
		## project the labels onto the first d kernel principal components
		yh <- eigvec[, 1:d, drop = FALSE] %*% kpc[1:d, , drop = FALSE]
	}
	else
	{
		## column i holds the projection onto the first i components
		yh <- matrix(0, n, n)
		yh[, 1] <- eigvec[, 1, drop = FALSE] * kpc[1]
		for(i in 2:n)
			yh[, i] <- yh[, i - 1, drop = FALSE] + eigvec[, i, drop = FALSE] * kpc[i]
	}
	if(!regression)
	{
		## for classification, threshold the denoised labels to {-1, 1}
		yh <- sign(yh)
		yh[yh == 0] <- 1
	}
	return(yh)
}
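# A minimal usage sketch with a toy kernel matrix:
# K <- tcrossprod(matrix(rnorm(25), 5))      # 5 x 5 positive semi-definite kernel
# eig <- eigen(K)
# y <- matrix(c(1, -1, 1, -1, 1), ncol = 1)  # noisy labels
# kpc <- crossprod(eig$vectors, y)           # kernel pca coefficients of y
# denoiselabels(2, eig$vectors, kpc, regression = FALSE)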
|