writeDefaultNamespace <-
function(filename, desc = file.path(dirname(filename), "DESCRIPTION"))
{
pkgInfo <- .split_description(.read_description(desc))
pkgs <- unique(c(names(pkgInfo$Imports), names(pkgInfo$Depends)))
pkgs <- pkgs[pkgs != "base"]
writeLines(c("
"
"",
"
"exportPattern(\".\")",
if (length(pkgs))
c("",
"
"import(",
paste0(" ", pkgs, collapse = ",\n"),
")")),
filename)
}
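## Hedged usage sketch for writeDefaultNamespace() above. The function leans on
## the tools-internal readers .read_description()/.split_description(), so outside
## the tools sources the installed copy (tools:::writeDefaultNamespace) is the
## simplest way to exercise it; the DESCRIPTION of an installed base package is
## borrowed and the generated NAMESPACE goes to a temporary file.
ns_file <- tempfile("NAMESPACE")
tools:::writeDefaultNamespace(ns_file,
                              desc = system.file("DESCRIPTION", package = "stats"))
cat(readLines(ns_file), sep = "\n")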
get_exclude_patterns <- function()
c("^\\.Rbuildignore$",
"(^|/)\\.DS_Store$",
"^\\.(RData|Rhistory)$",
"~$", "\\.bak$", "\\.swp$",
"(^|/)\\.
"^TITLE$", "^data/00Index$",
"^inst/doc/00Index\\.dcf$",
"^config\\.(cache|log|status)$",
"(^|/)autom4te\\.cache$",
"^src/.*\\.d$", "^src/Makedeps$",
"^src/so_locations$",
"^inst/doc/Rplots\\.(ps|pdf)$"
)
inRbuildignore <- function(files, pkgdir) {
exclude <- rep.int(FALSE, length(files))
ignore <- get_exclude_patterns()
ignore_file <- file.path(pkgdir, ".Rbuildignore")
if (file.exists(ignore_file))
ignore <- c(ignore, readLines(ignore_file, warn = FALSE))
for(e in ignore[nzchar(ignore)])
exclude <- exclude | grepl(e, files, perl = TRUE,
ignore.case = TRUE)
exclude
}
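## Hedged usage sketch for get_exclude_patterns()/inRbuildignore() above: the
## file paths are invented, and tempdir() stands in for a package directory
## without an .Rbuildignore, so only the built-in exclude patterns apply.
inRbuildignore(c(".Rbuildignore", "R/utils.R", "src/init.d",
                 "notes.bak", "inst/doc/Rplots.pdf"),
               pkgdir = tempdir())
## expected: TRUE FALSE TRUE TRUE TRUE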
.build_packages <- function(args = NULL, no.q = interactive())
{
WINDOWS <- .Platform$OS.type == "windows"
Sys.umask("022")
writeLinesNL <- function(text, file)
{
con <- file(file, "wb")
on.exit(close(con))
writeLines(text, con)
}
system_with_capture <- function (command, args) {
outfile <- tempfile("xshell")
on.exit(unlink(outfile))
status <- system2(command, args, stdout=outfile, stderr=outfile)
list(status = status, stdout = readLines(outfile, warn = FALSE))
}
Ssystem <- function(command, args = character(), ...)
system2(command, args, stdout = NULL, stderr = NULL, ...)
do_exit <-
if(no.q)
function(status) (if(status) stop else message)(
".build_packages() exit status ", status)
else
function(status) q("no", status = status, runLast = FALSE)
parse_description_field <-
function(desc, field, default = TRUE, logical = TRUE)
str_parse(desc[field], default=default, logical=logical)
Usage <- function() {
cat("Usage: R CMD build [options] pkgdirs",
"",
"Build R packages from package sources in the directories specified by",
sQuote("pkgdirs"),
"",
"Options:",
" -h, --help print short help message and exit",
" -v, --version print version info and exit",
"",
" --force force removal of INDEX file",
" --keep-empty-dirs do not remove empty dirs",
" --no-build-vignettes do not (re)build package vignettes",
" --no-manual do not build the PDF manual even if \\Sexprs are present",
" --resave-data= re-save data files as compactly as possible:",
' "no", "best", "gzip" (default)',
" --resave-data same as --resave-data=best",
" --no-resave-data same as --resave-data=no",
" --compact-vignettes= try to compact PDF files under inst/doc:",
' "no" (default), "qpdf", "gs", "gs+qpdf", "both"',
" --compact-vignettes same as --compact-vignettes=qpdf",
" --compression= type of compression to be used on tarball:",
' "gzip" (default), "none", "bzip2", "xz"',
" --md5 add MD5 sums",
" --log log to file 'pkg-00build.log' when processing ",
" the pkgdir with basename 'pkg'",
"",
"Report bugs at <https://bugs.R-project.org>.", sep = "\n")
}
add_build_stamp_to_description_file <- function(ldpath, pkgdir)
{
db <- .read_description(ldpath)
if(dir.exists(file.path(pkgdir, "src")))
db["NeedsCompilation"] <- "yes"
else if(is.na(db["NeedsCompilation"]))
db["NeedsCompilation"] <- "no"
user <- Sys.info()["user"]
if(user == "unknown") user <- Sys.getenv("LOGNAME")
db["Packaged"] <-
sprintf("%s; %s",
format(Sys.time(), "%Y-%m-%d %H:%M:%S",
tz = 'UTC', usetz = TRUE),
user)
.write_description(db, ldpath)
}
add_expanded_R_fields_to_description_file <- function(ldpath) {
db <- .read_description(ldpath)
fields <- .expand_package_description_db_R_fields(db)
if(length(fields))
.write_description(c(db, fields), ldpath)
}
temp_install_pkg <- function(pkgdir, libdir) {
dir.create(libdir, mode = "0755", showWarnings = FALSE)
if(nzchar(install_dependencies) &&
all((repos <- getOption("repos")) != "@CRAN@")) {
available <- utils::available.packages(repos = repos)
db <- .read_description(file.path(pkgdir, "DESCRIPTION"))
package <- db["Package"]
available <-
rbind(available[available[, "Package"] != package, ,
drop = FALSE],
db[colnames(available)])
depends <- package_dependencies(package, available,
which = install_dependencies)
depends <- setdiff(unlist(depends),
utils::installed.packages())
if(length(depends)) {
message(paste(strwrap(sprintf("installing dependencies %s",
paste(sQuote(sort(depends)),
collapse = ", ")),
exdent = 2L),
collapse = "\n"), domain = NA)
utils::install.packages(depends,
libdir,
available =
available[-nrow(available), ,
drop = FALSE],
dependencies = NA)
}
}
if (WINDOWS) {
cmd <- file.path(R.home("bin"), "Rcmd.exe")
args <- c("INSTALL -l", shQuote(libdir),
"--no-multiarch", shQuote(pkgdir))
} else {
cmd <- file.path(R.home("bin"), "R")
args <- c("CMD", "INSTALL -l", shQuote(libdir),
"--no-multiarch", shQuote(pkgdir))
}
res <- system_with_capture(cmd, args)
if (res$status) {
printLog(Log, " -----------------------------------\n")
printLog0(Log, paste(c(res$stdout, ""), collapse = "\n"))
printLog(Log, " -----------------------------------\n")
unlink(libdir, recursive = TRUE)
printLog(Log, "ERROR: package installation failed\n")
do_exit(1L)
}
Sys.setenv("R_BUILD_TEMPLIB" = libdir)
TRUE
}
prepare_pkg <- function(pkgdir, desc, Log)
{
owd <- setwd(pkgdir); on.exit(setwd(owd))
checkingLog(Log, "DESCRIPTION meta-information")
res <- try(.check_package_description("DESCRIPTION"))
if (inherits(res, "try-error")) {
resultLog(Log, "ERROR")
messageLog(Log, "running '.check_package_description' failed")
} else {
if (any(lengths(res))) {
resultLog(Log, "ERROR")
print(res)
do_exit(1L)
} else resultLog(Log, "OK")
}
cleanup_pkg(pkgdir, Log)
libdir <- tempfile("Rinst")
ensure_installed <- function()
if (!pkgInstalled) {
messageLog(Log,
"installing the package to build vignettes")
pkgInstalled <<- temp_install_pkg(pkgdir, libdir)
}
pkgInstalled <- build_Rd_db(pkgdir, libdir, desc)
if (file.exists("INDEX")) update_Rd_index("INDEX", "man", Log)
doc_dir <- file.path("inst", "doc")
if ("makefile" %in% dir(doc_dir)) {
messageLog(Log, "renaming 'inst/doc/makefile' to 'inst/doc/Makefile'")
file.rename(file.path(doc_dir, "makefile"),
file.path(doc_dir, "Makefile"))
}
if (vignettes &&
parse_description_field(desc, "BuildVignettes", TRUE)) {
vignette_index_path <- file.path("build", "vignette.rds")
if(file.exists(vignette_index_path))
unlink(vignette_index_path)
loadVignetteBuilder(pkgdir, TRUE)
vigns <- pkgVignettes(dir = '.', check = TRUE)
if (!is.null(vigns) && length(vigns$docs)) {
ensure_installed()
creatingLog(Log, "vignettes")
R_LIBS <- Sys.getenv("R_LIBS", NA_character_)
if (!is.na(R_LIBS)) {
on.exit(Sys.setenv(R_LIBS = R_LIBS), add = TRUE)
Sys.setenv(R_LIBS = path_and_libPath(libdir, R_LIBS))
} else {
on.exit(Sys.unsetenv("R_LIBS"), add = TRUE)
Sys.setenv(R_LIBS = libdir)
}
cmd <- file.path(R.home("bin"), "Rscript")
args <- c("--vanilla",
"--default-packages=",
"-e", shQuote("tools::buildVignettes(dir = '.', tangle = TRUE)"))
oPATH <- Sys.getenv("PATH")
Sys.setenv(PATH = paste(R.home("bin"), oPATH,
sep = .Platform$path.sep))
res <- system_with_capture(cmd, args)
Sys.setenv(PATH = oPATH)
if (res$status) {
resultLog(Log, "ERROR")
printLog0(Log, paste(c(res$stdout, ""), collapse = "\n"))
do_exit(1L)
} else {
vigns <- pkgVignettes(dir = '.', output = TRUE, source = TRUE)
stopifnot(!is.null(vigns))
resultLog(Log, "OK")
}
if (basename(vigns$dir) == "vignettes") {
dir.create(doc_dir, recursive = TRUE, showWarnings = FALSE)
tocopy <- unique(c(vigns$docs, vigns$outputs,
unlist(vigns$sources)))
copied <- file.copy(tocopy, doc_dir, copy.date = TRUE)
if (!all(copied)) {
warning(sprintf(ngettext(sum(!copied),
"%s file\n", "%s files\n"),
sQuote("inst/doc")),
strwrap(paste(sQuote(basename(tocopy[!copied])), collapse=", "),
indent = 4, exdent = 2),
"\n ignored as vignettes have been rebuilt.",
"\n Run R CMD build with --no-build-vignettes to prevent rebuilding.",
call. = FALSE)
file.copy(tocopy[!copied], doc_dir, overwrite = TRUE, copy.date = TRUE)
}
unlink(c(vigns$outputs, unlist(vigns$sources)))
extras_file <- file.path("vignettes", ".install_extras")
if (file.exists(extras_file)) {
extras <- readLines(extras_file, warn = FALSE)
if(length(extras)) {
allfiles <- dir("vignettes", all.files = TRUE,
full.names = TRUE, recursive = TRUE,
include.dirs = TRUE)
inst <- rep.int(FALSE, length(allfiles))
for (e in extras)
inst <- inst | grepl(e, allfiles, perl = TRUE,
ignore.case = TRUE)
file.copy(allfiles[inst], doc_dir, recursive = TRUE, copy.date = TRUE)
}
}
}
vignetteIndex <- .build_vignette_index(vigns)
if(NROW(vignetteIndex) > 0L) {
sources <- vignetteIndex$R
for(i in seq_along(sources)) {
file <- file.path(doc_dir, sources[i])
if (!file_test("-f", file)) next
bfr <- readLines(file, warn = FALSE)
if(all(grepl("(^###)|(^[[:space:]]*$)", bfr))) { # drop tangled R files containing only comments or blank lines
unlink(file)
vignetteIndex$R[i] <- ""
}
}
}
dir.create("build", showWarnings = FALSE)
saveRDS(vignetteIndex,
file = vignette_index_path,
version = 2L)
}
} else {
fv <- file.path("build", "vignette.rds")
if(file.exists(fv)) {
checkingLog(Log, "vignette meta-information")
db <- readRDS(fv)
pdfs <- file.path("inst", "doc", db[nzchar(db$PDF), ]$PDF)
missing <- !file.exists(pdfs)
if(any(missing)) {
msg <- c("Output(s) listed in 'build/vignette.rds' but not in package:",
strwrap(sQuote(pdfs[missing]), indent = 2L, exdent = 2L),
"Run R CMD build without --no-build-vignettes to re-create")
errorLog(Log, paste(msg, collapse = "\n"))
do_exit(1L)
} else resultLog(Log, "OK")
}
}
if (compact_vignettes != "no" &&
length(pdfs <- dir(doc_dir, pattern = "[.]pdf", recursive = TRUE,
full.names = TRUE))) {
messageLog(Log, "compacting vignettes and other PDF files")
if(compact_vignettes %in% c("gs", "gs+qpdf", "both")) {
gs_cmd <- find_gs_cmd()
gs_quality <- "ebook"
} else {
gs_cmd <- ""
gs_quality <- "none"
}
qpdf <-
if(compact_vignettes %in% c("qpdf", "gs+qpdf", "both"))
Sys.which(Sys.getenv("R_QPDF", "qpdf")) else ""
res <- compactPDF(pdfs, qpdf = qpdf,
gs_cmd = gs_cmd, gs_quality = gs_quality)
res <- format(res, diff = 1e5)
if(length(res))
printLog0(Log, paste0(" ", format(res), collapse = "\n"), "\n")
}
if (pkgInstalled) {
unlink(libdir, recursive = TRUE)
cleanup_pkg(pkgdir, Log)
}
}
cleanup_pkg <- function(pkgdir, Log)
{
owd <- setwd(pkgdir); on.exit(setwd(owd))
pkgname <- basename(pkgdir)
if (dir.exists("src")) {
setwd("src")
messageLog(Log, "cleaning src")
if (WINDOWS) {
have_make <- nzchar(Sys.which(Sys.getenv("MAKE", "make")))
if (file.exists(fn <- "Makefile.ucrt") || file.exists(fn <- "Makefile.win")) {
if (have_make)
Ssystem(Sys.getenv("MAKE", "make"), paste0("-f ", fn, " clean"))
else warning("unable to run 'make clean' in 'src'",
domain = NA)
} else {
if (file.exists(fn <- "Makevars.ucrt") || file.exists(fn <- "Makevars.win")) {
if (have_make) {
makefiles <- paste("-f",
shQuote(file.path(R.home("share"), "make", "clean.mk")),
"-f", fn)
Ssystem(Sys.getenv("MAKE", "make"),
c(makefiles, "clean"))
} else warning("unable to run 'make clean' in 'src'",
domain = NA)
}
unlink(c(Sys.glob(c("*.o", "*.so", "*.dylib", "*.mod")),
paste0(pkgname, c(".a", ".dll", ".def")),
"symbols.rds"))
if (dir.exists(".libs")) unlink(".libs", recursive = TRUE)
if (dir.exists("_libs")) unlink("_libs", recursive = TRUE)
}
} else {
makefiles <- paste("-f",
shQuote(file.path(R.home("etc"),
Sys.getenv("R_ARCH"),
"Makeconf")))
if (file.exists("Makefile")) {
makefiles <- paste(makefiles, "-f", "Makefile")
Ssystem(Sys.getenv("MAKE", "make"), c(makefiles, "clean"))
} else {
if (file.exists("Makevars")) {
makefiles <- paste(makefiles, "-f",
shQuote(file.path(R.home("share"), "make", "clean.mk")),
"-f Makevars")
Ssystem(Sys.getenv("MAKE", "make"),
c(makefiles, "clean"))
}
unlink(c(Sys.glob(c("*.o", "*.so", "*.dylib", "*.mod")),
paste0(pkgname, c(".a", ".dll", ".def")),
"symbols.rds"))
if (dir.exists(".libs")) unlink(".libs", recursive = TRUE)
if (dir.exists("_libs")) unlink("_libs", recursive = TRUE)
}
}
}
setwd(owd)
if (WINDOWS) {
has_cleanup_ucrt <- file.exists("cleanup.ucrt")
if (has_cleanup_ucrt || file.exists("cleanup.win")) {
if (nzchar(Sys.which("sh.exe"))) {
Sys.setenv(R_PACKAGE_NAME = pkgname)
Sys.setenv(R_PACKAGE_DIR = pkgdir)
Sys.setenv(R_LIBRARY_DIR = dirname(pkgdir))
if (has_cleanup_ucrt) {
messageLog(Log, "running 'cleanup.ucrt'")
Ssystem("sh", "./cleanup.ucrt")
} else {
messageLog(Log, "running 'cleanup.win'")
Ssystem("sh", "./cleanup.win")
}
}
}
} else if (file_test("-x", "cleanup")) {
Sys.setenv(R_PACKAGE_NAME = pkgname)
Sys.setenv(R_PACKAGE_DIR = pkgdir)
Sys.setenv(R_LIBRARY_DIR = dirname(pkgdir))
messageLog(Log, "running 'cleanup'")
Ssystem("./cleanup")
}
revert_install_time_patches()
}
update_Rd_index <- function(oldindex, Rd_files, Log)
{
newindex <- tempfile()
res <- tryCatch(
Rdindex(Rd_files, newindex),
error = function(e) {
errorLog(Log, "computing Rd index failed:",
conditionMessage(e))
do_exit(1L)
})
checkingLog(Log, "whether ", sQuote(oldindex), " is up-to-date")
if (file.exists(oldindex)) {
ol <- readLines(oldindex, warn = FALSE)
nl <- readLines(newindex)
if (!identical(ol, nl)) {
resultLog(Log, "NO")
if (force) {
messageLog(Log, "removing ", sQuote(oldindex),
" as '--force' was given")
unlink(oldindex)
} else {
messageLog(Log, "use '--force' to remove ",
"the existing ", sQuote(oldindex))
unlink(newindex)
}
} else {
resultLog(Log, "OK")
unlink(newindex)
}
} else {
resultLog(Log, "NO")
messageLog(Log, "creating new ", sQuote(oldindex))
file.rename(newindex, oldindex)
}
}
build_Rd_db <- function(pkgdir, libdir, desc) {
build_partial_Rd_db_path <-
file.path("build", "partial.rdb")
if(file.exists(build_partial_Rd_db_path))
unlink(build_partial_Rd_db_path)
build_refman_path <-
file.path(pkgdir, "build", paste0(basename(pkgdir), ".pdf"))
if(file.exists(build_refman_path))
unlink(build_refman_path)
db <- .build_Rd_db(pkgdir, stages = NULL,
os = c("unix", "windows"), step = 1)
if (!length(db)) return(FALSE)
names(db) <- substring(names(db),
nchar(file.path(pkgdir, "man")) + 2L)
containsSexprs <-
which(sapply(db, function(Rd) getDynamicFlags(Rd)["\\Sexpr"]))
if (!length(containsSexprs)) return(FALSE)
messageLog(Log, "installing the package to process help pages")
dir.create(libdir, mode = "0755", showWarnings = FALSE)
savelib <- .libPaths()
.libPaths(c(libdir, savelib))
on.exit(.libPaths(savelib), add = TRUE)
temp_install_pkg(pkgdir, libdir)
containsBuildSexprs <-
which(sapply(db, function(Rd) getDynamicFlags(Rd)["build"]))
if (length(containsBuildSexprs)) {
for (i in containsBuildSexprs)
db[[i]] <- prepare_Rd(db[[i]], stages = "build",
stage2 = FALSE, stage3 = FALSE)
messageLog(Log, "saving partial Rd database")
partial <- db[containsBuildSexprs]
dir.create("build", showWarnings = FALSE)
saveRDS(partial, build_partial_Rd_db_path, version = 2L)
}
needRefman <- manual &&
parse_description_field(desc, "BuildManual", TRUE) &&
any(vapply(db,
function(Rd)
any(getDynamicFlags(Rd)[c("install", "render")]),
NA))
if (needRefman) {
messageLog(Log, "building the PDF package manual")
dir.create("build", showWarnings = FALSE)
..Rd2pdf(c("--force", "--no-preview",
paste0("--output=", build_refman_path),
pkgdir), quit = FALSE)
}
return(TRUE)
}
fix_nonLF_in_files <- function(pkgname, dirPattern, Log)
{
sDir <- file.path(pkgname, c("src", "inst/include"))
files <- dir(sDir, pattern = dirPattern,
full.names = TRUE, recursive = TRUE)
for (ff in files) {
old_time <- file.mtime(ff)
lines <- readLines(ff, warn = FALSE)
writeLinesNL(lines, ff)
Sys.setFileTime(ff, old_time)
}
}
fix_nonLF_in_source_files <- function(pkgname, Log) {
fix_nonLF_in_files(pkgname, dirPattern = "\\.([cfh]|cc|cpp|hpp)$", Log)
}
fix_nonLF_in_make_files <- function(pkgname, Log) {
fix_nonLF_in_files(pkgname,
paste0("^(",
paste(c("Makefile", "Makefile.in", "Makefile.win", "Makefile.ucrt",
"Makevars", "Makevars.in", "Makevars.win", "Makevars.ucrt"),
collapse = "|"), ")$"), Log)
makes <- dir(pkgname, pattern = "^Makefile$",
full.names = TRUE, recursive = TRUE)
for (ff in makes) {
lines <- readLines(ff, warn = FALSE)
writeLinesNL(lines, ff)
}
}
fix_nonLF_in_config_files <- function(pkgname, Log) {
files <- dir(pkgname, pattern = "^(configure|cleanup)$",
full.names = TRUE, recursive = TRUE)
for (ff in files) {
lines <- readLines(ff, warn = FALSE)
writeLinesNL(lines, ff)
}
}
find_empty_dirs <- function(d)
{
files <- dir(d, all.files = TRUE, full.names = TRUE)
for (dd in files[dir.exists(files)]) {
if (grepl("/\\.+$", dd)) next
find_empty_dirs(dd)
}
keep_empty1 <- parse_description_field(desc, "BuildKeepEmpty",
keep_empty)
if (!keep_empty1)
files <- dir(d, all.files = TRUE, full.names = TRUE)
if (length(files) <= 2L) {
if (keep_empty1) {
printLog(Log, "WARNING: directory ", sQuote(d), " is empty\n")
} else {
unlink(d, recursive = TRUE)
printLog(Log, "Removed empty directory ", sQuote(d), "\n")
}
}
}
fixup_R_dep <- function(pkgname, ver = "2.10")
{
desc <- .read_description(file.path(pkgname, "DESCRIPTION"))
Rdeps <- .split_description(desc)$Rdepends2
for(dep in Rdeps) {
if(dep$op != '>=') next
if(dep$version >= package_version(ver)) return()
}
flatten <- function(x) {
if(length(x) == 3L)
paste0(x$name, " (", x$op, " ", x$version, ")")
else x[[1L]]
}
deps <- desc["Depends"]
desc["Depends"] <- if(!is.na(deps)) {
deps <- .split_dependencies(deps)
deps <- deps[names(deps) != "R"]
paste(c(sprintf("R (>= %s)", ver), sapply(deps, flatten)),
collapse = ", ")
} else sprintf("R (>= %s)", ver)
.write_description(desc, file.path(pkgname, "DESCRIPTION"))
printLog(Log,
" NB: this package now depends on R (>= ", ver, ")\n")
}
resave_data_rda <- function(pkgname, resave_data)
{
if (resave_data == "no") return()
ddir <- file.path(pkgname, "data")
if(resave_data == "best") {
files <- Sys.glob(c(file.path(ddir, "*.rda"),
file.path(ddir, "*.RData"),
file.path(pkgname, "R", "sysdata.rda")))
messageLog(Log, "re-saving image files")
resaveRdaFiles(files)
rdas <- checkRdaFiles(files)
if(any(rdas$compress %in% c("bzip2", "xz")))
fixup_R_dep(pkgname, "2.10")
} else {
rdas <- checkRdaFiles(Sys.glob(c(file.path(ddir, "*.rda"),
file.path(ddir, "*.RData"))))
if(nrow(rdas)) {
update <- with(rdas, ASCII | compress == "none" | version < 2)
if(any(update)) {
messageLog(Log, "re-saving image files")
resaveRdaFiles(row.names(rdas)[update], "gzip")
}
}
if(file.exists(f <- file.path(pkgname, "R", "sysdata.rda"))) {
rdas <- checkRdaFiles(f)
update <- with(rdas, ASCII | compress == "none" | version < 2)
if(any(update)) {
messageLog(Log, "re-saving sysdata.rda")
resaveRdaFiles(f, "gzip")
}
}
}
}
resave_data_others <- function(pkgname, resave_data)
{
if (resave_data == "no") return()
if(!dir.exists(ddir <- file.path(pkgname, "data")))
return()
ddir <- normalizePath(ddir)
dataFiles <- filtergrep("\\.(rda|RData)$",
list_files_with_type(ddir, "data"))
if (!length(dataFiles)) return()
resaved <- character()
on.exit(unlink(resaved))
Rs <- grep("\\.[Rr]$", dataFiles, value = TRUE)
if (length(Rs)) {
messageLog(Log, "re-saving .R files as .rda")
lapply(Rs, function(x){
envir <- new.env(hash = TRUE)
sys.source(x, chdir = TRUE, envir = envir)
save(list = ls(envir, all.names = TRUE),
file = sub("\\.[Rr]$", ".rda", x),
compress = TRUE, compression_level = 9,
envir = envir,
version = 2L)
resaved <<- c(resaved, x)
})
printLog(Log,
" NB: *.R converted to .rda: other files may need to be removed\n")
}
tabs <- grep("\\.(CSV|csv|TXT|tab|txt)$", dataFiles, value = TRUE)
if (length(tabs)) {
messageLog(Log, "re-saving tabular files")
if (resave_data == "gzip") {
lapply(tabs, function(nm) {
x <- readLines(nm, warn = FALSE)
con <- gzfile(paste0(nm, ".gz"), "wb")
writeLines(x, con)
close(con)
resaved <<- c(resaved, nm)
})
} else {
OK <- TRUE
lapply(tabs, function(nm) {
x <- readLines(nm, warn = FALSE)
nm3 <- paste(nm, c("gz", "bz2", "xz"), sep = ".")
con <- gzfile(nm3[1L], "wb", compression = 9L); writeLines(x, con); close(con)
con <- bzfile(nm3[2L], "wb", compression = 9L); writeLines(x, con); close(con)
con <- xzfile(nm3[3L], "wb", compression = 9L); writeLines(x, con); close(con)
sizes <- file.size(nm3) * c(0.9, 1, 1)
ind <- which.min(sizes)
if(ind > 1) OK <<- FALSE
resaved <<- c(resaved, nm, nm3[-ind])
})
if (!OK) fixup_R_dep(pkgname, "2.10")
}
}
}
force <- FALSE
vignettes <- TRUE
manual <- TRUE
with_md5 <- FALSE
with_log <- FALSE
pkgs <- character()
options(showErrorCalls = FALSE, warn = 1)
Renv <- Sys.getenv("R_BUILD_ENVIRON", unset = NA_character_)
if(!is.na(Renv)) {
if(nzchar(Renv) && file.exists(Renv)) readRenviron(Renv)
} else {
rarch <- .Platform$r_arch
if (nzchar(rarch) &&
file.exists(Renv <- paste0("~/.R/build.Renviron.", rarch)))
readRenviron(Renv)
else if (file.exists(Renv <- "~/.R/build.Renviron"))
readRenviron(Renv)
}
compact_vignettes <- Sys.getenv("_R_BUILD_COMPACT_VIGNETTES_", "no")
resave_data <- Sys.getenv("_R_BUILD_RESAVE_DATA_", "gzip")
keep_empty <-
config_val_to_logical(Sys.getenv("_R_BUILD_KEEP_EMPTY_DIRS_", "FALSE"))
install_dependencies <- Sys.getenv("_R_BUILD_INSTALL_DEPENDENCIES_")
if(nzchar(install_dependencies) &&
(install_dependencies %notin% c("strong", "most", "all")))
install_dependencies <-
if(config_val_to_logical(install_dependencies)) "most" else ""
if (is.null(args)) {
args <- commandArgs(TRUE)
args <- paste(args, collapse = " ")
args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
}
compression <- "gzip"
while(length(args)) {
a <- args[1L]
if (a %in% c("-h", "--help")) {
Usage()
do_exit(0L)
}
else if (a %in% c("-v", "--version")) {
cat("R add-on package builder: ",
R.version[["major"]], ".", R.version[["minor"]],
" (r", R.version[["svn rev"]], ")\n", sep = "")
cat("",
.R_copyright_msg(1997),
"This is free software; see the GNU General Public License version 2",
"or later for copying conditions. There is NO warranty.",
sep = "\n")
do_exit(0L)
} else if (a == "--force") {
force <- TRUE
} else if (a == "--keep-empty-dirs") {
keep_empty <- TRUE
} else if (a == "--no-build-vignettes") {
vignettes <- FALSE
} else if (a == "--no-vignettes") {
stop("'--no-vignettes' is defunct:\n use '--no-build-vignettes' instead",
call. = FALSE, domain = NA)
} else if (a == "--resave-data") {
resave_data <- "best"
} else if (a == "--no-resave-data") {
resave_data <- "no"
} else if (substr(a, 1, 14) == "--resave-data=") {
resave_data <- substr(a, 15, 1000)
} else if (a == "--no-manual") {
manual <- FALSE
} else if (substr(a, 1, 20) == "--compact-vignettes=") {
compact_vignettes <- substr(a, 21, 1000)
} else if (a == "--compact-vignettes") {
compact_vignettes <- "qpdf"
} else if (a == "--md5") {
with_md5 <- TRUE
} else if (a == "--log") {
with_log <- TRUE
} else if (substr(a, 1, 23) == "--install-dependencies=") {
install_dependencies <- substr(a, 24, 1000)
} else if (a == "--install-dependencies") {
install_dependencies <- "most"
} else if (substr(a, 1, 14) == "--compression=") {
compression <- match.arg(substr(a, 15, 1000),
c("none", "gzip", "bzip2", "xz"))
} else if (startsWith(a, "-")) {
message("Warning: unknown option ", sQuote(a))
} else pkgs <- c(pkgs, a)
args <- args[-1L]
}
if(compact_vignettes %notin% c("no", "qpdf", "gs", "gs+qpdf", "both")) {
warning(gettextf("invalid value for '--compact-vignettes', assuming %s",
"\"qpdf\""),
domain = NA)
compact_vignettes <-"qpdf"
}
Sys.unsetenv("R_DEFAULT_PACKAGES")
startdir <- getwd()
if (is.null(startdir))
stop("current working directory cannot be ascertained")
if (WINDOWS) {
rhome <- chartr("\\", "/", R.home())
Sys.setenv(R_HOME = rhome)
}
for(pkg in pkgs) {
pkg <- sub("/$", "", pkg)
Log <- if(with_log)
newLog(paste0(file.path(startdir, basename(pkg)),
"-00build.log"))
else
newLog()
setwd(startdir)
res <- tryCatch(setwd(pkg), error = function(e) {
errorLog(Log, "cannot change to directory ", sQuote(pkg))
do_exit(1L)
})
pkgdir <- getwd()
pkgname <- basename(pkgdir)
checkingLog(Log, "for file ", sQuote(file.path(pkg, "DESCRIPTION")))
f <- file.path(pkgdir, "DESCRIPTION")
if (file.exists(f)) {
desc <- try(.read_description(f))
if (inherits(desc, "try-error") || !length(desc)) {
resultLog(Log, "EXISTS but not correct format")
do_exit(1L)
}
resultLog(Log, "OK")
} else {
resultLog(Log, "NO")
do_exit(1L)
}
if(is.na(intname <- desc["Package"]) || !length(intname) ||
!nzchar(intname)) {
errorLog(Log, "invalid 'Package' field"); do_exit(1L)
}
setwd(dirname(pkgdir))
filename <- paste0(intname, "_", desc["Version"], ".tar")
filepath <- file.path(startdir, filename)
Tdir <- tempfile("Rbuild")
dir.create(Tdir, mode = "0755")
if (WINDOWS) {
if (!file.copy(pkgname, Tdir, recursive = TRUE, copy.date = TRUE)) {
errorLog(Log, "copying to build directory failed")
do_exit(1L)
}
} else {
ver <- suppressWarnings(system2("cp", "--version", stdout = TRUE,
stderr = FALSE))
GNU_cp <- any(grepl("GNU coreutils", ver))
cp_sw <- if(GNU_cp) "-LR --preserve=timestamps" else "-pLR"
if (system2("cp", c(cp_sw, shQuote(pkgname), shQuote(Tdir)))) {
errorLog(Log, "copying to build directory failed")
do_exit(1L)
}
}
setwd(Tdir)
if (pkgname != intname) {
if (!file.rename(pkgname, intname)) {
message(gettextf("Error: cannot rename directory to %s",
sQuote(intname)), domain = NA)
do_exit(1L)
}
pkgname <- intname
}
messageLog(Log, "preparing ", sQuote(pkgname), ":")
prepare_pkg(normalizePath(pkgname, "/"), desc, Log);
owd <- setwd(pkgname)
allfiles <- dir(".", all.files = TRUE, recursive = TRUE,
full.names = TRUE, include.dirs = TRUE)
allfiles <- substring(allfiles, 3L)
bases <- basename(allfiles)
exclude <- inRbuildignore(allfiles, pkgdir)
isdir <- dir.exists(allfiles)
exclude <- exclude | (isdir & (bases %in%
c("check", "chm", .vc_dir_names)))
exclude <- exclude | (isdir & grepl("([Oo]ld|\\.Rcheck)$", bases))
exclude <- exclude | bases %in% c("Read-and-delete-me", "GNUMakefile")
exclude <- exclude | startsWith(bases, "._")
exclude <- exclude | (isdir & grepl("^src.*/[.]deps$", allfiles))
exclude <- exclude | (allfiles == paste0("src/", pkgname, "_res.rc"))
exclude <- exclude | endsWith(allfiles, "inst/doc/.Rinstignore") |
endsWith(allfiles, "inst/doc/.build.timestamp") |
endsWith(allfiles, "vignettes/.Rinstignore")
exclude <- exclude | grepl("^.Rbuildindex[.]", allfiles)
exclude <- exclude | (bases %in% .hidden_file_exclusions)
exts <- "\\.(tar\\.gz|tar|tar\\.bz2|tar\\.xz|tgz|zip)"
exclude <- exclude | grepl(paste0("^", pkgname, "_[0-9.-]+", exts, "$"),
allfiles)
unlink(allfiles[exclude], recursive = TRUE, force = TRUE,
expand = FALSE)
setwd(owd)
res <- .check_package_subdirs(pkgname, TRUE)
if (any(lengths(res))) {
messageLog(Log, "excluding invalid files")
print(res)
}
setwd(Tdir)
if (!WINDOWS) .Call(C_dirchmod, pkgname, group.writable=FALSE)
add_build_stamp_to_description_file(file.path(pkgname, "DESCRIPTION"),
pkgdir)
add_expanded_R_fields_to_description_file(file.path(pkgname,
"DESCRIPTION"))
messageLog(Log,
"checking for LF line-endings in source and make files and shell scripts")
fix_nonLF_in_source_files(pkgname, Log)
fix_nonLF_in_make_files(pkgname, Log)
fix_nonLF_in_config_files(pkgname, Log)
messageLog(Log, "checking for empty or unneeded directories");
find_empty_dirs(pkgname)
for(dir in c("Meta", "R-ex", "chtml", "help", "html", "latex")) {
d <- file.path(pkgname, dir)
if (dir.exists(d)) {
msg <- paste("WARNING: Removing directory",
sQuote(d),
"which should only occur",
"in an installed package")
printLog(Log, paste(strwrap(msg, indent = 0L, exdent = 2L),
collapse = "\n"), "\n")
unlink(d, recursive = TRUE)
}
}
unlink(file.path(pkgname,
c("src-i386", "src-x64", "src-x86_64", "src-ppc")),
recursive = TRUE)
if(dir.exists(file.path(pkgname, "data")) ||
file_test("-f", file.path(pkgname, "R", "sysdata.rda"))) {
if(!str_parse_logic(desc["LazyData"], FALSE)) {
messageLog(Log,
"looking to see if a 'data/datalist' file should be added")
tryCatch(add_datalist(pkgname),
error = function(e)
printLog(Log, " unable to create a 'datalist' file: may need the package to be installed\n"))
}
resave_data1 <- parse_description_field(desc, "BuildResaveData",
resave_data, logical=FALSE)
resave_data_others(pkgname, resave_data1)
resave_data_rda(pkgname, resave_data1)
}
if (!dir.exists(file.path(pkgname, "data"))) {
desc <- file.path(pkgname, "DESCRIPTION")
db <- .read_description(desc)
ndb <- names(db)
omit <- character()
for (x in c("LazyData", "LazyDataCompression"))
if (x %in% ndb) omit <- c(omit, x)
if (length(omit)) {
printLog(Log,
sprintf("Omitted %s from DESCRIPTION\n",
paste(sQuote(omit), collapse = " and ")))
db <- db[!(names(db) %in% omit)]
.write_description(db, desc)
}
}
desc <- .read_description(file.path(pkgname, "DESCRIPTION"))
Rdeps <- .split_description(desc)$Rdepends2
hasDep350 <- FALSE
for(dep in Rdeps) {
if(dep$op != '>=') next
if(dep$version >= "3.5.0") hasDep350 <- TRUE
}
if (!hasDep350) {
allfiles <- dir(".", all.files = TRUE, recursive = TRUE,
full.names = TRUE)
allfiles <- substring(allfiles, 3L)
vers <- get_serialization_version(allfiles)
toonew <- names(vers[vers >= 3L])
if (length(toonew)) {
fixup_R_dep(pkgname, "3.5.0")
msg <- paste("WARNING: Added dependency on R >= 3.5.0 because",
"serialized objects in serialize/load version 3",
"cannot be read in older versions of R. File(s)",
"containing such objects:")
printLog(Log,
paste(c(strwrap(msg, indent = 2L, exdent = 2L),
paste0(" ", .pretty_format(sort(toonew)))),
collapse = "\n"),
"\n")
}
}
if(!file.exists(namespace <- file.path(pkgname, "NAMESPACE")) ) {
messageLog(Log, "creating default NAMESPACE file")
writeDefaultNamespace(namespace)
}
if(with_md5) {
messageLog(Log, "adding MD5 file")
.installMD5sums(pkgname)
} else {
unlink(file.path(pkgname, "MD5"))
}
ext <- switch(compression,
"none"="", "gzip"= ".gz", "bzip2" = ".bz2", "xz" = ".xz")
filename <- paste0(pkgname, "_", desc["Version"], ".tar", ext)
filepath <- file.path(startdir, filename)
messageLog(Log, "building ", sQuote(filename))
res <- utils::tar(filepath, pkgname, compression = compression,
compression_level = 9L,
tar = Sys.getenv("R_BUILD_TAR"),
extra_flags = NULL)
if (res) {
errorLog(Log, "packaging into tarball failed")
do_exit(1L)
}
message("")
setwd(startdir)
unlink(Tdir, recursive = TRUE)
closeLog(Log)
}
do_exit(0L)
} |
RcppKmomentEST = function(k,a,b,mu,Sigma,lambda,tau,nu)
{
p = length(mu)
tautil<-tau/sqrt(1+sum(lambda^2))
if(pt(tautil,nu) < pnorm(-37)){
Delta = sqrtm(Sigma)%*%lambda/sqrt(1+sum(lambda^2))
Gamma = Sigma - Delta%*%t(Delta)
rownames(Gamma) <- colnames(Gamma)
omega_tau = (nu+tautil^2)/(nu+1)
return(RcppKmomentT(k = k,a = a, b = b,mu = mu - tautil*Delta,Sigma = omega_tau*Gamma,nu=nu+1))
}
SS = sqrtm(Sigma)
varpsi = lambda/sqrt(1+sum(lambda^2))
Omega = cbind(rbind(Sigma,-t(varpsi)%*%SS),rbind(-SS%*%varpsi,1))
rownames(Omega) <- colnames(Omega)
return(RcppKmomentT(k = k,a = c(a,-10^7),b = c(b,tautil),mu = c(mu,0),Sigma = Omega,nu = nu)[,-(p+1)])
} |
map.bubble <-
function(shapedir= "WorkingDir",shapename,boundary.label,spe.vector=NULL,cex=0.8,col="lightblue",bubcex,bubpch=1,bublwd=2,bubcol="green"){
if(is.character(shapename)==FALSE | length(shapename)>1){
stop("shapename must be a character input of length 1")
}
if(shapedir == "WorkingDir"){
ogrListLayers(paste(shapename,".shp",sep=""))
shape <- readOGR(paste(shapename,".shp",sep=""), layer=shapename)
}else{
ogrListLayers(shapedir)
shape <- readOGR(dsn = shapedir, layer=shapename)
}
if(!is.null(spe.vector)){
if(is.null(boundary.label)){ stop("boundary.label must be specified") }
if(length(col)==1){
members <- rep(0,nrow(shape))
members[match(spe.vector,shape[[boundary.label]])]=1
mycol <- ifelse(members==1,col,"white")
plot(shape,col=mycol)
points(getSpPPolygonsLabptSlots(shape),col=bubcol,pch=bubpch,lwd=bublwd,cex=bubcex)
invisible(text(getSpPPolygonsLabptSlots(shape),labels=as.character(shape[[boundary.label]]), cex=cex))
}else{
mycol <- rep("white",nrow(shape))
mycol[match(spe.vector,shape[[boundary.label]])] <- col
plot(shape,col=mycol)
points(getSpPPolygonsLabptSlots(shape),col=bubcol,pch=bubpch,lwd=bublwd,cex=bubcex)
invisible(text(getSpPPolygonsLabptSlots(shape),labels=as.character(shape[[boundary.label]]), cex=cex))
}
}else{
if(length(col)>1) {stop("spe.vector must be specified when using different color levels") }
plot(shape)
points(getSpPPolygonsLabptSlots(shape),col=bubcol,pch=bubpch,lwd=bublwd,cex=bubcex)
if(!is.null(boundary.label)){
invisible(text(getSpPPolygonsLabptSlots(shape),labels=as.character(shape[[boundary.label]]), cex=cex))
}
}} |
rm_numdiff_discrete_differences <- function(ll0, ll1, ll2, h)
{
d1 <- ( ll1 - ll2 ) / ( 2 * h )
d2 <- ( ll1 + ll2 - 2*ll0 ) / h^2
res <- list(d1=d1, d2=d2)
return(res)
} |
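## Hedged check of rm_numdiff_discrete_differences() above: taking ll0, ll1, ll2
## as the function values at x, x + h and x - h, the returned central differences
## should approximate the analytic first and second derivatives. f = sin and
## x = 1 are arbitrary test choices, not part of the original package.
x <- 1; h <- 1e-4
res <- rm_numdiff_discrete_differences(ll0 = sin(x), ll1 = sin(x + h),
                                       ll2 = sin(x - h), h = h)
c(d1_error = res$d1 - cos(x), d2_error = res$d2 + sin(x))  # both near zero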
NULL
dDPH <- function(x, obj){
if (inherits(obj, 'disc_phase_type')) {
if (sum(x %% 1 > 0) > 0){
stop('x should only contain integers.')
}
e <- matrix(1, nrow = nrow(obj$subint_mat))
t <- e - obj$subint_mat %*% e
dens_vec <- c()
for(i in x){
if (i == 0) {
dens_vec <- c(dens_vec, obj$defect)
} else {
dens_vec <- c(dens_vec, obj$init_probs %*% (obj$subint_mat %^% (i-1))
%*% t)
}
}
return(dens_vec)
} else {
stop("Please provide an object of class 'disc_phase_type'.")
}
}
qDPH <- function(p, obj){
vec <- c()
inv <- function(y) uniroot(function(q) pDPH(q, obj)-y, c(0,400))$root[1]
if (inherits(obj, 'disc_phase_type')) {
for (i in p) {
vec <- c(vec, round(inv(i)))
}
} else {
stop("Please provide an object of class 'disc_phase_type'.")
}
return(vec)
}
pDPH <- function(q, obj){
if (inherits(obj, 'disc_phase_type')) {
e <- matrix(1, nrow = nrow(obj$subint_mat))
prob_vec <- c()
for(i in q){
prob_vec <- c(prob_vec, 1 - obj$init_probs %*% (obj$subint_mat %^% i)
%*% e)
}
return(prob_vec)
} else {
stop("Please provide an object of class 'disc_phase_type'.")
}
}
rDPH <- function(n, obj){
if (inherits(obj, 'disc_phase_type')) {
if (length(n) > 1){
n <- length(n)
}
subint_mat <- obj$subint_mat
init_probs <- c(obj$init_probs, obj$defect)
p <- nrow(subint_mat)
n_vec <- numeric(n)
int_mat <- cbind(subint_mat, 1 - rowSums(subint_mat))
for (i in 1:n) {
j <- sample(p + 1, 1, prob = init_probs)
while (j != (p + 1)) {
n_vec[i] <- n_vec[i] + 1
j <- sample(p + 1, 1, prob = int_mat[j,])
}
}
return(n_vec)
} else {
stop("Please provide an object of class 'disc_phase_type'.")
}
}
rFullDPH <- function(obj){
if (!inherits(obj, 'disc_phase_type')){
stop("Please provide an object of class 'disc_phase_type'.")
}
init_probs <- obj$init_probs
n <- length(init_probs)
subint_mat <- obj$subint_mat
exit_rate <- 1 - t(t(rowSums(subint_mat)))
int_mat <- cbind(subint_mat, exit_rate)
states <- 1:(n+1)
curstate <- sample(1:n, 1, prob = init_probs)
states <- curstate
times <- NULL
curtime <- 1
while(curstate <= n){
curstate <- sample(1:(n+1), 1, prob = int_mat[curstate,])
if (curstate == states[length(states)]){
curtime <- curtime + 1
} else {
times <- c(times, curtime)
states <- c(states, curstate)
curtime <- 1
}
}
return(data.frame(state = states[-length(states)], time = times))
} |
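## Hedged usage sketch for the discrete phase-type helpers above (dDPH, pDPH,
## qDPH, rDPH, rFullDPH). A 'disc_phase_type' object is assembled by hand with
## the field names the code expects (subint_mat, init_probs, defect); the
## matrix-power operator %^% is assumed to come from the expm package.
library(expm)
obj <- structure(list(subint_mat = matrix(c(0.4, 0.2,
                                            0.1, 0.5), 2, 2, byrow = TRUE),
                      init_probs = matrix(c(0.7, 0.3), nrow = 1),
                      defect     = 0),
                 class = "disc_phase_type")
dDPH(1:5, obj)             # pointwise probabilities P(X = 1), ..., P(X = 5)
pDPH(5, obj)               # cumulative probability P(X <= 5)
set.seed(1); rDPH(3, obj)  # three random draws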
companion <- function(object, ...) {UseMethod("companion", object)}
companion.default <- function(object, ...) {
stop("No methods for class ",
paste0(class(object), collapse = " / "), " found.")
}
companion.bvar <- function(
object,
type = c("quantile", "mean"),
conf_bands = 0.5,
...) {
type <- match.arg(type)
K <- object[["meta"]][["K"]]
M <- object[["meta"]][["M"]]
lags <- object[["meta"]][["lags"]]
vars <- name_deps(object[["variables"]], M = M)
vars_expl <- name_expl(vars, M = M, lags = lags)[-1]
vars_dep <- c(vars, if(lags > 1) {rep("lag", M * (lags - 1))})
if(type == "quantile") {
quantiles <- quantile_check(conf_bands)
coefs <- apply(object[["beta"]], c(2, 3), quantile, quantiles)
} else {
quantiles <- 0.5
coefs <- apply(object[["beta"]], c(2, 3), mean)
}
if(length(quantiles) == 1) {
comp <- get_beta_comp(coefs, K, M, lags)
dimnames(comp) <- list(vars_dep, vars_expl)
} else {
comp <- array(NA, c(length(quantiles), K - 1, K - 1))
for(i in 1:length(quantiles)) {
comp[i, , ] <- get_beta_comp(coefs[i, , ], K, M, lags)
}
dimnames(comp)[[1]] <- dimnames(coefs)[[1]]
dimnames(comp)[[2]] <- vars_dep
dimnames(comp)[[3]] <- vars_expl
}
class(comp) <- append("bvar_comp", class(comp))
return(comp)
}
print.bvar_comp <- function(x, digits = 3L, complete = FALSE, ...) {
.print_coefs(x, digits, type = "companion", complete = complete, ...)
return(invisible(x))
} |
`SHOWTOMO` <-
function(MOD, colmap=topo.colors(100), zlim=NULL, MAP=NULL, I=1, J=2, bkgr="white" ,
linelty=1, linelwd=1, ptpch=".", ptcex=1 )
{
if(missing(colmap)) { colmap=tomo.colors(100) }
if(missing(MAP)) { MAP=NULL }
if(missing(I)) { I=1 }
if(missing(J)) { J=length(MOD$MOD) }
if(missing(zlim)) { zlim=NULL }
if(missing(bkgr)) { bkgr="white" }
opar = par(no.readonly = TRUE)
NTOT = (J-I+1)
KROW = floor(NTOT/5)
if(KROW>=1)
{
par(mfrow=c(KROW,5),mai=c(0, 0, 0,0))
}
else
{
par(mfrow=c(1, NTOT),mai=c(0, 0, 0,0))
}
KMAX = min(c(J, 25))
for( i in I:KMAX)
{
pltomo(MOD$x,MOD$y,MOD$MOD,i, colmap=colmap, zlim=zlim, bkgr= bkgr )
text(MOD$x[1],MOD$y[1], labels=paste(sep=" : ", paste(sep="", "LAY=", i) , paste(sep="", "Z=",MOD$D[i])), adj=c(0,0) ,xpd=TRUE, font=2)
box()
if(!is.null(MAP)) GEOmap::plotGEOmapXY(MAP, PROJ=MAP$PROJ, add=TRUE, xpd=FALSE, linelty=linelty, linelwd=linelwd, ptpch=ptpch, ptcex=ptcex )
}
newpar= par(no.readonly = TRUE)
par(opar)
invisible(newpar)
} |
EEB.conRej <-
function(beta,nu,delta=0,S=1,alpha=0.05,tol=1e-4,max.itr=5000)
{
tv<-qt(1-alpha,df=nu)
if(delta==0)
{
return(S*(qt(1-(alpha*(1-beta)/2),df=nu)+tv))
}else
{
a<-S*(tv+qt(1-alpha/2,df=nu))
b<-100
s<-0
while(s<max.itr)
{
c<-(a+b)/2
diff<-(b-a)/2
g<-pB.conRej(c,nu=nu,delta=delta,S=S,alpha=alpha)-beta
if(abs(g)<tol|diff<tol)
{
break
}else
{
if(g<0)
{
a<-c
}else
{
b<-c
}
s<-s+1
}
}
return(c)
}
} |
dfgps <-
function(X,y,penalty="enet", ex_para=c(0), STEP=10000, STEP.max=100000, DFtype="MODIFIED", p.max=300){
candidate <- c("enet","genet","alasso")
if(sum(candidate == penalty) != 1) stop('penalty must equal "enet", "genet" or "alasso".')
if(mode(X)!="numeric") stop(" X must be numeric.")
if(!is.matrix(X)) stop(" X must be a matrix.")
if(mode(y)!="numeric") stop(" y must be numeric.")
if (!is.vector(y)) stop("y must be a vector.")
if (nrow(X)!=length(y)) stop("The number of samples must not be different between X and y.")
if (sum(complete.cases(X)==FALSE)>0) stop("X must be complete data.")
if (sum(complete.cases(y)==FALSE)>0) stop("y must be complete data.")
penalty_int <- which(candidate == penalty)
penalty_int <- as.integer(penalty_int - 1)
if((penalty_int==0 || penalty_int==1) && length(ex_para)!=1) stop('"ex_para" must be a scalar.')
if(!is.numeric(ex_para)) stop('"ex_para" must be a numeric.')
if(penalty_int==0 && (ex_para < 0 || ex_para >=1)) stop('"ex_para" must be in [0,1).')
if(penalty_int==1 && (ex_para <= 0 || ex_para >=1)) stop('"ex_para" must be in (0,1).')
if(penalty_int==2 && length(ex_para)!=2) stop("ex_para must be a 2-dimensional vector.")
if(penalty_int==2 && ex_para[1]<0 ) stop("ex_para[1] must be non-negative.")
if(penalty_int==2 && ex_para[2]<0 ) stop("ex_para[2] must be non-negative.")
if(mode(STEP)!="numeric") stop('"STEP" must be numeric.')
if(STEP < 500) stop('"STEP" must be greater than or equal to 500.')
if(STEP >= 1e+8) stop('"STEP" must be less than 1e+8.')
candidate_DFtype <- c("NAIVE","MODIFIED")
if(sum(candidate_DFtype == DFtype) != 1) stop('DFtype must be "MODIFIED" or "NAIVE".')
if(mode(STEP)!="numeric") stop('"STEP" must be numeric.')
if(p.max < 1) stop('"p.max" must be a positive integer.')
if(p.max >= 10000) stop('"p.max" must be less than 10000.')
if(mode(STEP.max)!="numeric") stop('"STEP.max" must be numeric.')
if(STEP.max< 500) stop('"STEP.max" must be greater than or equal to 500.')
if(STEP.max >= 1e+8) stop('"STEP.max" must be less than 1e+8.')
if(penalty_int==2){
if(ncol(X) < nrow(X)) PLS <- solve(t(X)%*%X + ex_para[2]*diag(ncol(X)))%*%t(X)%*%y
if(ncol(X) >= nrow(X)) PLS <- t(X)%*%solve((X)%*%t(X) + ex_para[2]*diag(nrow(X)))%*%y
weight_vec <- (abs(PLS))^(-ex_para[1])
}else{
weight_vec <- rep(1,ncol(X))
}
meanX <- apply(X,2,mean)
meanX_mat <- sweep(X, 2, meanX)
standardize_vec <- 1 / sqrt(apply(meanX_mat^2,2,sum))
standardize_vec[standardize_vec==Inf] <- 0
standardize_mat <- matrix(rep(standardize_vec,nrow(X)),nrow(X),ncol(X),byrow=T)
meany <- mean(y)
X0 <- X
X <- meanX_mat * standardize_mat
y0 <- y
y <- y-meany
if( nrow(X) <= ncol(X)){
beta_OLS <- t(X)%*%solve(X%*%t(X) + 0.001*diag(nrow(X)))%*%y
}else if(det(t(X)%*%X) < 1e-3){
beta_OLS <- solve(t(X)%*%X + 0.001*diag(ncol(X))) %*%t(X)%*%y
}else{
beta_OLS <- solve(t(X)%*%X)%*%t(X)%*%y
}
delta_t <- sum(abs(beta_OLS)) / STEP
STEP.max=as.integer(STEP.max)
p.max=as.integer(p.max)
N <- length(y)
gps_C=.Call("gps", X,y,delta_t,penalty_int,ex_para, STEP.max,p.max,weight_vec,standardize_vec)
betagps_matrix <- gps_C[1][[1]]
STEP_adj <- gps_C[2][[1]]
RSS <- gps_C[3][[1]]
increment_covpenalty_vec_power <- gps_C[4][[1]]
selected_variable_index_vec <- gps_C[5][[1]]
betahat_index_vec_adj <- gps_C[6][[1]]
tuning <- gps_C[7][[1]]
tuning_stand <- gps_C[8][[1]]
selected_variable_index_vec <- selected_variable_index_vec[selected_variable_index_vec>0]
betagps_matrix <- betagps_matrix[1:STEP_adj]
RSS <- RSS[1:STEP_adj]
increment_covpenalty_vec_power <- increment_covpenalty_vec_power[1:STEP_adj]
betahat_index_vec_adj <- betahat_index_vec_adj[1:STEP_adj]
tuning <- tuning[1:STEP_adj]
tuning_stand <- tuning_stand[1:STEP_adj]
ex_X_selected = X[,selected_variable_index_vec]
betagps_matrix=as.integer(betagps_matrix)
STEP_adj2=as.integer(STEP_adj-1)
STEP_adj=as.integer(STEP_adj)
betahat_index_vec_adj = as.integer(betahat_index_vec_adj)
selected_variable_index_vec = as.integer(selected_variable_index_vec)
increment_covpenalty_vec0 <- (1-2*delta_t/N)^ increment_covpenalty_vec_power
increment_covpenalty_vec1 <- 1-(1-2*delta_t/N)^ increment_covpenalty_vec_power
t0 <- log( 1-2*delta_t/N*increment_covpenalty_vec_power ) / log(1-2*delta_t/N)
increment_covpenalty_vec2 <- 1-(1-2*delta_t/N)^ t0
increment_covpenalty_vec3 <- 2*delta_t/N * increment_covpenalty_vec_power
if(DFtype=="NAIVE"){
dfgps_vec=.Call("DFNAIVE2", ex_X_selected,y,betahat_index_vec_adj,STEP_adj2,increment_covpenalty_vec3)
if(sum( abs(dfgps_vec)>1e+10 | is.na(dfgps_vec) | dfgps_vec<0 ) > 1 ) stop("DF is not correct")
}else if(DFtype=="MODIFIED"){
qr_X <- qr(ex_X_selected)
qr_X_R <- qr.R(qr_X)
qr_X_R[is.nan(qr_X_R)] <- 0
dfgps_vec=.Call("DFMODIFIED2", qr_X_R, y, betahat_index_vec_adj, STEP_adj2, increment_covpenalty_vec3, selected_variable_index_vec)
if(sum( abs(dfgps_vec)>1e+10 | is.na(dfgps_vec) | dfgps_vec<0 ) > 1 ) stop("DF is not correct")
}
p<-ncol(X)
N<-nrow(X)
if(DFtype=="NAIVE" || DFtype=="MODIFIED" || DFtype=="MODIFIED2"){
ans <- list(N=N,p=p,delta_t=delta_t,coefficient_index=betagps_matrix,df=dfgps_vec,STEP_adj=STEP_adj2,RSS=RSS,tuning=tuning,tuning_stand=tuning_stand,X=X0,y=y0,ex_para=ex_para,beta_OLS=beta_OLS,Xstand=X,ystand=y)
}else{
ans <- list(N=N,p=p,delta_t=delta_t,coefficient_index=betagps_matrix,STEP_adj=STEP_adj2,RSS=RSS,tuning=tuning,tuning_stand=tuning_stand,X=X0,y=y0,ex_para=ex_para,beta_OLS=beta_OLS,Xstand=X,ystand=y)
}
class(ans) <- "dfgps"
return(invisible(ans))
} |
model{
for (i in 1:n) {
weight[i] ~ dnorm(mu[i], tau.within)
mu[i] <- b0 + b1[breed[i]]
}
for (j in 2:n.breeds) {
b1[j] ~ dnorm(0, tau.between)
}
b0 ~ dnorm(0, 1e-06)
b1[1] <- 0
tau.within <- sd.within^-0.5
tau.between <- sd.between^-0.5
sd.within ~ dunif(0, 10)
sd.between ~ dunif(0, 10)
} |
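## Hedged sketch (not from the original source) of fitting the JAGS model above
## from R with rjags: the data values are invented, and the model block is
## assumed to have been saved to a file named 'breed_weight.jags'.
library(rjags)
dat <- list(weight = c(30, 32, 28, 41, 39, 44),
            breed  = c(1, 1, 1, 2, 2, 2),
            n = 6, n.breeds = 2)
jm <- jags.model("breed_weight.jags", data = dat, n.chains = 2)
update(jm, 1000)                                   # burn-in
post <- coda.samples(jm, c("b0", "b1", "sd.within", "sd.between"), n.iter = 2000)
summary(post)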
ggplot2::ggplot(datasets::airquality) +
ggplot2::geom_line(stat = "identity", ggplot2::aes(x = lubridate::make_date(lubridate::year(lubridate::today()), Month, Day), y = Wind)) |
context("decrypt")
test_that("decryption works", {
path <- ifelse(dir.exists("sample-captchas/"), "sample-captchas/",
"./tests/testthat/sample-captchas/")
path2 <- ifelse(dir.exists("sample-captchas/"), "./sample-model.hdf5",
"./tests/testthat/sample-model.hdf5")
files <- list.files(path, pattern = "_", full.names = TRUE)
model <- load_model(path2)
cap <- read_captcha(files)
expect_equal(class(decrypt(files, model)), "character")
expect_equal(class(decrypt(files, path2)), "character")
expect_equal(class(decrypt(files, "rfb")), "character")
expect_equal(class(decrypt(cap, model)), "character")
expect_equal(class(decrypt(cap, path2)), "character")
expect_equal(class(decrypt(cap, "rfb")), "character")
})
test_that("decrypt works with raw vector", {
path <- ifelse(dir.exists("sample-captchas/"), "sample-captchas/",
"./tests/testthat/sample-captchas/")
captcha_raw <- readr::read_file_raw(paste0(path, "captcha372a5114848de_f9ccnk.png"))
expect_equal(class(decrypt(captcha_raw, "rfb")), "character")
}) |
UmxBestMatchesFromIsland <- function(Umatrix, BestMatches, Imx, Cls= NULL, Toroid=T , RemoveOcean = T){
if(Toroid){
tU <- ToroidUmatrix(Umatrix, BestMatches, Cls)
Umatrix <- tU$Umatrix
BestMatches <- tU$BestMatches
Cls <- tU$Cls
}
BestMatches = CheckBestMatches(BestMatches, Cls)
if(!is.null(BestMatches)){
BestMatchesFilter = rep(T,nrow(BestMatches))
}
if(!is.null(Imx)){
for(i in 1:nrow(Imx)){
for(j in 1:ncol(Imx)){
if(Imx[i,j] == 1){
Umatrix[i,j] = NA
if(!is.null(BestMatches))
BestMatchesFilter[(BestMatches[,2] == i) & (BestMatches[,3] == j)] = F
}
}
}
if(!is.null(BestMatches)) BestMatches = BestMatches[BestMatchesFilter,]
if((!is.null(Cls)) & (!is.null(BestMatches))) Cls = Cls[BestMatchesFilter]
}
if(RemoveOcean){
oceanLine = !apply(Umatrix, 1, function(x) any(x != -1))
startLine = min(which(!oceanLine),na.rm=T)
endLine = length(oceanLine) - min(which(rev(!oceanLine)),na.rm=T) + 1
oceanCol = !apply(Umatrix, 2, function(x) any(x != -1))
startCol = min(which(!oceanCol),na.rm=T)
endCol = length(oceanCol) - min(which(rev(!oceanCol)),na.rm=T) + 1
if(!is.null(BestMatches)){
BestMatches <- BestMatches - cbind(rep(0,nrow(BestMatches)),startLine-1,startCol-1)
}
Umatrix <- Umatrix[startLine:endLine,startCol:endCol]
}
Umatrix[which(is.na(Umatrix))] = 0
return(list(Umatrix = Umatrix, BestMatches = BestMatches, Cls=Cls))
} |
calc.poss.detect.dists.lines <- function(population, survey, perp.truncation, plot = FALSE){
transects <- [email protected]
individuals <- population@population
for(i in seq(along = individuals$object)){
x.coord <- individuals[i,"x"]
y.coord <- individuals[i,"y"]
transect.angle <- atan2(transects[["end.Y"]]-transects[["start.Y"]], transects[["end.X"]]-transects[["start.X"]])
animal.angle <- atan2(y.coord-transects[["start.Y"]], x.coord-transects[["start.X"]])
delta.angle <- abs(animal.angle-transect.angle)
delta.angle <- (ifelse(delta.angle > pi, 2*pi - delta.angle, delta.angle))
hyp <- sqrt((y.coord-transects[["start.Y"]])^2+(x.coord-transects[["start.X"]])^2)
all.perp.dists <- hyp*sin(delta.angle)
intersects.transects <- apply(cbind(transects[,c("start.X", "start.Y", "end.X", "end.Y", "length")], p.dist = all.perp.dists), 1, FUN = check.intersection.TP, point = data.frame(x = x.coord, y = y.coord), display.diagnostics = FALSE)
perp.dists <- ifelse(intersects.transects & all.perp.dists < perp.truncation, TRUE, FALSE)
detect.dists <- data.frame(object = rep(individuals[i,"object"], length(transects$ID)), transect.ID = transects$ID, distance = all.perp.dists, available = perp.dists)
if(i == 1){
poss.detect.dists <- detect.dists
}else{
poss.detect.dists <- rbind(poss.detect.dists, detect.dists)
}
}
poss.detect.dists <- poss.detect.dists[poss.detect.dists$available,]
poss.detect.dists <- merge(poss.detect.dists, individuals, by="object")
if(plot){
transect.IDs <- sort(unique(poss.detect.dists$transect.ID))
for(i in seq(along = transect.IDs)){
points(poss.detect.dists$x[poss.detect.dists$transect.ID == transect.IDs[i]], poss.detect.dists$y[poss.detect.dists$transect.ID == transect.IDs[i]], col = i, pch = 20)
points(poss.detect.dists$x[poss.detect.dists$transect.ID == transect.IDs[i]], poss.detect.dists$y[poss.detect.dists$transect.ID == transect.IDs[i]], col = i)
}
}
if(nrow(poss.detect.dists) > 0){
index <- order(poss.detect.dists$object)
poss.detect.dists <- poss.detect.dists[index,]
row.names(poss.detect.dists) <- 1:nrow(poss.detect.dists)
}
return(poss.detect.dists)
} |
formatted <-
function(x, digits=2){
output <- formatC(x, format="f", digits=digits)
return(output)
} |
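## Quick illustration of formatted() above (inputs chosen arbitrarily):
formatted(pi)             # "3.14"
formatted(c(1, 2.5), 3)   # "1.000" "2.500"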
associationsToDiamondPlotDf <- function(dat, covariates, criterion,
labels = NULL,
decreasing=NULL,
conf.level=.95,
esMetric = 'r') {
if (is.null(labels)) labels <- covariates;
assocMatrix <- ufs::associationMatrix(dat, x=covariates, y=criterion,
conf.level=conf.level);
resDf <- data.frame(lo = as.numeric(assocMatrix$output$raw$ci.lo),
es = as.numeric(assocMatrix$output$raw$es),
hi = as.numeric(assocMatrix$output$raw$ci.hi));
if (esMetric == 'r') {
resDf <- data.frame(matrix(sapply(1:length(covariates), function(i) {
if (assocMatrix$output$raw$esType[i] == 'd') {
return(ufs::convert.d.to.r(resDf[i, ]));
} else if ((assocMatrix$output$raw$esType[i] == 'etasq') ||
(assocMatrix$output$raw$esType[i] == 'omegasq')) {
return(sqrt(resDf[i, ]));
} else {
return(resDf[i, ]);
}
}), ncol=3, byrow=TRUE));
} else if (esMetric == 'd' | esMetric == 'g') {
resDf <- data.frame(matrix(sapply(1:length(covariates), function(i) {
if (assocMatrix$output$raw$esType[i] == 'r' | assocMatrix$output$raw$esType[i] == 'v') {
return(ufs::convert.r.to.d(resDf[i, ]));
} else if ((assocMatrix$output$raw$esType[i] == 'etasq') ||
(assocMatrix$output$raw$esType[i] == 'omegasq')) {
return(ufs::convert.r.to.d(sqrt(resDf[i, ])));
} else {
return(resDf[i, ]);
}
}), ncol=3, byrow=TRUE));
} else {
stop("No other effect size metrics implemented yet!");
}
names(resDf) <- c('lo', 'es', 'hi');
resDf$label <- labels;
resDf$rownr <- 1:nrow(resDf);
resDf$constant <- 1;
if (!is.null(decreasing)) {
sortedByMean <- order(unlist(resDf$es), decreasing=!decreasing);
resDf <- resDf[sortedByMean, ];
labels <- labels[sortedByMean];
} else {
sortedByMean <- 1:length(labels);
}
attr(resDf, 'sortedByMean') <- sortedByMean;
return(resDf);
} |
library(analogsea)
source("analog-keys.R")
install <- function(droplet){
droplet %>%
debian_add_swap() %>%
install_new_r() %>%
install_docker() %>%
prepare_plumber()
}
install_docker <- function(droplet){
droplet %>%
droplet_ssh(c("sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D",
"echo 'deb https://apt.dockerproject.org/repo ubuntu-focal main' > /etc/apt/sources.list.d/docker.list")) %>%
debian_apt_get_update() %>%
droplet_ssh("sudo apt-get install linux-image-extra-$(uname -r)") %>%
debian_apt_get_install("docker-engine") %>%
droplet_ssh(c("curl -L https://github.com/docker/compose/releases/download/1.7.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose",
"chmod +x /usr/local/bin/docker-compose"))
}
install_new_r <- function(droplet){
droplet %>%
droplet_ssh(c("echo 'deb https://cran.rstudio.com/bin/linux/ubuntu focal-cran40/' >> /etc/apt/sources.list",
"sudo apt-key adv --keyserver keys.gnupg.net --recv-key 'E298A3A825C0D65DFD57CBB651716619E084DAB9'")) %>%
debian_apt_get_update() %>%
debian_install_r()
}
prepare_plumber<- function(droplet){
droplet %>%
droplet_ssh("git clone https://github.com/rstudio/plumber.git") %>%
droplet_ssh("cd plumber/inst/hosted/ && docker-compose up -d --build")
} |
ste_dietrich2004 <- function(wave, U) {
e <- vector(mode="numeric", length=length(wave@left))
for (i in (U/2+1):(length(wave@left)-U/2)) {
values <- (i-U/2):(i+U/2)
values <- values[values > 0]
e[i] <- sum(abs(wave@left[values]))
}
return(e)
} |
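## Hedged usage sketch for ste_dietrich2004() above: the wave@left slot suggests
## a tuneR/seewave 'Wave' object, so a short test tone is built with tuneR (this
## dependency is an assumption, not stated in the snippet) and the short-term
## energy is computed with window parameter U = 100.
library(tuneR)
w <- sine(440, duration = 2000, samp.rate = 8000)  # 2000-sample, 440 Hz tone
e <- ste_dietrich2004(w, U = 100)
plot(e, type = "l")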
apollo_estimateHB <- function(apollo_beta, apollo_fixed, apollo_probabilities, apollo_inputs, estimate_settings=NA){
time1 <- Sys.time()
database = apollo_inputs[["database"]]
apollo_control = apollo_inputs[["apollo_control"]]
draws = apollo_inputs[["draws"]]
apollo_randCoeff = apollo_inputs[["apollo_randCoeff"]]
apollo_lcPars = apollo_inputs[["apollo_lcPars"]]
apollo_HB = apollo_inputs[["apollo_HB"]]
workInLogs = apollo_control$workInLogs
estimationRoutine = tolower( estimate_settings[["estimationRoutine"]] )
maxIterations = estimate_settings[["maxIterations"]]
writeIter = estimate_settings[["writeIter"]]
hessianRoutine = estimate_settings[["hessianRoutine"]]
printLevel = estimate_settings[["printLevel"]]
silent = estimate_settings[["silent"]]
numDeriv_settings = estimate_settings[["numDeriv_settings"]]
constraints = estimate_settings[["constraints"]]
scaling = estimate_settings[["scaling"]]
bootstrapSE = estimate_settings[["bootstrapSE"]]
bootstrapSeed = estimate_settings[["bootstrapSeed"]]
if(length(apollo_inputs$apollo_scaling)>0 && !is.na(apollo_inputs$apollo_scaling)){
if(!is.null(apollo_HB$gVarNamesFixed)){
r <- ( names(apollo_beta) %in% names(apollo_inputs$apollo_scaling) ) & ( names(apollo_beta) %in% apollo_HB$gVarNamesFixed )
r <- names(apollo_beta)[r]
apollo_HB$FC[r] <- 1/apollo_inputs$apollo_scaling[r]*apollo_HB$FC[r]
rm(r)
}
if(!is.null(apollo_HB$gVarNamesNormal)){
r <- ( names(apollo_beta) %in% names(apollo_inputs$apollo_scaling) ) & ( names(apollo_beta) %in% apollo_HB$gVarNamesNormal )
r <- names(apollo_beta)[r]
dists_normal= names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==1])
dists_lnp = names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==2])
dists_lnn = names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==3])
dists_cnp = names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==4])
dists_cnn = names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==5])
dists_sb = names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==6])
s <- apollo_inputs$apollo_scaling
if(length(dists_normal)>0) apollo_HB$svN[dists_normal] <- 1/s[dists_normal]*apollo_HB$svN[dists_normal]
if(length(dists_lnp)>0) apollo_HB$svN[dists_lnp] <- -log(s[dists_lnp]) + apollo_HB$svN[dists_lnp]
if(length(dists_lnn)>0) apollo_HB$svN[dists_lnn] <- -log(s[dists_lnn]) + apollo_HB$svN[dists_lnn]
if(length(dists_cnp)>0) apollo_HB$svN[dists_cnp] <- 1/s[dists_cnp]*apollo_HB$svN[dists_cnp]
if(length(dists_cnn)>0) apollo_HB$svN[dists_cnn] <- 1/s[dists_cnn]*apollo_HB$svN[dists_cnn]
if(length(dists_sb)>0){
names(apollo_HB$gMINCOEF)=names(apollo_HB$svN)
names(apollo_HB$gMAXCOEF)=names(apollo_HB$svN)
apollo_HB$gMINCOEF[dists_sb] <- 1/s[dists_sb]*apollo_HB$gMINCOEF[dists_sb]
apollo_HB$gMAXCOEF[dists_sb] <- 1/s[dists_sb]*apollo_HB$gMAXCOEF[dists_sb]
}
rm(r, dists_normal, dists_lnp, dists_lnn, dists_cnp, dists_cnn, dists_sb)
}
}
apollo_probabilities <- apollo_insertComponentName(apollo_probabilities)
starttime <- Sys.time()
apollo_test_beta=apollo_beta
if(!apollo_control$noValidation){
if(!silent) cat("Testing probability function (apollo_probabilities)\n")
apollo_test_beta=apollo_beta
if(!is.null(apollo_HB$gVarNamesFixed)){
r <- ( names(apollo_beta) %in% apollo_HB$gVarNamesFixed )
r <- names(apollo_beta)[r]
apollo_test_beta[r] <- apollo_HB$FC[r]
}
if(!is.null(apollo_HB$gVarNamesNormal)){
r <- ( names(apollo_beta) %in% apollo_HB$gVarNamesNormal )
r <- names(apollo_beta)[r]
dists_normal=names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==1])
dists_lnp=names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==2])
dists_lnn=names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==3])
dists_cnp=names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==4])
dists_cnn=names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==5])
dists_sb=names(apollo_HB$gDIST[r][apollo_HB$gDIST[r]==6])
if(length(dists_normal)>0) apollo_test_beta[dists_normal] <- apollo_HB$svN[dists_normal]
if(length(dists_lnp)>0) apollo_test_beta[dists_lnp] <- exp(apollo_HB$svN[dists_lnp])
if(length(dists_lnn)>0) apollo_test_beta[dists_lnn] <- -exp(apollo_HB$svN[dists_lnn])
if(length(dists_cnp)>0) apollo_test_beta[dists_cnp] <- apollo_HB$svN[dists_cnp]*(apollo_HB$svN[dists_cnp]>0)
if(length(dists_cnn)>0) apollo_test_beta[dists_cnn] <- apollo_HB$svN[dists_cnn]*(apollo_HB$svN[dists_cnn]<0)
if(length(dists_sb)>0){
names(apollo_HB$gMINCOEF)=names(apollo_HB$svN)
names(apollo_HB$gMAXCOEF)=names(apollo_HB$svN)
apollo_test_beta[dists_sb] <- apollo_HB$gMINCOEF[dists_sb]+(apollo_HB$gMAXCOEF[dists_sb]-apollo_HB$gMINCOEF[dists_sb])/(1+exp(-apollo_HB$svN[dists_sb]))
}
}
apollo_probabilities(apollo_test_beta, apollo_inputs, functionality="validate")
testLL = apollo_probabilities(apollo_test_beta, apollo_inputs, functionality="estimate")
if(!workInLogs) testLL=log(testLL)
if(anyNA(testLL)) stop('Log-likelihood calculation fails at starting values!')
apollo_beta_base=apollo_test_beta+(!(names(apollo_beta)%in%(apollo_fixed)))*0.001*runif(length(apollo_beta))
base_LL=apollo_probabilities(apollo_beta_base, apollo_inputs, functionality="estimate")
if(workInLogs) base_LL=sum(base_LL) else base_LL=sum(log(base_LL))
freeparams=apollo_beta_base[!names(apollo_beta_base)%in%apollo_fixed]
for(p in names(freeparams)){
apollo_beta_test1=apollo_beta_base
apollo_beta_test2=apollo_beta_base
apollo_beta_test1[p]=apollo_beta_test1[p]-0.001
apollo_beta_test2[p]=apollo_beta_test2[p]+0.001
test1_LL=apollo_probabilities(apollo_beta_test1, apollo_inputs, functionality="estimate")
test2_LL=apollo_probabilities(apollo_beta_test2, apollo_inputs, functionality="estimate")
if(workInLogs){
test1_LL=sum(test1_LL)
test2_LL=sum(test2_LL)
} else{
test1_LL=sum(log( ifelse(!is.finite(test1_LL) | test1_LL<=0, .1, test1_LL) ))
test2_LL=sum(log( ifelse(!is.finite(test2_LL) | test2_LL<=0, .1, test2_LL) ))
}
if(is.na(test1_LL)) test1_LL <- base_LL + 1
if(is.na(test2_LL)) test2_LL <- base_LL + 2
if(base_LL==test1_LL & base_LL==test2_LL) stop("Parameter ",p," does not influence the log-likelihood of your model!")
}
}
tmp <- tryCatch( get("apollo_fixed", envir=globalenv()), error=function(e) 1 )
  if( length(tmp)>0 && any(tmp %in% c(apollo_HB$gVarNamesFixed, apollo_HB$gVarNamesNormal)) ) stop("apollo_fixed seems to have changed since calling apollo_inputs.")
gFix <- apollo_HB$gVarNamesFixed
gNor <- apollo_HB$gVarNamesNormal
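  # Likelihood wrapper handed to RSGHB::doHB: RSGHB supplies the fixed coefficients
  # (fc) and a matrix of individual-level draws for the random coefficients (b);
  # these are renamed, combined with the parameters held fixed in apollo_fixed, and
  # evaluated through apollo_probabilities with functionality = "estimate".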
apollo_HB_likelihood=function(fc,b){
if(is.null(gFix)) fc1 <- NULL else fc1 <- stats::setNames(as.list(fc) , gFix)
if(is.null(gNor)) b1 <- NULL else b1 <- stats::setNames(as.data.frame(b), gNor)
if(length(apollo_fixed)==0) fp <- NULL else fp <- stats::setNames( as.list(apollo_beta[apollo_fixed]), apollo_fixed )
P <- apollo_probabilities(apollo_beta=c(fc1,b1,fp), apollo_inputs, functionality="estimate")
return(P)
}
time2 <- Sys.time()
currentWD <- getwd()
if(dir.exists(apollo_inputs$apollo_control$outputDirectory)) setwd(apollo_inputs$apollo_control$outputDirectory)
model <- RSGHB::doHB(apollo_HB_likelihood, database, apollo_HB)
setwd(currentWD)
time3 <- Sys.time()
model$apollo_HB <- apollo_HB
model$apollo_beta <- apollo_test_beta
model$LLStart <- sum(testLL)
if(workInLogs) model$LL0 <- sum((apollo_probabilities(apollo_beta, apollo_inputs, functionality="zero_LL"))) else model$LL0 <- sum(log(apollo_probabilities(apollo_beta, apollo_inputs, functionality="zero_LL")))
model$startTime <- starttime
model$apollo_control <- apollo_control
model$nObs <- nrow(database)
model$nIndivs <- length(unique(database[,apollo_control$indivID]))
endtime <- Sys.time()
model$timeTaken <- as.numeric(difftime(endtime,starttime,units='secs'))
model$apollo_fixed <- apollo_fixed
model$estimationRoutine <- "Hierarchical Bayes"
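  # Geweke convergence diagnostics (z-scores comparing the first 10% to the last
  # 50% of each chain) are stored for the non-random parameter chains (F), the
  # means of the random parameters (A) and the flattened lower triangle of the
  # covariance-matrix chain (D).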
if(!is.null(model$F)){
tmp <- coda::geweke.diag(model$F[,2:(ncol(model$F))], frac1=0.1, frac2=0.5)[[1]]
names(tmp) <- model$params.fixed
model$F_convergence=tmp
}
if(!is.null(model$A)){
tmp <- coda::geweke.diag(model$A[,2:(ncol(model$A))], frac1=0.1, frac2=0.5)[[1]]
model$A_convergence=tmp
}
if(!is.null(model$D)){
tmp <- c()
for(i in 1:dim(model$D)[1]) for(j in 1:i){
if(i==1 & j==1) Dmatrix <- as.matrix(model$D[i,j,]) else Dmatrix <- cbind(Dmatrix, as.vector(model$D[i,j,]))
tmp <- c(tmp, paste(colnames(model$A)[i+1],colnames(model$A)[j+1], sep="_"))
}
colnames(Dmatrix) <- tmp
tmp <- coda::geweke.diag(Dmatrix, frac1=0.1, frac2=0.5)[[1]]
model$D_convergence=tmp
}
if(length(apollo_HB$gVarNamesFixed)>0 | length(model$apollo_fixed)>0){
if(length(apollo_HB$gVarNamesFixed)>0){
non_random=matrix(0,nrow=length(apollo_HB$gVarNamesFixed),2)
non_random[,1]=colMeans(model$F)[2:ncol(model$F)]
non_random[,2]=apply(model$F,FUN=stats::sd,2)[2:ncol(model$F)]
rownames(non_random)=apollo_HB$gVarNamesFixed}
if(length(model$apollo_fixed)>0){
if(length(apollo_HB$gVarNamesFixed)>0){
non_random=rbind(non_random,cbind(matrix(model$apollo_beta[model$apollo_fixed]),NA))
rownames(non_random)[(length(apollo_HB$gVarNamesFixed)+1):nrow(non_random)]=model$apollo_fixed
} else{
non_random=cbind(matrix(model$apollo_beta[model$apollo_fixed]),NA)
rownames(non_random)=model$apollo_fixed
}
}
colnames(non_random)=c("Mean","SD")
originalOrder <- names(model$apollo_beta)[names(model$apollo_beta) %in% rownames(non_random)]
model$chain_non_random=non_random[originalOrder,,drop=FALSE]
}
apollo_HB$gVarNamesFixed <- model$params.fixed
apollo_HB$gVarNamesNormal <- model$params.vary
if(any(!is.null(apollo_HB$gVarNamesNormal)) && length(apollo_HB$gVarNamesNormal)>0){
random_mean = matrix(0,nrow=length(apollo_HB$gVarNamesNormal),2)
random_mean[,1] = colMeans(model$A)[2:ncol(model$A)]
random_mean[,2] = apply(model$A,FUN=stats::sd,2)[2:ncol(model$A)]
rownames(random_mean)=apollo_HB$gVarNamesNormal
colnames(random_mean)=c("Mean","SD")
model$chain_random_mean=random_mean
random_cov_mean = apply(model$D,FUN=mean,c(1,2))
random_cov_sd = apply(model$D,FUN=stats::sd,c(1,2))
rownames(random_cov_mean) = apollo_HB$gVarNamesNormal
colnames(random_cov_mean) = apollo_HB$gVarNamesNormal
model$chain_random_cov_mean=random_cov_mean
rownames(random_cov_sd) = apollo_HB$gVarNamesNormal
colnames(random_cov_sd) = apollo_HB$gVarNamesNormal
model$chain_random_cov_sd=random_cov_sd
posterior=matrix(0,nrow=length(apollo_HB$gVarNamesNormal),2)
posterior[,1]=colMeans(model$C)[3:ncol(model$C)]
posterior[,2]=apply(model$C,FUN=stats::sd,2)[3:ncol(model$C)]
rownames(posterior)=apollo_HB$gVarNamesNormal
model$posterior_mean=posterior
colnames(model$posterior_mean)=c("Mean","SD")
draws=10000
covMat=random_cov_mean
meanA=random_mean[,1]
pars = length(meanA)
covMat=as.matrix(covMat)
Ndraws=mvtnorm::rmvnorm(draws,meanA,covMat,method="chol")
for(i in 1:pars){
if(apollo_HB$gDIST[i]==6){
        Ndraws[,i]=apollo_HB$gMINCOEF[i]+(apollo_HB$gMAXCOEF[i]-apollo_HB$gMINCOEF[i])*1/(1+exp(-Ndraws[,i]))
}
if(apollo_HB$gDIST[i]==5){
Ndraws[,i]=(Ndraws[,i]<0)*Ndraws[,i]
}
if(apollo_HB$gDIST[i]==4){
Ndraws[,i]=(Ndraws[,i]>0)*Ndraws[,i]
}
if(apollo_HB$gDIST[i]==3){
Ndraws[,i]=-exp(Ndraws[,i])
}
if(apollo_HB$gDIST[i]==2){
Ndraws[,i]=exp(Ndraws[,i])
}
}
}
if(length(scaling)>0 && !is.na(scaling)){
for(s in 1:length(scaling)){
ss=names(scaling)[s]
if(ss%in%colnames(model$C)) model$C[,ss]=scaling[s]*model$C[,ss]
if(ss%in%colnames(model$Csd)) model$Csd[,ss]=scaling[s]*model$Csd[,ss]
if(ss%in%colnames(model$F)) model$F[,ss]=scaling[s]*model$F[,ss]
if(any(!is.null(apollo_HB$gVarNamesNormal)) && length(apollo_HB$gVarNamesNormal)>0){if(ss%in%colnames(Ndraws)) Ndraws[,ss]=scaling[s]*Ndraws[,ss]}
if(ss%in%rownames(model$chain_non_random)) model$chain_non_random[ss,]=scaling[s]*model$chain_non_random[ss,]
if(ss%in%rownames(model$posterior_mean)) model$posterior_mean[ss,]=scaling[s]*model$posterior_mean[ss,]
}
model$scaling <- scaling
}
if(any(!is.null(apollo_HB$gVarNamesNormal)) && length(apollo_HB$gVarNamesNormal)>0){
model$random_coeff_summary=cbind(colMeans(Ndraws),apply(Ndraws,2,sd))
colnames(model$random_coeff_summary)=c("Mean","SD")
if(length(apollo_HB$gVarNamesNormal)>1){
model$random_coeff_covar=cov(Ndraws)
model$random_coeff_corr=cor(Ndraws)
}
}
panelData <- apollo_control$panelData
indivID <- database[,apollo_control$indivID]
nObs <- length(indivID)
if(!panelData) indivID <- 1:nObs
nIndiv <- length(unique(indivID))
obsPerIndiv <- as.vector(table(indivID))
if(is.null(model$chain_non_random)){
fc1 <- NULL
}else{
fc1 <- stats::setNames(as.list(model$chain_non_random[,1]),names(model$chain_non_random[,1]))
}
if(is.null(model$C)){
b1 <- NULL
}else{
M=model$C[,-c(1,2),drop=FALSE]
M1 <- matrix(0, nrow=nObs, ncol=ncol(M))
r1 <- 1
for(i in 1:nIndiv){
r2 <- r1 + obsPerIndiv[i] - 1
M1[r1:r2,] <- matrix(as.vector(M[i,]), nrow=r2-r1+1, ncol=ncol(M), byrow=TRUE)
r1 <- r2 + 1
}
b1 <- stats::setNames(as.data.frame(M1), colnames(M))
}
model$estimate=c(fc1,b1)
if(exists("apollo_HBcensor", envir=globalenv()) && !apollo_inputs$silent){
apollo_print(paste0('WARNING: RSGHB has censored the probabilities. ',
'Please note that in at least some iterations RSGHB has ',
'avoided numerical issues by left censoring the ',
'probabilities. This has the side effect of zero or ',
'negative probabilities not leading to failures!'))
rm("apollo_HBcensor", envir=globalenv())
}
time4 <- Sys.time()
model$timeTaken <- as.numeric(difftime(time4,time1,units='secs'))
model$timePre <- as.numeric(difftime(time2,time1,units='secs'))
model$timeEst <- as.numeric(difftime(time3,time2,units='secs'))
model$timePost <- as.numeric(difftime(time4,time3,units='secs'))
return(model)
} |
library(testthat)
if (Sys.getenv("TENSORFLOW_EAGER") == "TRUE")
tensorflow::tfe_enable_eager_execution()
library(keras)
if (identical(Sys.getenv("NOT_CRAN"), "true")) {
test_check("keras")
} |
makeEmissionFUN <-
function (errorRate = 0.01)
{
E <- log(errorRate)
E2 <- log(1 - errorRate)
E3 <- log(0.5)
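    # The returned closure gives the log emission weight: for states other than 3
    # it is log(1 - errorRate) when the observation x matches the state h and
    # log(errorRate) otherwise; for state 3 it is n * E3 = n * log(0.5).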
function(h, x, n) {
if (h != 3)
return(ifelse(h == x, E2, E))
else return(n * E3)
}
} |
library(pROC)
test_that("roc rejects rejects invalid data", {
controls <- c(-Inf, 1,2,3,4,5)
cases <- c(2,3,4,5,6)
expect_warning(r <- roc(controls = controls, cases = cases), "Infinite value")
expect_equal(r, NaN)
controls <- c(1,2,3,4,5, Inf)
cases <- c(2,3,4,5,6)
expect_warning(r <- roc(controls = controls, cases = cases), "Infinite value")
expect_equal(r, NaN)
})
test_that("roc rejects rejects also valid data", {
controls <- c(1,2,3,4,5)
cases <- c(-Inf, 2,3,4,5,6)
expect_warning(r <- roc(controls = controls, cases = cases), "Infinite value")
expect_equal(r, NaN)
controls <- c(1,2,3,4,Inf)
cases <- c(2,3,4,5,6)
expect_warning(r <- roc(controls = controls, cases = cases), "Infinite value")
expect_equal(r, NaN)
}) |
em_mendelian <- function(typed.genos, M, ncores = 1) {
indices.typed.genos <- find_geno_index(typed.genos, M)
update.theta <- function(theta, mval) {
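    # One EM iteration for a two-component normal mixture over mval (one row of M):
    # component densities at the current (mu0, sd0, mu1, sd1) are combined with the
    # pedigree configuration probabilities in typed.genos to obtain a posterior
    # carrier probability q for each typed individual, and the component means and
    # SDs are re-estimated as q- and (1 - q)-weighted moments.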
mu0 <- theta[1]
sd0 <- theta[2]
mu1 <- theta[3]
sd1 <- theta[4]
y <- c(dnorm(mval, mu0, sd0), dnorm(mval, mu1, sd1))
p01 <- matrix(y, length(mval), 2)
f <- function(j) {
datg <- typed.genos[[j]]
p01.fam <- p01[indices.typed.genos[[j]], ]
if (ncol(datg) == 2) p01.fam <- matrix(p01.fam, 1, )
p <- datg$p
for (k in 2:ncol(datg)) {
indices <- datg[, k] + 1
p <- p * p01.fam[k - 1, indices]
}
p <- p / sum(p)
g <- function(k) {
carriers <- (datg[, k] == 1)
sum(p[carriers])
}
q.famj <- sapply(2:ncol(datg), g)
q.famj
}
q.fams <- lapply(1:length(typed.genos), f)
q <- numeric(length(mval))
for (j in 1:length(typed.genos)) {
q[indices.typed.genos[[j]]] <- q.fams[[j]]
}
w0 <- (1 - q) / sum(1 - q)
w1 <- q / sum(q)
mu0 <- sum(w0 * mval)
sd0 <- sqrt(sum(w0 * (mval - mu0)^2))
mu1 <- sum(w1 * mval)
sd1 <- sqrt(sum(w1 * (mval - mu1)^2))
theta <- c(mu0, sd0, mu1, sd1)
theta
}
calc.ll.em <- function(theta, mval) {
mu0 <- theta[1]
sd0 <- theta[2]
mu1 <- theta[3]
sd1 <- theta[4]
y <- c(dnorm(mval, mu0, sd0), dnorm(mval, mu1, sd1))
p01 <- matrix(y, length(mval), 2)
f <- function(j) {
datg <- typed.genos[[j]]
p01.fam <- p01[indices.typed.genos[[j]], ]
if (ncol(datg) == 2) p01.fam <- matrix(p01.fam, 1, )
p <- datg$p
for (k in 2:ncol(datg)) {
indices <- datg[, k] + 1
p <- p * p01.fam[k - 1, indices]
}
ll.famj <- log(sum(p))
ll.famj
}
p.fams <- sapply(1:length(typed.genos), f)
ll <- sum(p.fams)
ll
}
em.maxlik <- function(i, tol = 1e-3, max.count = 200, theta.start = NULL) {
mval <- as.numeric(M[i, ])
n <- length(mval)
mu0 <- mean(mval)
sd0 <- sd(mval) * sqrt((n - 1) / n)
mu1 <- mu0
sd1 <- sd0
theta.null <- c(mu0, sd0, mu1, sd1)
if (is.null(theta.start)) {
theta.start <- theta.null
}
theta <- theta.start
continue <- TRUE
count <- 0
while (continue) {
count <- count + 1
theta.new <- update.theta(theta, mval)
continue <- all(!is.na(theta.new)) & any(abs(theta.new - theta) > tol) &
(count < max.count)
theta <- theta.new
}
ll.start <- calc.ll.em(theta.null, mval)
ll.end <- calc.ll.em(theta, mval)
c(ll.start, ll.end, theta.null[1:2], theta, count)
}
em.maxlik.best <- function(i) {
theta_start_values <- list(
NULL, c(-2, 1, 2, 1), c(2, 1, -2, 1)
)
out <- vector("list", length = 3)
for (j in seq_along(out)) {
out[[j]] <- em.maxlik(i, theta.start = theta_start_values[[j]])
}
out <- matrix(unlist(out, use.names = FALSE), nrow = 3, byrow = TRUE)
out_noinf <- rbind(out[out[, 2] != Inf, ])
max_out <- out_noinf[which.max(out_noinf[, 2]), ]
if (length(max_out) == 0) {
max_out <- out[1, ]
max_out[-c(1, 3, 4)] <- NA
}
max_out
}
nprobe <- nrow(M)
if (ncores == 1) {
out <- sapply(1:nprobe, em.maxlik.best)
} else {
cl <- parallel::makeCluster(ncores)
on.exit(parallel::stopCluster(cl), add = TRUE)
out <- parallel::parSapply(cl, 1:nprobe, em.maxlik.best)
}
out <- data.frame(probe = rownames(M)[1:nprobe], as.data.frame(t(out)))
names(out) <- c("probe", "ll.null", "ll.mendel", "mu.null", "sd.null",
"mu0", "sd0", "mu1", "sd1", "count")
out
} |
`analyse.models` <-
function (file, size.freq = TRUE, moco = c(20, 10), int.freq = TRUE,
kmax = 10, int.level = 2, bin.names = NULL)
{
ones <- function(model) {
(model[begin] == 1) * abs(model[begin + 1])
}
twos <- function(model) {
ifelse(model[begin] == 2, paste(model[begin + 1], model[begin +
2]), "0 0")
}
threes <- function(model) {
ifelse(model[begin] == 3, paste(model[begin + 1], model[begin +
2], model[begin + 3]), "0 0 0")
}
sub.names <- function(obj, new.names) {
nbin <- length(new.names)
temp <- paste("", names(obj), "")
for (i in -nbin:nbin){
temp <- gsub(pattern = paste("", i, ""), replacement = paste("",
paste(ifelse(i < 0, "-", ""), new.names[abs(i)], sep = ""), ""), x = temp)
}
end <- unlist(lapply(strsplit(temp, split = ""), length)) - 1
temp <- substring(temp, first = 2, last = end)
temp
}
erg <- read.table(file)
n <- dim(erg)[1]
fac <- ifelse(int.freq, 1, n)
    sizes <- if (size.freq) table(erg[, 1]) else table(erg[, 1])/n
models <- erg[, 2:((int.level + 1) * kmax + 1)]
begin <- seq(1, by = int.level + 1, length.out = kmax)
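    # Each model row holds kmax blocks of (int.level + 1) entries: the first entry
    # of a block is the interaction size (1, 2 or 3) and the remaining entries are
    # the signed variable indices, which ones()/twos()/threes() read via 'begin'.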
only.ones <- apply(models, 1, ones)
only.ones <- sort(table(as.vector(only.ones[only.ones > 0])), decreasing = TRUE)/fac
if (!is.null(bin.names))
names(only.ones) <- sub.names(only.ones, new.names = bin.names)
only.twos <- apply(models, 1, twos)
only.twos <- sort(table(as.vector(only.twos[only.twos !=
"0 0"])), decreasing = TRUE)/fac
if (!is.null(bin.names)) {
names(only.twos) <- sub.names(only.twos, new.names = bin.names)
}
if (length(moco) > 2) {
only.threes <- apply(models, 1, threes)
only.threes <- sort(table(as.vector(only.threes[only.threes !=
"0 0 0"])), decreasing = TRUE)[1:moco[3]]/fac
if (!is.null(bin.names)) {
names(only.threes) <- sub.names(only.threes, new.names = bin.names)
}
}
else {
only.threes <- NULL
}
list(size = sizes, ones = only.ones[1:moco[1]], twos = only.twos[1:moco[2]],
threes = only.threes)
} |
setMethod("sqlCreateTable", signature("GreenplumConnection"),
function(con, table, fields, row.names = NA, temporary = FALSE,
distributed_by = NULL, ...) {
table <- dbQuoteIdentifier(con, table)
if (is.data.frame(fields)) {
fields <- DBI::sqlRownamesToColumn(fields, row.names)
fields <- vapply(fields, function(x) DBI::dbDataType(con, x), character(1))
}
field_names <- dbQuoteIdentifier(con, names(fields))
field_types <- unname(fields)
fields <- paste0(field_names, " ", field_types)
distribution <- if(is.null(distributed_by)){
"DISTRIBUTED RANDOMLY"
} else{
paste0("(",
paste(distributed_by, collapse = ", "),
")")
}
DBI::SQL(paste0(
"CREATE ", if (temporary) "TEMPORARY ", "TABLE ", table, " (\n",
" ", paste(fields, collapse = ",\n "), "\n)\n ",
distribution
))
}
) |
plot.pkgDepGraph <- function(
x, pkgsToHighlight,
main = paste(attr(x, "pkgs"), collapse = ", "),
legendPosition = c(-1.2, -1),
shape = "circle",
vertex.size = 8,
cex = 1,
...)
{
class(x) <- "igraph"
plotColours <- c("grey80", "orange")
if(missing("pkgsToHighlight")) {
pkgsToHighlight <- attr(x, "pkgs")
}
topLevel <- as.numeric(igraph::V(x)$name %in% pkgsToHighlight)
vColor <- plotColours[1 + topLevel]
vFont <- 1 + topLevel
vShape <- c("none", shape)[1 + topLevel]
edgeColor <- c(Imports = "red", Depends = "orange", Suggests = "grey80", Enhances = "blue", LinkingTo = "black")
eColor <- edgeColor[igraph::get.edge.attribute(x, "type")]
typesInGraph <- unique(igraph::get.edge.attribute(x, "type"))
edgeColor <- edgeColor[typesInGraph]
par(mai = rep(0.25, 4))
igraph::plot.igraph(x, vertex.size = vertex.size,
edge.arrow.size = 0.5,
edge.color = eColor,
vertex.label.cex = cex,
vertex.label.color = "black",
vertex.color = vColor,
vertex.shape = vShape,
vertex.label.font = vFont,
xlim = c(-1.5, 1)
)
pch1 <- rep(19, length(plotColours))
pch2 <- rep(-8594, length(edgeColor))
yjust <- function(x) 0.5 * (x + 1)
xjust <- function(x) 1
if(!is.null(legendPosition)) {
legend(x = legendPosition[1],
y = legendPosition[2],
xjust = xjust(legendPosition[1]),
yjust = yjust(legendPosition[2]),
legend = names(edgeColor),
col = edgeColor,
pch = pch2,
y.intersp = 0.75,
cex = cex)
}
title(main, cex = cex)
} |
observeEvent(input$finalok, {
num_data <- final_split$train[, sapply(final_split$train, is.factor)]
if (is.null(dim(num_data))) {
k <- final_split$train %>% map(is.factor) %>% unlist()
j <- names(which(k == TRUE))
numdata <- tibble::as_data_frame(num_data)
colnames(numdata) <- j
updateSelectInput(session, 'var1_cross',
choices = names(numdata), selected = names(numdata))
updateSelectInput(session, 'var2_cross',
choices = names(numdata), selected = names(numdata))
} else if (ncol(num_data) < 1) {
updateSelectInput(session, 'var1_cross',
choices = '', selected = '')
updateSelectInput(session, 'var2_cross',
choices = '', selected = '')
} else {
updateSelectInput(session, 'var1_cross', choices = names(num_data))
updateSelectInput(session, 'var2_cross', choices = names(num_data))
}
})
d_cross <- eventReactive(input$submit_cross, {
data <- final_split$train[, c(input$var1_cross, input$var2_cross)]
})
conames <- reactive({
colnames(d_cross())
})
cross_out <- eventReactive(input$submit_cross, {
k <- ds_cross_table(final_split$train, !! sym(as.character(input$var1_cross)),
!! sym(as.character(input$var2_cross)))
k
})
output$cross <- renderPrint({
cross_out()
})
c1_title <- eventReactive(input$submit_cross, {
h3('Stacked Bar Plot', style = 'align:center;')
})
output$cross1_title <- renderUI({
c1_title()
})
c2_title <- eventReactive(input$submit_cross, {
h3('Grouped Bar Plot', style = 'align:center;')
})
output$cross2_title <- renderUI({
c2_title()
})
c3_title <- eventReactive(input$submit_cross, {
h3('Proportional Bar Plot', style = 'align:center;')
})
output$cross3_title <- renderUI({
c3_title()
})
output$cross_bar_stacked <- renderPlot({
plot(cross_out(), stacked = TRUE)
})
output$cross_bar_grouped <- renderPlot({
plot(cross_out())
})
output$cross_bar_proportional <- renderPlot({
plot(cross_out(), proportional = TRUE)
}) |
"%primes%" <- function(size, start_at) {
assert_collection <- checkmate::makeAssertCollection()
checkmate::assert_count(x = size, add = assert_collection)
checkmate::assert_count(x = start_at, positive = TRUE, add = assert_collection)
checkmate::reportAssertions(assert_collection)
if (start_at >= size)
assert_collection$push("'start_at' must be smaller than 'size'.")
checkmate::reportAssertions(assert_collection)
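  # Group sizes follow consecutive primes starting at start_at; the value returned
  # below is the number of elements left over for the final, partial group.
  # Hypothetical illustration (assuming create_n_primes() yields 5, 7, 11, 13, ...):
  #   100 %primes% 5   # prime sizes first exceed 100 at 124, so 29 - 24 = 5 remain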
n_groups <- ceiling(size / start_at)
group_data <- data.frame(
"groups" = seq_len(n_groups),
stringsAsFactors = FALSE
)
group_data <- group_data %>%
dplyr::mutate(
n_elements = create_n_primes(length(.data$groups), start_at),
cumsum = cumsum(as.numeric(.data$n_elements))
)
last_group_row <- group_data[group_data[["cumsum"]] >= size, ][1, ]
cumsum_last_group <- last_group_row[1, 3]
n_elements_last_group <- last_group_row[1, 2]
excess_elements <- cumsum_last_group - size
if (excess_elements == 0) {
remainder <- 0
} else {
remainder <- n_elements_last_group - excess_elements
}
remainder
} |
mc_index <- function() {
path <- "https://multicast.aspra.uni-bamberg.de/data/mcr/mc_index.tsv"
message("Retrieving Multi-CAST version index...")
index <- tryCatch(suppressWarnings(read.csv(path,
sep = "\t",
colClasses = c("factor",
"character",
"character",
rep("numeric", 2)),
header = TRUE)),
error = function(e) {
stop(paste0("Failed to download index. Cannot access file.\n",
" The University of Bamberg servers seem to be experiencing problems.\n",
" Please try again later."),
call. = FALSE)
}
)
message("Downloaded <1 KB.")
return(index)
} |
caImportance<-function(y,x)
{
options(contrasts=c("contr.sum", "contr.poly"))
outdec<-options(OutDec="."); on.exit(options(outdec))
options(OutDec=",")
y<-m2v(y)
m<-length(x)
n<-nrow(x)
S<-nrow(y)/n
xnms<-names(x)
Lj<-vector("numeric",m)
for (j in 1:m) {Lj[j]<-nlevels(factor(x[[xnms[j]]]))}
p<-sum(Lj)-m+1
xtmp<-paste("factor(x$",xnms,sep="",paste(")"))
xfrm<-paste(xtmp,collapse="+")
usl<-partutils(xfrm,y,x,n,p,S)
imps<-matrix(0,S,m)
for(s in 1:S)
{
u<-usl[s,]
ul<-utilities(u,Lj)
imp<-importance(ul,Lj)*100
imps[s,]<-imp
}
impS<-round(apply(imps,2,"mean"),2)
return(impS)
} |
print.rvfactor <- function(x, all.levels=FALSE, ...) {
s <- summary(x, all.levels=all.levels)
ds <- dimnames(s)
if (!is.null(.names <- names(x))) {
s <- cbind(name=.names, s)
} else if (!is.null(.dn <- dimnames(x))) {
sud <- rvpar("summary.dimnames")
if (!is.null(sud) && !is.na(sud) && is.logical(sud)) {
da <- lapply(.dn, function (na) if (is.null(na)) rep("", nrow(s)) else na)
rw <- da[[1]][row(x)]
cl <- da[[2]][col(x)]
s <- cbind(row=rw, col=cl, " "=":", s)
}
}
print(s)
} |
"battery_charging" |
trace_length_case <- function(eventlog) {
eventlog %>%
group_by_case %>%
summarize(absolute = n_distinct(!!activity_instance_id_(eventlog)))
} |
test_that("can convert unnamed vector", {
expect_identical(
enframe(3:1),
tibble(name = 1:3, value = 3:1)
)
})
test_that("can convert unnamed list", {
expect_identical(
enframe(as.list(3:1)),
tibble(name = 1:3, value = as.list(3:1))
)
})
test_that("can convert named vector", {
expect_identical(
enframe(c(a = 2, b = 1)),
tibble(name = letters[1:2], value = as.numeric(2:1))
)
})
test_that("can convert zero-length vector", {
expect_identical(
enframe(logical()),
tibble(name = integer(), value = logical())
)
})
test_that("can convert NULL (
expect_identical(
enframe(NULL),
tibble(name = integer(), value = logical())
)
})
test_that("can use custom names", {
expect_identical(
enframe(letters, name = "index", value = "letter"),
tibble(
index = seq_along(letters),
letter = letters
)
)
})
test_that("can enframe without names", {
expect_identical(
enframe(letters, name = NULL, value = "letter"),
tibble(letter = letters)
)
})
test_that("can't use value = NULL", {
expect_legacy_error(
enframe(letters, value = NULL),
error_enframe_value_null(),
fixed = TRUE
)
})
test_that("can't pass objects with dimensions", {
skip_enh_enframe_vector()
expect_legacy_error(
enframe(iris),
error_enframe_has_dim(iris),
fixed = TRUE
)
})
test_that("can deframe two-column data frame", {
expect_identical(
deframe(tibble(name = letters[1:3], value = 3:1)),
c(a = 3L, b = 2L, c = 1L)
)
})
test_that("can deframe one-column data frame", {
expect_identical(
deframe(tibble(value = 3:1)),
3:1
)
})
test_that("can deframe tibble with list column", {
expect_identical(
deframe(tibble(name = letters[1:3], value = as.list(3:1))),
setNames(as.list(3:1), nm = letters[1:3])
)
})
test_that("can deframe three-column data frame with warning", {
expect_warning(
expect_identical(
deframe(tibble(name = letters[1:3], value = 3:1, oops = 1:3)),
c(a = 3L, b = 2L, c = 1L)
),
"one- or two-column",
fixed = TRUE
)
}) |
leafsfirst.tail<-function(dendat, rho=0, refe=NULL, dist.type="euclid")
{
n<-dim(dendat)[1]
d<-dim(dendat)[2]
pcfhigh<-dendat+rho
pcfdown<-dendat-rho
if (is.null(refe)){
refe<-matrix(0,1,d)
for (i in 1:d) refe[1,i]<-mean(dendat[,i])
refe<-refe[1:d]
}
distat<-sqrt(pituus(dendat-t(matrix(refe,d,n))))
lkm<-n
infopointer<-seq(1,lkm)
if (length(rho)==1) rho<-rep(rho,lkm)
ord<-order(distat)
infopointer<-infopointer[ord]
parent<-matrix(0,lkm,1)
child<-matrix(0,lkm,1)
sibling<-matrix(0,lkm,1)
volume<-matrix(0,lkm,1)
radius<-matrix(0,lkm,1)
highestNext<-matrix(0,lkm,1)
boundrec<-matrix(0,lkm,2*d)
node<-lkm
parent[node]<-0
child[node]<-0
sibling[node]<-0
radius[node]<-distat[ord[node]]
volume[node]<-1
beg<-node
highestNext[node]<-0
note<-infopointer[node]
for (i in 1:d){
boundrec[node,2*i-1]<-pcfdown[note,i]
boundrec[node,2*i]<-pcfhigh[note,i]
}
j<-2
while (j<=lkm){
node<-lkm-j+1
highestNext[node]<-beg
beg<-node
rec1<-matrix(0,2*d,1)
note<-infopointer[node]
for (i in 1:d){
rec1[2*i-1]<-pcfdown[note,i]
rec1[2*i]<-pcfhigh[note,i]
}
boundrec[node,]<-rec1
radius[node]<-distat[ord[node]]
volume[node]<-1
curroot<-highestNext[beg]
prevroot<-beg
ekatouch<-0
while (curroot>0){
istouch<-touchstep.tail(node,curroot,boundrec,child,sibling,
infopointer,pcfdown,pcfhigh,rho,dendat,
dist.type=dist.type)
if (istouch==1){
parent[curroot]<-node
if (ekatouch==0) ekatouch<-1 else ekatouch<-0
if (ekatouch==1){
child[node]<-curroot
}
else{
sibling[lastsib]<-curroot
}
volume[node]<-volume[node]+volume[curroot]
radius[node]<-min(distat[ord[node]],distat[ord[curroot]])
rec1<-boundrec[node,]
rec2<-boundrec[curroot,]
boundrec[node,]<-boundbox(rec1,rec2)
highestNext[prevroot]<-highestNext[curroot]
}
if (istouch==0) prevroot<-curroot else lastsib<-curroot
curroot<-highestNext[curroot]
}
j<-j+1
}
root<-1
maxdis<-distat[ord[length(ord)]]
center<-t(dendat[infopointer,])
lf<-list(
parent=parent,volume=volume,center=center,level=radius,
root=root,
infopointer=infopointer,
refe=refe,maxdis=maxdis,
dendat=dendat)
return(lf)
} |
filter.dtplyr_step <- function(.data, ..., .preserve = FALSE) {
check_filter(...)
dots <- capture_dots(.data, ..., .j = FALSE)
if (filter_by_lgl_col(dots)) {
i <- call2("(", dots[[1]])
} else {
i <- Reduce(function(x, y) call2("&", x, y), dots)
}
step_subset_i(.data, i)
}
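# Illustrative usage (hypothetical data; assumes dplyr and data.table are attached).
# A single bare logical column becomes a plain column subset via filter_by_lgl_col()
# below; several conditions are combined with `&`.
#   library(dplyr)
#   dt <- lazy_dt(data.frame(x = 1:5, keep = c(TRUE, FALSE, TRUE, FALSE, TRUE)))
#   dt %>% filter(keep) %>% show_query()
#   dt %>% filter(x > 2, keep) %>% as_tibble()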
filter_by_lgl_col <- function(dots) {
if (length(dots) > 1) {
return(FALSE)
}
dot <- dots[[1]]
if (is_symbol(dot)) {
return(TRUE)
}
is_call(dot, name = "!", n = 1) && is_symbol(dot[[2]])
}
filter.data.table <- function(.data, ...) {
.data <- lazy_dt(.data)
filter(.data, ...)
}
check_filter <- function(...) {
dots <- enquos(...)
named <- have_name(dots)
for (i in which(named)) {
quo <- dots[[i]]
expr <- quo_get_expr(quo)
if (!is.logical(expr)) {
abort(c(
glue::glue("Problem with `filter()` input `..{i}`."),
x = glue::glue("Input `..{i}` is named."),
i = glue::glue("This usually means that you've used `=` instead of `==`."),
i = glue::glue("Did you mean `{name} == {as_label(expr)}`?", name = names(dots)[i])
), call = caller_env())
}
}
} |
library(aster2)
theta <- seq(-3, 3, 0.1)
cumfun <- function(theta) rep(0, length(theta))
moofun <- cumfun
voofun <- cumfun
thoofun <- cumfun
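# For the Bernoulli family, delta = -1 is the limit degenerate at zero, so the
# cumulant and its first three derivatives checked below are identically zero;
# the delta = +1 block further down is the limit degenerate at one, where the
# cumulant equals theta with constant first derivative 1 and vanishing higher
# derivatives.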
zeroth <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = -1)$zeroth, theta)
all.equal(zeroth, cumfun(theta))
first <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = -1, deriv = 1)$first, theta)
all.equal(first, moofun(theta))
second <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = - 1, deriv = 2)$second, theta)
all.equal(second, voofun(theta))
third <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = - 1, deriv = 3)$third, theta)
all.equal(third, thoofun(theta))
foo <- link(0, fam.bernoulli(), delta = -1, deriv = 1)
is.finite(foo$zeroth)
all.equal(0, foo$first)
cumfun <- function(theta) theta
moofun <- function(theta) rep(1, length(theta))
voofun <- function(theta) rep(0, length(theta))
thoofun <- voofun
zeroth <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = 1)$zeroth, theta)
all.equal(zeroth, cumfun(theta))
first <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = 1, deriv = 1)$first, theta)
all.equal(first, moofun(theta))
second <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = 1, deriv = 2)$second, theta)
all.equal(second, voofun(theta))
third <- mapply(function(theta) cumulant(theta, fam.bernoulli(),
delta = 1, deriv = 3)$third, theta)
all.equal(third, thoofun(theta))
foo <- link(1, fam.bernoulli(), delta = 1, deriv = 1)
is.finite(foo$zeroth)
all.equal(0, foo$first)
theta <- seq(-3, 3, 0.1)
cumfun <- function(theta) rep(0, length(theta))
moofun <- cumfun
voofun <- cumfun
thoofun <- cumfun
zeroth <- mapply(function(theta) cumulant(theta, fam.poisson(),
delta = -1)$zeroth, theta)
all.equal(zeroth, cumfun(theta))
first <- mapply(function(theta) cumulant(theta, fam.poisson(),
delta = -1, deriv = 1)$first, theta)
all.equal(first, moofun(theta))
second <- mapply(function(theta) cumulant(theta, fam.poisson(),
delta = - 1, deriv = 2)$second, theta)
all.equal(second, voofun(theta))
third <- mapply(function(theta) cumulant(theta, fam.poisson(),
delta = - 1, deriv = 3)$third, theta)
all.equal(third, thoofun(theta))
foo <- link(0, fam.poisson(), delta = -1, deriv = 1)
is.finite(foo$zeroth)
all.equal(0, foo$first)
cumfun <- function(theta) theta
moofun <- function(theta) rep(1, length(theta))
voofun <- function(theta) rep(0, length(theta))
thoofun <- voofun
zeroth <- mapply(function(theta) cumulant(theta, fam.zero.truncated.poisson(),
delta = -1)$zeroth, theta)
all.equal(zeroth, cumfun(theta))
first <- mapply(function(theta) cumulant(theta, fam.zero.truncated.poisson(),
delta = -1, deriv = 1)$first, theta)
all.equal(first, moofun(theta))
second <- mapply(function(theta) cumulant(theta, fam.zero.truncated.poisson(),
delta = -1, deriv = 2)$second, theta)
all.equal(second, voofun(theta))
third <- mapply(function(theta) cumulant(theta, fam.zero.truncated.poisson(),
delta = -1, deriv = 3)$third, theta)
all.equal(third, thoofun(theta))
foo <- link(1, fam.zero.truncated.poisson(), delta = -1, deriv = 1)
is.finite(foo$zeroth)
all.equal(0, foo$first)
set.seed(42)
d <- 4
theta <- matrix(rnorm(d * 25), ncol = d)
cumfun <- function(theta, delta) {
stopifnot(is.numeric(theta))
stopifnot(is.finite(theta))
stopifnot(length(theta) == d)
stopifnot(is.numeric(delta))
stopifnot(is.finite(delta))
stopifnot(length(delta) == d)
stopifnot(delta <= 0)
stopifnot(any(delta == 0))
inies <- delta == 0
d.too <- sum(inies)
if (d.too == 1) return(theta[inies])
theta.too <- theta[inies]
return(cumulant(theta.too, fam.multinomial(d.too))$zeroth)
}
moofun <- function(theta, delta) {
stopifnot(is.numeric(theta))
stopifnot(is.finite(theta))
stopifnot(length(theta) == d)
stopifnot(is.numeric(delta))
stopifnot(is.finite(delta))
stopifnot(length(delta) == d)
stopifnot(delta <= 0)
stopifnot(any(delta == 0))
inies <- delta == 0
d.too <- sum(inies)
    if (d.too == 1) return(as.numeric(inies))
theta.too <- theta[inies]
foo <- cumulant(theta.too, fam.multinomial(d.too), deriv = 1)$first
bar <- rep(0, d)
bar[inies] <- foo
return(bar)
}
voofun <- function(theta, delta) {
stopifnot(is.numeric(theta))
stopifnot(is.finite(theta))
stopifnot(length(theta) == d)
stopifnot(is.numeric(delta))
stopifnot(is.finite(delta))
stopifnot(length(delta) == d)
stopifnot(delta <= 0)
stopifnot(any(delta == 0))
inies <- delta == 0
d.too <- sum(inies)
if (d.too == 1) return(matrix(0, d, d))
theta.too <- theta[inies]
foo <- cumulant(theta.too, fam.multinomial(d.too), deriv = 2)$second
bar <- matrix(0, d, d)
baz <- matrix(0, d, d.too)
baz[inies, ] <- foo
bar[ , inies] <- baz
return(bar)
}
thoofun <- function(theta, delta) {
stopifnot(is.numeric(theta))
stopifnot(is.finite(theta))
stopifnot(length(theta) == d)
stopifnot(is.numeric(delta))
stopifnot(is.finite(delta))
stopifnot(length(delta) == d)
stopifnot(delta <= 0)
stopifnot(any(delta == 0))
inies <- delta == 0
d.too <- sum(inies)
if (d.too == 1) return(array(0, rep(d, 3)))
theta.too <- theta[inies]
foo <- cumulant(theta.too, fam.multinomial(d.too), deriv = 3)$third
bar <- array(0, rep(d, 3))
baz <- array(0, c(d, d, d.too))
qux <- array(0, c(d, d.too, d.too))
qux[inies, , ] <- foo
baz[ , inies, ] <- qux
bar[ , , inies] <- baz
return(bar)
}
delta <- c(0, 0, 0, -1)
zeroth <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta)$zeroth)
my.zeroth <- apply(theta, 1, cumfun, delta = delta)
all.equal(zeroth, my.zeroth)
first <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 1)$first)
my.first <- apply(theta, 1, moofun, delta = delta)
all.equal(first, my.first)
second <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 2)$second)
my.second <- apply(theta, 1, voofun, delta = delta)
all.equal(second, my.second)
third <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 3)$third)
my.third <- apply(theta, 1, thoofun, delta = delta)
all.equal(third, my.third)
link(first[ , 1], fam.multinomial(d), delta = delta, deriv = 1)
delta <- c(0, 0, -1, -2)
zeroth <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta)$zeroth)
my.zeroth <- apply(theta, 1, cumfun, delta = delta)
all.equal(zeroth, my.zeroth)
first <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 1)$first)
my.first <- apply(theta, 1, moofun, delta = delta)
all.equal(first, my.first)
second <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 2)$second)
my.second <- apply(theta, 1, voofun, delta = delta)
all.equal(second, my.second)
third <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 3)$third)
my.third <- apply(theta, 1, thoofun, delta = delta)
all.equal(third, my.third)
link(first[ , 1], fam.multinomial(d), delta = delta, deriv = 1)
delta <- c(0, -0.5, -1, -2)
zeroth <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta)$zeroth)
my.zeroth <- apply(theta, 1, cumfun, delta = delta)
all.equal(zeroth, my.zeroth)
first <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 1)$first)
my.first <- apply(theta, 1, moofun, delta = delta)
all.equal(first, my.first)
second <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 2)$second)
my.second <- apply(theta, 1, voofun, delta = delta)
all.equal(second, my.second)
third <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 3)$third)
my.third <- apply(theta, 1, thoofun, delta = delta)
all.equal(third, my.third)
linkfun <- function(xi, delta) {
stopifnot(is.numeric(xi))
stopifnot(is.finite(xi))
stopifnot(length(xi) == d)
stopifnot(is.numeric(delta))
stopifnot(is.finite(delta))
stopifnot(length(delta) == d)
stopifnot(delta <= 0)
stopifnot(any(delta == 0))
inies <- delta == 0
d.too <- sum(inies)
if (d.too == 1) return(rep(0, d))
xi.too <- xi[inies]
foo <- link(xi.too, fam.multinomial(d.too))$zeroth
bar <- rep(0, d)
bar[inies] <- foo
return(bar)
}
dlinkfun <- function(xi, delta) {
stopifnot(is.numeric(xi))
stopifnot(is.finite(xi))
stopifnot(length(xi) == d)
stopifnot(is.numeric(delta))
stopifnot(is.finite(delta))
stopifnot(length(delta) == d)
stopifnot(delta <= 0)
stopifnot(any(delta == 0))
inies <- delta == 0
d.too <- sum(inies)
if (d.too == 1) return(matrix(0, d, d))
xi.too <- xi[inies]
foo <- link(xi.too, fam.multinomial(d.too), deriv = 1)$first
bar <- matrix(0, d, d)
baz <- matrix(0, d, d.too)
baz[inies, ] <- foo
bar[ , inies] <- baz
return(bar)
}
delta <- c(0, 0, 0, -1)
xi <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 1)$first)
xi <- t(xi)
zeroth <- apply(xi, 1, function(xi) link(xi, fam.multinomial(d),
delta = delta)$zeroth)
my.zeroth <- apply(xi, 1, linkfun, delta = delta)
all.equal(zeroth, my.zeroth)
first <- apply(xi, 1, function(xi) link(xi, fam.multinomial(d),
delta = delta, deriv = 1)$first)
my.first <- apply(xi, 1, dlinkfun, delta = delta)
all.equal(first, my.first)
delta <- c(0, 0, -1, -2)
xi <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 1)$first)
xi <- t(xi)
zeroth <- apply(xi, 1, function(xi) link(xi, fam.multinomial(d),
delta = delta)$zeroth)
my.zeroth <- apply(xi, 1, linkfun, delta = delta)
all.equal(zeroth, my.zeroth)
first <- apply(xi, 1, function(xi) link(xi, fam.multinomial(d),
delta = delta, deriv = 1)$first)
my.first <- apply(xi, 1, dlinkfun, delta = delta)
all.equal(first, my.first)
delta <- c(0, -0.5, -1, -2)
xi <- apply(theta, 1, function(theta) cumulant(theta, fam.multinomial(d),
delta = delta, deriv = 1)$first)
xi <- t(xi)
zeroth <- apply(xi, 1, function(xi) link(xi, fam.multinomial(d),
delta = delta)$zeroth)
my.zeroth <- apply(xi, 1, linkfun, delta = delta)
all.equal(zeroth, my.zeroth)
first <- apply(xi, 1, function(xi) link(xi, fam.multinomial(d),
delta = delta, deriv = 1)$first)
my.first <- apply(xi, 1, dlinkfun, delta = delta)
all.equal(first, my.first) |
cournot <- function(prices,quantities,
margins = matrix(NA_real_ , nrow(quantities),ncol(quantities)),
demand = rep("linear",length(prices)),
cost = rep("linear",nrow(quantities)),
mcfunPre=list(),
mcfunPost=mcfunPre,
vcfunPre=list(),
vcfunPost=vcfunPre,
capacitiesPre = rep(Inf,nrow(quantities)),
capacitiesPost = capacitiesPre,
productsPre=!is.na(quantities),
productsPost=productsPre,
ownerPre,ownerPost,
mktElast = rep(NA_real_, length(prices)),
mcDelta =rep(0,nrow(quantities)),
quantityStart=as.vector(quantities),
control.slopes,
control.equ,
labels,
...
){
shares <- as.vector(quantities/sum(quantities))
if(missing(labels)){
if(is.null(dimnames(quantities))){
rname <- paste0("O",1:nrow(quantities))
cname <- paste0("P",1:ncol(quantities))
}
else{rname <- rownames(quantities)
cname <- colnames(quantities)
}
labels <- list(rname,cname)
}
result <- new("Cournot",prices=prices, quantities=quantities,margins=margins,
shares=shares,mcDelta=mcDelta, subset= rep(TRUE,length(shares)), demand = demand, cost=cost,
mcfunPre=mcfunPre, mcfunPost=mcfunPost,vcfunPre=vcfunPre, vcfunPost=vcfunPost,
capacitiesPre=capacitiesPre,capacitiesPost=capacitiesPost,
ownerPre=ownerPre, mktElast = mktElast,productsPre=productsPre,productsPost=productsPost,
ownerPost=ownerPost, quantityStart=quantityStart,labels=labels)
if(!missing(control.slopes)){
[email protected] <- control.slopes
}
if(!missing(control.equ)){
[email protected] <- control.equ
}
result@ownerPre <- ownerToMatrix(result,TRUE)
result@ownerPost <- ownerToMatrix(result,FALSE)
result <- calcSlopes(result)
result@quantityPre <- calcQuantities(result, preMerger = TRUE,...)
result@quantityPost <- calcQuantities(result,preMerger = FALSE,...)
result@pricePre <- calcPrices(result, preMerger = TRUE)
result@pricePost <- calcPrices(result,preMerger = FALSE)
return(result)
}
stackelberg <- function(prices,quantities,margins,
demand = rep("linear",length(prices)),
cost = rep("linear",nrow(quantities)),
isLeaderPre = matrix(FALSE,ncol = ncol(quantities), nrow= nrow(quantities)),
isLeaderPost= isLeaderPre,
mcfunPre=list(),
mcfunPost=mcfunPre,
vcfunPre=list(),
vcfunPost=vcfunPre,
dmcfunPre=list(),
dmcfunPost=dmcfunPre,
capacitiesPre = rep(Inf,nrow(quantities)),
capacitiesPost = capacitiesPre,
productsPre=!is.na(quantities),
productsPost=productsPre,
ownerPre,ownerPost,
mcDelta =rep(0,nrow(quantities)),
quantityStart=as.vector(quantities),
control.slopes,
control.equ,
labels,
...
){
shares <- as.vector(quantities/sum(quantities))
if(missing(labels)){
if(is.null(dimnames(quantities))){
rname <- paste0("O",1:nrow(quantities))
cname <- paste0("P",1:ncol(quantities))
}
else{rname <- rownames(quantities)
cname <- colnames(quantities)
}
labels <- list(rname,cname)
}
result <- new("Stackelberg",prices=prices, quantities=quantities,margins=margins,
shares=shares,mcDelta=mcDelta, subset= rep(TRUE,length(shares)), demand = demand, cost = cost,
mcfunPre=mcfunPre, mcfunPost=mcfunPost,vcfunPre=vcfunPre, vcfunPost=vcfunPost,
dmcfunPre=dmcfunPre, dmcfunPost=dmcfunPost, isLeaderPre = isLeaderPre, isLeaderPost = isLeaderPost,
ownerPre=ownerPre,productsPre=productsPre,productsPost=productsPost,
capacitiesPre=capacitiesPre,capacitiesPost=capacitiesPost,
ownerPost=ownerPost, quantityStart=quantityStart,labels=labels)
if(!missing(control.slopes)){
[email protected] <- control.slopes
}
if(!missing(control.equ)){
[email protected] <- control.equ
}
result@ownerPre <- ownerToMatrix(result,TRUE)
result@ownerPost <- ownerToMatrix(result,FALSE)
result <- calcSlopes(result)
result@quantityPre <- calcQuantities(result, preMerger = TRUE,...)
result@quantityPost <- calcQuantities(result,preMerger = FALSE,...)
result@pricePre <- calcPrices(result, preMerger = TRUE)
result@pricePost <- calcPrices(result,preMerger = FALSE)
return(result)
} |
envelope.Dtable <-
function (Y, fun = Kest, nsim = 99, nrank = 1, ..., funargs = list(),
funYargs = funargs, simulate = NULL, verbose = TRUE, savefuns = FALSE,
Yname = NULL, envir.simul = NULL)
{
cl <- spatstat.utils::short.deparse(sys.call())
if (is.null(Yname))
Yname <- spatstat.utils::short.deparse(substitute(Y))
if (is.null(fun))
fun <- Kest
envir.user <- if (!is.null(envir.simul))
envir.simul
else parent.frame()
envir.here <- sys.frame(sys.nframe())
if (is.null(simulate)) {
stop("The simulation function must be provided in the simulate argument.")
} else {
simrecipe <- simulate
X <- Y
}
envelopeEngine(X = X, fun = fun, simul = simrecipe, nsim = nsim,
nrank = nrank, ..., funargs = funargs, funYargs = funYargs,
verbose = verbose, clipdata = FALSE, transform = NULL,
global = FALSE, ginterval = NULL, use.theory = NULL,
alternative = c("two.sided", "less", "greater"), scale = NULL,
clamp = FALSE,
savefuns = savefuns, savepatterns = FALSE, nsim2 = nsim,
VARIANCE = FALSE, nSD = 2, Yname = Yname, maxnerr = nsim,
cl = cl, envir.user = envir.user, do.pwrong = FALSE,
foreignclass = "Dtable")
} |
RelCoef <-
function(IndividualGenom=matrix(0,nrow=0,ncol=0),ParentalLineGenom=matrix(0,nrow=0,ncol=0) ,Freq=matrix(0,nrow=0,ncol=0),Crossing=matrix(0,nrow=0,ncol=0),ParentPop=rep(0,0),Combination=list(),Phased=FALSE,Details=FALSE,NbInit=5,Prec=10^(-4),NbCores=NULL){
if ((nrow(IndividualGenom)==0) & (nrow(ParentalLineGenom)==0)){
stop("No genotype matrix provided")
}
if ((nrow(IndividualGenom)!=0) & (nrow(ParentalLineGenom)!=0)){
stop("Either a Parental genotype matrix OR a hybrid genotype matrix should be provided")
}
if (nrow(Freq)==0){
stop("Freq matrix should be provided")
}
if (nrow(IndividualGenom)!=0){
Genom <- IndividualGenom
    if ((nrow(ParentalLineGenom)!=0) | (nrow(Crossing)!=0) | (length(ParentPop)!=0)){
stop("If IndividualGenom is provided, ParentalLineGenom, Crossing and ParentPop cannot be provided")
}
Crossing <- matrix(seq(1:ncol(Genom)),ncol=2,byrow=T)
ParentPop <- rep(1,ncol(Genom))
} else {
Genom <- ParentalLineGenom
if (nrow(Crossing)==0){
stop("Crossing matrix should be provided")
}
if (max(Crossing) > ncol(Genom)){
stop("Nb of parents in crossing > Nb of parents in ParentalLineGenom")
}
if (length(ParentPop)==0){
ParentPop <- rep(1,ncol(Genom))
}
if (length(ParentPop) < max(Crossing)){
stop("Nb of parents differ between ParentPop and Crossing")
}
if (length(ParentPop) < ncol(Genom)){
stop("Nb of parents differ between ParentPop and ParentalLineGenom")
}
if (max(ParentPop) > dim(Freq)[2]){
stop("Number of Pop frequencies < number of Pop")
}
if (Phased==FALSE){
print("Argument Phased is change in TRUE")
Phased=TRUE
}
}
if (dim(Freq)[1] != dim(Genom)[1]){
stop("Nrow differs between the Genotype matrix and Pop frequencies.")
}
NbIndividual <- dim(Crossing)[1]
if(Phased==F){
NbIBD <- 9
} else {
NbIBD <- 15
}
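  # With unphased genotypes the 9 condensed identity (Jacquard) coefficients are
  # estimated; with phased genotypes the full set of 15 detailed identity states is
  # used, matching the .TheNineDeltaGraph()/.TheFifteenDeltaGraph() helpers shown
  # when Details = TRUE.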
if (length(NbCores)!=0){
if (Sys.info()[['sysname']]=="Windows"){
if (NbCores!=1){
NbCores <- 1
print("NbCores > 1 is not supported on Windows, NbCores is set to 1")
}
}
} else {
NbCores <- detectCores()-1
}
Crossing <- t(Crossing)
if (length(Combination)==0){
comb <- combn(1:NbIndividual , 2 , simplify = F)
CoupleTwoHybrids <- mclapply(comb , function(x) .RelatednessCouple(Genom[,c(Crossing[,x])],Freq,Crossing[,x],ParentPop[c(Crossing[,x])],Phased,NbInit,Prec) , mc.cores=NbCores)
CoupleOneHybridRepeted <- mclapply(1:NbIndividual , function(x) .RelatednessCouple(Genom[,c(Crossing[,c(x,x)])],Freq,Crossing[,c(x,x)],ParentPop[c(Crossing[,c(x,x)])],Phased,NbInit,Prec) , mc.cores=NbCores)
MatDelta <- sapply(CoupleTwoHybrids, function(x) x$Delta)
mat <- matrix(0,NbIndividual,NbIndividual)
DeltaTri <- lapply(1:NbIBD , function(x) .MatTriSup(mat,MatDelta[x,]))
DeltaDiag <- sapply(CoupleOneHybridRepeted, function(x) x$Delta)
Delta <- lapply(1:NbIBD , function(x) .AddDiag(DeltaTri[[x]],DeltaDiag[x,]))
names(Delta) <- paste0("Delta",1:NbIBD)
} else {
NamesCombination <- sapply(1:length(Combination) , function(x) paste0(Combination[[x]][1],"/",Combination[[x]][2]))
CoupleTwoHybrids <- mclapply(Combination , function(x) .RelatednessCouple(Genom[,c(Crossing[,x])],Freq,Crossing[,x],ParentPop[c(Crossing[,x])],Phased,NbInit,Prec) , mc.cores=NbCores)
Delta <- lapply(CoupleTwoHybrids , function(x) x$Delta)
names(Delta) <- NamesCombination
}
if (Details==TRUE){
if (Phased==TRUE){
.TheFifteenDeltaGraph()
}else{
.TheNineDeltaGraph()
}
}
return(Delta)
} |
make_option_impl <- function(getter, option_name = NULL, env = caller_env()) {
getter_body <- enexpr(getter)
if (is.null(option_name)) {
option_name <- getter_body[[2]]
stopifnot(is.character(option_name))
}
name <- sub(paste0(utils::packageName(env), "."), "", option_name, fixed = TRUE)
getter_name <- paste0("get_", utils::packageName(env), "_option_", name)
local_setter_name <- paste0("local_", utils::packageName(env), "_option_", name)
setter_name <- paste0("set_", utils::packageName(env), "_option_", name)
local_setter_body <- expr({
out <- !!call2("local_options", !!option_name := sym("value"), .frame = sym("env"))
!!call2(getter_name)
invisible(out[[1]])
})
setter_body <- expr({
out <- !!call2("options", !!option_name := sym("value"))
!!call2(getter_name)
invisible(out[[1]])
})
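  # The generated accessor dispatches on its arguments: with `value` missing it runs
  # the original getter expression; otherwise it sets the option (only for the
  # calling frame when local = TRUE, via local_options(), or globally via options()),
  # re-runs the getter, and invisibly returns the previous value.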
body <- expr({
if (missing(!!sym("value"))) {
if (!missing(local)) {
abort("Can't pass `local` argument if `value` is missing.")
}
!!getter_body
} else if (local) !!local_setter_body
else !!setter_body
})
args <- pairlist2(value = , local = FALSE, env = quote(caller_env()))
assign(getter_name, new_function(list(), getter_body, env = env), env)
assign(local_setter_name, new_function(args[c(1, 3)], local_setter_body, env = env), env)
assign(setter_name, new_function(args[1], setter_body, env = env), env)
new_function(args, body, env = env)
} |
AthenaDataType <- function(fields, ...) {
switch(
class(fields)[1],
logical = "BOOLEAN",
integer = "INT",
integer64 = "BIGINT",
numeric = "DOUBLE",
double = "DOUBLE",
factor = "STRING",
character = "STRING",
list = "STRING",
Date = "DATE",
POSIXct = "TIMESTAMP",
stop("Unknown class ", paste(class(fields), collapse = "/"), call. = FALSE)
)
}
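# Illustrative mapping (hypothetical calls; the function may be internal):
#   AthenaDataType(Sys.Date())   # "DATE"
#   AthenaDataType(1L)           # "INT"
#   AthenaDataType(letters)      # "STRING"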
AthenaToRDataType <- function(method, data_type) UseMethod("AthenaToRDataType")
AthenaToRDataType.athena_data.table <- function(method, data_type){
athena_to_r <- function(x){
switch(x,
boolean = "logical",
int ="integer",
integer = "integer",
tinyint = "integer",
smallint = "integer",
bigint = athena_option_env$bigint,
float = "double",
real = "double",
decimal = "double",
string = "character",
varchar = "character",
char = "character",
date = "Date",
timestamp = "POSIXct",
"timestamp with time zone" = "POSIXct",
array = "character",
row = "character",
map = "character",
json = "character",
ipaddress = "character",
varbinary = "character",
x)}
output <- vapply(data_type, athena_to_r, FUN.VALUE = character(1))
return(output)
}
AthenaToRDataType.athena_vroom <- function(method, data_type){
athena_to_r <- function(x){
switch(x,
boolean = "l",
int ="i",
integer = "i",
tinyint = "i",
smallint = "i",
bigint = athena_option_env$bigint,
double = "d",
float = "d",
real = "d",
decimal = "d",
string = "c",
varchar = "c",
char = "c",
date = "D",
timestamp = "T",
"timestamp with time zone" = "c",
array = "c",
row = "c",
map = "c",
json = "c",
ipaddress = "c",
varbinary = "c",
x)}
output <- vapply(data_type, athena_to_r, FUN.VALUE = character(1))
return(output)
} |
beta.div <- function (biom, method, weighted=TRUE, tree=NULL) {
methodList <- c("manhattan", "euclidean", "bray-curtis", "jaccard", "unifrac")
method <- methodList[pmatch(tolower(method), methodList)]
if (!is.logical(weighted)) stop(simpleError("Weighted must be TRUE/FALSE."))
if (length(method) != 1) stop(simpleError("Invalid method for beta.div()"))
if (is.na(method)) stop(simpleError("Invalid method for beta.div()"))
if (is(biom, "simple_triplet_matrix")) { counts <- biom
} else if (is(biom, "BIOM")) { counts <- biom$counts
} else if (is(biom, "matrix")) { counts <- slam::as.simple_triplet_matrix(biom)
} else {
stop(simpleError("biom must be a matrix, simple_triplet_matrix, or BIOM object."))
}
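  # Counts are accepted as a BIOM object, a plain matrix or a slam
  # simple_triplet_matrix; UniFrac additionally needs a phylo tree, taken from the
  # BIOM object or read from a file path when not supplied directly.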
if (identical(method, "unifrac")) {
if (!is(tree, "phylo")) {
if (is(biom, "BIOM")) {
if (is(biom$phylogeny, "phylo")) {
tree <- biom$phylogeny
}
}
if (is(tree, "character")) {
if (file.exists(tree)) {
tree <- rbiom::read.tree(tree)
}
}
if (!is(tree, "phylo")) {
stop(simpleError("No tree provided to beta.div()."))
}
}
if (length(setdiff(rownames(counts), tree$tip.label)) > 0)
stop(simpleError("OTUs missing from reference tree."))
if (length(setdiff(tree$tip.label, rownames(counts))) > 0)
tree <- rbiom::subtree(tree, rownames(counts))
counts <- counts[as.character(tree$tip.label),]
}
ord <- order(counts$j, counts$i)
counts$i <- counts$i[ord]
counts$j <- counts$j[ord]
counts$v <- counts$v[ord]
if (identical(method, "unifrac")) {
par_unifrac(counts, tree, ifelse(weighted, 1L, 0L))
} else {
counts <- t(as.matrix(counts))
dm <- par_beta_div(counts, method, ifelse(weighted, 1L, 0L))
dm <- as.dist(dm)
attr(dm, 'Labels') <- rownames(counts)
return (dm)
}
} |
context("Test ra_prospect")
library(hBayesDM)
test_that("Test ra_prospect", {
skip_on_cran()
expect_output(ra_prospect(
data = "example", niter = 10, nwarmup = 5, nchain = 1, ncore = 1))
}) |
eco.phy.regression <- function(data,
randomisation=c("taxa.labels", "richness", "frequency", "sample.pool", "phylogeny.pool", "independentswap", "trialswap"), permute=0, method=c("quantile", "lm", "mantel"), indep.swap=1000, abundance=TRUE, ...){
if(! inherits(data, "comparative.comm")) stop("'data' must be a comparative community ecology object")
randomisation <- match.arg(randomisation)
method <- match.arg(method)
if(permute < 0) stop("Can't have negative null permutations!")
if(abundance==FALSE)
data$comm[data$comm>1] <- 1
eco.matrix <- as.dist(1 - as.matrix(comm.dist(data$comm)))
phy.matrix <- as.dist(cophenetic.phylo(data$phy))
observed <- .eco.phy.regression(eco.matrix, phy.matrix, method, ...)
randomisations <- vector(mode="list", length=permute)
for(i in seq(from=1, length.out=permute)){
curr.rnd <- .eco.null(data$comm, randomisation, swap.iter=indep.swap)
rnd.mat <- as.dist(1 - as.matrix(comm.dist(curr.rnd)))
if(any(is.na(rnd.mat))){
warning("NAs in permuted community matrix; skipping this iteration")
next()
}
randomisations[[i]] <- .eco.phy.regression(rnd.mat, phy.matrix, method, ...)
}
output <- .prepare.regression.output(observed, randomisations, method, permute, "eco.phy.regression")
output$data <- data
return(output)
}
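# Illustrative usage (hypothetical comparative.comm object `cc`, e.g. built with the
# package's comparative.comm() constructor):
#   eco.phy.regression(cc, randomisation = "taxa.labels", permute = 99,
#                      method = "mantel")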
.eco.phy.regression <- function(eco.mat, phy.mat, method=c("quantile", "lm", "mantel"), ...){
method <- match.arg(method)
if(method == 'lm')
model <- lm(as.numeric(eco.mat) ~ as.numeric(phy.mat), ...)
if(method == "quantile")
model <- rq(as.numeric(eco.mat) ~ as.numeric(phy.mat), ...)
if(method == "mantel")
model <- mantel(eco.mat, phy.mat, ...)
return(model)
} |
"leds.mspct" |
B <- rbind(c(1, 0, 0),
c(1, sqrt(5), 0),
c(1, 2/sqrt(5), 1/sqrt(5)))
B %*% t(B) |
"mcdermit_filters" |
dpareto.ll <- function(x, theta = NULL, ...){
th.table <- cbind(c(0,0.050,0.089,0.126,0.164,0.203,0.244,0.286,0.332,0.380,
0.431,0.486,0.545,0.609,0.678,0.753,0.835,0.925,1.025,1.135,
1.258,1.395,1.549,1.723,1.921,2.146,2.404,2.701,3.044,3.442,
3.904,4.442,5.071,5.807,6.669,7.682,8.870),seq(0,.36,by=.01))
inf.test <- which(x==Inf)
if(length(inf.test)>0){
x[inf.test] <- max(x[-inf.test])
warning("Values of x equal to 'Inf' are set to the maximum finite value.",call.=FALSE)
}
x.bar <- mean(x)
if(is.null(theta)){
if(x.bar<=max(th.table[,1])){
ind <- max(which(th.table[,1]<x.bar))
theta <- approx(th.table[ind:(ind+1),],xout=x.bar)$y
} else{
S.hat <- sapply(1:length(x),function(i) mean(x>=x[i]))
theta <- prod(S.hat^((sum(log(1+x)))^(-1)))
}
}
tmp <- which(ddpareto(x,theta=theta,log=TRUE)==-Inf)
if(length(tmp)>0){
x[tmp] <- max(x[-tmp])
warning("Numerical overflow problem when calculating log-density of some x values. The problematic values are set to the maximum finite value calculated.",call.=FALSE)
}
ll.f <- function(theta) -sum(ddpareto(x,theta=theta,log=TRUE))
fit <- try(suppressWarnings(stats4::mle(ll.f,start=list(theta=theta),lower=0,upper=1,method="Brent")),silent=TRUE)
if(class(fit)=="try-error") stop(paste("Numerical optimization of the MLE failed. Consider trying a different starting value for theta.","\n"))
fit
} |
`TDiM` <-
function(S,R){
N<-length(S)
(1/sqrt(N-1))*(sum(S*R) - N*mean(S)*mean(R))/(sd(S)*sd(R))
} |
HDistSize <- function(b1,b2,n2){
b2c <- c(b1,b2)
b2min <- min(b2c)
b2max <- max(b2c)
P1 <- density(b1,from=b2min,to=b2max,n=n2)
Q1 <- density(b2,from=b2min,to=b2max,n=n2)
Pdiff1 <- P1$y
Qdiff1 <- Q1$y
step1 <- P1$x[2]-P1$x[1]
diver1 <- (sqrt(Pdiff1)-sqrt(Qdiff1))^2*step1
res1 <- sqrt(sum(diver1)/2)
return(res1)
}
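# Illustrative check (hypothetical data): the statistic approximates the Hellinger
# distance between the two kernel density estimates, so it is near 0 for samples
# from the same distribution and approaches 1 as they separate.
#   set.seed(1)
#   HDistSize(rnorm(200), rnorm(200), 512)
#   HDistSize(rnorm(200), rnorm(200, 5), 512)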
NULL
NULL
NULL
NULL
NULL |
"c30_df" |
eqv.ssd <- function(conc, time, group, dependent=FALSE, method=c("fieller", "z", "boott"), conf.level=0.90, strata=NULL, nsample=1000, data){
auc <- function(conc, time){
xq <- tapply(conc, time, mean)
sxq <- tapply(conc, time, var) / tapply(conc, time, length)
est <- sum(.weight(time)*xq)
var <- sum(.weight(time)^2*sxq)
return(list(est=est, var=var))
}
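  # auc() above estimates the AUC to tlast for a batch design as a weighted sum of
  # the per-time-point means, with its variance built from the per-time-point sample
  # variances over the group sizes and the squared weights from .weight(time);
  # asymp() below turns the ratio of two such AUCs into a normal-approximation
  # confidence interval via the delta method.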
asymp <- function(data1, data2, alpha){
auc1 <- auc(data1$conc, data1$time)
auc2 <- auc(data2$conc, data2$time)
auc1$var <- auc1$var
auc2$var <- auc2$var
var.asymp <- auc1$var/(auc2$est^2) + (auc1$est^2)/(auc2$est^4)*auc2$var
z <- qnorm(p=1-alpha/2, mean=0, sd=sqrt(1))
est <- auc1$est / auc2$est
lower <- est - z*sqrt(var.asymp)
upper <- est + z*sqrt(var.asymp)
res <- c(lower, upper)
return(res)
}
boott <- function(data1, data2, alpha, nsample, bystrata1, bystrata2){
obsv.auc1 <- auc(data1$conc, data1$time)
obsv.auc2 <- auc(data2$conc, data2$time)
obsv.est <- obsv.auc1$est / obsv.auc2$est
obsv.var <- obsv.auc1$var/(obsv.auc2$est^2) + (obsv.auc1$est^2)/(obsv.auc2$est^4)*obsv.auc2$var
boot.stat <- rep(NA,nsample)
for(i in 1:nsample){
boot.data1 <- data.frame(time=data1$time, conc=unlist(tapply(data1$conc, bystrata1, sample, replace=TRUE)), stringsAsFactors = TRUE)
boot.data2 <- data.frame(time=data2$time, conc=unlist(tapply(data2$conc, bystrata2, sample, replace=TRUE)), stringsAsFactors = TRUE)
boot.auc1 <- auc(boot.data1$conc, boot.data1$time)
boot.auc2 <- auc(boot.data2$conc, boot.data2$time)
boot.est <- boot.auc1$est / boot.auc2$est
boot.var <- boot.auc1$var/(boot.auc2$est^2) + (boot.auc1$est^2)/(boot.auc2$est^4)*boot.auc2$var
boot.stat[i] <- (boot.est - obsv.est) / sqrt(boot.var)
}
    t.lb <- quantile(boot.stat, probs=c(alpha/2), type=5, na.rm=TRUE)
    t.ub <- quantile(boot.stat, probs=c(1-alpha/2), type=5, na.rm=TRUE)
base <- data.frame(est=obsv.est, t.lb=t.lb, t.ub=t.ub, stringsAsFactors = TRUE)
base$lower <- base$est - base$t.ub*sqrt(obsv.var)
base$upper <- base$est - base$t.lb*sqrt(obsv.var)
return(c(base$lower, base$upper))
}
if(!missing(data)){
cnames <- colnames(data)
if(!any(cnames=='conc')){stop("data does not contain a variable conc")}
if(!any(cnames=='time')){stop("data does not contain a variable time")}
conc <- data$conc
time <- data$time
if(any(cnames=='group')){
group <- data$group
}
}
method <- match.arg(method, several.ok=TRUE)
method <- sort(method)
if(length(conc) != length(time)){stop('different length of input vectors')}
if(!is.null(strata)){
if(length(conc)!=length(strata)){stop('different length of input vectors')}
    if(!any(method=='boott')){warning("strata variable only applicable in method boott")}
}
if(!is.null(group)){
if(length(conc)!=length(group)){stop('different length of input vectors')}
if(length(unique(group))!=2){stop("limited for comparison of 2 groups")}
}
data <- data.frame(conc=conc, time=time, stringsAsFactors = TRUE)
if(is.null(strata)){strata <- rep(1, nrow(data))}
data <- cbind(data, group=as.factor(group), strata=as.factor(strata))
data <- na.omit(data[order(data$strata, data$group, data$time),])
grpfact <- levels(data$group)
if(any(na.omit(as.vector(tapply(data$conc, list(data$strata, data$group, data$time), length))) < 2)){
stop('at least 2 observations per strata, group and time point required')
}
if(any(method=='boott')){
bystrata <- data.frame(time, group, strata, stringsAsFactors = TRUE)
bystrata <- bystrata[order(bystrata$group, bystrata$strata, bystrata$time),]
bystrata1 <- as.list(subset(bystrata, bystrata$group==grpfact[1]))
bystrata2 <- as.list(subset(bystrata, bystrata$group==grpfact[2]))
}
alpha <- 1-conf.level
w1 <- .weight(unlist(subset(data, data$group==grpfact[1], select='time')))
w2 <- .weight(unlist(subset(data, data$group==grpfact[2], select='time')))
if(length(w1) != length(w2) || !all(w1==w2)){stop('time points are not identical for both groups')}
w <- .weight(data$time)
data1 <- subset(data, data$group==grpfact[1])
data2 <- subset(data, data$group==grpfact[2])
auc1 <- auc.ssd(conc=data1$conc,data1$time,method='t',conf.level=conf.level)
auc2 <- auc.ssd(conc=data2$conc,data2$time,method='t',conf.level=conf.level)
n1 <- tapply(data1$conc, data1$time, length)
n2 <- tapply(data2$conc, data2$time, length)
sd1 <- tapply(data1$conc, data1$time, sd)
sd2 <- tapply(data2$conc, data2$time, sd)
est <- auc1$est/auc2$est
num <- (sum(w^2*sd1^2*n1^-1) + est^2*sum(w^2*sd2^2*n2^-1))^2
den <- sum(w^4*sd1^4*(n1^2*(n1-1))^-1) + est^4*sum(w^4*sd2^4*(n2^2*(n2-1))^-1)
df.fieller <- num/den
sd1 <- auc1$CIs[1,2]
sd2 <- auc2$CIs[1,2]
var.asymp <- sd1^2/(auc2$est^2) + (auc1$est^2)/(auc2$est^4)*sd2^2
res <- NULL
df <- NULL
if(any(method=='boott')){
res <- rbind(res, boott(data1=data1, data2=data2, alpha=alpha, nsample=nsample, bystrata1=bystrata1, bystrata2=bystrata2))
df <- rbind(df,NA)
}
if(any(method=='fieller')){
res <- rbind(res, .fieller.ind(auc1=auc1$est, auc2=auc2$est, var1=sd1^2, var2=sd2^2, df=df.fieller, conf.level=conf.level))
df <- rbind(df,df.fieller)
}
if(any(method=='z')){
res <- rbind(res,asymp(data1=data1, data2=data2, alpha=alpha))
df <- rbind(df,NA)
}
rownames(res) <- method
rownames(df) <- method
colnames(res) <- c('lower','upper')
out <- NULL
out$est <- matrix(as.double(est),ncol=1)
rownames(out$est) <- 'ratio of independent AUCs to tlast'
colnames(out$est) <- 'est'
out$CIs<-data.frame(est=rep(est,length(method)), stderr=rep(sqrt(var.asymp),length(method)), lower=res[,1], upper=res[,2], df=df ,method=method, stringsAsFactors = TRUE)
rownames(out$CIs) <- paste(conf.level*100,'% CI using a ', method,'-interval for the ratio of independent AUCs to tlast', sep='')
out$design<-"ssd"
out$conf.level <- conf.level
out$conc <- conc
out$time <- time
out$group <- group
class(out)<-"PK"
return(out)
} |
"abundances" |
rectangleSP_test <- function (x0, y0, w, h) {
x = c(x0 + w/2, x0 - w/2, x0 - w/2, x0 + w/2)
y = c(y0 + h/2, y0 + h/2, y0 - h/2, y0 - h/2)
x = c(x, x[1])
y = c(y, y[1])
return(sp::SpatialPolygons(list(sp::Polygons(list(with(list(x = x, y = y),
sp::Polygon(cbind(x,y)))),1))) )
} |
fitG <- function(x, verbose = 'v') {
t0 <- Sys.time()
out <- fitG_mle(x, verbose)
out$call <- match.call()
out$x <- x
out$time <- Sys.time() - t0
out$n <- length(x)
out$k <- 3
out$df <- out$n - 3
depo <- with(out,
is_GHvalid(estimate[1], estimate[2], estimate[3], 0)
)
if (is.character(depo)) {
out$loglik <- NA
} else {
out$loglik <- loglikG(out$estimate, x)
out$AIC <- 2 * out$df - 2 * out$loglik
out$BIC <- out$df * log(out$n) - 2 * out$loglik
}
return(out)
}
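## Hypothetical usage sketch (not part of the original sources); it assumes the
## package internals referenced above (fitG_mle, new_fitGH, fitGH_hoaglin1985,
## loglikG, is_GHvalid, vmessage, magrittr's %>%) are loaded:
##   set.seed(1)
##   fit <- fitG(rnorm(500))
##   summary(fit)
##   coef(fit)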
fitG_mle <- function(x, verbose) {
vmessage(verbose, 1, TRUE, 'Maximum likelihood fitting')
vmessage(verbose, 2, TRUE, 'Initialisation...')
out <- new_fitGH()
fitGH_hoaglin1985(x) %>%
use_series('estimate') %>%
unname -> init
xst <- (x - init[1]) / init[2]
vmessage(verbose, 2, TRUE, 'Estimation...')
depo <- nlm(
f = function(theta, xdata) { -loglikG(c(0, 1, theta), xdata) },
p = mean(c(1 / max(xst), -1 / min(xst))),
xdata = xst
)
vmessage(verbose, 2, TRUE, 'Preparing output...')
out$distr <- 'g'
out$method <- 'mle'
  out$textmethod <- 'Maximum likelihood'
out$estimate[1:4] <- c(init[1:2], depo$estimate, 0)
out$estimator <- depo
vmessage(verbose, 1, TRUE, 'Done!')
return(out)
}
print.fitG <- function(x, ...) {
cat('\nCall:\n')
print(x$call)
cat('\nPoint estimates:\n')
print(x$estimate)
invisible(x)
}
coef.fitG <- function(object, ...) { object$estimate }
summary.fitG <- function(object, ...) {
cat('\nFitted', toupper(object$distr), 'distribution\n')
cat('\nCall:\n')
print(object$call)
cat('\nParameters:\n\n')
depo <- as.matrix(object$estimate)
colnames(depo) <- 'Estimate'
rownames(depo) %<>% paste0(' ')
print(signif(depo, 4))
cat('\n',
'Fitting method: ', object$textmethod, ', ',
'Computation time: ', signif(object$time, 3), ' ', units(object$time), '\n',
'Observations: ', object$n, ', degrees of freedom: ', object$df,
ifelse(
test = is.na(object$loglik),
yes = '',
no = paste0(
', Log-lik: ', format(object$loglik), '\n', 'AIC: ',
format(object$AIC), ', ', 'BIC: ', format(object$BIC)
)
), '\n', sep = ''
)
invisible(object)
} |
stat_density_ridges <- function(mapping = NULL, data = NULL, geom = "density_ridges",
position = "identity", na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE, bandwidth = NULL, from = NULL, to = NULL,
jittered_points = FALSE, quantile_lines = FALSE, calc_ecdf = FALSE, quantiles = 4,
quantile_fun = quantile, n = 512, ...)
{
layer(
stat = StatDensityRidges,
data = data,
mapping = mapping,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(bandwidth = bandwidth,
from = from,
to = to,
calc_ecdf = calc_ecdf,
quantiles = quantiles,
jittered_points = jittered_points,
quantile_lines = quantile_lines,
quantile_fun = quantile_fun,
n = n,
na.rm = na.rm, ...)
)
}
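## Hypothetical usage sketch (not part of the original sources); it assumes
## ggplot2 plus the package that supplies the matching "density_ridges" geom
## and scales are attached:
##   library(ggplot2)
##   ggplot(iris, aes(x = Sepal.Length, y = Species)) +
##     stat_density_ridges(quantile_lines = TRUE, quantiles = 2)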
StatDensityRidges <- ggproto("StatDensityRidges", Stat,
required_aes = "x",
default_aes = aes(height = ..density..),
calc_panel_params = function(data, params) {
if (is.null(params$bandwidth)) {
xdata <- na.omit(data.frame(x=data$x, group=data$group))
xs <- split(xdata$x, xdata$group)
xs_mask <- vapply(xs, length, numeric(1)) > 1
bws <- vapply(xs[xs_mask], bw.nrd0, numeric(1))
bw <- mean(bws, na.rm = TRUE)
message("Picking joint bandwidth of ", signif(bw, 3))
params$bandwidth <- bw
}
if (is.null(params$from)) {
params$from <- min(data$x, na.rm=TRUE) - 3 * params$bandwidth
}
if (is.null(params$to)) {
params$to <- max(data$x, na.rm=TRUE) + 3 * params$bandwidth
}
data.frame(
bandwidth = params$bandwidth,
from = params$from,
to = params$to
)
},
setup_params = function(self, data, params) {
panels <- split(data, data$PANEL)
pardata <- lapply(panels, self$calc_panel_params, params)
pardata <- reduce(pardata, rbind)
if (length(params$quantiles) > 1 &&
(max(params$quantiles, na.rm = TRUE) > 1 || min(params$quantiles, na.rm = TRUE) < 0)) {
stop('invalid quantiles used: c(', paste0(params$quantiles, collapse = ','), ') must be within [0, 1] range')
}
params$bandwidth <- pardata$bandwidth
params$from <- pardata$from
params$to <- pardata$to
params
},
compute_group = function(data, scales, from, to, bandwidth = 1,
calc_ecdf = FALSE, jittered_points = FALSE, quantile_lines = FALSE,
quantiles = 4, quantile_fun = quantile, n = 512) {
if(nrow(data) < 3) return(data.frame())
if (is.null(calc_ecdf)) calc_ecdf <- FALSE
if (is.null(jittered_points)) jittered_points <- FALSE
if (is.null(quantile_lines)) quantile_lines <- FALSE
if (quantile_lines) calc_ecdf <- TRUE
panel <- unique(data$PANEL)
if (length(panel) > 1) {
stop("Error: more than one panel in compute group; something's wrong.")
}
panel_id <- as.numeric(panel)
d <- stats::density(
data$x,
bw = bandwidth[panel_id], from = from[panel_id], to = to[panel_id], na.rm = TRUE,
n = n
)
maxdens <- max(d$y, na.rm = TRUE)
densf <- approxfun(d$x, d$y, rule = 2)
if (jittered_points) {
df_jittered <- data.frame(
x = data$x,
density = densf(data$x),
ndensity = densf(data$x) / maxdens,
datatype = "point", stringsAsFactors = FALSE)
df_points <- data[grepl("point_", names(data))]
if (ncol(df_points) == 0) {
df_points <- NULL
df_points_dummy <- NULL
}
else {
df_jittered <- cbind(df_jittered, df_points)
df_points_dummy <- na.omit(df_points)[1, , drop = FALSE]
}
} else {
df_jittered <- NULL
df_points_dummy <- NULL
}
if ((length(quantiles)==1) && (all(quantiles >= 1))) {
if (quantiles > 1) {
probs <- seq(0, 1, length.out = quantiles + 1)[2:quantiles]
}
else {
probs <- NA
}
} else {
probs <- quantiles
probs[probs < 0 | probs > 1] <- NA
}
qx <- na.omit(quantile_fun(data$x, probs = probs))
df_quantiles <- NULL
if (quantile_lines && length(qx) > 0) {
qy <- densf(qx)
df_quantiles <- data.frame(
x = qx,
density = qy,
ndensity = qy / maxdens,
datatype = "vline",
stringsAsFactors = FALSE
)
if (!is.null(df_points_dummy)){
df_quantiles <- data.frame(df_quantiles, as.list(df_points_dummy))
}
}
df_nondens <- rbind(df_quantiles, df_jittered)
if (calc_ecdf) {
n <- length(d$x)
ecdf <- c(0, cumsum(d$y[1:(n-1)]*(d$x[2:n]-d$x[1:(n-1)])))
ecdf_fun <- approxfun(d$x, ecdf, rule = 2)
ntile <- findInterval(d$x, qx, left.open = TRUE) + 1
if (!is.null(df_nondens)) {
df_nondens <- data.frame(
df_nondens,
ecdf = ecdf_fun(df_nondens$x),
quantile = findInterval(df_nondens$x, qx, left.open = TRUE) + 1
)
}
df_density <- data.frame(
x = d$x,
density = d$y,
ndensity = d$y / maxdens,
ecdf = ecdf,
quantile = ntile,
datatype = "ridgeline",
stringsAsFactors = FALSE
)
}
else {
df_density <- data.frame(
x = d$x,
density = d$y,
ndensity = d$y / maxdens,
datatype = "ridgeline",
stringsAsFactors = FALSE
)
}
if (!is.null(df_points_dummy)){
df_density <- data.frame(df_density, as.list(df_points_dummy))
}
df_final <- rbind(df_density, df_nondens)
if ("quantile" %in% names(df_final)) {
df_final$quantile <- factor(df_final$quantile)
}
df_final
}
)
stat_binline <- function(mapping = NULL, data = NULL,
geom = "density_ridges", position = "identity",
...,
binwidth = NULL,
bins = NULL,
center = NULL,
boundary = NULL,
breaks = NULL,
closed = c("right", "left"),
pad = TRUE,
draw_baseline = TRUE,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
layer(
data = data,
mapping = mapping,
stat = StatBinline,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
binwidth = binwidth,
bins = bins,
center = center,
boundary = boundary,
breaks = breaks,
closed = closed,
pad = pad,
draw_baseline = draw_baseline,
na.rm = na.rm,
...
)
)
}
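## Hypothetical usage sketch (not part of the original sources); like
## stat_density_ridges() above, it assumes a "density_ridges" geom is
## available to draw the binned outlines:
##   library(ggplot2)
##   ggplot(iris, aes(x = Sepal.Length, y = Species)) +
##     stat_binline(bins = 20)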
StatBinline <- ggproto("StatBinline", StatBin,
required_aes = "x",
default_aes = aes(height = ..density..),
setup_params = function(data, params) {
if (is.null(params$pad)) {
params$pad <- TRUE
}
if (is.null(params$draw_baseline)) {
params$draw_baseline <- TRUE
}
if (!is.null(params$boundary) && !is.null(params$center)) {
stop("Only one of `boundary` and `center` may be specified.", call. = FALSE)
}
if (is.null(params$breaks) && is.null(params$binwidth) && is.null(params$bins)) {
message("`stat_binline()` using `bins = 30`. Pick better value with `binwidth`.")
params$bins <- 30
}
params
},
compute_group = function(self, data, scales, binwidth = NULL, bins = NULL,
center = NULL, boundary = NULL,
closed = c("right", "left"), pad = TRUE,
breaks = NULL, origin = NULL, right = NULL,
drop = NULL, width = NULL, draw_baseline = TRUE) {
binned <- ggproto_parent(StatBin, self)$compute_group(data = data,
scales = scales, binwidth = binwidth,
bins = bins, center = center, boundary = boundary,
closed = closed, pad = pad, breaks = breaks)
result <- rbind(transform(binned, x=xmin), transform(binned, x=xmax-0.00001*width))
result <- result[order(result$x), ]
if (!draw_baseline) {
zeros <- result$count == 0
    protected <- (zeros & !c(zeros[-1], TRUE)) | (zeros & !c(TRUE, zeros[-length(zeros)]))
to_remove <- zeros & !protected
result$count[to_remove] <- NA
result$density[to_remove] <- NA
}
result
}
) |
skip_on_cran()
library("adea")
demo("cardealers")
test_that("cardealers demo", {
expect_s3_class(sol.adea, "adea")
expect_equal(sol.adea$load$load, 0.66666667)
expect_equal(names(sol.adea$load$iinput), c("Employees"))
expect_s3_class(sol.ah, "adeahierarchical")
expect_equal(length(sol.ah$models), 4)
expect_equal(sol.ah$models[[4]]$load$load, 0.66666667)
expect_equal(names(sol.ah$models[[4]]$load$iinput), c("Employees"))
expect_s3_class(sol.ap, "adeaparametric")
expect_equal(length(sol.ap$models), 4)
expect_equal(sol.ap$models[[4]]$load$load, 0.66666667)
expect_equal(names(sol.ap$models[[4]]$load$iinput), c("Employees"))
})
demo("spanishuniversities2018")
test_that("spanishuniversities2018 demo", {
expect_s3_class(sol.dea, "Farrell")
expect_equal(sol.dea$eff[1], 0.77542788)
expect_equal(sol.dea$ux[1], 0.00091240876)
expect_s3_class(sol.adea, "adea")
expect_equal(sol.adea$load$load, 0.58985841)
expect_equal(as.numeric(sol.adea$load$ratios$output[7]), 0.75717137)
})
demo("tokyo_libraries")
test_that("tokyo_libraries demo", {
    expect_s3_class(sol.adea, "adea")
    expect_equal(sol.adea$load$load, 0.455467)
    expect_equal(names(sol.adea$load$iinput), c("Area.I1"))
}) |
sbpr<-function(age=NULL,ssbwgt=NULL,partial=NULL,pmat=pmat,M=NULL,pF=NULL, pM=NULL,MSP=40,plus=FALSE,oldest=NULL,maxF=2,incrF=0.0001,graph=TRUE){
if(is.null(age))
stop ("age vector is missing")
if(is.null(ssbwgt))
stop (" ssbwgt vector is missing.")
if(is.null(partial))
stop ("partial recruitment vector is missing.")
if(is.null(pmat))
stop ("pmat vector is missing.")
if(is.null(M))
stop ("M value or vector is missing")
if(is.null(pF))
stop ("pF value is missing.")
if(is.null(pM))
stop ("pM value is missing.")
if(plus==TRUE & is.null(oldest)) stop("oldest must be specified for plus group calculation.")
if(any(length(age)!=c(length(age),length(ssbwgt),length(partial),length(pmat))))
stop("Length of vectors unequal")
if(length(M)==1) M<-rep(M,length(age))
data<-as.data.frame(cbind(age,ssbwgt,partial,M,pmat,pF,pM))
SPR<-as.data.frame(cbind(rep(NA,ceiling(maxF/incrF)+1),
rep(NA,ceiling(maxF/incrF)+1)))
names(SPR)<-c("F","SPR")
if(plus==TRUE){
len<-oldest-min(data$age)+1
if(oldest>max(data$age)){
pdata<-data[rep(length(data$age),times=oldest-data$age[length(data$age)]), ]
pdata$age<-seq(max(data$age)+1,oldest,1)
data<-rbind(data,pdata)}
}
if(plus==FALSE) len<-max(data$age)-min(data$age)+1
F<-0
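## For each F on the grid (0, incrF, ..., maxF) the loop accumulates spawning
## stock biomass per recruit: S and psb give survivorship to the start of each
## age, SB the within-year survival up to spawning, weighted by ssbwgt and pmat.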
for (i in 1:length(SPR$F))
{
data$SB<-exp(-(data$partial*data$pF*F+data$pM*data$M))
data$S<-cumprod(exp(-(data$partial*F+data$M)))
data$psb[1]<-1
for(y in 2:len)
{
data$psb[y]<-data$S[y-1]
}
data$SPR<-data$psb*data$SB*data$ssbwgt*data$pmat
SPR$SPR[i]<-sum(data$SPR)
SPR$F[i]<-F
F<-F+incrF
}
SPR$PSPR<-SPR$SPR/SPR$SPR[1]*100
sss<-NULL
getF<-function(x){
data$SB<-exp(-(data$partial*data$pF*x+data$pM*data$M))
data$S<-cumprod(exp(-(data$partial*x+data$M)))
data$psb[1]<-1
for(y in 2:len)
{
data$psb[y]<-data$S[y-1]
}
data$SPR<-data$psb*data$SB*data$ssbwgt*data$pmat
sss<<-sum(data$SPR)
return(((sum(data$SPR)/SPR$SPR[1]*100)-MSP)^2)
}
Fsp<-optimize(getF,c(0,maxF),tol=0.0000001)[1]
ans<-NULL
ans<-matrix(NA,1L,2L)
ans<-rbind(cbind(Fsp,sss))
dimnames(ans)<-list(c(paste("F at ",MSP,"%MSP",sep="")),c("F","SSB_Per_Recruit"))
outpt<-list(ans,SPR);names(outpt)<-c("Reference_Point","F_vs_SPR")
if(graph==TRUE){
par(mfrow=c(1,2))
plot(SPR[,2]~SPR[,1],ylab="SPR",xlab="F",type="l")
plot(SPR[,3]~SPR[,1],ylab="% Max SPR",xlab="F",type="l")
}
return(outpt)
} |
WrapKrigSpTi <- function(
WrapSpTi_out,
coords_obs,
coords_nobs,
times_obs,
times_nobs,
x_obs
)
{
pp <- unlist(WrapSpTi_out)
sigma2 <- as.numeric(pp[regexpr("sigma2",names(pp)) == 1])
alpha <- as.numeric(pp[regexpr("alpha",names(pp)) == 1])
rho_sp <- as.numeric(pp[regexpr("rho_sp",names(pp)) == 1])
rho_t <- as.numeric(pp[regexpr("rho_t",names(pp)) == 1])
sep_par <- as.numeric(pp[regexpr("sep_par",names(pp)) == 1])
row.k <- nrow(WrapSpTi_out[[1]]$k)
pp2 <- as.numeric(pp[regexpr("k",names(pp)) == 1])
k <- matrix(pp2,nrow = row.k)
rm(pp,pp2)
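  ## x_obs is centred at its circular mean (MeanCirc) before calling the C++
  ## kriging routine, and the shift is undone on the predictive output
  ## (Prev_out, M_out) below so predictions are returned on the original scale.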
MeanCirc <- atan2(sum(sin(x_obs)),sum(cos(x_obs)))
x_obs <- (x_obs - MeanCirc + pi) %% (2*pi)
alpha <- (alpha + MeanCirc - pi) %% (2*pi)
n <- nrow(k)
nprev <- nrow(coords_nobs)
nsample <- ncol(k)
H_tot <- as.matrix(stats::dist(rbind(coords_obs,coords_nobs)))
Ht_tot <- as.matrix(stats::dist(c(times_obs,times_nobs)))
out <- WrapKrigSpTiCpp(sigma2, alpha, rho_sp, rho_t,sep_par, k,
n, nsample, H_tot, Ht_tot, nprev, x_obs)
out$Prev_out <- (out$Prev_out - pi + MeanCirc) %% (2*pi)
out$M_out <- (out$M_out - pi + MeanCirc) %% (2*pi)
return(out)
} |
context("&& and ||")
test_that("&& works well", {
f <- function() { FALSE && TRUE }
expect_equal(cyclocomp(f), 2)
})
test_that("|| works well", {
f <- function() { FALSE || TRUE }
expect_equal(cyclocomp(f), 2)
}) |
source("setup/setup.R")
library(dm)
flights_dm <- dm_nycflights13()
tbl(flights_dm, "airports")
flights_dm$planes
flights_dm[["weather"]]
dm_apply_filters_to_tbl(flights_dm, airlines) |
get_nns_ratio <- function(x,
N = 10,
groups,
numerator = NULL,
candidates = character(0),
pre_trained,
transform = TRUE,
transform_matrix,
bootstrap = TRUE,
num_bootstraps = 10,
permute = TRUE,
num_permutations = 100,
verbose = TRUE){
if(class(x)[1] != "tokens") stop("data must be of class tokens")
group_vars <- unique(groups)
if(is.null(group_vars) | length(group_vars)!=2) stop("a binary grouping variable must be provided")
if(!is.null(numerator)){
if(!(numerator %in% group_vars)) stop("numerator must refer to one of the two groups in the groups argument")
}
denominator <- setdiff(group_vars, numerator)
  if(!is.null(groups)){ quanteda::docvars(x) <- NULL; quanteda::docvars(x, "group") <- groups }
x_dfm <- quanteda::dfm(x, tolower = FALSE)
x_dem <- dem(x = x_dfm, pre_trained = pre_trained, transform = transform, transform_matrix = transform_matrix, verbose = FALSE)
wvs <- dem_group(x = x_dem, groups = x_dem@docvars$group)
if(length(candidates) > 0) candidates <- intersect(candidates, rownames(pre_trained))
nnsdfs <- nns(x = wvs, N = Inf, candidates = candidates, pre_trained = pre_trained, as_list = TRUE)
nnsdf1 <- if(is.null(N)) nnsdfs[[numerator]]$feature else nnsdfs[[numerator]]$feature[1:N]
nnsdf2 <- if(is.null(N)) nnsdfs[[denominator]]$feature else nnsdfs[[denominator]]$feature[1:N]
union_nns <- union(nnsdf1, nnsdf2)
if(!bootstrap){
result <- nns_ratio(x = wvs, N = N, numerator = numerator, candidates = union_nns, pre_trained = pre_trained)
}else{
cat('starting bootstraps \n')
nnsratiodf_bs <- replicate(num_bootstraps,
nns_ratio_boostrap(x = x,
groups = groups,
numerator = numerator,
candidates = union_nns,
pre_trained = pre_trained,
transform = transform,
transform_matrix = transform_matrix),
simplify = FALSE)
result <- do.call(rbind, nnsratiodf_bs) %>%
dplyr::group_by(feature) %>%
dplyr::summarise(std.error = sd(value),
value = mean(value),
.groups = 'keep') %>%
dplyr::ungroup() %>%
dplyr::select('feature','value', 'std.error') %>%
dplyr::arrange(-value)
cat('done with bootstraps \n')
}
if(permute){
cat('starting permutations \n')
permute_out <- replicate(num_permutations, nns_ratio_permute(x,
groups = groups,
numerator = numerator,
candidates = union_nns,
pre_trained = pre_trained,
transform = transform,
transform_matrix = transform_matrix),
simplify = FALSE)
dev1 <- result %>% dplyr::mutate(value = abs(value - 1)) %>% as.data.frame()
dev1_perm <- lapply(permute_out, function(perm) perm[order(match(perm[,1],dev1[,1])),'value'])
dev1_perm <- do.call(rbind, dev1_perm)
dev1_perm <- abs(dev1_perm - 1)
dev1_perm <- apply(dev1_perm, 1, function(i) i >= dev1$value)
p.value <- apply(dev1_perm, 1, function(i) sum(i)/length(i))
result <- result %>% dplyr::mutate(p.value = p.value)
cat('done with permutations \n')
}
result <- result %>% dplyr::mutate(group = dplyr::case_when((feature %in% nnsdf1) & (feature %in% nnsdf2) ~ "shared",
(feature %in% nnsdf1) & !(feature %in% nnsdf2) ~ numerator,
!(feature %in% nnsdf1) & (feature %in% nnsdf2) ~ denominator))
attr(result, "numerator") <- numerator
if(verbose) cat("NOTE: values refer to the ratio", paste0(numerator, "/", denominator, "."))
return(result)
}
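## Hypothetical usage sketch (not part of the original sources); `toks`,
## `party`, `glove` and `khodak` are placeholders for a quanteda tokens
## object, a binary grouping vector, a pre-trained embedding matrix and its
## transformation matrix:
##   ratio_df <- get_nns_ratio(x = toks, N = 10, groups = party,
##                             numerator = "R", pre_trained = glove,
##                             transform = TRUE, transform_matrix = khodak,
##                             bootstrap = TRUE, permute = FALSE)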
nns_ratio_boostrap <- function(x,
groups,
numerator = NULL,
candidates = character(0),
pre_trained = pre_trained,
transform = TRUE,
transform_matrix = transform_matrix){
x <- quanteda::tokens_sample(x = x, size = table(groups), replace = TRUE, by = groups)
x_dfm <- quanteda::dfm(x, tolower = FALSE)
x_dem <- dem(x = x_dfm, pre_trained = pre_trained, transform = transform, transform_matrix = transform_matrix, verbose = FALSE)
wvs <- dem_group(x = x_dem, groups = x_dem@docvars$group)
result <- nns_ratio(x = wvs, N = NULL, numerator = numerator, candidates = candidates, pre_trained = pre_trained, verbose = FALSE)
return(result)
}
nns_ratio_permute <- function(x,
groups,
numerator = NULL,
candidates = character(0),
pre_trained,
transform = TRUE,
transform_matrix){
quanteda::docvars(x, 'group') <- sample(groups)
x_dfm <- quanteda::dfm(x, tolower = FALSE)
x_dem <- dem(x = x_dfm, pre_trained = pre_trained, transform = transform, transform_matrix = transform_matrix, verbose = FALSE)
wvs <- dem_group(x = x_dem, groups = x_dem@docvars$group)
result <- nns_ratio(x = wvs, N = NULL, numerator = numerator, candidates = candidates, pre_trained = pre_trained, verbose = FALSE)
return(result)
} |
SampDist2Props <-
function(form,data,max.sample.sizes=100,sim.reps=1000) {
  if (!("manipulate" %in% rownames(installed.packages()))) {
return(cat(paste0("You must be on R Studio with package manipulate installed\n",
"in order to run this function.")))
}
data <- data[complete.cases(data),]
prsd <- with(data,ParseFormula(form))
Explanatory.Variable <- as.character(prsd$rhs)[2]
Response.Variable <- as.character(prsd$rhs)[3]
breaker <- data[,Explanatory.Variable]
respvals <- data[,Response.Variable]
twopops <- split(respvals,breaker)
pop1 <- twopops[[1]]
pop2 <- twopops[[2]]
explanatory <- breaker
response <- respvals
results <- table(explanatory,response)
print(results)
cat("\n")
N1 <- sum(results[1,])
N2 <- sum(results[2,])
p1 <- results[1,1]/N1
p2 <- results[2,1]/N2
if (max.sample.sizes>min(N1,N2)) {
stop("Desired sample sizes must be less than pop sizes: choose lower value for max.sample.sizes argument.")
}
manipulate(
n1=slider(1,max.sample.sizes,initial=1,label="Sample Size n1"),
n2=slider(1,max.sample.sizes,initial=1,label="Sample Size n2"),
curvetype=picker("None","Density Estimate","Theoretical Normal"),
{
big <- max.sample.sizes
sddiffprops <- sqrt(p1*(1-p1)/n1*(N1-n1)/(N1-1)+p2*(1-p2)/n2*(N2-n2)/(N2-1))
minsddiffprops <- sqrt(p1*(1-p1)/big*(N1-big)/(N1-1)+p2*(1-p2)/big*(N2-big)/(N2-1))
ymax <- 1.2/(sqrt(2*3.1416)*minsddiffprops)
reps <- sim.reps
sim.pdiffs <- numeric(reps)
for (i in 1:reps) {
samp1 <- sample(pop1,n1,replace=FALSE)
res.1 <- table(samp1)
samp2 <- sample(pop2,n2,replace=FALSE)
res.2 <- table(samp2)
sim.pdiffs[i] <- res.1[1]/n1-res.2[1]/n2
}
hist(sim.pdiffs,freq=FALSE,xlim=c(-1,1),
xlab=expression(hat(p)[1]-hat(p)[2]),
ylim=c(0,ymax),
col="blue",
main=paste(reps,"simulations"))
if(curvetype=="Density Estimate") {
d <- density(sim.pdiffs)
lines(d$x,d$y,col="red")
}
if (curvetype=="Theoretical Normal") {
curve(dnorm(x,mean=(p1-p2),sd=sddiffprops),
xlim=c(-1,1),add=TRUE,col="red",n=1001)
}
quantities <- c("p1-p2","expected value of p1hat-p2hat","stdev of p1hat-p2hat")
theoretical <-c(p1-p2,p1-p2,sddiffprops)
in.simulation <- c(NA,mean(sim.pdiffs),sd(sim.pdiffs))
frm <- data.frame(theoretical,in.simulation)
rownames(frm) <- quantities
print(frm)
cat("\n")
}
)
}
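## Hypothetical usage sketch (not part of the original sources); requires
## RStudio with the manipulate package, and the formula is assumed to follow
## the ~ explanatory + response pattern parsed above (placeholder data):
##   df <- data.frame(sex = rep(c("F", "M"), each = 500),
##                    ans = sample(c("yes", "no"), 1000, replace = TRUE))
##   SampDist2Props(~ sex + ans, data = df, max.sample.sizes = 100)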
if(getRversion() >= "2.15.1") utils::globalVariables(c("picker", "curvetype","n1","n2")) |
resource_specialisation_case <- function(eventlog) {
freq <- NULL
nr_of_activity_types <- NULL
eventlog %>%
group_by(!!resource_id_(eventlog), !!activity_id_(eventlog), !!case_id_(eventlog)) %>%
summarize() %>%
group_by(!!case_id_(eventlog)) %>%
mutate(nr_of_activity_types = n_distinct(!!activity_id_(eventlog))) %>%
group_by(!!case_id_(eventlog), nr_of_activity_types, !!resource_id_(eventlog)) %>%
summarize(freq = n()) %>%
grouped_summary_statistics("freq")
} |
.refit <- function(object, fitting.method = "quadratic", jackknife.estimation = "quadratic",
asymptotic = TRUE, allowed.fitting = c("quad", "line", "nonl", "logl",
"log2"), allowed.jackknife = c("quad", "line", "nonl", "logl",
FALSE), ...) {
fitting.method <- substr(fitting.method, 1, 4)
if (object$fitting.method == fitting.method)
stop("Model is already fitted with the specified fitting method",
call. = FALSE)
if (!any(fitting.method == allowed.fitting)) {
warning("Fitting method not implemented. Using: quadratic", call. = FALSE)
fitting.method <- "quad"
}
if (jackknife.estimation != FALSE)
jackknife.estimation <- substr(jackknife.estimation, 1, 4)
if (!any(jackknife.estimation == allowed.jackknife)) {
warning("Fitting method (jackknife) not implemented. Using: quadratic",
call. = FALSE)
jackknife.estimation <- "quad"
}
if (!any(names(object) == "variance.jackknife") && jackknife.estimation !=
FALSE) {
warning("Jackknife variance estimation is not possible, due to the lack of it in the supplied model. Will be ignored.",
call. = FALSE)
jackknife.estimation <- FALSE
}
if (!any(names(object) == "variance.asymptotic") && asymptotic) {
warning("Asymptotic variance estimation is not possible, due to the lack of it in the supplied model. Will be ignored.",
call. = FALSE)
asymptotic <- FALSE
}
cl <- class(object)
if (any(names(object) == "variance.asymptotic") && asymptotic == FALSE) {
object <- object[setdiff(names(object), c("PSI", "c11", "a11",
"sigma", "sigma.gamma", "g", "s", "variance.asymptotic"))]
}
if (any(names(object) == "variance.jackknife") && jackknife.estimation ==
FALSE) {
object <- object[setdiff(names(object), c("extrapolation.variance",
"variance.jackknife", "variance.jackknife.lambda"))]
}
class(object) <- cl
estimates <- object$SIMEX.estimates[-1, -1]
lambda <- object$lambda
ncoef <- length(coef(object))
ndes <- dim(object$model$model)[1]
p.names <- names(coef(object))
SIMEX.estimate <- vector(mode = "numeric", length = ncoef)
switch(fitting.method, quad = extrapolation <- lm(estimates ~ lambda +
I(lambda^2)), line = extrapolation <- lm(estimates ~ lambda), logl = extrapolation <- lm(I(log(t(t(estimates) +
(abs(apply(estimates, 2, min)) + 1) * (apply(estimates, 2, min) <=
0)))) ~ lambda), log2 = extrapolation <- fit.logl(lambda, p.names,
estimates), nonl = extrapolation <- fit.nls(lambda, p.names, estimates))
if (any(class(extrapolation) == "lm") && fitting.method == "log2")
fitting.method <- "logl"
switch(fitting.method, quad = SIMEX.estimate <- predict(extrapolation,
newdata = data.frame(lambda = -1)), line = SIMEX.estimate <- predict(extrapolation,
newdata = data.frame(lambda = -1)), nonl = for (i in 1:length(p.names)) SIMEX.estimate[i] <- predict(extrapolation[[p.names[i]]],
newdata = data.frame(lambda = -1)), log2 = for (i in 1:length(p.names)) SIMEX.estimate[i] <- predict(extrapolation[[p.names[i]]],
newdata = data.frame(lambda = -1)) - ((abs(apply(estimates, 2,
min)) + 1) * (apply(estimates, 2, min) <= 0))[i], logl = SIMEX.estimate <- exp(predict(extrapolation,
newdata = data.frame(lambda = -1))) - (abs(apply(estimates, 2,
min)) + 1) * (apply(estimates, 2, min) <= 0))
if (jackknife.estimation != FALSE) {
variance.jackknife <- object$variance.jackknife.lambda[-1, -1]
switch(jackknife.estimation, quad = extrapolation.variance <- lm(variance.jackknife ~
lambda + I(lambda^2)), line = extrapolation.variance <- lm(variance.jackknife ~
lambda), logl = extrapolation.variance <- lm(I(log(t(t(variance.jackknife) +
(abs(apply(variance.jackknife, 2, min)) + 1) * (apply(variance.jackknife,
2, min) <= 0)))) ~ lambda), nonl = extrapolation.variance <- fit.nls(lambda,
1:NCOL(variance.jackknife), variance.jackknife))
variance.jackknife2 <- vector("numeric", ncoef^2)
switch(jackknife.estimation, nonl = for (i in 1:NCOL(variance.jackknife)) variance.jackknife2[i] <- predict(extrapolation.variance[[i]],
newdata = data.frame(lambda = -1)), quad = variance.jackknife2 <- predict(extrapolation.variance,
newdata = data.frame(lambda = -1)), line = variance.jackknife2 <- predict(extrapolation.variance,
newdata = data.frame(lambda = -1)), logl = variance.jackknife2 <- exp(predict(extrapolation.variance,
newdata = data.frame(lambda = -1))) - (abs(apply(variance.jackknife,
2, min)) + 1) * (apply(variance.jackknife, 2, min) <= 0))
variance.jackknife <- rbind(variance.jackknife2, variance.jackknife)
variance.jackknife.lambda <- cbind(c(-1, lambda), variance.jackknife)
variance.jackknife <- matrix(variance.jackknife[1, ], nrow = ncoef,
ncol = ncoef, byrow = TRUE)
dimnames(variance.jackknife) <- list(p.names, p.names)
object$variance.jackknife.lambda <- variance.jackknife.lambda
object$variance.jackknife <- variance.jackknife
object$extrapolation.variance <- extrapolation.variance
}
if (asymptotic) {
sigma <- object$sigma
s <- construct.s(ncoef, lambda, fitting.method, extrapolation)
d.inv <- solve(s %*% t(s))
sigma.gamma <- d.inv %*% s %*% sigma %*% t(s) %*% d.inv
g <- list()
switch(fitting.method, quad = g <- c(1, -1, 1), line = g <- c(1,
-1), logl = for (i in 1:ncoef) g[[i]] <- c(exp(coef(extrapolation)[1,
i] - coef(extrapolation)[2, i]), -exp(coef(extrapolation)[1,
i] - coef(extrapolation)[2, i])), log2 = for (i in 1:ncoef) g[[i]] <- c(exp(coef(extrapolation[[i]])[1] -
coef(extrapolation[[i]])[2]), -exp(coef(extrapolation[[i]])[1] -
coef(extrapolation[[i]])[2])), nonl = for (i in 1:ncoef) g[[i]] <- c(-1,
-(coef(extrapolation[[i]])[3] - 1)^-1, coef(extrapolation[[i]])[2]/(coef(extrapolation[[i]])[3] -
1)^2))
g <- diag.block(g, ncoef)
variance.asymptotic <- (t(g) %*% sigma.gamma %*% g)/ndes
dimnames(variance.asymptotic) <- list(p.names, p.names)
object$sigma.gamma <- sigma.gamma
object$g <- g
object$s <- s
object$variance.asymptotic <- variance.asymptotic
}
object$call$fitting.method <- fitting.method
object$call$jackknife.estimation <- jackknife.estimation
object$call$asymptotic <- asymptotic
object$SIMEX.estimates[1, ] <- c(-1, SIMEX.estimate)
object$coefficients <- as.vector(SIMEX.estimate)
names(object$coefficients) <- p.names
fitted.values <- predict(object, newdata = object$model$model[, -1,
drop = FALSE], type = "response")
object$fitted.values <- fitted.values
    if (is.factor(object$model$model[, 1]))
        object$residuals <- as.numeric(levels(object$model$model[, 1]))[object$model$model[,
            1]] - fitted.values else object$residuals <- object$model$model[, 1] - fitted.values
object$extrapolation <- extrapolation
return(object)
} |
chart.Correlation <-
function (R, histogram = TRUE, method=c("pearson", "kendall", "spearman"), pch=1, ...)
{
x = checkData(R, method="matrix")
if(missing(method)) method=method[1]
cormeth <- method
panel.cor <- function(x, y, digits=2, prefix="", use="pairwise.complete.obs", method=cormeth, cex.cor, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- cor(x, y, use=use, method=method)
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.8/strwidth(txt)
test <- cor.test(as.numeric(x),as.numeric(y), method=method)
Signif <- symnum(test$p.value, corr = FALSE, na = FALSE,
cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
symbols = c("***", "**", "*", ".", " "))
text(0.5, 0.5, txt, cex = cex * (abs(r) + .3) / 1.3)
text(.8, .8, Signif, cex=cex, col=2)
}
f <- function(t) {
dnorm(t, mean=mean(x), sd=sd.xts(x) )
}
dotargs <- list(...)
dotargs$method <- NULL
rm(method)
  hist.panel = function(x, ...) {
par(new = TRUE)
hist(x,
col = "light gray",
probability = TRUE,
axes = FALSE,
main = "",
breaks = "FD")
lines(density(x, na.rm=TRUE),
col = "red",
lwd = 1)
rug(x)
}
if(histogram)
pairs(x, gap=0, lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=hist.panel, pch=pch, ...)
else
pairs(x, gap=0, lower.panel=panel.smooth, upper.panel=panel.cor, pch=pch, ...)
} |
expected <- eval(parse(text="structure(c(794, 150, 86, 570), .Dim = c(2L, 2L), .Dimnames = structure(list(`2nd Survey` = c(\"Approve\", \"Disapprove\"), `1st Survey` = c(\"Approve\", \"Disapprove\")), .Names = c(\"2nd Survey\", \"1st Survey\")))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(794, 86, 150, 570), .Dim = c(2L, 2L), .Dimnames = structure(list(`1st Survey` = c(\"Approve\", \"Disapprove\"), `2nd Survey` = c(\"Approve\", \"Disapprove\")), .Names = c(\"1st Survey\", \"2nd Survey\"))))"));
.Internal(t.default(argv[[1]]));
}, o=expected); |
context("Karcher mean")
test_that("Karcher mean with zero-row gives correct output ", {
expect_that(karcher.mean.sphere(matrix(NA, nrow=0, ncol=2)), equals(c(phi=NA, lambda=NA)))
expect_that(karcher.mean.sphere(matrix(NA, nrow=0, ncol=2), var=TRUE), equals(list(mean=c(phi=NA, lambda=NA), var=c(phi=NA, lambda=NA))))
})
test_that("Karcher mean with one-row gives correct output ", {
x <- cbind(phi=1, lambda=0)
expect_that(karcher.mean.sphere(x), equals(c(phi=1, lambda=0)))
expect_that(karcher.mean.sphere(x, var=TRUE), equals(list(mean=c(phi=1, lambda=0), var=c(phi=NA, lambda=NA))))
})
test_that("Karcher mean with two-rows gives correct output ", {
x <- cbind(phi=c(0, 0), lambda=c(0, pi/2))
expect_that(karcher.mean.sphere(x), equals(c(phi=0, lambda=pi/4)))
expect_that(karcher.mean.sphere(x, var=TRUE), equals(list(mean=c(phi=0, lambda=pi/4), var=(pi/4)^2)))
}) |
format_bdvis <- function(indf, source=NULL, config=NULL, quiet=FALSE, gettaxo=FALSE, ...) {
bd_check_df(indf)
if (!(is.null(source))) {
match.arg(source, sources_list)
if (!(quiet)) message(c("Mapping according to ", source, " format"))
new_fields <- bd_get_source(source)
} else if (!(is.null(config))) {
if (!(quiet)) message("Mapping according to config object")
new_fields <- bd_parse_config(config)
} else {
if (!(quiet)) message("Mapping via individual parameters")
new_fields <- bd_parse_args(list(...))
}
if (!(is.null(new_fields$Latitude)) && new_fields$Latitude != "Latitude") {
if (new_fields$Latitude %in% names(indf)) {
if ("Latitude" %in% names(indf)) {
names(indf)[names(indf)=="Latitude"] <- "Latitude::original"
if (!(quiet)) message("Changed \"Latitude\" to \"Latitude::original\"")
}
names(indf)[names(indf)==new_fields$Latitude] <- "Latitude"
if (!(quiet)) message(c("Changed \"",new_fields$Latitude,"\" to \"Latitude\""))
}
}
if (!(is.null(new_fields$Longitude)) && new_fields$Longitude != "Longitude") {
if (new_fields$Longitude %in% names(indf)) {
if ("Longitude" %in% names(indf)) {
names(indf)[names(indf)=="Longitude"] <- "Longitude::original"
if (!(quiet)) message("Changed \"Longitude\" to \"Longitude::original\"")
}
names(indf)[names(indf)==new_fields$Longitude] <- "Longitude"
if (!(quiet)) message(c("Changed \"",new_fields$Longitude,"\" to \"Longitude\""))
}
}
if (!(is.null(new_fields$Date_collected)) && new_fields$Date_collected != "Date_collected") {
if (new_fields$Date_collected %in% names(indf)) {
if ("Date_collected" %in% names(indf)) {
names(indf)[names(indf)=="Date_collected"] <- "Date_collected::original"
if (!(quiet)) message("Changed \"Date_collected\" to \"Date_collected::original\"")
}
names(indf)[names(indf)==new_fields$Date_collected] <- "Date_collected"
if (!(quiet)) message(c("Changed \"",new_fields$Date_collected,"\" to \"Date_collected\""))
}
}
if (!(is.null(new_fields$Scientific_name)) && new_fields$Scientific_name != "Scientific_name") {
if (new_fields$Scientific_name %in% names(indf)) {
if ("Scientific_name" %in% names(indf)) {
names(indf)[names(indf)=="Scientific_name"] <- "Scientific_name::original"
if (!(quiet)) message("Changed \"Scientific_name\" to \"Scientific_name::original\"")
}
names(indf)[names(indf)==new_fields$Scientific_name] <- "Scientific_name"
if (!(quiet)) message(c("Changed \"",new_fields$Scientific_name,"\" to \"Scientific_name\""))
}
}
indf$Latitude <- as.numeric(indf$Latitude)
indf$Longitude <- as.numeric(indf$Longitude)
indf <- getcellid(indf)
if(gettaxo){
indf <- gettaxo(indf)
}
indf
}
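## Hypothetical usage sketch (not part of the original sources); assumes the
## package's getcellid() (and gettaxo() when gettaxo = TRUE) is available:
##   occ <- data.frame(decimalLatitude = c(48.21, 48.25),
##                     decimalLongitude = c(16.37, 16.41),
##                     eventDate = c("2020-05-01", "2020-06-01"),
##                     name = c("Parus major", "Parus major"))
##   occ <- format_bdvis(occ, source = "rgbif")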
sources_list <- c(
"rgbif",
"rvertnet",
"rinat"
)
bd_get_source <- function(source) {
bd_sources <- list(
rgbif = list(
Latitude = "decimalLatitude",
Longitude = "decimalLongitude",
Date_collected = "eventDate",
Scientific_name = "name"
),
rvertnet = list(
Latitude = "decimallatitude",
Longitude = "decimallongitude",
Date_collected = "eventdate",
Scientific_name = "scientificname"
),
rinat = list(
Latitude = "latitude",
Longitude = "longitude",
Date_collected = "observed_on",
Scientific_name = "taxon.name"
)
)
return(bd_sources[[source]])
}
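## Illustration (not part of the original sources): the lookup above means,
## for example, that
##   bd_get_source("rgbif")$Latitude        is "decimalLatitude"
##   bd_get_source("rinat")$Date_collected  is "observed_on"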
bd_parse_config <- function(config){
if (!("Latitude" %in% names(config))) stop("\"Latitude\" missing from configuration object")
if (!("Longitude" %in% names(config))) stop("\"Longitude\" missing from configuration object")
if (!("Date_collected" %in% names(config))) stop("\"Date_collected\" missing from configuration object")
if (!("Scientific_name" %in% names(config))) stop("\"Scientific_name\" missing from configuration object")
return(config)
}
bd_parse_args <- function(args) {
bd_args <- list()
if ("Latitude" %in% names(args)) {
bd_args$Latitude <- args$Latitude
} else {
bd_args$Latitude <- NULL
}
if ("Longitude" %in% names(args)) {
bd_args$Longitude <- args$Longitude
} else {
bd_args$Longitude <- NULL
}
if ("Date_collected" %in% names(args)) {
bd_args$Date_collected <- args$Date_collected
} else {
bd_args$Date_collected <- NULL
}
if ("Scientific_name" %in% names(args)) {
bd_args$Scientific_name <- args$Scientific_name
} else {
bd_args$Scientific_name <- NULL
}
return(bd_args)
}
bd_check_df <- function(indf) {
  if(missing(indf) || is.null(indf) || (length(indf) == 1 && is.na(indf)) || (is.data.frame(indf) && nrow(indf) == 0)) stop("Input data frame missing or empty")
if(!(is.data.frame(indf))) stop("Provided argument is not a data.frame")
return(invisible())
} |
`implementInteractions.fnc` <-
function(m) {
nams = strsplit(colnames(m), ":")
for (i in 1:length(nams)) {
if (length(nams[[i]]) > 1) {
m[,i] = m[,nams[[i]][1]]
for (j in 2:length(nams[[i]])) {
m[,i] = m[,i]*m[,nams[[i]][j]]
}
}
}
return(m)
} |
fslmean = function(img, nonzero = FALSE, verbose = TRUE, ts = FALSE){
opts = "-m"
opts = ifelse(nonzero, toupper(opts), opts)
val = fslstats(img, opts = opts, verbose = verbose, ts = ts)
val = strsplit(val, " ")
if (length(val) == 1) {
val = as.numeric(val[[1]])
} else {
val = sapply(val, as.numeric)
}
val
} |
WITSclean<-function(CSVfile,YEAR,threshold,cutoff){
DATAV1<-utils::read.csv(CSVfile)
DATA<-dplyr::filter(DATAV1,DATAV1$Year==YEAR)
Sender<-as.vector(DATA[,"PartnerISO3"])
Sender<- gsub('SER', 'SRB', Sender)
Sender<- gsub('TMP', 'TLS', Sender)
Sender<- gsub('ZAR', 'COD', Sender)
Sender<- gsub('ROM', 'ROU', Sender)
Sender<- gsub('SUD', 'SDN', Sender)
Sender<- gsub('MNT', 'MNE', Sender)
Receiver<-as.vector(DATA[,"ReporterISO3"])
Receiver<- gsub('SER', 'SRB', Receiver)
Receiver<- gsub('TMP', 'TLS', Receiver)
Receiver<- gsub('ZAR', 'COD', Receiver)
Receiver<- gsub('ROM', 'ROU', Receiver)
Receiver<- gsub('SUD', 'SDN', Receiver)
Receiver<- gsub('MNT', 'MNE', Receiver)
VAL<-DATA[,"TradeValue.in.1000.USD"]
FULLel<-data.frame(Sender=Sender,
Receiver=Receiver,
VAL=VAL,stringsAsFactors = FALSE)
WDIDataSeries<-WDI::WDI_data
WDICountryInfo<-WDIDataSeries$country
WD<-as.data.frame(WDICountryInfo,stringsAsFactors = FALSE)
COUNTRYlist<-WDICountryInfo[,"iso3c"]
REGIONlist<-WDICountryInfo[,"region"]
INCOMElist<-WDICountryInfo[,"income"]
CountryRegion<-cbind(COUNTRYlist,REGIONlist)
CountryIncome<-cbind(COUNTRYlist,INCOMElist)
AggReg<-c("All","EUN","UNS","OAS","FRE",
"SPE","VAT","UMI","ATA","PCN","AIA","COK",
"SHN","MSR","NIU",
"BES","BLM","BUN","BVT","CCK","CXR","FLK",
"HMD","IOT","NFK","SGS","TKL",
"ESH","SPM","ATF"
)
AggRegMat<-matrix("Aggregates",length(AggReg),2)
AggRegMat[,1]<-AggReg
CountryRegion<-rbind(CountryRegion,AggRegMat)
CountryIncome<-rbind(CountryIncome,AggRegMat)
CR<-as.data.frame(CountryRegion,stringsAsFactors = FALSE)
ALL_AGG<-dplyr::filter(CR,REGIONlist=="Aggregates")
ALL_AGG<-ALL_AGG$COUNTRYlist
ALL_AGG<-as.vector(ALL_AGG)
WDIgdp1<-WDI::WDI(country="all",indicator = "NY.GDP.PCAP.KD", start = YEAR, end=YEAR )
WDIgdp1<-as.data.frame(WDIgdp1,stringsAsFactors=FALSE)
WDIgdp1$iso3<-WD$iso3c[match(WDIgdp1$iso2c,WD$iso2c)]
WDIgdp2<-cbind(as.vector(WDIgdp1$iso3),
as.vector(WDIgdp1$NY.GDP.PCAP.KD))
colnames(WDIgdp2)<-c("iso3","GDP")
WDIgdp2<-as.data.frame(WDIgdp2,stringsAsFactors=FALSE)
WDIGDPgrowth1<-WDI::WDI(country="all",indicator = "NY.GDP.MKTP.KD.ZG", start = YEAR, end=YEAR )
WDIGDPgrowth1<-as.data.frame(WDIGDPgrowth1,stringsAsFactors=FALSE)
WDIGDPgrowth1$iso3<-WD$iso3c[match(WDIGDPgrowth1$iso2c,WD$iso2c)]
WDIGDPgrowth2<-cbind(as.vector(WDIGDPgrowth1$iso3),
as.vector(WDIGDPgrowth1$NY.GDP.MKTP.KD.ZG))
colnames(WDIGDPgrowth2)<-c("iso3","GDPgrowth")
WDIGDPgrowth2<-as.data.frame(WDIGDPgrowth2,stringsAsFactors=FALSE)
WDIGDPPC1<-WDI::WDI(country="all",indicator = "NY.GDP.PCAP.PP.KD", start = YEAR, end=YEAR )
WDIGDPPC1<-as.data.frame(WDIGDPPC1,stringsAsFactors=FALSE)
WDIGDPPC1$iso3<-WD$iso3c[match(WDIGDPPC1$iso2c,WD$iso2c)]
WDIGDPPC2<-cbind(as.vector(WDIGDPPC1$iso3),as.vector(WDIGDPPC1$NY.GDP.PCAP.PP.KD))
colnames(WDIGDPPC2)<-c("iso3","GDPPC")
WDIGDPPC2<-as.data.frame(WDIGDPPC2,stringsAsFactors=FALSE)
WDIFDI1<-WDI::WDI(country="all",indicator = "BN.KLT.DINV.CD", start = YEAR, end=YEAR )
WDIFDI1<-as.data.frame(WDIFDI1,stringsAsFactors=FALSE)
WDIFDI1$iso3<-WD$iso3c[match(WDIFDI1$iso2c,WD$iso2c)]
WDIFDI2<-cbind(as.vector(WDIFDI1$iso3),as.vector(WDIFDI1$BN.KLT.DINV.CD))
colnames(WDIFDI2)<-c("iso3","FDI")
WDIFDI2<-as.data.frame(WDIFDI2,stringsAsFactors=FALSE)
TotalCountryExports<-subset(FULLel,Receiver %in% "All")
AllAllTotal<-as.matrix(subset(TotalCountryExports,Sender %in% "All"))
GrandTotal<-as.numeric(AllAllTotal[,3])
Share<-list()
for (i in 1:length(VAL)){
Share[[i]]<-(VAL[i]/GrandTotal)*100
}
Share <-plyr::ldply(Share, data.frame)
colnames(Share)<-"Share"
FULLel<-cbind(FULLel,Share)
G1<-igraph::graph_from_data_frame(FULLel,direct=TRUE)
igraph::E(G1)$weight<-FULLel[,4]
igraph::V(G1)$id<-igraph::V(G1)$name
CountryNames<-igraph::V(G1)$name
NotCovered<-subset(CountryNames,!(CountryNames %in% CountryRegion[,1]))
NotCoveredWDI<-subset(CountryNames,!(CountryNames %in% WDIgdp2$iso3))
mm<-matrix("NA",length(NotCovered),2)
mm[,1]<-NotCovered
CountryRegion2<-rbind(CountryRegion,mm)
CountryIncome2<-rbind(CountryIncome,mm)
mm2<-matrix("NA",length(NotCoveredWDI),2)
mm2[,1]<-NotCoveredWDI
colnames(mm2)<-colnames(WDIgdp2)
WDIgdp3<-rbind(WDIgdp2,mm2)
colnames(mm2)<-colnames(WDIGDPPC2)
WDIGDPPC3<-rbind(WDIGDPPC2,mm2)
colnames(mm2)<-colnames(WDIGDPgrowth2)
WDIGDPgrowth3<-rbind(WDIGDPgrowth2,mm2)
colnames(mm2)<-colnames(WDIFDI2)
WDIFDI3<-rbind(WDIFDI2,mm2)
RegionListAttr<-list()
IncomeListAttr<-list()
GDPListattr<-list()
GDPPCListattr<-list()
GDPgrowthListAttr<-list()
FDIListAttr<-list()
for (i in 1:length(CountryNames)){
RegionListAttr[[i]]<-subset(CountryRegion2,COUNTRYlist %in% CountryNames[i])
IncomeListAttr[[i]]<-subset(CountryIncome2,COUNTRYlist %in% CountryNames[i])
GDPListattr[[i]]<-subset(WDIgdp3,WDIgdp3$iso3 %in% CountryNames[i])
    GDPPCListattr[[i]]<-subset(WDIGDPPC3,WDIGDPPC3$iso3 %in% CountryNames[i])
GDPgrowthListAttr[[i]]<-subset(WDIGDPgrowth3,WDIGDPgrowth3$iso3 %in% CountryNames[i])
FDIListAttr[[i]]<-subset(WDIFDI3,WDIFDI3$iso3 %in% CountryNames[i])
}
dfREG<-plyr::ldply(RegionListAttr, data.frame)
dfINC<-plyr::ldply(IncomeListAttr,data.frame)
dfGDP<-plyr::ldply(GDPListattr,data.frame)
dfGDPPC<-plyr::ldply(GDPPCListattr, data.frame)
dfGDPgrowth<-plyr::ldply(GDPgrowthListAttr, data.frame)
dfFDI<-plyr::ldply(FDIListAttr, data.frame)
target<-CountryNames
dfREG<-dfREG[match(target,dfREG$COUNTRYlist),]
RR<-as.vector(dfREG[,2])
RR1<-c(RR)
RR1<-unlist(RR1)
RR2<-as.factor(RR1)
H<-as.character(dfREG$REGIONlist)
dfINC<-dfINC[match(target,dfINC$COUNTRYlist),]
KK<-as.vector(dfINC[,2])
KK1<-c(KK)
KK1<-unlist(KK1)
KK2<-as.factor(KK1)
U<-as.character(dfINC$INCOMElist)
A<-levels(RR2)
B<-1:length(A)
KEY<-cbind(A,B)
Ainc<-levels(KK2)
Binc<-1:length(Ainc)
dfGDP<-dfGDP[match(target,dfGDP$iso3),]
dfGDPPC<-dfGDPPC[match(target,dfGDPPC$iso3),]
dfGDPgrowth<-dfGDPgrowth[match(target,dfGDPgrowth$iso3),]
dfFDI<-dfFDI[match(target,dfFDI$iso3),]
GGDP<-as.vector(dfGDP[,2])
GGDP<-suppressWarnings(as.numeric(GGDP))
GGDPPC<-as.vector(dfGDPPC[,2])
GGDPPC<-suppressWarnings(as.numeric(GGDPPC))
GGDPgrowth<-as.vector(dfGDPgrowth[,2])
GFDI<-as.vector(dfFDI[,2])
IH<-fastmatch::fmatch(KK,Ainc)
CH<-fastmatch::fmatch(RR, A)
igraph::V(G1)$regionNAME<-H
igraph::V(G1)$region<-CH
igraph::V(G1)$income<-IH
igraph::V(G1)$GDP<-GGDP
igraph::V(G1)$GDPPC<-GGDPPC
igraph::V(G1)$logGDP<-log(GGDP)
igraph::V(G1)$logGDPPC<-log(GGDPPC)
igraph::V(G1)$GDPgrowth<-GGDPgrowth
igraph::V(G1)$FDI<-GFDI
DEL_LIST<-subset(igraph::V(G1)$name,
igraph::V(G1)$name %in% ALL_AGG)
G1<-igraph::delete_vertices(G1,c(DEL_LIST,"All"))
G3<-igraph::delete.vertices(G1, which(is.na(igraph::V(G1)$region)))
BACK_BONE_PREP<-igraph::get.data.frame(G3,what="vertices")
if(threshold==TRUE){
G4<-igraph::delete.edges(G3,which(igraph::E(G3)$weight<cutoff))
} else {
G4<-get.backbone(G3,cutoff,TRUE)
BB_ID<-igraph::get.data.frame(G4,what="vertices")
BB_ATTR<-merge(BB_ID,BACK_BONE_PREP,by.all="name",all.x=TRUE,
all.y=FALSE)
BB_ATTR<-BB_ATTR[order(match(BB_ATTR[, "name"],igraph::V(G4)$name)),]
igraph::V(G4)$region<-BB_ATTR$region
igraph::V(G4)$regionNAME<-BB_ATTR$regionNAME
igraph::V(G4)$income<-BB_ATTR$income
igraph::V(G4)$GDP<-BB_ATTR$GDP
igraph::V(G4)$GDPPC<-BB_ATTR$GDPPC
igraph::V(G4)$logGDP<-BB_ATTR$logGDP
igraph::V(G4)$logGDPPC<-BB_ATTR$logGDPPC
igraph::V(G4)$GDPgrowth<-BB_ATTR$GDPgrowth
igraph::V(G4)$FDI<-BB_ATTR$FDI
igraph::V(G4)$id<-BB_ATTR$id
}
G5<-igraph::delete.vertices(G4, which(igraph::degree(G4)==0))
EW<-igraph::E(G5)$weight
EW2<-as.numeric(EW)
G6<-igraph::delete_edge_attr(G5,"weight")
igraph::E(G6)$weight<-EW2
return(G6)
} |
if (getRversion() < "3.6") {
  hcl.colors <- function (n, ...) {
    # Fallback for R < 3.6, which lacks grDevices::hcl.colors(). The original
    # hex codes were lost in extraction; viridis-like stand-ins are assumed.
    fallback <- c("#440154", "#31688E", "#35B779", "#FDE725")
    if (n == 4) {
      return(fallback)
    } else {
      colorRampPalette(fallback)(n)
    }
  }
}
data("Seatbelts")
head(Seatbelts)
library("Ternary")
seat <- c("drivers", "front", "rear")
TernaryPlot(alab = seat[1], blab = seat[2], clab = seat[3])
TernaryPoints(Seatbelts[, seat])
par(mar = c(0, 0, 0, 0))
plot(0:20, rep(2, 21), pch = 0:20,
cex = 2, ylim = c(-1, 3), ann = FALSE)
text(0:20, rep(0, 21), 0:20)
beltLawPch <- ifelse(Seatbelts[, 'law'], 3, 1)
par(mar = c(0, 0, 0, 0))
TernaryPlot(alab = seat[1], blab = seat[2], clab = seat[3])
legend('topleft', c('Belt law', 'No law'), pch = c(3, 1))
TernaryPoints(Seatbelts[, seat], pch = beltLawPch)
nPoints <- nrow(Seatbelts)
rowCol <- hcl.colors(nPoints, palette = "viridis", alpha = 0.8)
SpectrumLegend <- function (spectrum, labels) {
nCol <- length(spectrum)
xMax <- TernaryXRange()[2]
yMax <- TernaryYRange()[2]
legendY0 <- yMax * 0.75
legendY1 <- yMax * 0.95
legendHeight <- legendY1 - legendY0
segX <- rep(xMax, nCol)
segY <- seq_len(nCol) / nCol
segY <- legendY0 + (segY * legendHeight)
segments(segX, segY - segY[1] + legendY0, y1 = segY, col = spectrum,
lwd = 4)
text(segX, c(legendY0, legendY1), labels, pos = 2)
}
par(mar = c(0, 0, 0, 0))
TernaryPlot(alab = seat[1], blab = seat[2], clab = seat[3])
legend('topleft', c('Belt law', 'No law'), pch = c(3, 1))
SpectrumLegend(rowCol, labels = c('Jan 1969', 'Dec 1984'))
TernaryPoints(Seatbelts[, seat], pch = beltLawPch,
lwd = 2,
col = rowCol)
fourSeasons <- hcl.colors(4, 'Spectral')
monthCol <- colorRampPalette(fourSeasons[c(1:4, 1)])(13)[c(7:12, 1:6)]
par(mar = c(0, 0, 0, 0))
TernaryPlot(alab = seat[1], blab = seat[2], clab = seat[3])
legend('topleft', c('Belt law', 'No law'), pch = c(3, 1))
SpectrumLegend(monthCol, c('Jan', 'Dec'))
TernaryPoints(Seatbelts[, seat], pch = beltLawPch,
lwd = 2,
col = monthCol)
par(mar = c(0, 0, 0, 0))
TernaryPlot(alab = seat[1], blab = seat[2], clab = seat[3],
xlim = c(0.055, 0.095), ylim = c(0.48, 0.52))
legend('topleft', c('No law', 'Belt law'),
col = 2:3, pch = 1, lwd = 2, lty = NA)
sizes <- c(3, 7, 12)
scale <- 200
legend('topright', title = 'Casualties / Mm', legend = sizes,
pt.cex = sizes / 1000 * scale,
pch = 1, lwd = 2, lty = NA)
TernaryPoints(Seatbelts[, seat], pch = 1, lwd = 2,
cex = Seatbelts[, 'DriversKilled'] / Seatbelts[, 'kms'] * scale,
col = 2 + Seatbelts[, 'law'])
oct <- month.name == 'October'
octBelts <- Seatbelts[oct, ]
par(mar = c(0, 0, 0, 0))
TernaryPlot(alab = seat[1], blab = seat[2], clab = seat[3],
xlim = c(0.055, 0.095), ylim = c(0.48, 0.52),
padding = 0.04)
TernarySegments(octBelts[-nrow(octBelts), seat], octBelts[-1, seat],
col = rowCol, lwd = 2)
TernaryText(octBelts[, seat], paste0("'", 69:84),
font = 2, cex = 1.5,
col = adjustcolor(rowCol[oct], alpha.f = 0.8)) |
plot.importance <- function(x, ..., top = 10, radar = TRUE, text_start_point = 0.5, text_size=3.5,
xmeasure = "sumCover", ymeasure = "sumGain"){
Feature <- sumGain <- sumCover <- meanGain <- meanCover <-
mean5Gain <- . <- value <- variable <- hjust <- NULL
if (is.null(top))
top <- nrow(x)
if (radar == FALSE) {
ggplot(data.frame(x[1:top, ]),
aes_string(x = xmeasure, y = ymeasure, label = "Feature")) +
geom_point() +
scale_size() + geom_label_repel() + theme_drwhy()
}else{
import <- as.data.table(x[1:top, ])
import <- import[1:top, .(Feature,
sumGain = sumGain / max(import[, sumGain]),
sumCover = sumCover / max(import[, sumCover]),
meanGain = meanGain / max(import[, meanGain]),
meanCover = meanCover / max(import[, meanCover]),
mean5Gain = mean5Gain / max(import[, mean5Gain]),
frequency = frequency / max(import[, frequency]))]
data<-import[,Feature:= ifelse(nchar(import[,Feature])>20, gsub(":", ": :",import[,Feature]),Feature)]
import$Feature <- factor(import$Feature, levels = import$Feature[order(import$sumGain, decreasing = TRUE)])
numberOfBars=nrow(import)
angle= 90-360*(row(import)[,1]-0.5)/numberOfBars
import$hjust<-ifelse( angle < -90, 1, 0)
import$angle<-ifelse(angle < -90, angle+180, angle)
data_to_plot <- melt(import, id = c(1,8,9), measures = 2:6, value.factor = FALSE)
data<-data_to_plot[,.(hjust=mean(hjust),angle=mean(angle)), by=Feature]
ggplot(data.frame(data_to_plot),
aes(x = Feature, y = value, colour = variable, group = variable)) +
geom_line(size = 1.5) +
geom_point(size = 2.5) +
theme_drwhy() +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position = "bottom",
panel.grid.major.y = element_line(colour = "gray68", linetype = "dashed", size = 0.4),
axis.line = element_blank(),
          axis.text.x=element_blank()) +
labs(fill = "Measures")+
coord_radar() +
      geom_text(data=data, aes(x=Feature, y= rep(text_start_point,top), label=lapply(strwrap(data[,Feature], width = 10, simplify = FALSE), paste, collapse="\n"), hjust=hjust), size=text_size, angle=data$angle, color="black") # trailing arguments were lost in extraction; size, angle and colour are reconstructed as an assumption
}
} |
get_dummies. <- function(.df,
cols = c(where(is.character), where(is.factor)),
prefix = TRUE,
prefix_sep = "_",
drop_first = FALSE,
dummify_na = TRUE) {
UseMethod("get_dummies.")
}
get_dummies..tidytable <- function(.df,
cols = c(where(is.character), where(is.factor)),
prefix = TRUE,
prefix_sep = "_",
drop_first = FALSE,
dummify_na = TRUE) {
.df <- shallow(.df)
vec_assert(prefix, logical(), 1)
vec_assert(prefix_sep, character(), 1)
vec_assert(drop_first, logical(), 1)
vec_assert(dummify_na, logical(), 1)
cols <- tidyselect_syms(.df, {{ cols }})
original_cols <- copy(names(.df))
ordered_cols <- character()
for (col in cols) {
col_name <- as.character(col)
if (drop_first) {
unique_vals <- vec_unique(as.character(.df[[col_name]]))[-1]
} else {
unique_vals <- vec_unique(as.character(.df[[col_name]]))
}
if (dummify_na) {
unique_vals <- unique_vals %|% "NA"
} else {
unique_vals <- unique_vals[!is.na(unique_vals)]
}
if (prefix) {
new_names <- paste(col_name, unique_vals, sep = prefix_sep)
} else {
new_names <- unique_vals
}
not_na_cols <- new_names[unique_vals != "NA"]
unique_vals <- unique_vals[unique_vals != "NA"]
dummy_calls <- vector("list", length(unique_vals))
names(dummy_calls) <- not_na_cols
for (i in seq_along(unique_vals)) {
dummy_calls[[i]] <- expr(ifelse.(!!col == !!unique_vals[i], 1L, 0L, 0L))
}
.df <- mutate.(.df, !!!dummy_calls)
if (dummify_na) {
na_col <- new_names[new_names %notin% not_na_cols]
if (length(na_col) > 0) {
.df <- mutate.(.df, !!na_col := as.integer(is.na(!!col)))
}
}
new_names <- f_sort(new_names)
ordered_cols <- c(ordered_cols, new_names)
}
final_order <- c(original_cols, ordered_cols)
setcolorder(.df, final_order)
.df
}
get_dummies..data.frame <- function(.df,
cols = c(where(is.character), where(is.factor)),
prefix = TRUE,
prefix_sep = "_",
drop_first = FALSE,
dummify_na = TRUE) {
.df <- as_tidytable(.df)
get_dummies.(.df, {{ cols }}, prefix, prefix_sep, drop_first, dummify_na)
}
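## Hypothetical usage sketch (not part of the original sources); assumes the
## surrounding tidytable infrastructure (shallow, tidyselect_syms, mutate.,
## ifelse., f_sort, %notin%, as_tidytable) is loaded:
##   df <- data.frame(animal = c("cat", "dog", NA))
##   get_dummies.(df)
##   ## adds 0/1 indicator columns animal_cat, animal_dog and animal_NA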
globalVariables("where") |
surv_br23 <- function(x){
covariates <- c('BRBI','BRSEF','BRSEE','BRFU','BRST','BRBS','BRAS','BRHL')
arm1_qol <- brc_qol(x[x$arm==1,])
arm2_qol <- brc_qol(x[x$arm==2,])
univ_formulas <- sapply(covariates,function(y) as.formula(paste('Surv(time, event)~', y)))
univ_arm1 <- lapply(univ_formulas,function(y){coxph(y, data = arm1_qol)})
univ_arm2 <- lapply(univ_formulas,function(y){coxph(y, data = arm2_qol)})
univ_results <- mapply(function(x1,x2){
x1 <- summary(x1)
x2 <- summary(x2)
HR <- signif(x2$coef[2]/x1$coef[2], digits=3);
HR.confint.lower <- signif(x2$conf.int[,"lower .95"]/x1$conf.int[,"lower .95"], 3)
HR.confint.upper <- signif(x2$conf.int[,"upper .95"]/x1$conf.int[,"upper .95"],3)
res<-c(HR,HR.confint.lower,HR.confint.upper)
names(res) <- c('HR','Lower 95% CI','Upper 95% CI')
return(res)},
univ_arm1,univ_arm2)
relative.HR <- t(as.data.frame(univ_results, check.names = FALSE))
return(relative.HR)
}
utils::globalVariables(c("as.data.frame","as.formula")) |
rblnd <- function(obswin, bufferdist, lambda, meanlog, sdlog, seed = NULL){
wsim <- Frame(dilation(obswin, bufferdist))
if (!missing(seed)){set.seed(seed)}
pp <- rpoispp(lambda, win = wsim, nsim = 1, drop = TRUE)
if (!missing(seed)){set.seed(seed)}
radius <- rlnorm(pp$n, meanlog = meanlog, sdlog = sdlog)
pointlocations <- cbind(X = pp$x, Y = pp$y)
pointlocations <- split(cbind(pointlocations), row(pointlocations))
grains <- mapply(disc, radius = radius, centre = pointlocations, SIMPLIFY = FALSE)
xisim <- union.owin(as.solist(grains))
xi <- intersect.owin(xisim, obswin)
return(xi)
} |
BIFIE.BIFIEcdata2BIFIEdata <- function( bifieobj, varnames=NULL, impdata.index=NULL )
{
if ( ! bifieobj$cdata ){
stop( "You may want to use 'BIFIE.BIFIEdata2BIFIEcdata'\n")
}
bifieobj <- BIFIE.cdata.select( bifieobj=bifieobj, varnames=varnames,
impdata.index=impdata.index )
bifieobj$datalistM <- bifiesurvey_rcpp_bifiecdata2bifiedata(
datalistM_ind=as.matrix(bifieobj$datalistM_ind),
datalistM_imputed=as.matrix(bifieobj$datalistM_imputed),
Nimp=bifieobj$Nimp, dat1=as.matrix(bifieobj$dat1),
datalistM_impindex=as.matrix(bifieobj$datalistM_impindex) )$datalistM
bifieobj$cdata <- FALSE
bifieobj$datalistM_imputed <- NULL
bifieobj$datalistM_impindex <- NULL
bifieobj$datalistM_ind <- NULL
bifieobj$wgtrep <- as.matrix(bifieobj$wgtrep)
return(bifieobj)
} |
ggEdges <- function(ggObj, states) {
active <- states$active
activeNode <- states$nodes[active]
activeX <- states$x[active]
activeY <- states$y[active]
isActiveEdge <- states$activeEdge
lapply(seq_len(length(activeNode)),
function(i) {
nodeFrom <- activeNode[i]
nodeFrom_EdgeId <- which(states$from[isActiveEdge] == nodeFrom)
if (length(nodeFrom_EdgeId) != 0) {
nodeTo <- states$to[isActiveEdge][nodeFrom_EdgeId]
nodeTo_CoordId <- which(activeNode %in% nodeTo)
numNodesTo <- length(nodeTo_CoordId)
x <- c(rep(activeX[i], numNodesTo), activeX[nodeTo_CoordId])
y <- c(rep(activeY[i], numNodesTo), activeY[nodeTo_CoordId])
id <- rep(nodeTo_CoordId, 2)
ggObj <<- ggObj +
ggplot2::geom_path(
data = data.frame(
x = x,
y = y,
id = id
),
mapping = ggplot2::aes(x = x, y = y, group = id, colour = as.factor(id)),
colour = rep(states$colorEdge[isActiveEdge][nodeFrom_EdgeId][nodeTo %in% activeNode], 2),
inherit.aes = FALSE
)
}
NULL
}
)
return(ggObj)
}
ggLabels <- function(ggObj, states) {
active <- states$active
activeNode <- states$nodes[active]
activeX <- states$x[active]
activeY <- states$y[active]
activeAngle <- states$orbitAngle[active]
orbitDistance <- states$orbitDistance
lapply(seq_len(length(activeNode)),
function(i) {
if(states$showOrbit) {
x <- activeX[i] + mm2native(orbitDistance) * cos(activeAngle[i])
y <- activeY[i] + mm2native(orbitDistance) * sin(activeAngle[i])
label <- activeNode[i]
ggObj <<- ggObj +
ggplot2::geom_text(
data = data.frame(
x = x,
y = y,
label = label
),
mapping = ggplot2::aes(x = x, y = y, label = label),
colour= loon::l_getOption("foreground"),
inherit.aes = FALSE
)
}
NULL
}
)
return(ggObj)
}
ggNodes <- function(ggObj, states) {
active <- states$active
pch <- glyph_to_pch(states$glyph[active])
colour <- get_display_color(states$color[active],
states$selected[active])
fill <- rep(NA, length(colour))
fill[pch %in% 21:24] <- colour[pch %in% 21:24]
colour[pch %in% 21:24] <- loon::l_getOption("foreground")
x <- states$x[active]
y <- states$y[active]
ggObj <- ggObj +
ggplot2::geom_point(
data = data.frame(x = x, y = y),
mapping = ggplot2::aes(x = x, y = y),
shape = pch,
colour = colour,
fill = fill,
size = as_ggplot_size(states$size[active]),
inherit.aes = FALSE
)
return(ggObj)
}
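## Hypothetical composition sketch (not part of the original sources): each
## helper takes a ggplot object plus the loon graph's `states` list (x, y,
## nodes, active, color, glyph, ...) and returns the plot with one layer
## family added, so a graph display can be assembled incrementally:
##   p <- ggplot2::ggplot()
##   p <- ggEdges(p, states)
##   p <- ggNodes(p, states)
##   p <- ggLabels(p, states)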
ggNavPaths <- function(ggObj, states, nav_ids, widget) {
x <- states$x
y <- states$y
node <- states$nodes
fromLineSize <- 3
toLineSize <- 1
lapply(nav_ids,
function(nav_id) {
navigator <- loon::l_create_handle(c(widget, nav_id))
color <- as_hex6color(navigator['color'])
from <- navigator['from']
to <- navigator['to']
prop <- navigator['proportion']
if(length(from) != 0 && length(to) != 0) {
fromId <- sapply(1:length(from), function(i){which(node %in% from[i] == TRUE)})
toId <- sapply(1:length(to), function(i){which(node %in% to[i] == TRUE)})
if(length(to) >= 2) {
lapply(1:(length(to) - 1),
function(i){
ggObj <<- ggObj +
ggplot2::geom_path(
data = data.frame(
x = c(x[toId[i]], x[toId[i+1]]),
y = c(y[toId[i]], y[toId[i+1]])
),
mapping = ggplot2::aes(x = x, y = y),
colour = color,
size = toLineSize,
inherit.aes = FALSE
)
}
)
}
if(length(from) >= 2) {
lapply(1:(length(from) - 1),
function(i) {
ggObj <<- ggObj +
ggplot2::geom_path(
data = data.frame(
x = c(x[fromId[i]], x[fromId[i+1]]),
y = c(y[fromId[i]], y[fromId[i+1]])
),
mapping = ggplot2::aes(x = x, y = y),
colour = color,
size = fromLineSize,
inherit.aes = FALSE
)
}
)
}
xn <- (1 - prop) * x[fromId[length(fromId)]] + prop * x[toId[1]]
yn <- (1 - prop) * y[fromId[length(fromId)]] + prop * y[toId[1]]
ggObj <<- ggObj +
ggplot2::geom_path(
data = data.frame(
x = c(x[fromId[length(fromId)]], xn),
y = c(y[fromId[length(fromId)]], yn)
),
mapping = ggplot2::aes(x = x, y = y),
colour = color,
size = fromLineSize,
inherit.aes = FALSE
) +
ggplot2::geom_path(
data = data.frame(
x = c(xn, x[toId[1]]),
y = c(yn, y[toId[1]])
),
mapping = ggplot2::aes(x = x, y = y),
colour = color,
size = toLineSize,
inherit.aes = FALSE
)
}
NULL
}
)
return(ggObj)
}
ggNavPoints <- function(ggObj, states, nav_ids, widget) {
activeNavigator <- widget["activeNavigator"]
x <- states$x
y <- states$y
node <- states$nodes
sel_color <- as.character(loon::l_getOption("select-color"))
if (grepl("^
sel_color <- loon::hex12tohex6(sel_color)
}
navPointsSize <- 12
navTextSize <- 3
navDotsSize <- 2
stroke <- 2
lapply(nav_ids,
function(nav_id) {
navigator <- loon::l_create_handle(c(widget, nav_id))
color <- as_hex6color(navigator['color'])
from <- navigator['from']
to <- navigator['to']
prop <- navigator['proportion']
label <- navigator['label']
if(length(activeNavigator) != 0) {
if(activeNavigator == navigator) {
boundColor <- sel_color
} else {
boundColor <- loon::l_getOption("foreground")
stroke <- 1
}
} else {
boundColor <- loon::l_getOption("foreground")
stroke <- 1
}
fromId <- sapply(1:length(from), function(i){which(node %in% from[i] == TRUE)})
toId <- sapply(1:length(to), function(i){which(node %in% to[i] == TRUE)})
if(length(from) == 0) {
ggObj <<- ggObj +
ggplot2::geom_point(
data = data.frame(x = 0.1, y = 0.9),
mapping = ggplot2::aes(x = x, y = y),
size = navPointsSize,
fill = color,
colour = boundColor,
stroke = stroke,
shape = 21,
inherit.aes = FALSE
) +
ggplot2::geom_text(
data = data.frame(x = 0.1, y = 0.9, label = paste(label, collapse = " ")),
mapping = ggplot2::aes(x = x, y = y, label = label),
colour = loon::l_getOption("foreground"),
size = navTextSize,
inherit.aes = FALSE
)
} else if(length(from) == 1 & length(to) == 0) {
ggObj <<- ggObj +
ggplot2::geom_point(
data = data.frame(x = x[fromId], y = y[fromId]),
mapping = ggplot2::aes(x = x, y = y),
size = navPointsSize,
fill = color,
colour = boundColor,
stroke = stroke,
shape = 21,
inherit.aes = FALSE
) +
ggplot2::geom_text(
data = data.frame(x = x[fromId], y = y[fromId],
label = paste(label, collapse = " ")),
mapping = ggplot2::aes(x = x, y = y, label = label),
colour = loon::l_getOption("foreground"),
size = navTextSize,
inherit.aes = FALSE
)
} else {
xx <- (1 - prop) * x[fromId[length(fromId)]] + prop * x[toId[1]]
yy <- (1 - prop) * y[fromId[length(fromId)]] + prop * y[toId[1]]
ggObj <<- ggObj +
ggplot2::geom_point(
data = data.frame(
x = x[toId[length(toId)]],
y = y[toId[length(toId)]]
),
mapping = ggplot2::aes(x = x, y = y),
size = navDotsSize,
fill = color,
colour = loon::l_getOption("foreground"),
inherit.aes = FALSE,
            shape = 21
) +
ggplot2::geom_point(
data = data.frame(x = xx, y = yy),
mapping = ggplot2::aes(x = x, y = y),
size = navPointsSize,
fill = color,
colour = boundColor,
stroke = stroke,
shape = 21,
inherit.aes = FALSE
) +
ggplot2::geom_text(
data = data.frame(x = x[fromId], y = y[fromId],
label = paste(label, collapse = " ")),
mapping = ggplot2::aes(x = x, y = y, label = label),
colour = loon::l_getOption("foreground"),
size = navTextSize,
inherit.aes = FALSE
)
}
NULL
}
)
return(ggObj)
}
mm2native <- function(x) x/50 |
knitr::opts_chunk$set(
collapse = TRUE,
eval = nzchar(Sys.getenv("COMPILE_VIG")),
comment = "
)
library(tongfen)
library(dplyr)
library(ggplot2)
library(tidyr)
library(cancensus)
vsb_regions <- list(CSD=c("5915022","5915803"),
CT=c("9330069.01","9330069.02","9330069.00"))
geo_identifiers <- c()
years <- seq(2001,2016,5)
geo_identifiers <- paste0("GeoUIDCA",substr(as.character(years),3,4))
data <- years %>%
lapply(function(year){
dataset <- paste0("CA",substr(as.character(year),3,4))
uid_label <- paste0("GeoUID",dataset)
get_census(dataset, regions=vsb_regions, geo_format = 'sf', level="CT", quiet=TRUE) %>%
sf::st_sf() %>%
rename(!!as.name(uid_label):=GeoUID) %>%
mutate(Year=year)
}) %>% setNames(years)
data %>%
bind_rows() %>%
ggplot() +
geom_sf(fill="steelblue",colour="brown") +
coord_sf(datum=NA) +
facet_wrap("Year") +
labs(title="Vancouver census tracts",caption="StatCan Census 2001-2016")
correspondence <- estimate_tongfen_correspondence(data, geo_identifiers,
tolerance=200, computation_crs=3347)
head(correspondence)
tongfen_area_check <- check_tongfen_areas(data,correspondence)
tongfen_area_check %>%
filter(max_log_ratio>0.1)
mismatched_tongfen_ids <- tongfen_area_check %>%
filter(max_log_ratio>0.1) %>%
pull(TongfenID)
mismatch_correspondence <- correspondence %>%
filter(TongfenID %in% mismatched_tongfen_ids)
c(2001,2016) %>%
lapply(function(year){
tongfen_aggregate(data,mismatch_correspondence,base_geo = year) %>%
mutate(Year=year)
}) %>%
bind_rows() %>%
ggplot() +
geom_sf(data=sf::st_union(data[[4]])) +
geom_sf(fill="steelblue",colour="brown") +
coord_sf(datum=NA) +
facet_wrap("Year") +
labs(title="Tongfen area mismatch check",caption="StatCan Census 2001-2016")
years %>%
lapply(function(year){
tongfen_aggregate(data,correspondence,base_geo = year) %>%
mutate(Year=year)
}) %>%
bind_rows() %>%
ggplot() +
geom_sf(fill="steelblue",colour="brown") +
coord_sf(datum=NA) +
facet_wrap("Year") +
labs(title="Tongfen aggregates visual inspection",caption="StatCan Census 2001-2016")
meta <- meta_for_additive_variables(years,"Population")
meta
breaks = c(-0.15,-0.1,-0.075,-0.05,-0.025,0,0.025,0.05,0.1,0.2,0.3)
labels = c("-15% to -10%","-10% to -7.5%","-7.5% to -5%","-5% to -2.5%","-2.5% to 0%","0% to 2.5%","2.5% to 5%","5% to 10%","10% to 20%","20% to 30%")
colors <- RColorBrewer::brewer.pal(10,"PiYG")
compute_population_change_metrics <- function(data) {
geometric_average <- function(x,n){sign(x) * (exp(log(1+abs(x))/n)-1)}
data %>%
mutate(`2001 - 2006`=geometric_average((`Population_2006`-`Population_2001`)/`Population_2001`,5),
`2006 - 2011`=geometric_average((`Population_2011`-`Population_2006`)/`Population_2006`,5),
`2011 - 2016`=geometric_average((`Population_2016`-`Population_2011`)/`Population_2011`,5),
`2001 - 2016`=geometric_average((`Population_2016`-`Population_2001`)/`Population_2001`,15)) %>%
gather(key="Period",value="Population Change",c("2001 - 2006","2006 - 2011","2011 - 2016","2001 - 2016")) %>%
mutate(Period=factor(Period,levels=c("2001 - 2006","2006 - 2011","2011 - 2016","2001 - 2016"))) %>%
mutate(c=cut(`Population Change`,breaks=breaks, labels=labels))
}
plot_data <- tongfen_aggregate(data,correspondence,meta=meta,base_geo = "2001") %>%
compute_population_change_metrics()
ggplot(plot_data,aes(fill=c)) +
geom_sf(size=0.1) +
scale_fill_manual(values=setNames(colors,labels)) +
facet_wrap("Period",ncol=2) +
coord_sf(datum=NA) +
labs(fill="Average Annual\nPopulation Change",
title="Vancouver population change",
caption = "StatCan Census 2001-2016") |
wages_monotonic <- wages %>%
features(ln_wages, feat_monotonic)
test_that("feat_monotonic returns the right names", {
expect_equal(names(wages_monotonic),
c("id",
"increase",
"decrease",
"unvary",
"monotonic"))
})
test_that("feat_monotonic returns the right dimensions", {
expect_equal(dim(wages_monotonic),
c(888, 5))
})
library(dplyr)
test_that("feat_monotonic returns all ids", {
expect_equal(n_distinct(wages_monotonic$id), 888)
}) |
Lsys <- function(init = NULL, rules = NULL, n = 5,
retAll = TRUE, verbose = 1L) {
nc <- nchar(rules$inp)
if (any(nc > 1)) stop("Input variables must be a single character")
if (verbose == 1L) cat("\nCycle 0 string has length ", nchar(init), "\n", sep = "")
if (verbose == 1L) cat("Cycle 0:", init, "\n")
curr <- init
out <- rep(NA_character_, n+1)
out[1] <- init
for (j in 1:n) {
RR <- vector("list")
for (i in 1:nrow(rules)) {
rr <- str_locate_all(curr, rules[i,1])
if (verbose > 1L) cat("Processing rule", i, "\n")
if (dim(rr[[1]])[1] == 0) {
if (verbose > 1L) cat("\tRule", i, "was not needed\n")
next
}
RR[i] <- rr
}
    if (verbose > 1L) print(RR)
for (i in 1:length(RR)) {
tmp <- as.data.frame(RR[i])
tmp$insert <- rules[i,2]
RR[[i]] <- tmp
}
    if (verbose > 1L) print(RR)
    RRdf <- as.data.frame(RR[1])
    if (length(RR) > 1) {
      for (i in 2:length(RR)) {
        RRdf <- rbind(RRdf, as.data.frame(RR[i]))
      }
    }
if (verbose > 1L) print(RRdf)
curr <- unlist(strsplit(curr, ""))
curr[RRdf$start] <- RRdf$insert
curr <- paste0(curr, collapse = "")
out[j+1] <- curr
if (verbose == 1L) cat("\nCycle ", j, " string has length ", nchar(curr), "\n", sep = "")
if (verbose == 1L) cat("Cycle ", j, ": ", curr, "\n", sep = "")
}
if (retAll) return(out)
curr
}
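# Usage sketch added for illustration (not from the original source); wrapped
# in if (FALSE) so it never runs when this file is sourced. It rewrites
# Lindenmayer's algae system A -> AB, B -> A for four cycles and assumes
# stringr is attached, since Lsys() calls str_locate_all() above.
if (FALSE) {
  library(stringr)
  algae_rules <- data.frame(inp = c("A", "B"),
                            out = c("AB", "A"),
                            stringsAsFactors = FALSE)
  Lsys(init = "A", rules = algae_rules, n = 4)
  # cycle 4 should be "ABAABABA"
}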
|
predict.aster <- function(object, x, root, modmat, amat,
parm.type = c("mean.value", "canonical"),
model.type = c("unconditional", "conditional"),
is.always.parameter = FALSE,
se.fit = FALSE, info = c("expected", "observed"),
info.tol = sqrt(.Machine$double.eps), newcoef = NULL,
gradient = se.fit, ...)
{
parm.type <- match.arg(parm.type)
model.type <- match.arg(model.type)
info <- match.arg(info)
if (! object$converged)
stop("aster model fit not converged")
if (missing(modmat)) {
modmat <- object$modmat
x <- object$x
root <- object$root
}
setfam(object$famlist)
stopifnot(is.logical(se.fit))
stopifnot(length(se.fit) == 1)
stopifnot(is.logical(gradient))
stopifnot(length(gradient) == 1)
stopifnot(all(dim(modmat)[2:3] == dim(object$modmat)[2:3]))
stopifnot(all(dimnames(modmat)[[2]] == dimnames(object$modmat)[[2]]))
stopifnot(all(dimnames(modmat)[[3]] == dimnames(object$modmat)[[3]]))
if (parm.type == "mean.value") {
stopifnot(dim(root) == dim(modmat)[1:2])
if (model.type == "conditional") {
stopifnot(dim(x) == dim(modmat)[1:2])
}
}
if (parm.type == "mean.value") {
if (missing(root))
stop("parm.type == \"mean.value\" and root missing\n")
if (model.type == "conditional") {
if (missing(x))
stop("parm.type == \"mean.value\" && model.type == \"conditional\" and x missing\n")
}
}
if (! missing(amat)) {
if (is.array(amat)) {
if (length(dim(amat)) != 3)
stop("amat is array but not 3-dimensional")
if (! all(dim(amat)[1:2] == dim(modmat)[1:2]))
stop("amat is array but dimensions 1 and 2 do not match modmat")
} else {
if (is.matrix(amat)) {
if (dim(amat)[1] != prod(dim(modmat)[1:2]))
stop("amat is matrix but first dimension does not match dimensions 1 and 2 of modmat")
} else {
stop("amat is neither array nor matrix")
}
}
}
nind <- dim(modmat)[1]
nnode <- dim(modmat)[2]
ncoef <- dim(modmat)[3]
if (ncoef != length(object$coefficients))
stop("object$coefficients does not match dim(modmat)[3]")
if (se.fit) {
if (info == "expected")
infomat <- object$fisher
else
infomat <- object$hessian
fred <- eigen(infomat, symmetric = TRUE)
sally <- fred$values < max(fred$values) * info.tol
if (any(sally)) {
cat("apparent null eigenvectors of information matrix\n")
cat("directions of recession or constancy of log likelihood\n")
print(zapsmall(fred$vectors[ , sally]))
stop("cannot compute standard errors")
}
}
beta <- object$coefficients
if (! is.null(newcoef)) {
stopifnot(is.numeric(newcoef))
stopifnot(is.finite(newcoef))
stopifnot(length(newcoef) == length(object$coefficients))
beta <- newcoef
}
eta <- .C(C_aster_mat_vec_mult,
nrow = as.integer(nind * nnode),
ncol = as.integer(ncoef),
a = as.double(modmat),
b = as.double(beta),
c = matrix(as.double(0), nind, nnode))$c
origin <- object$origin
stopifnot(is.numeric(origin))
stopifnot(all(is.finite(origin)))
stopifnot(is.matrix(origin))
stopifnot(ncol(origin) == nnode)
origin.row <- origin[1, ]
origin <- matrix(origin.row, nrow = nind, ncol = nnode, byrow = TRUE)
eta <- eta + origin
pred <- object$pred
fam <- object$fam
if (parm.type == "canonical") {
if (model.type == object$type) {
zeta <- eta
if (se.fit | gradient)
gradmat <- matrix(modmat, ncol = ncoef)
} else {
if (model.type == "unconditional") {
zeta <- .C(C_aster_theta2phi,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(eta),
phi = matrix(as.double(0), nind, nnode))$phi
if (se.fit | gradient)
gradmat <- .C(C_aster_D_beta2theta2phi,
nind = as.integer(nind),
nnode = as.integer(nnode),
ncoef = as.integer(ncoef),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(eta),
modmat = as.double(modmat),
gradmat = matrix(as.double(0), nind * nnode, ncoef)
)$gradmat
}
if (model.type == "conditional") {
zeta <- .C(C_aster_phi2theta,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
phi = as.double(eta),
theta = matrix(as.double(0), nind, nnode))$theta
if (se.fit | gradient)
gradmat <- .C(C_aster_D_beta2phi2theta,
nind = as.integer(nind),
nnode = as.integer(nnode),
ncoef = as.integer(ncoef),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(zeta),
modmat = as.double(modmat),
gradmat = matrix(as.double(0), nind * nnode, ncoef)
)$gradmat
}
}
} else {
if (model.type == "conditional" && object$type == "conditional") {
ctau <- .C(C_aster_theta2ctau,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(eta),
ctau = matrix(as.double(0), nind, nnode))$ctau
xpred <- .C(C_aster_xpred,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
x = as.double(x),
root = as.double(root),
xpred = double(nind * nnode))$xpred
if (is.always.parameter) {
zeta <- ctau
} else {
zeta <- xpred * ctau
}
if (se.fit | gradient) {
grad.ctau <- .C(C_aster_theta2whatsis,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
deriv = as.integer(2),
theta = as.double(eta),
result = double(nind * nnode))$result
gradmat <- matrix(modmat, ncol = ncoef)
if (is.always.parameter) {
gradmat <- sweep(gradmat, 1, grad.ctau, "*")
} else {
gradmat <- sweep(gradmat, 1, xpred * grad.ctau, "*")
}
}
}
if (model.type == "unconditional" && object$type == "unconditional") {
theta <- .C(C_aster_phi2theta,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
phi = as.double(eta),
theta = matrix(as.double(0), nind, nnode))$theta
ctau <- .C(C_aster_theta2ctau,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(theta),
ctau = matrix(as.double(0), nind, nnode))$ctau
zeta <- .C(C_aster_ctau2tau,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
root = as.double(root),
ctau = as.double(ctau),
tau = matrix(as.double(0), nind, nnode))$tau
if (se.fit | gradient)
gradmat <- .C(C_aster_D_beta2phi2tau,
nind = as.integer(nind),
nnode = as.integer(nnode),
ncoef = as.integer(ncoef),
pred = as.integer(pred),
fam = as.integer(fam),
beta = as.double(beta),
root = as.double(root),
origin = as.double(origin),
modmat = as.double(modmat),
gradmat = matrix(as.double(0), nind * nnode, ncoef))$gradmat
}
if (model.type == "conditional" && object$type == "unconditional") {
theta <- .C(C_aster_phi2theta,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
phi = as.double(eta),
theta = matrix(as.double(0), nind, nnode))$theta
ctau <- .C(C_aster_theta2ctau,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(theta),
ctau = matrix(as.double(0), nind, nnode))$ctau
xpred <- .C(C_aster_xpred,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
x = as.double(x),
root = as.double(root),
xpred = matrix(as.double(0), nind, nnode))$xpred
if (is.always.parameter) {
zeta <- ctau
} else {
zeta <- xpred * ctau
}
if (se.fit | gradient) {
gradmat <- .C(C_aster_D_beta2phi2theta,
nind = as.integer(nind),
nnode = as.integer(nnode),
ncoef = as.integer(ncoef),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(theta),
modmat = as.double(modmat),
gradmat = matrix(as.double(0), nind * nnode, ncoef))$gradmat
grad.ctau <- .C(C_aster_theta2whatsis,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
deriv = as.integer(2),
theta = as.double(theta),
result = double(nind * nnode))$result
if (is.always.parameter) {
deltheta2xi <- grad.ctau
} else {
deltheta2xi <- as.numeric(xpred) * grad.ctau
}
gradmat <- sweep(gradmat, 1, deltheta2xi, "*")
}
}
if (model.type == "unconditional" && object$type == "conditional") {
ctau <- .C(C_aster_theta2ctau,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
theta = as.double(eta),
ctau = matrix(as.double(0), nind, nnode))$ctau
zeta <- .C(C_aster_ctau2tau,
nind = as.integer(nind),
nnode = as.integer(nnode),
pred = as.integer(pred),
fam = as.integer(fam),
root = as.double(root),
ctau = as.double(ctau),
tau = matrix(as.double(0), nind, nnode))$tau
if (se.fit | gradient)
gradmat <- .C(C_aster_D_beta2theta2tau,
nind = as.integer(nind),
nnode = as.integer(nnode),
ncoef = as.integer(ncoef),
pred = as.integer(pred),
fam = as.integer(fam),
beta = as.double(beta),
root = as.double(root),
modmat = as.double(modmat),
gradmat = matrix(as.double(0), nind * nnode, ncoef))$gradmat
}
}
clearfam()
result <- as.double(zeta)
if (! missing(amat)) {
amat <- matrix(amat, nrow = nind * nnode)
result <- as.numeric(t(amat) %*% result)
if (se.fit | gradient)
gradmat <- t(amat) %*% gradmat
}
if (! (se.fit | gradient)) {
return(result)
} else if (se.fit) {
fred <- .C(C_aster_diag_mat_mat_mat_mult,
nrow = nrow(gradmat),
ncol = ncol(gradmat),
a = as.double(gradmat),
b = as.double(solve(infomat)),
c = double(nrow(gradmat)))$c
return(list(fit = result, se.fit = sqrt(as.numeric(fred)),
gradient = gradmat))
} else {
return(list(fit = result, gradient = gradmat))
}
}
predict.aster.formula <- function(object, newdata, varvar, idvar, root, amat,
parm.type = c("mean.value", "canonical"),
model.type = c("unconditional", "conditional"),
is.always.parameter = FALSE,
se.fit = FALSE, info = c("expected", "observed"),
info.tol = sqrt(.Machine$double.eps), newcoef = NULL,
gradient = se.fit, ...)
{
parm.type <- match.arg(parm.type)
model.type <- match.arg(model.type)
info <- match.arg(info)
if (missing(newdata)) {
class(object) <- "aster"
return(NextMethod("predict"))
}
tt <- object$terms
mf <- match.call(expand.dots = FALSE)
m <- match(c("newdata", "varvar", "idvar", "root"), names(mf), 0)
mf <- mf[c(1, m)]
mf$drop.unused.levels <- TRUE
mf[[1]] <- as.name("model.frame")
mf$formula <- tt
mf$xlev <- object$xlevels
mf$data <- mf$newdata
mf$newdata <- NULL
mf <- eval.parent(mf)
mt <- attr(mf, "terms")
x <- model.response(mf, "numeric")
if (is.empty.model(mt)) {
stop("empty model")
} else {
modmat <- model.matrix(mt, mf)
}
varvar <- mf[["(varvar)"]]
idvar <- mf[["(idvar)"]]
root <- mf[["(root)"]]
nind <- length(unique(idvar))
nnode <- length(unique(varvar))
if (nind * nnode != length(varvar))
stop("nrow(data) not nind * nnode")
varvarmat <- matrix(as.vector(varvar), nind, nnode)
idvarmat <- matrix(as.vector(idvar), nind, nnode)
foo <- apply(varvarmat, 2, function(x) length(unique(x)))
bar <- apply(idvarmat, 1, function(x) length(unique(x)))
if (! (all(foo == 1) & all(bar == 1)))
stop("data not nind by nnode matrix with rows individuals and columns variables")
varlab <- varvarmat[1, ]
idlab <- idvarmat[ , 1]
if (all(idlab == seq(along = idlab)))
idlab <- NULL
if (! is.numeric(x))
stop("response not numeric")
if (length(x) != nind * nnode)
stop("response not nind by nnode matrix with rows individuals and columns variables")
x <- matrix(x, nind, nnode)
dimnames(x) <- list(idlab, varlab)
if (! is.numeric(root))
stop("root not numeric")
if (length(root) != nind * nnode)
stop("root not nind by nnode matrix with rows individuals and columns variables")
root <- matrix(root, nind, nnode)
dimnames(root) <- list(idlab, varlab)
if (! is.numeric(modmat))
stop("model matrix not numeric")
if (! is.matrix(modmat))
stop("model matrix not matrix")
if (nrow(modmat) != nind * nnode)
stop("nrow of model matrix not nind * nnode")
coeflab <- dimnames(modmat)[[2]]
objcoeflab <- names(object$coefficients)
if (! all(is.element(objcoeflab, coeflab)))
stop("regression coefficients do not match in object and new model matrix")
inies <- is.element(coeflab, objcoeflab)
modmat <- modmat[ , inies]
coeflab <- dimnames(modmat)[[2]]
ncoef <- length(coeflab)
modmat <- array(as.numeric(modmat), c(nind, nnode, ncoef))
dimnames(modmat) <- list(idlab, varlab, coeflab)
if (missing(amat)) {
foo <- predict.aster(object, x, root, modmat,
parm.type = parm.type, model.type = model.type,
is.always.parameter = is.always.parameter,
se.fit = se.fit, info = info, info.tol = info.tol,
newcoef = newcoef, gradient = gradient, ...)
} else {
foo <- predict.aster(object, x, root, modmat, amat,
parm.type = parm.type, model.type = model.type,
is.always.parameter = is.always.parameter,
se.fit = se.fit, info = info, info.tol = info.tol,
newcoef = newcoef, gradient = gradient, ...)
}
if (is.list(foo)) {
foo$modmat <- modmat
}
return(foo)
} |
llsearch.D <-function(x, y, n, jlo, jhi, klo, khi,plot)
{
fjk <- matrix(0, n, n)
fxy <- matrix(0, (jhi - jlo + 1), (khi - klo + 1))
jkgrid <- expand.grid(jlo:jhi, klo:khi)
res <- data.frame(j = jkgrid[,1],
k = jkgrid[,2],
k.ll = apply(jkgrid, 1, p.estFUN.D, x = x,
y = y, n = n))
fxy <- matrix(res$k.ll, nrow = jhi-jlo+1, ncol = khi-klo+1)
rownames(fxy) <- jlo:jhi
colnames(fxy) <- klo:khi
if (plot == "TRUE") {
jx<-jlo:jhi
ky<-klo:khi
persp(jx, ky, fxy, xlab = "j", ylab = "k", zlab = "LL(x,y,j,k)")
title("Log-likelihood Surface")
}
z <- findmax(fxy)
jcrit <- z$imax + jlo - 1
kcrit <- z$jmax + klo - 1
list(jhat = jcrit, khat = kcrit, value = max(fxy))
}
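# Usage sketch added for illustration (not from the original source); wrapped
# in if (FALSE) so it never runs when this file is sourced. It simulates a
# three-segment linear trend and grid-searches the two change points; it
# relies on p.estFUN.D(), p.est.D(), p.ll.D() and findmax() defined below, so
# source the whole file first.
if (FALSE) {
  set.seed(1)
  n <- 60
  x <- sort(runif(n, 0, 10))
  y <- ifelse(x < 3, 1 + 2 * x,
              ifelse(x < 7, 7 - 0.5 * (x - 3), 5 + 1.5 * (x - 7))) +
    rnorm(n, sd = 0.3)
  llsearch.D(x, y, n, jlo = 10, jhi = 25, klo = 35, khi = 50, plot = FALSE)
}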
p.estFUN.D <- function(jk, x, y, n){
j = jk[1]
k = jk[2]
a <- p.est.D(x,y,n,j,k)
s2 <- a$sigma2
t2 <- a$tau2
u2 <- a$u2
return(p.ll.D(n, j, k, s2, t2, u2))
}
p.est.D <-function(x,y,n,j,k){
xa<-x[1:j]
ya<-y[1:j]
jp1 <- j+1
xb <- x[jp1:k]
yb <- y[jp1:k]
kp1 <- k+1
xc <- x[kp1:n]
yc <- y[kp1:n]
g1 <- lm(ya ~ xa)
g2 <- lm(yb ~ xb)
g3 <- lm(yc ~ xc)
beta <- c(g1$coef[1],g1$coef[2],g2$coef[1],g2$coef[2],g3$coef[1],g3$coef[2])
s2 <- sum((ya-g1$fit)^2)/j
t2 <- sum((yb-g2$fit)^2)/(k-j)
u2 <- sum((yc-g3$fit)^2)/(n-k)
list(a0=beta[1],a1=beta[2],b0=beta[3],b1=beta[4],c0=beta[5],c1=beta[6],sigma2=s2,tau2=t2,u2=u2,xj=x[j],xk=x[k])
}
p.ll.D<-function(n, j, k, s2, t2, u2){
q1 <- n * log(sqrt(2 * pi))
q2 <- 0.5 * (j) * (1 + log(s2))
q3 <- 0.5 * (k - j) * (1 + log(t2))
q4 <- 0.5*(n-k)*(1+log(u2))
- (q1 + q2 + q3 + q4)
}
findmax <-function(a)
{
maxa<-max(a)
imax<- which(a==max(a),arr.ind=TRUE)[1]
jmax<-which(a==max(a),arr.ind=TRUE)[2]
list(imax = imax, jmax = jmax, value = maxa)
} |
prep_odds_against <- function(dat,
x_axis,
groupings = NULL) {
{ if (!tibble::is_tibble(dat)) {
base::stop("dat must be a tibble")
}
if (!base::is.character(x_axis)) {
base::stop("x_axis must be a string indicating the x-axis variable")
}
if (!base::all(base::is.character(groupings)) & !base::is.null(groupings)) {
base::stop("groupings must be a character or vector of characters for column names")
} }
new_col <- glue::glue("{x_axis}_against")
dat <- dat %>%
dplyr::group_by_at(dplyr::vars(dplyr::one_of(groupings))) %>%
dplyr::mutate(!!{{ new_col }} := (1 - .data[[x_axis]]) / (.data[[x_axis]])) %>%
dplyr::arrange(.data[[new_col]],
.by_group = TRUE
)
base::return(dat)
} |
knitr::opts_chunk$set(warning = FALSE, message = TRUE)
library(ggplot2)
library(ggshadow)
ggplot(economics_long, aes(date, value01, colour = variable)) + geom_shadowline()
library(ggshadow)
ggplot(economics_long, aes(date, value01, group = variable, colour=value01, shadowcolor='grey', shadowalpha=0.5, shadowsize=5*(1-value01))) + geom_shadowline()
ggplot(mtcars, aes(wt, mpg)) + geom_shadowpoint(aes( color = carb, shadowcolour = ifelse(vs == 1, 'red', 'blue') )) |
bufferforsiland<-function(d,sfGIS,loc.sf,landnames,border=F)
{
if(class(loc.sf)[1]!="sf")
{
loc.sf=st_as_sf(as.data.frame(loc.sf),coords = c("X","Y"))
st_crs(loc.sf)<-st_crs(sfGIS)
}
if(length(d) != length(landnames))
stop("length(d) and length(landnames) have to be equal")
n=nrow(loc.sf)
nvar=length(landnames)
matBuffer=matrix(0,nrow=n,ncol=nvar)
for(k in 1:nvar)
{
if(!border)
{
geom_buff=st_geometry(st_buffer(loc.sf,d[k]))
ll=list(NULL)
for(i in 1:length(geom_buff))
{
ll[[i]]=geom_buff[i]
}
}
if(border)
{
options(warn=-1)
stinter=sf::st_intersects(loc.sf,sfGIS)
options(warn=0)
loc2gis=unlist(lapply(stinter,function(x){if(length(x)==1) return(x) else return(-1000)} ))
numerr=c(1:length(loc2gis))[loc2gis==-1000]
if(sum(loc2gis==-1000)>0)
{
locerr=c(1:length(loc2gis))[loc2gis==-1000]
cat("problem with observations : \n")
print(locerr)
stop("Some observations are not located inside polygons.")
}
geom_buff=st_geometry(st_buffer(sfGIS$geometry[loc2gis],d[k]))
geom_buff0=st_geometry(st_buffer(sfGIS$geometry[loc2gis],0))
ll=list(NULL)
for(i in 1:length(geom_buff))
{
ll[[i]]=st_make_valid(st_difference(geom_buff[i],geom_buff0[i]))
if(st_is_empty(ll[[i]]))
stop("stop empty 1")
}
}
areaBuff=unlist(lapply(ll,function(x){
res=sum(st_area(x))
if(length(res)==0)
return(0)
else
return(res)
}))
currentland=st_make_valid(st_geometry(sfGIS[unlist(sfGIS[landnames[k]]) ==1,]))
listArea=lapply(ll,function(x){uu=st_intersection(x,currentland)
res=as.numeric(sum(st_area(uu)))
if(length(res)==0)
return(0)
else
return(res)
})
Areavector=unlist(listArea)
ind=c(areaBuff!=0)
propBuffer=rep(0,length(areaBuff))
propBuffer[ind]=Areavector[ind]/areaBuff[ind]
matBuffer[,k]=propBuffer
}
colnames(matBuffer)=landnames
  invisible(matBuffer)
} |
getCommonPrefix <- function(strs, suffix=FALSE, ...) {
nchars <- sapply(strs, FUN=nchar)
chars <- strsplit(strs, split="")
if (suffix) {
chars <- lapply(chars, FUN=rev)
}
naValue <- NA_character_
data <- matrix(naValue, nrow=length(chars), ncol=max(nchars))
for (kk in seq_along(chars)) {
cc <- seq_len(nchars[kk])
data[kk,cc] <- chars[[kk]]
}
count <- 0
for (cc in seq_len(ncol(data))) {
uchars <- unique(data[,cc])
if (length(uchars) > 1)
break
count <- cc
}
prefix <- chars[[1]][seq_len(count)]
if (suffix) {
prefix <- rev(prefix)
}
prefix <- paste(prefix, collapse="")
prefix
} |
call(3,
b = 2, c
)
gs(3,
b = 2,
c
)
call(3, b = 2, c)
map(data, fun,
x = 3, z = 33
)
map2(
dat1, data2, fun, x, y,
z
)
map2(dat1, data2, fun,
x = 1, y = 2,
z
) |
dccLink <- function(children=NULL, id=NULL, href=NULL, refresh=NULL, className=NULL, style=NULL, title=NULL, target=NULL, loading_state=NULL) {
props <- list(children=children, id=id, href=href, refresh=refresh, className=className, style=style, title=title, target=target, loading_state=loading_state)
if (length(props) > 0) {
props <- props[!vapply(props, is.null, logical(1))]
}
component <- list(
props = props,
type = 'Link',
namespace = 'dash_core_components',
propNames = c('children', 'id', 'href', 'refresh', 'className', 'style', 'title', 'target', 'loading_state'),
package = 'dashCoreComponents'
)
structure(component, class = c('dash_component', 'list'))
} |
rbenttest <- function(y, z, x, NB = 1000, myseed = 1 ){
wilcoxon <- function(t){
sqrt(12)*(t-0.5)
}
testFun <- function(tau){
n <- length(y)
xz <- cbind(z, x)
fit.rank <- rfit(y~ xz-1)
res.rank <- as.vector(residuals(fit.rank))
phi.res <- wilcoxon(rank(res.rank)/(n+1))
Rn.rank <- rep(0, length(tau))
for (kk in 1:length(tau)){
Rn.rank[kk] <- 1/sqrt(n)*sum(
phi.res*(x-tau[kk])*ifelse(x<=tau[kk], 1, 0)
)
}
Tn.rank <- max(abs(Rn.rank))
return(Tn.rank)
}
testFun.resample <- function(tau){
n<- length(y)
e <- rnorm(n, 0, 1)
B <- runif(n, 0, 1)
v <- (-1)*(B<=0.5) +1*(B>0.5)
ve <- v*e
xz <- cbind(z, x)
Sn <- (t(xz)%*%xz)/n
xc <- xz - mean(xz)
Sn.rank <- mean(xc^2)
fit.rank <- rfit(y~ xz-1)
res.rank <- as.vector(residuals(fit.rank))
h <- 1.06* n^(-1/5)* sd(res.rank)
pdf<- approxfun(density(res.rank, kernel= "epanechnikov", bw=h))
fe.rank <- pdf(res.rank)
cdf <- ecdf(res.rank)
Fe.rank <- cdf(res.rank)
taus <- fit.rank$taushat
tauphi <- fit.rank$tauhat
Rn.rank <- rep(0, length(tau))
for (kk in 1:length(tau)){
Sn.rank.tau <- apply(
sqrt(12)*fe.rank*xz*(x-tau[kk])*ifelse(x<= tau[kk], 1, 0), 2, mean
)
Rn.rank[kk] <- 1/sqrt(n)*sum(
ve*(
wilcoxon(Fe.rank)*(x-tau[kk])*ifelse(x<= tau[kk],1,0)-
wilcoxon(Fe.rank)* tauphi* xz%*% solve(Sn) %*% Sn.rank.tau
)
)
}
Tn.rank <- max(abs(Rn.rank))
return(Tn.rank)
}
set.seed(myseed)
tau <- seq(min(x)+0.01, max(x)-0.01, length=100)
Tn <- testFun(tau)
Tn.NB <- replicate(NB, testFun.resample(tau))
pv <- mean(Tn.NB >Tn, na.rm=TRUE)
return(list(Tn = Tn, Tn.NB = Tn.NB, p.value = pv))
} |
NULL
emmeans_test <- function(data, formula, covariate = NULL, ref.group = NULL,
comparisons = NULL, p.adjust.method = "bonferroni",
conf.level = 0.95, model = NULL, detailed = FALSE){
. <- NULL
covariate <- rlang::enquos(covariate = covariate) %>%
get_quo_vars_list(data, .) %>% unlist()
args <- as.list(environment()) %>%
.add_item(method = "emmeans_test")
required_package("emmeans")
outcome <- get_formula_left_hand_side(formula)
rhs <- group <- get_formula_right_hand_side(formula)
grouping.vars <- NULL
if(is_grouped_df(data)){
grouping.vars <- dplyr::group_vars(data)
rhs <- c(grouping.vars, rhs) %>%
paste(collapse = "*")
data <- dplyr::ungroup(data)
}
if(!is.null(covariate)){
covariate <- paste(covariate, collapse = "+")
rhs <- paste(covariate, rhs, sep = "+")
}
data <- data %>% .as_factor(group, ref.group = ref.group)
group.levels <- data %>% get_levels(group)
formula <- stats::as.formula(paste(outcome, rhs, sep = " ~ "))
if(is.null(model))
model <- stats::lm(formula, data)
if (is.null(comparisons)) {
comparisons <- get_comparisons(data, variable = !!group, ref.group = !!ref.group)
}
method <- get_emmeans_contrasts(data, group, comparisons)
formula.emmeans <- stats::as.formula(paste0("~", rhs))
res.emmeans <- emmeans::emmeans(model, formula.emmeans)
comparisons <- pairwise_emmeans_test(
res.emmeans, grouping.vars, method = method,
p.adjust.method = p.adjust.method,
conf.level = conf.level
)
res.emmeans <- res.emmeans %>%
tibble::as_tibble() %>%
dplyr::arrange(!!!syms(grouping.vars)) %>%
dplyr::rename(se = .data$SE, conf.low = .data$lower.CL, conf.high = .data$upper.CL) %>%
mutate(method = "Emmeans test")
if(!detailed){
to.remove <- c("estimate", "estimate1", "estimate2", "se", "conf.low", "conf.high", "method", "null.value")
to.keep <- setdiff(colnames(comparisons), to.remove)
comparisons <- comparisons[, to.keep]
}
comparisons %>%
add_column(.y. = outcome, .before = "group1") %>%
set_attrs(args = args, emmeans = res.emmeans) %>%
add_class(c("rstatix_test", "emmeans_test"))
}
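# Usage sketch added for illustration (not from the original source); wrapped
# in if (FALSE) so it never runs on load. It mirrors the usual rstatix-style
# call and assumes the emmeans package plus the unexported helpers referenced
# above (get_quo_vars_list, get_comparisons, add_significance, ...) are
# available.
if (FALSE) {
  df <- ToothGrowth
  df$dose <- as.factor(df$dose)
  emmeans_test(df, len ~ dose, p.adjust.method = "bonferroni")
}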
get_emmeans <- function(emmeans.test){
if(!inherits(emmeans.test, "emmeans_test")){
stop("An object of class 'emmeans_test' required.")
}
attr(emmeans.test, "emmeans")
}
pairwise_emmeans_test <- function(res.emmeans, grouping.vars = NULL, method = "pairwise",
p.adjust.method = "bonferroni", conf.level = 0.95){
comparisons <- emmeans::contrast(
res.emmeans, by = grouping.vars, method = method,
adjust = "none"
)
comparisons <- tidy(comparisons, conf.int = TRUE, conf.level = conf.level)
comparisons <- comparisons %>%
tidyr::separate(col = "contrast", into = c("group1", "group2"), sep = "-") %>%
dplyr::rename(se = .data$std.error, p = .data$p.value) %>%
dplyr::select(!!!syms(grouping.vars), everything())
p.adjusted <- emmeans::contrast(
res.emmeans, by = grouping.vars, method = method,
adjust = p.adjust.method
) %>%
as.data.frame() %>%
pull("p.value")
comparisons <- comparisons %>%
mutate(p.adj = p.adjusted) %>%
add_significance("p.adj")
comparisons %>%
dplyr::arrange(!!!syms(grouping.vars))
}
get_emmeans_contrasts <- function(data, group, comparisons){
get_dummy_code <- function(level, group.levels){
dummy.code <- rep(0, length(group.levels))
lev.pos <- which(group.levels == level)
dummy.code[lev.pos] <- 1
dummy.code
}
make_emmeans_contrast <- function(groups, contrasts.list ){
group1 <- groups[1]
group2 <- groups[2]
contrasts.list[[group1]]-contrasts.list[[group2]]
}
make_comparison_name <- function(groups){
paste(groups[1], groups[2], sep = "-")
}
group.levels <- get_levels(data, group)
contrasts.list <- group.levels %>%
map(get_dummy_code, group.levels)
names(contrasts.list) <- group.levels
comparison.contrasts <- comparisons %>%
map(make_emmeans_contrast, contrasts.list)
comparison.names <- comparisons %>% map(make_comparison_name)
names(comparison.contrasts) <- comparison.names
comparison.contrasts
} |
steepest <- function(K,y,theta){
foo <- function(x,K,y,theta,gv) {
get.l(theta+x*gv, K, y)
}
gv <- get.gl(theta,K,y)$grad
gv <- gv/sqrt(sum(gv^2))
poop <- optimize(foo,c(-1,1),maximum=TRUE,K=K,y=y,
theta=theta,gv=gv)
con <- poop$maximum
theta + con*gv
} |
kpAddCytobandsAsLine <- function(karyoplot, color.table=NULL, color.schema='only.centromeres', lwd=3, lend=1, clipping=TRUE, ...) {
if(missing(karyoplot)) stop("The parameter 'karyoplot' is required")
if(!methods::is(karyoplot, "KaryoPlot")) stop("'karyoplot' must be a valid 'KaryoPlot' object")
if(!is.null(karyoplot$cytobands) && length(karyoplot$cytobands)>0) {
cyto <- karyoplot$cytobands
} else {
cyto <- karyoplot$genome
mcols(cyto) <- data.frame(name=seqnames(cyto), gieStain="gpos50", stringsAsFactors=FALSE)
}
if(!methods::is(cyto, "GRanges")) stop("'cytobands' must be a GRanges object")
if(!("gieStain" %in% colnames(mcols(cyto)))) {
warning("No 'gieStain' column found in cytobands. Using 'gpos50' (gray) for all of them")
mcols(cyto) <- cbind(mcols(cyto), gieStain="gpos50")
}
cyto <- filterChromosomes(cyto, keep.chr = karyoplot$chromosomes)
karyoplot$beginKpPlot()
on.exit(karyoplot$endKpPlot())
ccf <- karyoplot$coord.change.function
pp <- karyoplot$plot.params
mids <- karyoplot$ideogram.mid
color.table <- getCytobandColors(color.table, color.schema)
ybottom <- mids(as.character(seqnames(cyto)))
ytop <- mids(as.character(seqnames(cyto)))
xleft <- ccf(x=start(cyto), chr=as.character(seqnames(cyto)), data.panel="ideogram")$x
xright <- ccf(x=end(cyto), chr=as.character(seqnames(cyto)), data.panel="ideogram")$x
col <- color.table[as.character(cyto$gieStain)]
if(karyoplot$zoom==TRUE) {
if(clipping==TRUE) {
clip.xleft <- ccf(x=start(karyoplot$plot.region), chr=as.character(seqnames(karyoplot$plot.region)), data.panel="ideogram")$x
clip.xright <- ccf(x=end(karyoplot$plot.region), chr=as.character(seqnames(karyoplot$plot.region)), data.panel="ideogram")$x
clip.ybottom <- ybottom - 10
clip.ytop <- ytop + 10
graphics::clip(x1 = clip.xleft, x2 = clip.xright, y1 = clip.ybottom, y2=clip.ytop)
}
}
graphics::segments(x0 = xleft, x1=xright, y0=ybottom, y1=ytop, col=col, lwd=lwd, lend=lend)
invisible(karyoplot)
} |
context("test-caesar-cipher")
test_that("Caesar encryption works", {
expect_equal(caesar("Experience is the teacher of all things.",
shift = 3),
"HAshulhqfhclvcwkhcwhdfkhucricdoocwklqjva")
expect_equal(caesar(c("Experience is the teacher of all things.",
"The best way of avenging thyself is not to become like the wrong doer."),
shift = 3),
c("HAshulhqfhclvcwkhcwhdfkhucricdoocwklqjva",
"WkhcehvwczdBcricdyhqjlqjcwkBvhoiclvcqrwcwrcehfrphcolnhcwkhczurqjcgrhua"))
expect_equal(caesar("Experience is the teacher of all things.",
shift = 0),
"Experience is the teacher of all things.")
expect_equal(caesar("Experience is the teacher of all things.",
shift = 5),
"JCujwnjshjenxeymjeyjfhmjwetkefqqeymnslxc")
expect_equal(caesar("Experience is the teacher of all things.",
shift = -10),
"unf<h/<d:<{/i{j.<{j<}:.<h{e>{}bb{j./d,i[")
expect_equal(caesar("It is easier to find men who will volunteer to die, than to find those who are willing to endure pain with patience.",
shift = 15),
"XIoxHotpHxtGoIDouxCsoBtCoLwDoLxAAoKDAJCIttGoIDosxtloIwpCoIDouxCsoIwDHtoLwDopGtoLxAAxCvoIDotCsJGtoEpxCoLxIwoEpIxtCrtm")
expect_equal(caesar("It is easier to find men who will volunteer to die, than to find those who are willing to endure pain with patience.",
shift = -17),
"rc_;b_[+b;[a_c._];,~_>[,_f}._f;<<_e.<d,c[[a_c._~;[(_c}+,_c._];,~_c}.b[_f}._+a[_f;<<;,{_c._[,~da[_/+;,_f;c}_/+c;[,`[)")
expect_equal(caesar("What we wish, we readily believe, and what we ourselves think, we imagine others think also. 123 !@
shift = -17),
"F}+c_f[_f;b}(_f[_a[+~;<h_=[<;[e[(_+,~_f}+c_f[_.dab[<e[b_c};,'(_f[_;>+{;,[_.c}[ab_c};,'_+<b.)_KLM_TUV")
expect_equal(caesar("What we wish, we readily believe, and what we ourselves think, we imagine others think also. 123 !@
shift = 1),
"Xibuaxfaxjti.axfasfbejmzacfmjfwf.aboeaxibuaxfapvstfmwftauijol.axfajnbhjofapuifstauijolabmtp/a234a@
expect_equal(caesar("Veni, vidi, vici. (I came, I saw, I conquered.)",
shift = 1),
"Wfoj.awjej.awjdj/a)Jadbnf.aJatbx.aJadporvfsfe/-")
expect_equal(caesar("Veni, vidi, vici. (I came, I saw, I conquered.)",
shift = 5),
"0jsnbeAninbeAnhnce=NehfrjbeNexfBbeNehtsvzjwjic`")
expect_equal(caesar("Veni, vidi, vici. (I came, I saw, I conquered.)",
shift = 100),
"4nwrfiErmrfiErlrgi]RiljvnfiRiBjFfiRilxwzDnAnmg{")
expect_equal(caesar("Veni, vidi, vici. (I came, I saw, I conquered.)",
shift = 1500),
"aW50OR$0V0OR$0U0PRx[RUS4WOR[R!S%OR[RU658
expect_equal(caesar("Veni, vidi, vici. (I came, I saw, I conquered.)",
shift = -100),
"M>e [}m < [}m ' ]}9z}';d>[}z}j;n[}z}'fehl>i><]!")
expect_equal(caesar("Veni, vidi, vici. (I came, I saw, I conquered.)",
shift = -123),
"p@-^36{^!^36{^9^46Mc697)@36c6~7}36c69_-=]@`@!4N")
})
test_that("Caesar deencryption works", {
expect_equal(caesar("HAshulhqfhclvcwkhcwhdfkhucricdoocwklqjva",
shift = 3,
decrypt = TRUE),
"Experience is the teacher of all things.")
expect_equal(caesar("Experience is the teacher of all things.",
shift = 0,
decrypt = TRUE),
"Experience is the teacher of all things.")
expect_equal(caesar("JCujwnjshjenxeymjeyjfhmjwetkefqqeymnslxc",
shift = 5,
decrypt = TRUE),
"Experience is the teacher of all things.")
expect_equal(caesar("unf<h/<d:<{/i{j.<{j<}:.<h{e>{}bb{j./d,i[",
shift = -10,
decrypt = TRUE),
"Experience is the teacher of all things.")
expect_equal(caesar("XIoxHotpHxtGoIDouxCsoBtCoLwDoLxAAoKDAJCIttGoIDosxtloIwpCoIDouxCsoIwDHtoLwDopGtoLxAAxCvoIDotCsJGtoEpxCoLxIwoEpIxtCrtm",
shift = 15,
decrypt = TRUE),
"It is easier to find men who will volunteer to die, than to find those who are willing to endure pain with patience.")
expect_equal(caesar("rc_;b_[+b;[a_c._];,~_>[,_f}._f;<<_e.<d,c[[a_c._~;[(_c}+,_c._];,~_c}.b[_f}._+a[_f;<<;,{_c._[,~da[_/+;,_f;c}_/+c;[,`[)",
shift = -17,
decrypt = TRUE),
"It is easier to find men who will volunteer to die, than to find those who are willing to endure pain with patience.")
expect_equal(caesar("F}+c_f[_f;b}(_f[_a[+~;<h_=[<;[e[(_+,~_f}+c_f[_.dab[<e[b_c};,'(_f[_;>+{;,[_.c}[ab_c};,'_+<b.)_KLM_TUV",
shift = -17,
decrypt = TRUE),
"What we wish, we readily believe, and what we ourselves think, we imagine others think also. 123 !@
expect_equal(caesar("Xibuaxfaxjti.axfasfbejmzacfmjfwf.aboeaxibuaxfapvstfmwftauijol.axfajnbhjofapuifstauijolabmtp/a234a@
shift = 1,
decrypt = TRUE),
"What we wish, we readily believe, and what we ourselves think, we imagine others think also. 123 !@
expect_equal(caesar("Wfoj.awjej.awjdj/a)Jadbnf.aJatbx.aJadporvfsfe/-",
shift = 1,
decrypt = TRUE),
"Veni, vidi, vici. (I came, I saw, I conquered.)")
expect_equal(caesar("0jsnbeAninbeAnhnce=NehfrjbeNexfBbeNehtsvzjwjic`",
shift = 5,
decrypt = TRUE),
"Veni, vidi, vici. (I came, I saw, I conquered.)")
expect_equal(caesar("4nwrfiErmrfiErlrgi]RiljvnfiRiBjFfiRilxwzDnAnmg{",
shift = 100,
decrypt = TRUE),
"Veni, vidi, vici. (I came, I saw, I conquered.)")
expect_equal(caesar("aW50OR$0V0OR$0U0PRx[RUS4WOR[R!S%OR[RU658
shift = 1500,
decrypt = TRUE),
"Veni, vidi, vici. (I came, I saw, I conquered.)")
expect_equal(caesar("M>e [}m < [}m ' ]}9z}';d>[}z}j;n[}z}'fehl>i><]!",
shift = -100,
decrypt = TRUE),
"Veni, vidi, vici. (I came, I saw, I conquered.)")
expect_equal(caesar("p@-^36{^!^36{^9^46Mc697)@36c6~7}36c69_-=]@`@!4N",
shift = -123,
decrypt = TRUE),
"Veni, vidi, vici. (I came, I saw, I conquered.)")
}) |
test_that("egor32 has factor vars", {
expect_s3_class(egor32$ego$variables$sex, "factor")
expect_s3_class(egor32$ego$variables$age, "factor")
expect_s3_class(egor32$alter$sex, "factor")
expect_s3_class(egor32$alter$age, "factor")
})
test_that("egor32: ego-level is active", {
expect_equal(attr(egor32, "active"), "ego")
}) |
context("String manipulation utilities")
test_that("substr2 edge cases", {
expect_identical(substr2("1234567890", 1, 1), "")
expect_identical(substr2("1234567890", 0, 0), "")
expect_identical(substr2("1234567890", 99, 100), "")
expect_identical(substr2("1234567890", 9, 100), "90")
expect_identical(substr2("1234567890", c(1,3)), "12")
expect_identical(substr2("1234567890", c(1,3), c(2,4)), c("1", "3"))
expect_identical(substr2("1234567890", c(1,2), 3), c("12", "2"))
expect_identical(substr2("1234567890", -3, 2), "1")
expect_error(substr2(c("abc", "def"), 1, 3))
})
test_that("gfsub basics", {
expect_identical(
gfsub("abcdefghijklmnopqrstuvwxyz", "[aeiouy]", toupper),
"AbcdEfghIjklmnOpqrstUvwxYz"
)
gfsub("&", "&(.*?);", function(match, entity) {
expect_identical(entity, "amp")
NULL
})
output <- gfsub("hello & goodbye", "&(.*?);", function(match, entity, bad_param) {
expect_error(bad_param)
NULL
})
expect_identical(output, "hello goodbye")
}) |
calculate_irr_periods <-
function(dates = c(
"2016-06-01",
"2017-05-31",
"2018-05-31",
"2019-05-31",
"2020-05-31",
"2021-05-31",
"2022-05-31",
"2023-05-31",
"2024-05-31",
"2025-05-31",
"2026-05-31"
),
cash_flows = c(
3000,
-478.515738547242,
-478.515738547242,
-478.515738547242,
-478.515738547242,
-478.515738547242,
-478.515738547242,
-478.515738547242,
-478.515738547242,
-478.515738547242,
-478.515738547278
),
date_format = '%Y-%m-%d',
scale_to_100 = F,
return_percentage = F,
return_df = T,
return_wide = T,
return_message = T) {
secant <-
function(par,
fn,
tol = 1.e-07,
itmax = 100,
trace = TRUE,
...) {
if (length(par) != 2)
stop("You must specify a starting parameter vector of length 2")
p.2 <- par[1]
p.1 <- par[2]
f <- rep(NA, length(par))
f[1] <- fn(p.1, ...)
f[2] <- fn(p.2, ...)
iter <- 1
pchg <- abs(p.2 - p.1)
fval <- f[2]
      if (trace)
        cat("secant: iter", iter, "value", fval, "\n")
      while (pchg >= tol & abs(fval) > tol & iter <= itmax) {
p.new <- p.2 - (p.2 - p.1) * f[2] / (f[2] - f[1])
pchg <- abs(p.new - p.2)
fval <- fn(p.new, ...)
p.1 <- p.2
p.2 <- p.new
f[1] <- f[2]
f[2] <- fval
iter <- iter + 1
}
list(par = p.new,
value = fval,
iter = iter)
}
npv <-
function (irr, cashFlow, times)
sum(cashFlow / (1 + irr) ^ times)
cfDate <-
dates %>%
as.Date(format = date_format)
times <-
difftime(cfDate, cfDate[1], units = "days") %>% as.numeric() / 365.24
s <-
secant(
par = c(0, 0.1),
fn = npv,
cashFlow = cash_flows,
times = times
)
irr <-
s$par
if (return_percentage == T & scale_to_100 == T) {
stop("Sorry you cannot return a percentage and scale to 100")
}
if (return_percentage) {
irr <-
irr %>% formattable::percent()
}
if (scale_to_100) {
irr <-
irr * 100
}
dateStart <-
min(dates) %>% ymd
dateEnd <-
max(dates) %>% ymd
equityContributions <-
cash_flows[cash_flows > 0] %>%
sum() %>%
formattable::currency(digits = 2)
equityDistributions <-
cash_flows[cash_flows < 0] %>%
sum() %>%
formattable::currency(digits = 2)
multipleCapital <-
(abs(equityDistributions) / equityContributions) %>% digits(digits = 3)
amountProfit <-
-(equityDistributions + equityContributions)
if (return_df)
data <-
tibble(
dateStart,
dateEnd,
equityContributions,
equityDistributions,
pctIRR = irr,
amountProfit,
multipleCapital,
dateTimeCF = Sys.time()
) %>%
mutate(pctIRR = percent(pctIRR, digits = 3))
else {
data <-
irr
}
if (return_message) {
"\nCash Flow Produces a " %>%
paste0(
formattable::percent(irr, digits = 3),
' IRR\nFrom ',
dateStart,
' to ',
dateEnd,
'\n',
'Profit of ',
formattable::currency(amountProfit, digits = 2),
'\nCapital Multiple of ',
multipleCapital,
"\n"
) %>%
cat(fill = T)
}
if (!return_wide) {
data <-
data %>%
gather(metric, value, -c(dateStart, dateEnd, dateTimeCF))
}
return(data)
}
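# Illustrative call added for reference (not from the original source); wrapped
# in if (FALSE) so it never runs on load. The argument defaults above already
# describe a simple ten-year cash-flow stream, so this exercises the function
# as-is and assumes lubridate, formattable, tibble, dplyr and tidyr are
# attached.
if (FALSE) {
  calculate_irr_periods()
}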
calculate_cash_flow_dates <-
function(dates = c(
"2016-09-01",
"2017-08-31",
"2018-08-31",
"2019-08-31",
"2020-08-31",
"2021-08-31",
"2022-08-31",
"2023-08-31"
),
cash_flows = c(
-4151601,
119499.036215643,
257186.036215643,
447646.036215643,
200652.036215643,
510409.036215643,
193.036215643166,
8788626.76409155
),
working_capital = 125000,
remove_cumulative_cols = TRUE,
include_final_day = TRUE,
is_annual_budget = T,
distribution_frequency = NA) {
distribution_frequencies <-
c(NA,
'weekly',
'monthly',
'quarterly',
'yearly',
'sale')
distribution_frequency <-
distribution_frequency %>% str_to_lower() %>%
str_replace_all('yearly', 'annually') %>% str_replace_all('residual', 'sale')
if (!distribution_frequency %>% str_to_lower() %in% distribution_frequencies) {
distribution_frequency <- 'annually'
}
dates <-
dates %>%
ymd()
is_annual_budget <-
as.numeric((dates[2] - dates[1] + 1)) %%
364 == 1
if (distribution_frequency %in% c(NA, 'sale')) {
is_at_sale <-
T
} else {
is_at_sale <-
F
}
distribution_dates_df <-
tibble(date = dates) %>%
mutate(idPeriod = 0:(nrow(.) - 1))
distribution_dates_df <-
distribution_dates_df %>%
mutate(
isDistribution =
case_when(
(
idPeriod > 0 &
is_annual_budget == T &
(!distribution_frequency == 'sale')
) ~ T,
(
is_at_sale == T &
distribution_dates_df$idPeriod == max(distribution_dates_df$idPeriod)
) ~ T,
NA ~ F
))
distribution_dates_df <-
distribution_dates_df %>%
mutate_at('isDistribution',
funs(ifelse(isDistribution %>% is.na(), FALSE, .) %>% as.numeric()))
if (working_capital > 0) {
working_capital <-
-working_capital
}
cf_data <-
tibble(date = dates %>% ymd(),
cashFlow = cash_flows %>% currency(digits = 2)) %>%
mutate(idPeriod = 0:(nrow(.) - 1)) %>%
dplyr::select(idPeriod, everything()) %>%
left_join(distribution_dates_df) %>%
suppressMessages()
cf_data <-
cf_data %>%
mutate(
isSalePeriod = if_else(idPeriod == max(idPeriod), T, F),
workingCapital =
case_when(
cf_data$idPeriod == 0 ~ working_capital,
cf_data$idPeriod == max(cf_data$idPeriod) ~ -working_capital,
TRUE ~ 0
),
totalCF = workingCapital + cashFlow,
capitalContribution = if_else(totalCF < 0, -totalCF, 0),
cashAvailableDistribution = if_else(totalCF > 0, -totalCF, 0),
capitalDistributionCurrent = cashAvailableDistribution * isDistribution,
undistributedCash = capitalDistributionCurrent - cashAvailableDistribution,
cumUndistributedCash = -cumsum(undistributedCash),
distributionAccruedCash = cumUndistributedCash * isDistribution,
capitalDistribution = distributionAccruedCash + capitalDistributionCurrent,
capitalCF = capitalContribution + capitalDistribution
) %>%
dplyr::select(
-c(
undistributedCash,
cumUndistributedCash,
distributionAccruedCash,
capitalDistributionCurrent
)
) %>%
mutate(
daysAccrued = as.numeric((date - dplyr::lag(date))),
daysAccrued = ifelse(daysAccrued %>% is.na(), 0, daysAccrued),
cumDays = cumsum(daysAccrued),
cumContribution = cumsum(capitalContribution),
cumDistribution = cumsum(capitalDistribution),
ratioCapitalReturned = (abs(cumDistribution) / cumContribution) %>% as.numeric,
cumCF = cumsum(cashFlow),
endCash = cumContribution + cumDistribution + cumCF,
beginCash = ifelse(idPeriod == 0, 0, dplyr::lag(endCash))
) %>%
mutate_at(
.vars = c(
"cashFlow",
"workingCapital",
'capitalCF',
"totalCF",
"capitalContribution",
"capitalDistribution",
"beginCash",
"endCash",
"cumContribution",
"cumDistribution",
"cumCF"
),
currency
) %>%
dplyr::select_(
.dots = c(
"idPeriod",
"date",
'daysAccrued',
'cumDays',
"cashFlow",
"isSalePeriod",
"workingCapital",
"totalCF",
"capitalContribution",
"capitalDistribution",
'capitalCF',
'ratioCapitalReturned',
"beginCash",
"endCash",
"cumContribution",
"cumDistribution",
"cumCF"
)
)
if (!(cf_data$totalCF %>% sum) + ((cf_data$capitalContribution %>% sum) + (cf_data$capitalDistribution %>% sum)) == 0) {
stop("Cash does not balance")
}
if (remove_cumulative_cols) {
cf_data <-
cf_data %>%
dplyr::select(-dplyr::matches("cum"))
}
return(cf_data)
}
calculate_cash_flows_returns <-
function(dates = c(
"2016-09-01",
"2017-08-31",
"2018-08-31",
"2019-08-31",
"2020-08-31",
"2021-08-31",
"2022-08-31",
"2023-08-31"
),
cash_flows = c(
-4151601,
119499.036215643,
257186.036215643,
447646.036215643,
200652.036215643,
510409.036215643,
193.036215643166,
8788626.76409155
),
working_capital = 125000,
remove_cumulative_cols = T,
distribution_frequency = 'annually',
date_format = '%Y-%m-%d',
scale_to_100 = F,
return_percentage = F,
return_df = T,
return_message = T) {
cf_data <-
calculate_cash_flow_dates(
dates = dates,
cash_flows = cash_flows,
working_capital = working_capital,
distribution_frequency = distribution_frequency,
remove_cumulative_cols = remove_cumulative_cols
)
cf_return_data <-
calculate_irr_periods(
dates = cf_data$date,
cash_flows = cf_data$capitalCF,
date_format = date_format,
return_percentage = return_percentage,
return_df = return_df,
return_message = return_message
)
    cf_return_data
}
scale_to_pct <- function(x) {
if (x > 1) {
x <-
x / 100
}
return(x)
}
.parse_promote_structure <-
function(promote_structure = "20 over a 12") {
hit_words <-
c(" over a ", " over ", "over", "over ", "over a",
"/", " / ", " until ", " on ")
has_no_promote <-
!promote_structure %>%
str_detect(hit_words %>% paste0(collapse = "|"))
if (has_no_promote) {
stop(
"No promote structure detected\nPromote structure looks like 20 over a 12, 30 over 5x, 12 / 9"
)
}
typeHurdle <-
case_when(promote_structure %>% str_detect("x") ~ "multiple",
promote_structure %>% str_detect("until") ~ "amount",
TRUE ~ "pref")
hurdle_promote <-
promote_structure %>%
str_split(pattern = hit_words %>% paste0(collapse = "|")) %>%
future_map(parse_number) %>%
flatten_dbl()
case_when(
typeHurdle == "multiple" ~ hurdle_promote[[1]] %>% scale_to_pct()
)
if (typeHurdle %in% c('multiple', "amount")) {
hurdle_promote[[1]] <-
hurdle_promote[[1]] %>%
scale_to_pct()
} else if (typeHurdle == "pref") {
hurdle_promote <-
hurdle_promote %>%
map_dbl(scale_to_pct)
}
items <-
c("pctPromote", 'valueHurdle')
promote_df <-
tibble(typeHurdle,
item = items,
value = hurdle_promote) %>%
mutate(
item =
case_when(
typeHurdle == "pref" ~ item %>% str_replace(pattern = "valueHurdle", 'pctPref'),
typeHurdle == "multiple" ~ item %>% str_replace(pattern = "valueHurdle", 'ratioCapitalMultiple'),
TRUE ~ item %>% str_replace(pattern = "valueHurdle", 'amountFee')
)
) %>%
spread(item, value)
return(promote_df)
}
tidy_promote_structure <-
function(promote_structures = c("20 over a 12", '30 / 18', "40 over a 10x"),
return_wide = F) {
parse_promote_structure_safe <-
purrr::possibly(.parse_promote_structure, tibble())
promote_data <-
seq_along(promote_structures) %>%
future_map_dfr(function(x) {
parse_promote_structure_safe(promote_structure = promote_structures[x]) %>%
mutate(tierWaterfall = x) %>%
dplyr::select(tierWaterfall, everything())
})
if (return_wide) {
promote_data <-
promote_data %>%
gather(item, hurdle, -c(tierWaterfall)) %>%
replace_na(list(hurdle = 0)) %>%
arrange(tierWaterfall) %>%
unite(item, item, tierWaterfall, sep = '') %>%
mutate(item = item %>% factor(ordered = T, levels = item)) %>%
spread(item, hurdle)
promote_data <-
promote_data %>%
mutate_at(.vars =
promote_data %>% dplyr::select(dplyr::matches("^pct[A-Z]|ratio[A-Z]")) %>% names,
funs(. %>% as.numeric)) %>%
mutate_at(.vars =
promote_data %>% dplyr::select(dplyr::matches("^pct[A-Z]")) %>% names,
funs(. %>% percent))
} else {
promote_data <-
promote_data %>%
mutate(nameTier = promote_structures) %>%
dplyr::select(tierWaterfall, nameTier, everything())
promote_data <-
promote_data %>%
mutate_at(.vars =
promote_data %>% dplyr::select(dplyr::matches("^pct[A-Z]")) %>% names,
funs(. %>% percent)) %>%
dplyr::select(
tierWaterfall,
nameTier,
typeHurdle,
dplyr::matches("pctPref|ratioCapitalMultiple|amountFee"),
pctPromote,
everything()
)
}
return(promote_data)
}
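# Illustrative call added for reference (not from the original source); wrapped
# in if (FALSE) so it never runs on load. It parses two promote structures into
# a tidy tibble and assumes furrr, readr, stringr, formattable and the
# dplyr/tidyr verbs used above are attached.
if (FALSE) {
  tidy_promote_structure(promote_structures = c("20 over a 12", "30 / 18"))
}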
calculate_days_accrued_pref <-
function(pct_pref = .1,
is_actual_360 = TRUE,
days = 31,
equity_bb = 1700000.00,
pref_accrued_bb = 0) {
if (is_actual_360) {
accrual_days <-
360
} else {
accrual_days <-
365
}
calc_basis <-
(equity_bb + pref_accrued_bb) %>% currency(digits = 2)
accrued_pref <-
((pct_pref / accrual_days) * days * calc_basis)
return(accrued_pref)
}
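# Worked example added for reference (not from the original source); wrapped in
# if (FALSE) so it never runs on load. One 31-day month of a 10% preferred
# return on $1,700,000 of equity, actual/360, is 0.10 / 360 * 31 * 1,700,000,
# roughly $14,639; formattable is assumed to be attached for currency().
if (FALSE) {
  calculate_days_accrued_pref(pct_pref = 0.10, is_actual_360 = TRUE,
                              days = 31, equity_bb = 1700000)
}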
.get_pct_to_promote <-
function(promote_df, tier_waterfall = 2) {
if (tier_waterfall %in% promote_df$tierWaterfall) {
to_promote <-
promote_df %>%
dplyr::filter(tierWaterfall == tier_waterfall) %>%
.$pctPromote
} else {
to_promote <-
0
}
return(to_promote)
}
.waterfall_tier_df <-
function(tiers = 1:5,
return_wide = F) {
waterfall_df <-
tibble(
tierWaterfall = 1,
bbAccruedPref = 0,
accruedPref = 0,
distributionPriorPref = 0,
toAccruedPref = 0,
ebAccruedPref = 0,
bbCapitalMultiple = 0,
capitalMultipleDraw = 0,
toEquity = 0,
distributionPriorMultiple = 0,
toCapitalMultiple = 0,
ebCapitalMultiple = 0,
bbFee = 0,
feeDraw = 0,
distributionPriorFee = 0,
toFee = 0,
ebFee = 0,
toPromote = 0,
toCapital = 0
)
if (tiers %>% length > 1) {
other_tiers <-
tiers[tiers > 1] %>%
future_map_dfr(function(x) {
tibble(
tierWaterfall = rep(x),
bbAccruedPref = 0,
accruedPref = 0,
distributionPriorPref = 0,
toAccruedPref = 0,
ebAccruedPref = 0,
bbCapitalMultiple = 0,
capitalMultipleDraw = 0,
toEquity = 0,
distributionPriorMultiple = 0,
toCapitalMultiple = 0,
ebCapitalMultiple = 0,
bbFee = 0,
feeDraw = 0,
distributionPriorFee = 0,
toFee = 0,
ebFee = 0,
toPromote = 0,
toCapital = 0
)
})
waterfall_df <-
waterfall_df %>%
bind_rows(other_tiers)
}
convert_cols <- waterfall_df %>% select_if(is.numeric) %>% select(-dplyr::matches("idPeriod|tier")) %>% names()
waterfall_df <-
waterfall_df %>%
mutate_at(convert_cols, currency)
if (return_wide) {
waterfall_df <-
waterfall_df %>%
gather(item, amount, -tierWaterfall, convert = F) %>%
arrange(tierWaterfall, item) %>%
unite(item, item, tierWaterfall, sep = '') %>%
suppressWarnings()
col_order <-
waterfall_df$item
waterfall_df <-
waterfall_df %>%
spread(item, amount) %>%
dplyr::select(one_of(col_order))
}
return(waterfall_df)
}
.get_initial_equity_df <-
function(equity_bb = 0,
to_equity = 0,
period = 0,
equity_draw = 0) {
equityBB <-
equity_bb
toEquity <-
to_equity
equityDraw <-
equity_draw
equityEB <-
equityBB + equityDraw + toEquity
equity_df <-
tibble(idPeriod = period,
equityBB,
equityDraw,
toEquity,
equityEB) %>%
mutate_at(
.vars = c('equityBB', 'equityDraw',
'toEquity', 'equityEB'),
.funs = currency
)
return(equity_df)
}
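# Illustrative call added for reference (not from the original source); wrapped
# in if (FALSE) so it never runs on load. It seeds the period-0 equity
# roll-forward used by calculate_cash_flow_waterfall() below and assumes
# tibble, dplyr and formattable are attached.
if (FALSE) {
  .get_initial_equity_df(equity_bb = 0, to_equity = 0, period = 0,
                         equity_draw = 100000)
}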
calculate_cash_flow_waterfall <-
function(dates =
c("2015-03-11", "2015-11-20", "2016-10-15"),
cash_flows = c(-100000, -200000, 698906.76849),
working_capital = 0,
promote_structure = c("20 / 12", "30 / 18"),
distribution_frequency = NA,
is_actual_360 = TRUE,
widen_promote_structure = FALSE,
bind_to_cf = FALSE,
remove_zero_cols = TRUE,
widen_waterfall = FALSE) {
options(scipen = 999999)
if (class(dates) != "Date") {
dates <-
readr::parse_date(dates)
}
cf_data <-
calculate_cash_flow_dates(
dates = dates,
cash_flows = cash_flows,
remove_cumulative_cols = T,
working_capital = working_capital,
distribution_frequency = distribution_frequency
)
waterfall_data <-
cf_data %>%
dplyr::select(idPeriod:daysAccrued,
capitalContribution,
capitalDistribution,
capitalCF) %>%
mutate(cashDistributionAvailable = -pmin(0, capitalCF) %>% currency())
promote_df <-
tidy_promote_structure(promote_structures = promote_structure,
return_wide = widen_promote_structure)
waterfall_periods <-
waterfall_data$idPeriod
tiers <-
promote_df$tierWaterfall
waterfall_df <-
tibble()
for (x in seq_along(waterfall_periods)) {
period <-
waterfall_periods[x]
period_data <-
waterfall_data %>%
dplyr::filter(idPeriod == period)
days <-
period_data$daysAccrued
equityDraw <-
period_data$capitalContribution
periodCAD <-
period_data$cashDistributionAvailable %>% as.numeric() %>%digits(2) %>% currency
if (period == 0) {
equity_df <-
.get_initial_equity_df(
equity_bb = 0,
to_equity = 0,
equity_draw = equityDraw,
period = 0
)
waterfall_df <-
.waterfall_tier_df(tiers = tiers) %>%
mutate(idPeriod = period) %>%
dplyr::select(idPeriod, everything())
}
if (period > 0) {
equityBB <-
equity_df %>% dplyr::filter(idPeriod == period - 1) %>% .$equityEB
for (tier in tiers) {
typeHurdle <-
promote_df %>% dplyr::filter(tierWaterfall == tier) %>% .$typeHurdle
bbAccruedPref <-
max(
0,
waterfall_df %>% dplyr::filter(idPeriod == period - 1 &
tierWaterfall == tier) %>% .$ebAccruedPref
)
bbCapitalMultiple <-
max(
0,
waterfall_df %>% dplyr::filter(idPeriod == period - 1, tierWaterfall == tier) %>% .$ebCapitalMultiple
)
tierPromote <-
promote_df %>% dplyr::filter(tierWaterfall == tier) %>% .$pctPromote
if (tier == max(tiers)) {
is_max_tier <-
T
} else {
is_max_tier <-
F
}
if (typeHurdle == 'pref') {
tierPref <-
promote_df %>% dplyr::filter(tierWaterfall == tier) %>% .$pctPref
accruedPref <-
calculate_days_accrued_pref(
pct_pref = tierPref,
is_actual_360 = is_actual_360,
days = days,
equity_bb = equityBB,
pref_accrued_bb = bbAccruedPref
)
capitalMultipleDraw <-
0
distributionPriorMultiple <-
0
toCapitalMultiple <-
0
ebCapitalMultiple <-
0
if (tier == 1) {
toAccruedPref <-
-min(periodCAD, (accruedPref + bbAccruedPref)) %>% as.numeric() %>%digits(2)
ebAccruedPref <-
(bbAccruedPref + accruedPref + toAccruedPref) %>% as.numeric() %>%digits(2)
cash_to_equity <-
max(0, (periodCAD + toAccruedPref)) %>% as.numeric() %>%digits(2)
toEquity <-
-min(cash_to_equity, (equityBB + equityDraw)) %>% as.numeric() %>%digits(2)
equityEB <-
(equityBB + equityDraw + toEquity) %>% as.numeric() %>%digits(2)
to_promote_tier <-
max(0, (cash_to_equity + toEquity)) %>% as.numeric() %>%digits(2)
distributionPriorPref <-
0
if (is_max_tier) {
toPromote <-
-to_promote_tier * tierPromote
toCapital <-
-to_promote_tier * (1 - tierPromote)
}
if (is_max_tier == F) {
typeNextTier <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$typeHurdle
if (typeNextTier == 'pref') {
nextPref <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$pctPref
bbAccruedPrefNext <-
waterfall_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$bbAccruedPref
prefAccruedNext <-
calculate_days_accrued_pref(
pct_pref = nextPref,
is_actual_360 = is_actual_360,
days = days,
equity_bb = equityBB,
pref_accrued_bb = bbAccruedPrefNext
)
cash_for_promote <-
-min(to_promote_tier, max(
0,
(bbAccruedPref + prefAccruedNext + toAccruedPref) / (1 - tierPromote)
))
}
if (typeNextTier == 'multiple') {
nextMultiple <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$ratioCapitalMultiple
bbCapitalMultipleNext <-
waterfall_df %>% dplyr::filter(idPeriod == period - 1, tierWaterfall == tier + 1) %>% .$ebCapitalMultiple
capitalMultipleDrawNext <-
(equity_df %>% dplyr::filter(idPeriod == period - 1) %>% .$equityDraw) * nextMultiple
cash_for_promote <-
-min(to_promote_tier, max(
0,
(
bbCapitalMultipleNext + capitalMultipleDrawNext + toAccruedPref
) / (1 - tierPromote)
))
}
toCapital <-
cash_for_promote * (1 - tierPromote)
toPromote <-
cash_for_promote * (tierPromote)
}
}
if (tier > 1) {
priorLevelDistribution <-
waterfall_df %>%
dplyr::filter(idPeriod == period &
tierWaterfall < tier) %>% dplyr::select(toAccruedPref,
toPromote,
toCapital,
toCapitalMultiple) %>% gather(item, value) %>%
.$value %>% sum
equityDistribution <-
equity_df %>% dplyr::filter(idPeriod == period) %>% dplyr::select(toEquity) %>% gather(item, value) %>% .$value %>% sum
remainingCash <-
periodCAD + priorLevelDistribution + equityDistribution
distributionPriorMultiple <-
waterfall_df %>% dplyr::filter(idPeriod == period &
tierWaterfall == (tier - 1)) %>% dplyr::select(distributionPriorMultiple) %>% gather(item, value) %>% .$value %>% sum
distributionPriorPref <-
waterfall_df %>% dplyr::filter(idPeriod == period &
tierWaterfall == (tier - 1)) %>% dplyr::select(distributionPriorPref, toAccruedPref) %>% gather(item, value) %>% .$value %>% sum
toAccruedPref <-
-max(0, min(
remainingCash,
(
bbAccruedPref + accruedPref + distributionPriorPref + distributionPriorMultiple
)
))
to_promote_tier <-
remainingCash + toAccruedPref
ebAccruedPref <-
bbAccruedPref + accruedPref + distributionPriorPref + distributionPriorMultiple + toAccruedPref
if (is_max_tier) {
toPromote <-
-to_promote_tier * tierPromote
toCapital <-
-to_promote_tier * (1 - tierPromote)
}
            if (!is_max_tier) {
typeNextTier <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$typeHurdle
if (typeNextTier == 'pref') {
nextPref <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$pctPref
bbAccruedPrefNext <-
waterfall_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$bbAccruedPref
prefAccruedNext <-
calculate_days_accrued_pref(
pct_pref = nextPref,
is_actual_360 = is_actual_360,
days = days,
equity_bb = equityBB,
pref_accrued_bb = bbAccruedPrefNext
)
cash_for_promote <-
-min(to_promote_tier, max(
0,
(bbAccruedPref + prefAccruedNext + toAccruedPref) / (1 - tierPromote)
))
}
if (typeNextTier == 'multiple') {
nextMultiple <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$ratioCapitalMultiple
bbCapitalMultipleNext <-
waterfall_df %>% dplyr::filter(idPeriod == period - 1, tierWaterfall == tier + 1) %>% .$ebCapitalMultiple
capitalMultipleDrawNext <-
(equity_df %>% dplyr::filter(idPeriod == period - 1) %>% .$equityDraw) * nextMultiple
cash_for_promote <-
-min(to_promote_tier, max(
0,
(
bbCapitalMultipleNext + capitalMultipleDrawNext + toAccruedPref + distributionPriorPref + toEquity
) / (1 - tierPromote)
))
}
toCapital <-
cash_for_promote * (1 - tierPromote)
toPromote <-
cash_for_promote * (tierPromote)
}
}
}
if (typeHurdle == 'multiple') {
ratioCapitalMultiple <-
promote_df %>% dplyr::filter(tierWaterfall == tier) %>% .$ratioCapitalMultiple
if (tier == 1) {
accruedPref <-
0
toAccruedPref <-
0
ebAccruedPref <-
0
cash_to_equity <-
              max(0, (periodCAD + toAccruedPref)) %>% as.numeric() %>% digits(2) %>% currency()
            toEquity <-
              -min(cash_to_equity, (equityBB + equityDraw)) %>% as.numeric() %>% digits(2) %>% currency()
            equityEB <-
              (equityBB + equityDraw + toEquity) %>% as.numeric() %>% digits(2) %>% currency()
            cash_to_multiple <-
              max(0, (cash_to_equity + toEquity)) %>% as.numeric() %>% digits(2) %>% currency()
            capitalMultipleDraw <-
              (equity_df %>% dplyr::filter(idPeriod == period - 1) %>% .$equityDraw) * ratioCapitalMultiple
            toCapitalMultiple <-
              -min(cash_to_multiple,
                   (bbCapitalMultiple + capitalMultipleDraw + toEquity)) %>% as.numeric() %>% digits(2) %>%
              currency()
            ebCapitalMultiple <-
              bbCapitalMultiple + capitalMultipleDraw + toEquity + toCapitalMultiple
            to_promote_tier <-
              max(0, (cash_to_multiple + toCapitalMultiple)) %>% as.numeric() %>% digits(2) %>%
              currency()
distributionPriorPref <-
0
distributionPriorMultiple <-
0
if (is_max_tier) {
toPromote <-
-to_promote_tier * tierPromote
toCapital <-
-to_promote_tier * (1 - tierPromote)
}
            if (!is_max_tier) {
nextMultiple <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$ratioCapitalMultiple
bbCapitalMultipleNext <-
waterfall_df %>% dplyr::filter(idPeriod == period - 1, tierWaterfall == tier + 1) %>% .$ebCapitalMultiple
capitalMultipleDrawNext <-
(equity_df %>% dplyr::filter(idPeriod == period - 1) %>% .$equityDraw) * nextMultiple
cash_for_promote <-
-min(to_promote_tier, max(
0,
(
bbCapitalMultipleNext + capitalMultipleDrawNext + toCapitalMultiple + toEquity
) / (1 - tierPromote)
))
toCapital <-
cash_for_promote * (1 - tierPromote)
toPromote <-
cash_for_promote * (tierPromote)
}
}
if (tier > 1) {
priorLevelDistribution <-
waterfall_df %>%
dplyr::filter(idPeriod == period &
tierWaterfall < tier) %>%
dplyr::select(tierWaterfall,
toAccruedPref,
toPromote,
toCapital,
toCapitalMultiple) %>%
gather(item, value, -tierWaterfall) %>% .$value %>% sum %>%
currency
equityDistribution <-
equity_df %>% dplyr::filter(idPeriod == period) %>% dplyr::select(toEquity) %>% gather(item, value) %>% .$value %>% sum %>% currency
remainingCash <-
periodCAD + priorLevelDistribution + equityDistribution
accruedPref <-
0
toAccruedPref <-
0
ebAccruedPref <-
bbAccruedPref + accruedPref + toAccruedPref
            capitalMultipleDraw <-
              ((equity_df %>% dplyr::filter(idPeriod == period - 1) %>% .$equityDraw) * ratioCapitalMultiple) %>%
              currency()
distributionPriorMultiple <-
waterfall_df %>%
dplyr::filter(idPeriod == period &
tierWaterfall <= (tier - 1)) %>%
dplyr::select(toCapitalMultiple, toCapital) %>% gather(item, value) %>%
.$value %>%
sum %>%
currency
distributionPriorPref <-
waterfall_df %>% dplyr::filter(idPeriod == period &
tierWaterfall <= (tier - 1)) %>% dplyr::select(toAccruedPref) %>% gather(item, value) %>% .$value %>% sum %>%
currency
priorEquity <-
waterfall_df %>% dplyr::filter(idPeriod == period &
tierWaterfall == (tier - 1)) %>% dplyr::select(toEquity) %>% gather(item, value) %>% .$value %>% sum %>%
currency()
toCapitalMultiple <-
-max(0,
min(
remainingCash,
(
bbCapitalMultiple + capitalMultipleDraw + distributionPriorMultiple + distributionPriorPref + priorEquity
)
)) %>%
currency()
to_promote_tier <-
remainingCash + toCapitalMultiple
ebCapitalMultiple <-
bbCapitalMultiple + capitalMultipleDraw + distributionPriorMultiple + distributionPriorPref + priorEquity + toCapitalMultiple
if (is_max_tier) {
toPromote <-
-to_promote_tier * tierPromote
toCapital <-
-to_promote_tier * (1 - tierPromote)
}
            if (!is_max_tier) {
nextMultiple <-
promote_df %>%
dplyr::filter(tierWaterfall == tier + 1) %>% .$ratioCapitalMultiple
bbCapitalMultipleNext <-
waterfall_df %>% dplyr::filter(idPeriod == period - 1, tierWaterfall == tier + 1) %>% .$ebCapitalMultiple %>%
currency()
              capitalMultipleDrawNext <-
                ((equity_df %>% dplyr::filter(idPeriod == period - 1) %>% .$equityDraw) * nextMultiple) %>%
                currency()
cash_for_promote <-
-min(to_promote_tier, max(
0,
(
bbCapitalMultipleNext + capitalMultipleDrawNext + toCapitalMultiple + distributionPriorMultiple + distributionPriorPref + equityDistribution
) / (1 - tierPromote)
))
toCapital <-
cash_for_promote * (1 - tierPromote)
toPromote <-
cash_for_promote * (tierPromote)
}
}
}
period_waterfall <-
tibble(
idPeriod = period,
tierWaterfall = tier,
bbAccruedPref,
accruedPref,
distributionPriorPref = distributionPriorPref,
toAccruedPref,
ebAccruedPref,
bbCapitalMultiple,
capitalMultipleDraw,
distributionPriorMultiple,
toEquity,
toCapitalMultiple,
ebCapitalMultiple,
toPromote,
toCapital
) %>%
mutate_if(is.numeric, as.numeric)
waterfall_df <-
waterfall_df %>%
mutate_if(is.numeric, as.numeric) %>%
bind_rows(period_waterfall) %>%
distinct()
period_equity <-
tibble(idPeriod = period,
equityBB,
equityDraw,
toEquity,
equityEB)
equity_df <-
equity_df %>%
bind_rows(period_equity) %>%
distinct()
}
}
}
if ('toEquity' %in% names(waterfall_df)) {
waterfall_df <-
waterfall_df %>% dplyr::select(-toEquity)
}
if (remove_zero_cols) {
waterfall_df <-
waterfall_df %>% dplyr::select(which(colSums(abs(.) != 0) > 0))
}
waterfall_df <-
waterfall_df %>%
mutate_at(waterfall_df %>% dplyr::select(-c(idPeriod, tierWaterfall)) %>% names,
.funs = currency)
equity_df <-
equity_df %>%
mutate_at(equity_df %>% dplyr::select(-c(idPeriod)) %>% names,
.funs = currency)
equityDistributions <-
equity_df$toEquity %>% sum
levelDistributions <-
waterfall_df %>% dplyr::select(dplyr::matches("to")) %>% gather(item, value) %>% .$value %>% sum %>% currency %>% suppressWarnings()
cash_check <-
((cf_data$capitalDistribution %>% sum %>% abs) + (equityDistributions + levelDistributions)
) %>% as.integer()
  if (cash_check != 0) {
    stop("Waterfall distributions do not tie to distributable cash; off by " %>%
           paste0(cash_check %>% currency))
}
if (widen_waterfall) {
col_order <-
waterfall_df %>%
gather(item, value, -c(idPeriod, tierWaterfall)) %>%
arrange(idPeriod, tierWaterfall) %>%
unite(item, item, tierWaterfall, sep = '') %>%
.$item %>%
suppressWarnings()
waterfall_df <-
waterfall_df %>%
gather(item, value, -c(idPeriod, tierWaterfall)) %>%
arrange(idPeriod, tierWaterfall) %>%
unite(item, item, tierWaterfall, sep = '') %>%
spread(item, value) %>%
mutate_at(col_order, currency) %>%
dplyr::select(one_of(c('idPeriod', col_order))) %>%
suppressWarnings()
waterfall_df <-
equity_df %>%
left_join(waterfall_df) %>%
suppressMessages()
if (bind_to_cf) {
waterfall_df <-
cf_data %>%
left_join(waterfall_df) %>%
suppressMessages()
}
}
  if (!widen_waterfall) {
waterfall_df <-
waterfall_df %>%
bind_rows(equity_df %>% mutate(tierWaterfall = 0)) %>%
arrange(idPeriod, tierWaterfall) %>%
mutate(tierWaterfall = tierWaterfall + 1)
numeric_cols <-
waterfall_df %>% dplyr::select(-c(idPeriod, tierWaterfall)) %>% names
waterfall_df <-
waterfall_df %>%
mutate_if(is.numeric, as.numeric) %>%
mutate_at(
.vars = numeric_cols,
.funs = function(x)
if_else(x %>% is.na, 0, x)
)
currency_vars <- waterfall_df %>%
select_if(is.numeric) %>%
select(-c(idPeriod, tierWaterfall)) %>%
names()
waterfall_df <-
waterfall_df %>%
mutate_at(currency_vars, formattable::currency)
}
waterfall_df <-
waterfall_df %>%
left_join(waterfall_data %>% dplyr::select(idPeriod, date)) %>%
dplyr::select(idPeriod, date, everything()) %>%
    suppressMessages()
waterfall_df
}
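# Example call, a sketch built from the defaults shown in the signature above;
# the tier columns returned depend on the promote structure supplied and on
# whether widen_waterfall is set.
# wf <- calculate_cash_flow_waterfall(
#   dates = c("2015-03-11", "2015-11-20", "2016-10-15"),
#   cash_flows = c(-100000, -200000, 698906.76849),
#   promote_structure = c("20 / 12", "30 / 18"),
#   is_actual_360 = TRUE,
#   widen_waterfall = FALSE
# )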
calculate_cash_flow_waterfall_partnership <-
function(dates =
c("2016-09-01",
"2017-08-31"),
cash_flows = c(-1500000,
105000000),
working_capital = 200000,
promote_structure = c("20 over 12", "30 over 20", "50 over 3.5x", "100 over 10x"),
assign_to_environment = TRUE,
general_partner_pct = .05,
gp_promote_share = 1,
unnest_data = FALSE,
exclude_partnership_total = FALSE,
distribution_frequency = NA,
is_actual_360 = TRUE,
widen_promote_structure = FALSE,
bind_to_cf = FALSE,
remove_zero_cols = TRUE,
widen_waterfall = FALSE) {
  op <- options(scipen = 9999999)
  on.exit(options(op), add = TRUE)
pct_gp <-
general_partner_pct %>% scale_to_pct()
pct_lp <-
1 - pct_gp
share_gp_promote <-
gp_promote_share
share_lp_promote_share <-
1 - gp_promote_share
promote_name_df <-
tibble(tierWaterfall = 1,
nameTier = "Return of Equity") %>%
bind_rows(
tidy_promote_structure(promote_structures = promote_structure,
                             return_wide = FALSE) %>%
mutate(tierWaterfall = tierWaterfall + 1) %>%
dplyr::select(tierWaterfall, nameTier)
)
waterfall_data <-
calculate_cash_flow_waterfall(
dates = dates,
cash_flows = cash_flows,
working_capital = working_capital,
is_actual_360 = is_actual_360,
promote_structure = promote_structure,
      bind_to_cf = FALSE,
      widen_promote_structure = FALSE
) %>%
dplyr::select(idPeriod:tierWaterfall,
equityDraw, toEquity, everything())
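  # Split every distribution line between the partners: promote dollars follow
  # gp_promote_share / (1 - gp_promote_share), while all other lines split
  # pro rata by general_partner_pct and its complement.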
entity_waterfall <-
waterfall_data %>%
dplyr::select(idPeriod,
date,
tierWaterfall,
dplyr::matches("equityDraw"),
dplyr::matches("to")) %>%
gather(item, toCF, -c(idPeriod, date, tierWaterfall)) %>%
    dplyr::filter(toCF != 0) %>%
mutate(
toGP = -ifelse(item == 'toPromote', toCF * share_gp_promote, toCF * pct_gp),
toLP = -ifelse(item == 'toPromote',
toCF * share_lp_promote_share,
toCF * pct_lp)
) %>%
mutate_at(.vars = c('toCF', 'toGP', 'toLP'),
.funs = currency) %>%
arrange(idPeriod, tierWaterfall) %>%
left_join(promote_name_df, by = "tierWaterfall") %>%
dplyr::select(idPeriod:tierWaterfall, nameTier, everything())
cash_check <-
((entity_waterfall$toCF %>% sum()) + (entity_waterfall$toGP %>% sum()) + (entity_waterfall$toLP %>% sum())
) %>% as.integer()
  if (cash_check != 0) {
    stop("Partner cash flows do not tie to the waterfall distributions")
}
entity_cf <-
entity_waterfall %>%
dplyr::select(date, toCF) %>%
group_by(date) %>%
summarise(totalCF = sum(as.numeric(toCF))) %>%
ungroup() %>%
mutate(totalCF = totalCF %>% currency())
gp_cf <-
entity_waterfall %>%
dplyr::select(date, toGP) %>%
group_by(date) %>%
summarise(totalCF = sum(toGP)) %>%
ungroup()
lp_cf <-
entity_waterfall %>%
dplyr::select(date, toLP) %>%
group_by(date) %>%
summarise(totalCF = sum(toLP))
"Partnership returns:" %>% message()
total_return_df <-
calculate_irr_periods(
dates = entity_cf$date,
cash_flows = entity_cf$totalCF,
      return_percentage = TRUE,
      return_df = TRUE
) %>%
mutate(typeEntity = 'Partnership') %>%
dplyr::select(typeEntity, everything()) %>%
suppressWarnings()
"General Partner returns:" %>% message()
gp_return_df <-
calculate_irr_periods(
dates = gp_cf$date,
cash_flows = -gp_cf$totalCF,
      return_percentage = TRUE,
      return_df = TRUE
) %>%
mutate(typeEntity = 'General Partner') %>%
dplyr::select(typeEntity, everything())
"Limited Partner returns:" %>% message()
lp_return_df <-
calculate_irr_periods(
dates = lp_cf$date,
cash_flows = -lp_cf$totalCF,
      return_percentage = TRUE,
      return_df = TRUE
) %>%
mutate(typeEntity = 'Limited Partner') %>%
dplyr::select(typeEntity, everything())
partnership_return_summary <-
gp_return_df %>%
bind_rows(list(lp_return_df, total_return_df))
if (partnership_return_summary %>% tibble::has_name("value")) {
partnership_return_summary <-
partnership_return_summary %>%
spread(metric, value)
}
partnership_return_summary <-
partnership_return_summary %>%
mutate_at(.vars = c('pctIRR'),
.funs = percent) %>%
mutate_at(
.vars = c(
"equityContributions",
"equityDistributions",
"amountProfit"
),
.funs = currency
)
if (exclude_partnership_total) {
partnership_return_summary <-
partnership_return_summary %>%
dplyr::filter(!typeEntity %in% 'Partnership')
}
data <-
tibble(
nameTable =
c(
'Cash Flow Waterfall',
'Entity Waterfall',
'Partnership Return Summary'
),
dataTable =
list(
waterfall_data,
entity_waterfall,
partnership_return_summary
)
)
if (assign_to_environment) {
data <-
data %>%
left_join(tibble(
nameTable = c(
"Cash Flow Waterfall",
"Entity Waterfall",
"Partnership Return Summary"
),
idDF = c(
'cashflowWaterfall',
'entityWaterfall',
'partnershipReturns'
)
)) %>%
suppressMessages()
    # Assign each table into the calling session. The assignment has to happen
    # in the main R process (an object written to .GlobalEnv inside a parallel
    # future worker would be lost), so walk the rows sequentially instead of
    # relying on future_map() for its side effects.
    seq_len(nrow(data)) %>%
      purrr::walk(function(x) {
        table_data <-
          data$dataTable[[x]]
        df_name <-
          data$idDF[[x]]
        assign(x = df_name, value = table_data, envir = .GlobalEnv)
      })
data <-
data %>%
dplyr::select(-idDF)
}
if (unnest_data) {
data <-
data %>%
unnest()
}
data
}
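# Example call, a sketch using the defaults shown above. With
# assign_to_environment = TRUE the tables are also assigned into the global
# environment as cashflowWaterfall, entityWaterfall and partnershipReturns.
# partnership <- calculate_cash_flow_waterfall_partnership(
#   dates = c("2016-09-01", "2017-08-31"),
#   cash_flows = c(-1500000, 105000000),
#   working_capital = 200000,
#   promote_structure = c("20 over 12", "30 over 20", "50 over 3.5x", "100 over 10x"),
#   general_partner_pct = .05,
#   gp_promote_share = 1
# )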
# Stored httr "response" object (an HTTP 204 No Content reply from
# http://httpbin.org/status/204), presumably kept here as a test fixture.
structure(list(
url = "http://httpbin.org/status/204", status_code = 204L,
headers = structure(list(
`content-length` = "0", connection = "keep-alive",
server = "meinheld/0.6.1", date = "Sat, 24 Feb 2018 00:22:11 GMT",
`content-type` = "text/html; charset=utf-8", `access-control-allow-origin` = "*",
`access-control-allow-credentials` = "true", `x-powered-by` = "Flask",
`x-processed-time` = "0", via = "1.1 vegur"
), .Names = c(
"content-length",
"connection", "server", "date", "content-type", "access-control-allow-origin",
"access-control-allow-credentials", "x-powered-by", "x-processed-time",
"via"
), class = c("insensitive", "list")), all_headers = list(
structure(list(status = 204L, version = "HTTP/1.1", headers = structure(list(
`content-length` = "0", connection = "keep-alive",
server = "meinheld/0.6.1", date = "Sat, 24 Feb 2018 00:22:11 GMT",
`content-type` = "text/html; charset=utf-8", `access-control-allow-origin` = "*",
`access-control-allow-credentials` = "true", `x-powered-by` = "Flask",
`x-processed-time` = "0", via = "1.1 vegur"
), .Names = c(
"content-length",
"connection", "server", "date", "content-type", "access-control-allow-origin",
"access-control-allow-credentials", "x-powered-by", "x-processed-time",
"via"
), class = c("insensitive", "list"))), .Names = c(
"status",
"version", "headers"
))
), cookies = structure(list(
domain = logical(0),
flag = logical(0), path = logical(0), secure = logical(0),
expiration = structure(numeric(0), class = c(
"POSIXct",
"POSIXt"
)), name = logical(0), value = logical(0)
), .Names = c(
"domain",
"flag", "path", "secure", "expiration", "name", "value"
), row.names = integer(0), class = "data.frame"),
content = charToRaw(""), date = structure(1519431731, class = c(
"POSIXct",
"POSIXt"
), tzone = "GMT"), times = structure(c(
0, 4.5e-05,
4.8e-05, 0.000105, 0.100235, 0.100262
), .Names = c(
"redirect",
"namelookup", "connect", "pretransfer", "starttransfer",
"total"
))
), .Names = c(
"url", "status_code", "headers", "all_headers",
"cookies", "content", "date", "times"
), class = "response")