context("Tournaments")
test_that("test get_tournaments for wrong input errors", {
testthat::skip_if_offline()
testthat::skip_on_cran()
expect_error(squashinformr::get_tournaments(year = "2020", world_tour = TRUE))
expect_error(squashinformr::get_tournaments(year = 2020, world_tour = "TRUE"))
expect_error(squashinformr::get_tournaments(year = 20, world_tour = TRUE))
expect_error(squashinformr::get_tournaments(year = -2020, world_tour = TRUE))
})
test_that("test get_tournaments for proper outputs", {
testthat::skip_if_offline()
testthat::skip_on_cran()
df <- squashinformr::get_tournaments(year = 2021, world_tour = FALSE)
expect_is(df, "data.frame")
expect_is(df, "tbl")
expect_equal(length(unique(na.omit(df$category))), 2)
expect_is(sample(df$date, 1), "Date")
expect_equal(year(sample(df$date, 1)), 2021)
df <- squashinformr::get_tournaments(year = 2020, world_tour = TRUE)
expect_is(df, "data.frame")
expect_is(df, "tbl")
expect_equal(length(unique(na.omit(df$category))), 2)
expect_is(sample(df$date, 1), "Date")
expect_equal(year(sample(df$date, 1)), 2020)
})
|
test_that("get_bearer", {
skip_on_cran()
ORI_BEARER <- Sys.getenv("TWITTER_BEARER")
Sys.setenv("TWITTER_BEARER" = "")
expect_error(get_bearer())
Sys.setenv("TWITTER_BEARER" = "ABC")
expect_error(get_bearer(), NA)
expect_equal(get_bearer(), "ABC")
Sys.setenv("TWITTER_BEARER" = ORI_BEARER)
})
with_mock_api({
test_that("integration with get_all_tweets", {
skip_if(!dir.exists("api.twitter.com"))
emptydir <- academictwitteR:::.gen_random_dir()
ORI_BEARER <- Sys.getenv("TWITTER_BEARER")
Sys.setenv("TWITTER_BEARER" = "")
# the get_all_tweets() calls below use placeholder arguments; the original calls were truncated in the source
expect_error(w1 <- capture_warnings(get_all_tweets(query = "#placeholder", data_path = emptydir, n = 10)))
Sys.setenv("TWITTER_BEARER" = "ABC")
expect_error(w1 <- capture_warnings(get_all_tweets(query = "#placeholder", data_path = emptydir, n = 10)))
unlink(emptydir)
Sys.setenv("TWITTER_BEARER" = ORI_BEARER)
})
})
|
library(dash)
app <- dash_app("test app")
app %>% set_layout(div(
h1('Hello Dash'),
"Dash: A web application framework for R.",
br(),
"Time: ", as.character(Sys.time()),
dccGraph(
figure=list(
data=list(
list(
x=list(1, 2, 3),
y=list(4, 1, 2),
type='bar',
name='SF'
),
list(
x=list(1, 2, 3),
y=list(2, 4, 5),
type='bar',
name='Montr\U{00E9}al'
)
),
layout = list(title='Dash Data Visualization')
)
))
)
app %>% run_app()
|
smd_stat <- function(x, z, xf, zf) {
  # Standardized mean differences taken from column 4 of the table returned by DiPs::check()
  smd <- DiPs::check(xf, x, zf, z)[, 4]
  smd
}
|
library(gridGraphics)
require(grDevices)
matplot1 <- function() {
matplot((-4:5)^2, main = "Quadratic")
}
sines <- outer(1:20, 1:4, function(x, y) sin(x / 20 * pi * y))
matplot2 <- function() {
matplot(sines, pch = 1:4, type = "o", col = rainbow(ncol(sines)))
}
matplot3 <- function() {
matplot(sines, type = "b", pch = 21:23, col = 2:5, bg = 2:5,
main = "matplot(...., pch = 21:23, bg = 2:5)")
}
x <- 0:50/50
matplot4 <- function() {
matplot(x, outer(x, 1:8, function(x, k) sin(k*pi * x)),
ylim = c(-2,2), type = "plobcsSh",
main= "matplot(,type = \"plobcsSh\" )")
}
matplot5 <- function() {
matplot(x, outer(x, 1:4, function(x, k) sin(k*pi * x)),
pch = letters[1:4], type = c("b","p","o"))
}
matplot6 <- function() {
lends <- c("round","butt","square")
matplot(matrix(1:12, 4), type="c", lty=1, lwd=10, lend=lends)
text(cbind(2.5, 2*c(1,3,5)-.4), lends, col= 1:3, cex = 1.5)
}
table(iris$Species)
iS <- iris$Species == "setosa"
iV <- iris$Species == "versicolor"
matplot7 <- function() {
par(bg = "bisque")
matplot(c(1, 8), c(0, 4.5), type = "n", xlab = "Length", ylab = "Width",
main = "Petal and Sepal Dimensions in Iris Blossoms")
matpoints(iris[iS,c(1,3)], iris[iS,c(2,4)], pch = "sS", col = c(2,4))
matpoints(iris[iV,c(1,3)], iris[iV,c(2,4)], pch = "vV", col = c(2,4))
legend(1, 4, c(" Setosa Petals", " Setosa Sepals",
"Versicolor Petals", "Versicolor Sepals"),
pch = "sSvV", col = rep(c(2,4), 2))
}
nam.var <- colnames(iris)[-5]
nam.spec <- as.character(iris[1+50*0:2, "Species"])
iris.S <- array(NA, dim = c(50,4,3),
dimnames = list(NULL, nam.var, nam.spec))
for(i in 1:3) iris.S[,,i] <- data.matrix(iris[1:50+50*(i-1), -5])
matplot8 <- function() {
matplot(iris.S[, "Petal.Length",], iris.S[, "Petal.Width",], pch = "SCV",
col = rainbow(3, start = 0.8, end = 0.1),
sub = paste(c("S", "C", "V"), dimnames(iris.S)[[3]],
sep = "=", collapse= ", "),
main = "Fisher's Iris Data")
}
plotdiff(expression(matplot1()), "matplot-1")
plotdiff(expression(matplot2()), "matplot-2")
plotdiff(expression(matplot3()), "matplot-3")
plotdiff(expression(matplot4()), "matplot-4")
plotdiff(expression(matplot5()), "matplot-5")
plotdiff(expression(matplot6()), "matplot-6")
plotdiff(expression(matplot7()), "matplot-7")
plotdiff(expression(matplot8()), "matplot-8")
plotdiffResult()
|
kfweOrd<-function(p,k=1,alpha=.01,ord=NULL,alpha.prime=alpha,J=qnbinom(alpha,k,alpha.prime),disp=TRUE,GD=FALSE){
if(!is.null(ord)){
o <- order(ord,decreasing=T)
}
else{o <- 1:length(p)}
ps <- p[o]
if(GD) alpha1 <- k*alpha.prime/(J+k)
else alpha1 <- alpha.prime
u<-cumsum(ps>alpha1)
if(sum(u<=J)>0){
h <- rep(0,length(p))
h[1:max(which(u<=J))] <- 1
h[ps>alpha1] <- 0
if(sum(h)<k) h[(h==0)&(ps<=alpha1)][1:min(sum((h==0)&(ps<=alpha1)),k-1-sum(h))]=1
h[o] <- h
}
else{h <- rep(0,length(p))}
if(disp==T) cat(paste("Ordered k-FWER procedure\n ",length(p)," tests, k=", k, ", alpha=",alpha,", individual alpha threshold=",round(alpha1,digits=7),"\n ",J," jumps allowed","\n ",sum(h)," rejections\n\n",sep=""))
return(h==1)}
kfweGR<-function(p,k=1,alpha=.01,disp=TRUE,SD=TRUE,const=10,alpha.prime=getAlpha(k=k,s=length(p),alpha=alpha,const=const)) {
if(is.null(alpha.prime)) alpha.prime=getAlpha(k=k,s=length(p),alpha=alpha,const=50)
rej <- rep(0,length(p))
rej[p<=alpha.prime] <- 1
n.rej <- sum(p<=alpha.prime)
sd=((n.rej>=k)&(SD))
while (sd){
alpha.prime=getAlpha(k=k,s=length(p)-n.rej+k-1,alpha=alpha,const=const)
rej[p<=alpha.prime] <- 1
sd=n.rej<sum(rej)
n.rej=sum(p<=alpha.prime)
}
if(disp) cat(paste("Guo and Romano k-FWER ",switch(SD,"Step Down ",""),"procedure\n ",length(p)," tests, k=", k, ", alpha=",alpha, "\n ",round(alpha.prime,digits=7)," individual alpha threshold\n ",n.rej," rejections\n\n",sep=""))
return(rej==1)
}
kfweLR <- function(p,k=1,alpha=0.01,disp=TRUE) {
s <- length(p)
sdconst <- rep(1,s)
sdconst[1:min(k,s)] <- k*alpha/s
if(s>k) sdconst[(k+1):s] <- k*alpha/(s+k-((k+1):s))
ps <- sort(p)
u <- ps<sdconst
res <- 0
if(any(u)) {
w <- min(which(!u))-1
res <- ps[w]}
p[which(p>res)] <- 1
p[p<=alpha] <- 0
h=(!p)
if(disp) cat(paste("Lehmann e Romano k-FWER Step Down procedure\n ",length(p)," tests, k=", k, ", alpha=",alpha, "\n ",sum(h)," rejections\n\n",sep=""))
return(h==1)
}
getAlpha <- function(s,k=1,alpha=.01,const=10){
start<-1E-8
stop<-1
delta<-1
while(delta>1E-8){
alphas<-start+((0:const)/(const))*(stop-start)
temp<-round(pbinom(k-1,s,alphas,lower.tail=F),digits=7)-alpha
temp2<-max(which(temp<=0))
alpha.prime<-alphas[temp2]
start<-alpha.prime
stop<-alphas[temp2+1]
delta<-abs(temp[temp2])
}
return(alpha.prime)
}
|
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
text <- readLines(file.choose())
text
docs = Corpus(VectorSource(text))
docs
toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, toSpace, "/")
docs <- tm_map(docs, toSpace, "@")
docs <- tm_map(docs, toSpace, "\\|")
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("english"))
docs <- tm_map(docs, removeWords, c("blabla1", "blabla2"))
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, stripWhitespace)
docs <- tm_map(docs, stemDocument)
dtm <- TermDocumentMatrix(docs)
m <- as.matrix(dtm)
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
set.seed(1234)
wordcloud(words = d$word, freq = d$freq, min.freq = 1,
max.words=200, random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "Dark2"))
findFreqTerms(dtm, lowfreq = 4)
findAssocs(dtm, terms = "freedom", corlimit = 0.3)
head(d, 10)
barplot(d[1:10,]$freq, las = 2, names.arg = d[1:10,]$word,
col ="lightblue", main ="Most frequent words",
ylab = "Word frequencies")
|
library(ismev)
data(fremantle)
fm.gev <- gev.fit(fremantle[,2])
gev.diag(fm.gev)
gev.profxi(fm.gev, -0.5, 0.1)
gev.prof(fm.gev, 100, 1.8, 2.2)
covar <- cbind((fremantle[,1] - 1943)/46, fremantle[,3])
gev.fit(fremantle[,2], ydat = covar, mul = 1)
gev.fit(fremantle[,2], ydat = covar, mul = 1, sigl = 1, siglink = exp)
fm.gev2 <- gev.fit(fremantle[,2], ydat = covar, mul = c(1,2))
gev.diag(fm.gev2)
|
geojson_validate <- function(x, inform = FALSE, error = FALSE,
greedy = FALSE) {
UseMethod("geojson_validate")
}
geojson_validate.default <- function(x, inform = FALSE, error = FALSE,
greedy = FALSE) {
stop("no geojson_validate method for ", class(x), call. = FALSE)
}
geojson_validate.character <- function(x, inform = FALSE, error = FALSE,
greedy = FALSE) {
validate_geojson(json = x, verbose = inform, greedy = greedy,
error = error)
}
geojson_validate.location <- function(x, inform = FALSE, error = FALSE,
greedy = FALSE) {
on.exit(close_conns())
res <- switch(
attr(x, "type"),
file = paste0(readLines(x), collapse = ""),
url = jsonlite::minify(c_get(x)$parse("UTF-8"))
)
validate_geojson(json = res, verbose = inform, greedy = greedy,
error = error)
}
geojson_validate.geojson <- function(x, inform = FALSE, error = FALSE,
greedy = FALSE) {
validate_geojson(json = unclass(x), verbose = inform, greedy = greedy,
error = error)
}
geojson_validate.json <- function(x, inform = FALSE, error = FALSE,
greedy = FALSE) {
validate_geojson(json = x, verbose = inform, greedy = greedy,
error = error)
}
|
stat_contour_fill <- function(mapping = NULL, data = NULL,
geom = "polygon", position = "identity",
...,
breaks = MakeBreaks(),
bins = NULL,
binwidth = NULL,
global.breaks = TRUE,
kriging = FALSE,
na.fill = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
.check_wrap_param(list(...))
ggplot2::layer(
data = data,
mapping = mapping,
stat = StatContourFill,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
na.rm = FALSE,
na.fill = na.fill,
breaks = breaks,
bins = bins,
binwidth = binwidth,
global.breaks = global.breaks,
kriging = kriging,
...
)
)
}
StatContourFill <- ggplot2::ggproto("StatContourFill", ggplot2::Stat,
required_aes = c("x", "y", "z"),
default_aes = ggplot2::aes(fill = ..level_mid.., order = ..level..),
setup_params = function(data, params) {
if (is.null(params$global.breaks) || isTRUE(params$global.breaks)) {
params$breaks <- setup_breaks(data,
breaks = params$breaks,
bins = params$bins,
binwidth = params$binwidth)
}
return(params)
},
compute_layer = function(self, data, params, layout) {
ggplot2:::check_required_aesthetics(
self$required_aes,
c(names(data), names(params)),
ggplot2:::snake_class(self)
)
params <- params[intersect(names(params), self$parameters())]
args <- c(list(data = quote(data), scales = quote(scales)), params)
data <- plyr::ddply(data, "PANEL", function(data) {
scales <- layout$get_scales(data$PANEL[1])
tryCatch(do.call(self$compute_panel, args), error = function(e) {
warningf("Computation failed in `%s()`:\n %s",
ggplot2:::snake_class(self), e$message,
call. = FALSE)
data.frame()
})
})
if (nrow(data) > 0) {
data$level_d <- data$level
class(data$level_d) <- c("metR_discretised", class(data$level_d))
}
data
},
compute_group = function(data, scales, bins = NULL, binwidth = NULL,
breaks = scales::fullseq, complete = TRUE,
na.rm = FALSE, xwrap = NULL,
ywrap = NULL, na.fill = FALSE, global.breaks = TRUE,
proj = NULL, kriging = FALSE) {
data.table::setDT(data)
if (isFALSE(global.breaks)) {
breaks <- setup_breaks(data,
breaks = breaks,
bins = bins,
binwidth = binwidth)
}
data <- data[!(is.na(y) | is.na(x)), ]
if (!isFALSE(na.fill)) {
complete.grid <- with(data, .is.regular_grid(x, y))
if (complete.grid == FALSE) {
if (complete == FALSE) {
warningf("The data must be a complete regular grid.", call. = FALSE)
return(data.frame())
} else {
data <- .complete(data, x, y)
}
}
data <- .impute_data(data, na.fill)
} else {
data <- data[!is.na(z), ]
}
if (kriging) {
check_packages("kriging", "kriging")
pixels <- 40
data <- try(with(data, setNames(kriging::kriging(x, y, z, pixels = pixels)$map,
c("x", "y", "z"))), silent = TRUE)
if (inherits(data, "try-error")) {
warningf("kriging failed. Perhaps the number of points is too small.")
return(data.frame())
}
data.table::setDT(data)
}
if (!is.null(xwrap)) {
data <- suppressWarnings(WrapCircular(data, "x", xwrap))
}
if (!is.null(ywrap)) {
data <- suppressWarnings(WrapCircular(data, "y", ywrap))
}
cont <- data.table::setDT(.contour_bands(data, breaks, complete = complete))
cont[, int.level := (level_high + level_low)/2]
cont[, level_mid := int.level]
cont[, nlevel := level_high/max(level_high)]
if (!is.null(proj)) {
if (is.function(proj)) {
cont <- proj(cont)
} else {
if (is.character(proj)) {
if (!requireNamespace("proj4", quietly = TRUE)) {
stopf("Projection requires the proj4 package. Install it with 'install.packages(\"proj4\")'.")
}
cont <- data.table::copy(cont)[, c("x", "y") := proj4::project(list(x, y), proj,
inverse = TRUE)][]
}
}
}
cont
}
)
.contour_bands <- function(data, breaks, complete = FALSE) {
band <- level_high <- level_low <- NULL
x_pos <- as.integer(factor(data$x, levels = sort(unique(data$x))))
y_pos <- as.integer(factor(data$y, levels = sort(unique(data$y))))
nrow <- max(y_pos)
ncol <- max(x_pos)
z <- matrix(NA_real_, nrow = nrow, ncol = ncol)
z[cbind(y_pos, x_pos)] <- data$z
cl <- isoband::isobands(x = sort(unique(data$x)),
y = sort(unique(data$y)),
z = z,
levels_low = breaks[-length(breaks)],
levels_high = breaks[-1])
if (length(cl) == 0) {
warningf("Not possible to generate contour data.", call. = FALSE)
return(data.frame())
}
bands <- pretty_isoband_levels(names(cl))
cont <- data.table::rbindlist(lapply(cl, data.table::as.data.table), idcol = "band")
cont[, c("level_low", "level_high") := data.table::tstrsplit(band, ":")]
cont[, `:=`(level_low = as.numeric(level_low), level_high = as.numeric(level_high))]
cont[, level := ordered(pretty_isoband_levels(band), bands)]
cont[, piece := as.numeric(interaction(band))]
cont[, group := factor(paste(data$group[1], sprintf("%03d", piece), sep = "-"))]
cont[, .(level = level, level_low, level_high, x, y, piece, group, subgroup = id)]
}
pretty_isoband_levels <- function(isoband_levels, dig.lab = 3) {
interval_low <- gsub(":.*$", "", isoband_levels)
interval_high <- gsub("^[^:]*:", "", isoband_levels)
label_low <- format(as.numeric(interval_low), digits = dig.lab, trim = TRUE)
label_high <- format(as.numeric(interval_high), digits = dig.lab, trim = TRUE)
sprintf("(%s, %s]", label_low, label_high)
}
|
CompTTP <- function(patdata, cwm=matrix(c(0, 0.1, 0.25, 0.5, 1, 10,
0, 0.2, 0.5, 1, 2, 10,
0, 0.2, 0.4, 1, NA, NA), byrow=TRUE, nrow=3)) {
toxdata <- matrix(as.numeric(as.matrix(patdata[ , c("Grade 0", "Grade 1", "Grade 2", "Grade 3", "Grade 4", "Grade 5")])),nrow=dim(cwm)[1])
epmat <- toxdata * cwm
colmax <- apply(epmat, 1, max, na.rm=TRUE)
ttp <- sqrt( t(colmax) %*% colmax )
if (max(colmax) > 1 ) dlt = 1
else dlt = 0
uplim <- apply(cwm, 1, max, na.rm=TRUE)
maxttp <- sqrt( t(uplim) %*% uplim )
nttp <- ttp / (maxttp + 0.0001)
result <- list(ttp, nttp, dlt)
names(result) <- c("TTP", "nTTP", "DLT")
return(result)
}
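# Illustrative sketch with hypothetical patient data: one row per toxicity
# category (matching the three rows of the default weight matrix cwm), with
# 0/1 indicators of the worst grade observed in each category.
patdata <- data.frame("Grade 0" = c(0, 1, 0), "Grade 1" = c(0, 0, 1),
                      "Grade 2" = c(1, 0, 0), "Grade 3" = c(0, 0, 0),
                      "Grade 4" = c(0, 0, 0), "Grade 5" = c(0, 0, 0),
                      check.names = FALSE)
CompTTP(patdata)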
|
expected <- eval(parse(text="12"));
test(id=0, code={
argv <- eval(parse(text="list(FALSE, 5L, 12)"));
.Internal(`pmax`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
|
rm(list=ls(all=TRUE))
graphics.off()
closeAllConnections()
library(lubridate)
met_path <- file.path('/Volumes/data/Model_Data/sites/PA-Bar/NGEETropics_source/')
met_drivers <- read.csv(file = file.path(met_path,'BCI_met_drivers_2003_2016.csv'), header=T)
met_output_path <- file.path('/Volumes/data/Model_Data/sites/PA-Bar/MAAT_drivers/')
site_name <- "PA-Bar"
pressure <- TRUE
wind <- TRUE
names(met_drivers)
head(met_drivers)
MAAT_Time <- lubridate::mdy_hm(as.character(met_drivers$Date_UTC_start), tz="UTC")
date_range <- unique(lubridate::year(MAAT_Time))
head(MAAT_Time)
met_yr_subset <- c(2015,2016)
met_years <- lubridate::year(MAAT_Time)
met_drivers$Time <- MAAT_Time
met_drivers$PAR_umols_m2_s <- met_drivers$SR_W_m2.*2.114
met_drivers$Tair_degC <- met_drivers$Temp_o_C.
met_drivers$RH_perc <- met_drivers$RH_.
met_drivers$VPD_kPa <- PEcAn.data.atmosphere::get.vpd(met_drivers$RH_perc, met_drivers$Tair_degC) / 10
met_drivers$Prec_mm <- met_drivers$RA_mm_d/24
if (pressure){
met_drivers$Press_Pa <- udunits2::ud.convert(met_drivers$BP_hPa, "hPa", "Pa")  # barometric pressure column (BP_hPa) is in hectopascals
} else {
met_drivers$Press_Pa <- rep(101325,length(met_drivers$Time))
}
if (wind) {
met_drivers$Windspeed_m_s <- met_drivers$WS_m_s
}
if (met_yr_subset[2]-met_yr_subset[1] != 0 ) {
met_driver_subset <- subset(met_drivers, met_years %in% seq(met_yr_subset[1], met_yr_subset[2],1))
} else {
met_driver_subset <- subset(met_drivers, met_years == met_yr_subset[1])
}
met_years <- lubridate::year(met_driver_subset$Time)
if (wind) {
output_met_driver <- cbind.data.frame(Time = met_driver_subset$Time,
Year = met_years,
DOY = lubridate::yday(met_driver_subset$Time),
Hour = strftime(met_driver_subset$Time,"%H:%M:%S", tz="UTC"),
Tair_degC = met_driver_subset$Tair_degC,
Prec_mm = met_driver_subset$Prec_mm,
Atm_press_Pa = met_driver_subset$Press_Pa,
RH_perc = met_driver_subset$RH_perc,
VPD_kPa = met_driver_subset$VPD_kPa,
PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s,
Windspeed_m_s = met_driver_subset$Windspeed_m_s
)
leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'",
atm_press="'Atm_press_Pa'",wind="'Windspeed_m_s'")))
} else {
output_met_driver <- cbind.data.frame(Time = met_driver_subset$Time,
Year = met_years,
DOY = lubridate::yday(met_driver_subset$Time),
Hour = strftime(met_driver_subset$Time,"%H:%M:%S", tz="UTC"),
Tair_degC = met_driver_subset$Tair_degC,
Prec_mm = met_driver_subset$Prec_mm,
Atm_press_Pa = met_driver_subset$Press_Pa,
RH_perc = met_driver_subset$RH_perc,
VPD_kPa = met_driver_subset$VPD_kPa,
PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s
)
leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'",
par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'",
atm_press="'Atm_press_Pa'")))
}
leaf_user_met_xml <- PEcAn.settings::listToXml(leaf_user_met_list, "met_data_translator")
write.csv(output_met_driver,
file = file.path(met_output_path,paste0(site_name,"_NGEETropics_",met_yr_subset[1],"_",
met_yr_subset[2],"_UTC.csv")),row.names = F)
PREFIX_XML <- "<?xml version=\"1.0\"?>\n"
XML::saveXML(leaf_user_met_xml,
file = file.path(met_output_path, "leaf_user_met.xml"),
indent = TRUE,
prefix = PREFIX_XML)
|
sim.spatialDS <- function(N=1000, beta = 1, sigma=1, keep.all=FALSE,
B=3, model=c("logit", "halfnorm"), lambda = B/3, useHabitat, show.plot=TRUE){
N <- round(N[1])
stopifNegative(sigma, allowZero=FALSE)
stopifNegative(B, allowZero=FALSE)
model <- match.arg(model)
delta <- (2*B-0)/30
grx <- seq(delta/2, 2*B - delta/2, delta)
gr <- expand.grid(grx, grx, KEEP.OUT.ATTRS = FALSE)
if(missing(useHabitat)) {
V <- exp(-e2dist(gr,gr)/lambda)
x <- t(chol(V))%*%rnorm(900)
} else {
x <- useHabitat$Habitat
if(is.null(x) ||
is.null(dim(x)) ||
dim(x)[2] != 1 ||
dim(x)[1] != 900)
stop("useHabitat is not valid output from sim.spatialDS.")
}
probs <- exp(beta*x)/sum(exp(beta*x))
pixel.id <- sample(1:900, N, replace=TRUE, prob=probs)
u1 <- gr[pixel.id,1]
u2 <- gr[pixel.id,2]
d <- sqrt((u1 - B)^2 + (u2-B)^2)
N.real <- sum(d <= B)
if(model=="halfnorm")
p <- exp(-d*d/(2*sigma*sigma))
if(model=="logit")
p<- 2*plogis( -d*d/(2*sigma*sigma) )
y <- rbinom(N, 1, p)
if(show.plot) {
op <- par(mar=c(3,3,3,6)) ; on.exit(par(op))
tryPlot <- try( {
image(rasterFromXYZ(cbind(as.matrix(gr),x)), col=topo.colors(10), asp=1, bty='n')
rect(0, 0, 2*B, 2*B)
points(B, B, pch="+", cex=3)
image_scale(x, col=topo.colors(10))
title("Extremely cool figure")
points(u1, u2, pch = 16, col = c("black", "red")[y+1])
}, silent = TRUE)
if(inherits(tryPlot, "try-error"))
tryPlotError(tryPlot)
}
if(!keep.all){
u1 <- u1[y==1]
u2 <- u2[y==1]
d <- d[y==1]
pixel.id <- pixel.id[y==1]
}
return(list(model=model, N=N, beta=beta, B=B, u1=u1, u2=u2, d=d, pixel.id=pixel.id,
y=y, N.real=N.real, Habitat=x, grid=gr))
}
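# Usage sketch, assuming the AHMbook helpers this function relies on
# (e2dist(), stopifNegative(), image_scale(), tryPlotError()) are available,
# as they are when the AHMbook package is loaded:
str(sim.spatialDS(N = 500, beta = 1, sigma = 1.5, B = 3,
                  model = "halfnorm", show.plot = FALSE))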
|
order.vine.level <- function(tree,help.env) {
l.search<-get("lambda.search",help.env)
id<-get("id",help.env)
RVM<-get("RVM",help.env)
len <- length(tree)
base <- get("base",help.env)
order.stat <- get("order.stat",help.env)
pairs.old <- get("pairs.fit",help.env)
q <- get("q",help.env)
base <- get("base",help.env)
val <- unique(c(pairs.old))
D.struc<-get("D.struc",help.env)
count <- 1
pairs.new <- matrix(NA,1,2)
for(i in 1:length(val)) {
val.temp <- c()
for(j in 1:dim(pairs.old)[1]) {
if(any(pairs.old[j,]%in%val[i])) val.temp <- c(val.temp,j)
}
if(length(val.temp)==2) {
if(count==1) {
pairs.new[count,] <- val.temp
count <- count+1
}
else {
pairs.new <- rbind(pairs.new,val.temp)
count <- count+1
}
}
if(length(val.temp)>2) {
for(j in 1:(length(val.temp)-1)) {
for(k in (j+1):length(val.temp)) {
if(count==1) {
pairs.new[count,] <- c(val.temp[j],val.temp[k])
count <- count+1
}
else {
pairs.new <- rbind(pairs.new,c(val.temp[j],val.temp[k]))
count <- count+1
}
}
}
}
}
no.pairs <- dim(pairs.new)[1]
mcoptions <- list(preschedule=FALSE)
if(!is.null(RVM)) {
cops<-get("cops",help.env)[[get("level",help.env)]]
h.help <- foreach(i=1:no.pairs,.combine=rbind,.multicombine=TRUE,.options.multicore=mcoptions) %dopar% {
UU <- c()
help.j1 <- c(tree[[pairs.new[i,1]]]$j1,tree[[pairs.new[i,1]]]$j2,tree[[pairs.new[i,1]]]$D)
help.j2 <- c(tree[[pairs.new[i,2]]]$j1,tree[[pairs.new[i,2]]]$j2,tree[[pairs.new[i,2]]]$D)
j1 <- help.j1[!help.j1%in%help.j2]
j2 <- help.j2[!help.j2%in%help.j1]
D.help <- c(tree[[pairs.new[i,1]]]$D,tree[[pairs.new[i,2]]]$D)
D <- sort(unique(c(help.j1[help.j1%in%help.j2],D.help)))
c(j1,j2,D)
}
ind<-rep(NA,dim(cops)[1])
ind2<-c()
h.help2<-cbind(h.help[,2],h.help[,1],h.help[,-c(1,2)])
for(k in 1:dim(h.help)[1]) {
for(ll in 1:dim(cops)[1]) {
if(identical(h.help[k,],cops[ll,])) ind[ll]<-k
if(identical(h.help2[k,],cops[ll,])) {
ind[ll]<-k
ind2<-c(ind2,k)
}
}
}
if(!is.null(ind2)) pairs.new[ind2,]<-c(pairs.new[ind2,2],pairs.new[ind2,1])
pairs.new<-pairs.new[ind,]
}
no.pairs <- dim(pairs.new)[1]
h1 <- foreach(i=1:no.pairs,.combine=c,.multicombine=TRUE,.options.multicore=mcoptions) %do%{
UU <- c()
help.j1 <- c(tree[[pairs.new[i,1]]]$j1,tree[[pairs.new[i,1]]]$j2,tree[[pairs.new[i,1]]]$D)
help.j2 <- c(tree[[pairs.new[i,2]]]$j1,tree[[pairs.new[i,2]]]$j2,tree[[pairs.new[i,2]]]$D)
j1 <- help.j1[!help.j1%in%help.j2]
j2 <- help.j2[!help.j2%in%help.j1]
D.help <- c(tree[[pairs.new[i,1]]]$D,tree[[pairs.new[i,2]]]$D)
D.index <- sort(unique(c(help.j1[help.j1%in%help.j2],D.help)))
len.D<-length(D.index)
p<-2+len.D
index <- list(c(j1,D),c(j2,D))
if(p>(2+D.struc)) p<-2+D.struc
for(j in 1:2)
{
indexi <- c(tree[[pairs.new[i,j]]]$j1,tree[[pairs.new[i,j]]]$j2,tree[[pairs.new[i,j]]]$D)
index.ancestor <- c()
for (ml in 1:length(tree))
{
index.ancestor <- c(index.ancestor, all(indexi==(c(tree[[ml]]$j1,tree[[ml]]$j2,tree[[ml]]$D))))
}
ancestor.knot <- tree[index.ancestor][[1]]
if(ancestor.knot$cond&dim(ancestor.knot$U)[2]==3) diff.help<-c(TRUE,TRUE,FALSE)
if(ancestor.knot$cond&dim(ancestor.knot$U)[2]==4) diff.help<-c(TRUE,TRUE,FALSE,FALSE)
if(!ancestor.knot$cond) diff.help<-c(TRUE,TRUE)
if(j==1) {
diff.help[!(c(tree[[pairs.new[i,1]]]$j1,tree[[pairs.new[i,1]]]$j2)%in%j1)]<-FALSE
}
if(j==2) {
diff.help[!(c(tree[[pairs.new[i,2]]]$j1,tree[[pairs.new[i,2]]]$j2)%in%j2)]<-FALSE
}
if(j==1&!ancestor.knot$cond) UU <-cbind(UU,hierarchbs.cond.cop(data=ancestor.knot$U,coef=ancestor.knot$v,intp=diff.help,d=get("d",help.env),D=get("D",help.env),p=2,q=q))
if(j==1&ancestor.knot$cond) UU <-cbind(UU,hierarchbs.cond.cop(data=ancestor.knot$U,coef=ancestor.knot$v,intp=diff.help,d=get("d2",help.env),D=get("D3",help.env),p=dim(ancestor.knot$U)[2],q=q))
if(j==2&!ancestor.knot$cond) UU <-cbind(UU,hierarchbs.cond.cop(data=ancestor.knot$U,coef=ancestor.knot$v,intp=diff.help,d=get("d",help.env),D=get("D",help.env),p=2,q=q))
if(j==2&ancestor.knot$cond) UU <-cbind(UU,hierarchbs.cond.cop(data=ancestor.knot$U,coef=ancestor.knot$v,intp=diff.help,d=get("d2",help.env),D=get("D3",help.env),p=dim(ancestor.knot$U)[2],q=q))
}
if(any(UU>1)) UU[which(UU>1)] <-1
if(any(UU<0)) UU[which(UU<0)] <-0
if(!get("mod.cond",help.env)) {
if(l.search) model.l<-lam.search(data=UU,d=get("d",help.env),D=get("D",help.env),lam=get("lam1.vec",help.env),m=get("m",help.env),max.iter=get("max.iter",help.env),q=get("q",help.env),cond=FALSE,id=get("id",help.env),l.lam=2,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
if(!l.search) {
model.l<-pencopula(data=UU,d=get("d",help.env),D=get("D",help.env),pen.order=get("m",help.env),base="B-spline",lambda=rep(get("lambda",help.env)[1],2),max.iter=get("max.iter",help.env),q=get("q",help.env),id=get("id",help.env),cond=FALSE,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
}
assign("indep",FALSE,model.l)
cond<-FALSE
prcomp.D<-NULL
}
if(get("mod.cond",help.env)) {
print(paste("len.D=",len.D,".j1=",j1,".j2=",j2,".D=",D.index,sep=""))
l.lam<-2+len.D
if(l.lam>(2+D.struc)) l.lam<-2+D.struc
if(len.D<=D.struc) {
if(!get("cal.cond",help.env)) {
if(get("test.cond",help.env)==1) pacotestOptions=pacotestset(testType='VI')
if(get("test.cond",help.env)==2) pacotestOptions=pacotestset(testType='ECORR')
if(get("test.cond",help.env)==2|get("test.cond",help.env)==1) test.res<-pacotest(UU,get("data",help.env)[,D.index],pacotestOptions)$pValue
if(is.null(get("test.cond",help.env))) test.res<-0.051
if(test.res<0.05) {
UU<-cbind(UU,get("data",help.env)[,D.index])
colnames(UU)<-NULL
cond<-TRUE
prcomp.D<-NULL
if(l.search) model.l<-lam.search(data=UU,d=get("d2",help.env),D=get("D3",help.env),lam=get("lam2.vec",help.env),m=get("m",help.env),max.iter=get("max.iter",help.env),q=get("q",help.env),cond=TRUE,id=get("id",help.env),l.lam=l.lam,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
if(!l.search) model.l<-pencopula(data=UU,d=get("d2",help.env),D=get("D3",help.env),pen.order=get("m",help.env),base="B-spline",lambda=rep(get("lambda",help.env)[2],l.lam),max.iter=get("max.iter",help.env),cond=TRUE,q=get("q",help.env),id=get("id",help.env),fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
assign("indep",FALSE,model.l)
}
else {
prcomp.D<-NULL
cond <- FALSE
if(l.search) model.l<-lam.search(data=UU,d=get("d",help.env),D=get("D",help.env),lam=get("lam1.vec",help.env),m=get("m",help.env),max.iter=get("max.iter",help.env),q=get("q",help.env),cond=FALSE,id=get("id",help.env),l.lam=2,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
if(!l.search) model.l<-pencopula(data=UU,d=get("d",help.env),D=get("D",help.env),pen.order=get("m",help.env),base="B-spline",lambda=rep(get("lambda",help.env)[1],2),max.iter=get("max.iter",help.env),cond=FALSE,q=get("q",help.env),id=id,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
}
}
if(get("cal.cond",help.env)) {
UU<-cbind(UU,get("data",help.env)[,D.index])
prcomp.D<-NULL
l.lam<-2+len.D
if(l.lam>(2+D.struc)) l.lam<-2+D.struc
cond<-TRUE
if(l.search) model.l<-lam.search(data=UU,d=get("d2",help.env),D=get("D3",help.env),lam=get("lam2.vec",help.env),m=get("m",help.env),max.iter=get("max.iter",help.env),q=get("q",help.env),cond=TRUE,id=get("id",help.env),l.lam=l.lam,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
if(!l.search) model.l<-pencopula(data=UU,d=get("d2",help.env),D=get("D3",help.env),pen.order=get("m",help.env),base="B-spline",lambda=rep(get("lambda",help.env)[2],l.lam),max.iter=get("max.iter",help.env),cond=TRUE,q=get("q",help.env),id=get("id",help.env),fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
assign("indep",FALSE,model.l)
}
}
if(len.D>D.struc) {
if(!get("cal.cond",help.env)) {
if(get("test.cond",help.env)==1) pacotestOptions=pacotestset(testType='VI')
if(get("test.cond",help.env)==2) pacotestOptions=pacotestset(testType='ECORR')
if(get("test.cond",help.env)==2|get("test.cond",help.env)==1) test.res<-pacotest(UU,get("data",help.env)[,D.index],pacotestOptions)$pValue
if(is.null(get("test.cond",help.env))) test.res<-0.051
if(test.res<0.05) {
pca.temp<-cal.pca(help.env,val=get("data",help.env)[,D.index])
prcomp.D<-pca.temp$prcomp.D
UU<-cbind(UU,pca.temp$data.distr)
colnames(UU) <- NULL
cond<-TRUE
if(l.search) model.l<-lam.search(data=UU,d=get("d2",help.env),D=get("D3",help.env),lam=get("lam2.vec",help.env),m=get("m",help.env),max.iter=get("max.iter",help.env),q=get("q",help.env),cond=TRUE,id=get("id",help.env),l.lam=l.lam,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
if(!l.search) model.l<-pencopula(data=UU,d=get("d2",help.env),D=get("D3",help.env),pen.order=get("m",help.env),base="B-spline",lambda=rep(get("lambda",help.env)[2],l.lam),max.iter=get("max.iter",help.env),cond=TRUE,q=get("q",help.env),id=get("id",help.env),fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
}
else {
prcomp.D<-NULL
cond <- FALSE
if(l.search) model.l<-lam.search(data=UU,d=get("d",help.env),D=get("D",help.env),lam=get("lam1.vec",help.env),m=get("m",help.env),max.iter=get("max.iter",help.env),q=get("q",help.env),cond=FALSE,id=get("id",help.env),l.lam=2,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
if(!l.search) model.l<-pencopula(data=UU,d=get("d",help.env),D=get("D",help.env),pen.order=get("m",help.env),base="B-spline",lambda=rep(get("lambda",help.env)[1],2),max.iter=get("max.iter",help.env),cond=FALSE,q=get("q",help.env),id=id,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
}
}
if(get("cal.cond",help.env)) {
pca.temp<-cal.pca(help.env,val=get("data",help.env)[,D.index])
UU<-cbind(UU,pca.temp$data.distr)
colnames(UU) <- c()
cond<-TRUE
prcomp.D<-pca.temp$prcomp.D
if(l.search) model.l<-lam.search(data=UU,d=get("d2",help.env),D=get("D3",help.env),lam=get("lam2.vec",help.env),m=get("m",help.env),max.iter=get("max.iter",help.env),q=get("q",help.env),cond=TRUE,id=get("id",help.env),l.lam=l.lam,fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
if(!l.search) model.l<-pencopula(data=UU,d=get("d2",help.env),D=get("D3",help.env),pen.order=get("m",help.env),base="B-spline",lambda=rep(get("lambda",help.env)[2],l.lam),max.iter=get("max.iter",help.env),cond=TRUE,q=get("q",help.env),id=get("id",help.env),fix.lambda=get("fix.lambda",help.env),test.ind=get("test.ind",help.env))
assign("indep",FALSE,model.l)
}
}
}
assign("prcomp.D",prcomp.D,model.l)
model.l
}
if(dim(pairs.new)[1]==1) {
assign("pairs.fit",pairs.new,help.env)
assign("pairs.new",pairs.new,help.env)
assign(paste("fit.level",get("level",help.env),sep=""),h1,help.env)
}
if(dim(pairs.new)[1]>1) {
if(is.null(RVM)) {
h <- foreach(i=1:no.pairs,.combine=rbind) %dopar% {
c(pairs.new[i,],get(get("order.stat",help.env),h1[[i]]))
}
colnames(h) <- c("i","j","log.like")
mat <- matrix(0,len,len)
diag(mat) <- rep(0,len)
for(i in 1:dim(pairs.new)[1]) {
mat[pairs.new[i,1],pairs.new[i,2]] <- mat[pairs.new[i,2],pairs.new[i,1]] <- h[which(h[,1]==pairs.new[i,1] & h[,2]==pairs.new[i,2]),3]
}
}
assign("pairs.new",pairs.new,help.env)
assign(paste("fit.level",get("level",help.env),sep=""),h1,help.env)
if(is.null(RVM)) {
obj <- minimum.spanning.tree(graph.adjacency(mat,diag=FALSE,mode="lower",weighted=TRUE),algorithm="prim")
pairs.fit <- get.edgelist(obj, names=TRUE)
pairs.fit <- pairs.fit[order(pairs.fit[,1]),]
}
else pairs.fit<-pairs.new
assign("pairs.fit",pairs.fit,help.env)
}
}
|
.checkrasterMemory <- function(cells,n=1) {
cells <- ceiling(sqrt(cells))
canProcessInMemory(raster(nrows=cells, ncols=cells, xmn=0, xmx=cells,vals=NULL),n)
}
if (!isGeneric("entrogram")) {
setGeneric("entrogram", function(x, width, cutoff,...)
standardGeneric("entrogram"))
}
setMethod('entrogram', signature(x='RasterLayer'),
function(x, width, cutoff, categorical, nc, dif, cloud=FALSE, s=NULL,stat,verbose=TRUE,...) {
re <- res(x)[1]
if (missing(verbose)) verbose <- TRUE
if (missing(stat)) stat <- 'ELSA'
else {
stat <- toupper(stat)
if (!stat %in% c('ELSA','EA','EC')) stop('stat should be either of "ELSA", "Ea", "Ec"!')
}
if (missing(cutoff)) cutoff<- sqrt((xmin(x)-xmax(x))^2+(ymin(x)-ymax(x))^2) / 3
if (missing(width)) width <- re
else if (width < re) width <- re
if (cutoff < width) stop("cutoff should be greater than width size")
nlag <- ceiling(cutoff / width)
n <- ncell(x) - cellStats(x,'countNA')
if (is.null(s)) {
if (!.checkrasterMemory(n,nlag)) {
s <- c()
for (i in (nlag-1):1) s <- c(s,.checkrasterMemory(n,i))
s <- which(s)
if (length(s) > 0) {
s <- (nlag - s[1]) / (2*nlag)
s <- ceiling(n * s)
s <- sampleRandom(x,s,cells=TRUE)[,1]
} else {
s <- 1 / (2 * nlag)
s <- ceiling(n * s)
while (!.checkrasterMemory(s,1)) s <- ceiling(s / 2)
s <- sampleRandom(x,s,cells=TRUE)[,1]
}
} else {
s <- (1:ncell(x))[which(!is.na(x[]))]
}
} else {
if (!is.numeric(s)) stop("s argument should be an integer number or NULL!")
while (!.checkrasterMemory(s[1],1)) s <- ceiling(s[1] * 0.8)
if (s > n) s <- n
s <- sampleRandom(x,s,cells=TRUE)[,1]
}
if (!missing(nc)) {
if (missing(categorical)) {
if (missing(dif)) categorical <- FALSE
else {
categorical <- TRUE
if (verbose) cat("input data is considered categorical, and nc is ignored!\n")
}
}
} else {
if (missing(categorical) && !missing(dif)) categorical <- TRUE
}
if (missing(categorical) || !is.logical(categorical)) {
if (.is.categorical(x)) {
categorical <- TRUE
if (verbose) cat("the input is considered as a categorical variable...\n")
} else {
categorical <- FALSE
if (verbose) cat("the input is considered as a continuous variable...\n")
}
}
if (!categorical && missing(nc)) {
nc <- nclass(x)
} else if (categorical) {
classes <- unique(x)
nc <- length(classes)
}
if (categorical) {
if (missing(dif)) {
dif <- rep(1,nc*nc)
for (i in 1:nc) dif[(i-1)*nc+i] <-0
} else {
dif <- .checkDif(dif,classes)
}
}
if (!categorical) x <- categorize(x,nc)
ncl <- ncol(x)
nrw <- nrow(x)
out <- new("Entrogram")
out@width <- width
out@cutoff <- cutoff
if (cloud) {
out@entrogramCloud <- matrix(NA,nrow=length(s),ncol=nlag)
for (i in 1:nlag) {
w <-.Filter(r=res(x)[1],d1=0,d2=i*width)
w <- w[[2]]
if (categorical) {
if (is.null(stat) || stat == 'ELSA') out@entrogramCloud[,i] <- .Call('v_elsac_cell', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(classes),dif, as.integer(s), PACKAGE='elsa')
else if (stat == 'EA') out@entrogramCloud[,i] <- .Call('v_elsac_cell_Ea', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(classes),dif, as.integer(s), PACKAGE='elsa')
else if (stat == 'EC') out@entrogramCloud[,i] <- .Call('v_elsac_cell_Ec', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(classes),dif, as.integer(s), PACKAGE='elsa')
} else {
if (is.null(stat) || stat == 'ELSA') out@entrogramCloud[,i] <- .Call('v_elsa_cell', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(s), PACKAGE='elsa')
else if (stat == 'EA') out@entrogramCloud[,i] <- .Call('v_elsa_cell_Ea', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(s), PACKAGE='elsa')
else if (stat == 'EC') out@entrogramCloud[,i] <- .Call('v_elsa_cell_Ec', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(s), PACKAGE='elsa')
}
}
out@entrogram <- data.frame(distance=seq(width,width*nlag,width) - (width/2),E=apply(out@entrogramCloud,2,mean,na.rm=TRUE))
} else {
d <- seq(width,width*nlag,width) - (width/2)
out@entrogram <- data.frame(distance=d,E=rep(NA,length(d)))
for (i in 1:nlag) {
w <-.Filter(r=res(x)[1],d1=0,d2=i*width)[[2]]
if (categorical) {
if (is.null(stat) || stat == 'ELSA') out@entrogram [i,2] <- mean(.Call('v_elsac_cell', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(classes),dif, as.integer(s), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EA') out@entrogram [i,2] <- mean(.Call('v_elsac_cell_Ea', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(classes),dif, as.integer(s), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EC') out@entrogram [i,2] <- mean(.Call('v_elsac_cell_Ec', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(classes),dif, as.integer(s), PACKAGE='elsa'),na.rm=TRUE)
} else {
if (is.null(stat) || stat == 'ELSA') out@entrogram [i,2] <- mean(.Call('v_elsa_cell', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(s), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EA') out@entrogram [i,2] <- mean(.Call('v_elsa_cell_Ea', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(s), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EC') out@entrogram [i,2] <- mean(.Call('v_elsa_cell_Ec', as.integer(x[]), as.integer(ncl), as.integer(nrw), as.integer(nc), as.integer(w[,1]), as.integer(w[,2]),as.integer(s), PACKAGE='elsa'),na.rm=TRUE)
}
}
}
out
}
)
setMethod('entrogram', signature(x='SpatialPolygonsDataFrame'),
function(x, width, cutoff, categorical, nc, dif, zcol, cloud=FALSE, s=NULL,method,longlat,stat,verbose=TRUE,...) {
n <- nrow(x)
if (missing(verbose)) verbose <- TRUE
if (missing(longlat)) longlat <- NULL
if (missing(stat)) stat <- 'ELSA'
else {
stat <- toupper(stat)
if (!stat %in% c('ELSA','EA','EC')) stop('stat should be either of "ELSA", "Ea", "Ec"!')
}
if (missing(cutoff)) cutoff<- sqrt((xmin(x)-xmax(x))^2+(ymin(x)-ymax(x))^2) / 3
if (missing(width)) width <- cutoff / 15
if (cutoff < width) stop("cutoff should be greater than width size")
nlag <- ceiling(cutoff / width)
if (missing(zcol)) {
if (ncol(x@data) > 1) stop("zcol should be specified!")
else zcol <- 1
} else if (is.character(zcol)) {
w <- which(colnames(x@data) == zcol[1])
if (length(w) == 0) stop('the specified variable in zcol does not exist in the data')
zcol <- w
} else if (is.numeric(zcol)) {
zcol <- zcol[1]
if (zcol > ncol(x@data)) stop('the zcol number is greater than the number of columns in data!')
} else stop("zcol should be a character or a number!")
if (missing(method)) method <- 'centroid'
else {
if (tolower(method)[1] %in% c('bnd','bound','boundary','bond','b')) method <- 'bound'
else method <- 'centroid'
}
if (method == 'centroid') xy <- coordinates(x)
else xy <- x
x <- x@data[,zcol]
if (!is.null(s) && is.numeric(s) && s < n) {
x <- x[sample(n,s)]
n <- length(x)
}
if (!missing(nc)) {
if (missing(categorical)) {
if (missing(dif)) categorical <- FALSE
else {
categorical <- TRUE
if (verbose) cat("input data is considered categorical, and nc is ignored!\n")
}
}
} else {
if (missing(categorical) && !missing(dif)) categorical <- TRUE
}
if (missing(categorical) || !is.logical(categorical)) {
if (.is.categorical(x)) {
categorical <- TRUE
if (verbose) cat("the input is considered as a categorical variable...\n")
} else {
categorical <- FALSE
if (verbose) cat("the input is considered as a continuous variable...\n")
}
}
if (!categorical && missing(nc)) {
nc <- nclass(x)
classes <- 1:nc
} else if (categorical) {
classes <- unique(x)
nc <- length(classes)
}
if (categorical) {
if (missing(dif)) {
dif <- rep(1,nc*nc)
for (i in 1:nc) dif[(i-1)*nc+i] <-0
} else {
dif <- .checkDif(dif,classes)
}
}
if (!categorical) x <- categorize(x,nc)
out <- new("Entrogram")
out@width <- width
out@cutoff <- cutoff
if (cloud) {
out@entrogramCloud <- matrix(NA,nrow=n,ncol=nlag)
for (i in 1:nlag) {
d <- dneigh(xy,d1=0,d2=i*width,method = method,longlat = longlat)@neighbours
if (categorical) {
if (is.null(stat) || stat == 'ELSA') out@entrogramCloud[,i] <- .Call('v_elsac_vector', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa')
else if (stat == 'EA') out@entrogramCloud[,i] <- .Call('v_elsac_vector_Ea', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa')
else if (stat == 'EC') out@entrogramCloud[,i] <- .Call('v_elsac_vector_Ec', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa')
} else {
if (is.null(stat) || stat == 'ELSA') out@entrogramCloud[,i] <-.Call('v_elsa_vector', as.integer(x), d, as.integer(nc), PACKAGE='elsa')
else if (stat == 'EA') out@entrogramCloud[,i] <-.Call('v_elsa_vector_Ea', as.integer(x), d, as.integer(nc), PACKAGE='elsa')
else if (stat == 'EC') out@entrogramCloud[,i] <-.Call('v_elsa_vector_Ec', as.integer(x), d, as.integer(nc), PACKAGE='elsa')
}
}
out@entrogram <- data.frame(distance=seq(width,width*nlag,width) - (width/2),E=apply(out@entrogramCloud,2,mean,na.rm=TRUE))
} else {
d <- seq(width,width*nlag,width) - (width/2)
out@entrogram <- data.frame(distance=d,E=rep(NA,length(d)))
for (i in 1:nlag) {
d <- dneigh(xy,d1=0,d2=i*width,method = method,longlat = longlat)@neighbours
if (categorical) {
if (is.null(stat) || stat == 'ELSA') out@entrogram [i,2] <- mean(.Call('v_elsac_vector', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EA') out@entrogram [i,2] <- mean(.Call('v_elsac_vector_Ea', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EC') out@entrogram [i,2] <- mean(.Call('v_elsac_vector_Ec', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa'),na.rm=TRUE)
} else {
if (is.null(stat) || stat == 'ELSA') out@entrogram [i,2] <- mean(.Call('v_elsa_vector', as.integer(x), d, as.integer(nc), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EA') out@entrogram [i,2] <- mean(.Call('v_elsa_vector_Ea', as.integer(x), d, as.integer(nc), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EC') out@entrogram [i,2] <- mean(.Call('v_elsa_vector_Ec', as.integer(x), d, as.integer(nc), PACKAGE='elsa'),na.rm=TRUE)
}
}
}
out
}
)
setMethod('entrogram', signature(x='SpatialPointsDataFrame'),
function(x, width, cutoff, categorical, nc, dif, zcol, cloud=FALSE, s=NULL,longlat,stat,verbose=TRUE,...) {
n <- nrow(x)
if (missing(verbose)) verbose <- TRUE
if (missing(stat)) stat <- 'ELSA'
else {
stat <- toupper(stat)
if (!stat %in% c('ELSA','EA','EC')) stop('stat should be either of "ELSA", "Ea", "Ec"!')
}
if (missing(longlat)) longlat <- NULL
if (missing(cutoff)) cutoff<- sqrt((xmin(x)-xmax(x))^2+(ymin(x)-ymax(x))^2) / 3
if (missing(width)) width <- cutoff / 15
if (cutoff < width) stop("cutoff should be greater than width size")
nlag <- ceiling(cutoff / width)
if (missing(zcol)) {
if (ncol(x@data) > 1) stop("zcol should be specified!")
else zcol <- 1
} else if (is.character(zcol)) {
w <- which(colnames(x@data) == zcol[1])
if (length(w) == 0) stop('the specified variable in zcol does not exist in the data')
zcol <- w
} else if (is.numeric(zcol)) {
zcol <- zcol[1]
if (zcol > ncol(x@data)) stop('the zcol number is greater than the number of columns in data!')
} else stop("zcol should be a character or a number!")
xy <- coordinates(x)
x <- x@data[,zcol]
if (!is.null(s) && is.numeric(s) && s < n) {
x <- x[sample(n,s)]
n <- length(x)
}
if (!missing(nc)) {
if (missing(categorical)) {
if (missing(dif)) categorical <- FALSE
else {
categorical <- TRUE
if (verbose) cat("input data is considered categorical, and nc is ignored!\n")
}
}
} else {
if (missing(categorical) && !missing(dif)) categorical <- TRUE
}
if (missing(categorical) || !is.logical(categorical)) {
if (.is.categorical(x)) {
categorical <- TRUE
if (verbose) cat("the input is considered as a categorical variable...\n")
} else {
categorical <- FALSE
if (verbose) cat("the input is considered as a continuous variable...\n")
}
}
if (!categorical && missing(nc)) {
nc <- nclass(x)
classes <- 1:nc
} else if (categorical) {
classes <- unique(x)
nc <- length(classes)
}
if (categorical) {
if (missing(dif)) {
dif <- rep(1,nc*nc)
for (i in 1:nc) dif[(i-1)*nc+i] <-0
} else {
dif <- .checkDif(dif,classes)
}
}
if (!categorical) x <- categorize(x,nc)
out <- new("Entrogram")
out@width <- width
out@cutoff <- cutoff
if (cloud) {
out@entrogramCloud <- matrix(NA,nrow=n,ncol=nlag)
for (i in 1:nlag) {
d <- dneigh(xy,d1=0,d2=i*width,longlat = longlat)@neighbours
if (categorical) {
if (is.null(stat) || stat == 'ELSA') out@entrogramCloud[,i] <- .Call('v_elsac_vector', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa')
else if (stat == 'EA') out@entrogramCloud[,i] <- .Call('v_elsac_vector_Ea', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa')
else if (stat == 'EC') out@entrogramCloud[,i] <- .Call('v_elsac_vector_Ec', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa')
} else {
if (is.null(stat) || stat == 'ELSA') out@entrogramCloud[,i] <-.Call('v_elsa_vector', as.integer(x), d, as.integer(nc))
else if (stat == 'EA') out@entrogramCloud[,i] <-.Call('v_elsa_vector_Ea', as.integer(x), d, as.integer(nc))
else if (stat == 'EC') out@entrogramCloud[,i] <-.Call('v_elsa_vector_Ec', as.integer(x), d, as.integer(nc))
}
}
out@entrogram <- data.frame(distance=seq(width,width*nlag,width) - (width/2),E=apply(out@entrogramCloud,2,mean,na.rm=TRUE))
} else {
d <- seq(width,width*nlag,width) - (width/2)
out@entrogram <- data.frame(distance=d,E=rep(NA,length(d)))
for (i in 1:nlag) {
d <- dneigh(xy,d1=0,d2=i*width,longlat = longlat)@neighbours
if (categorical) {
if (is.null(stat) || stat == 'ELSA') out@entrogram [i,2] <- mean(.Call('v_elsac_vector', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EA') out@entrogram [i,2] <- mean(.Call('v_elsac_vector_Ea', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EC') out@entrogram [i,2] <- mean(.Call('v_elsac_vector_Ec', as.integer(x), d, as.integer(nc), as.integer(classes),dif, PACKAGE='elsa'),na.rm=TRUE)
} else {
if (is.null(stat) || stat == 'ELSA') out@entrogram [i,2] <- mean(.Call('v_elsa_vector', as.integer(x), d, as.integer(nc), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EA') out@entrogram [i,2] <- mean(.Call('v_elsa_vector_Ea', as.integer(x), d, as.integer(nc), PACKAGE='elsa'),na.rm=TRUE)
else if (stat == 'EC') out@entrogram [i,2] <- mean(.Call('v_elsa_vector_Ec', as.integer(x), d, as.integer(nc), PACKAGE='elsa'),na.rm=TRUE)
}
}
}
out
}
)
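# Usage sketch, assuming the elsa package (which supplies the compiled
# 'v_elsa*' routines plus categorize(), nclass(), and dneigh()) is installed;
# the example raster path is an assumption based on elsa's documentation:
library(elsa)
library(raster)
r <- raster(system.file("external/dem_example.grd", package = "elsa"))
en <- entrogram(r, width = 2000, cutoff = 15000)
plot(en)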
|
runmcmc_cp1_right <- function(data, iter, start.vals, prop_var, cp_prop_var, tol_edge = 50, warmup = 500, verbose = FALSE)
{
lognormal_ou_pdf <- function(x, mu, sigma, l)
{
n <- length(x)
rho <- exp(-1/l)
return(-n/2 * log(2 * pi) - n * log(sigma) - ((n - 1)/2) * log(1 - rho^2)
- 1/2 * 1/(sigma^2 * (1 - rho^2)) * ((x[1] - mu[1])^2 + (x[n] - mu[n])^2 + (1 + rho^2) * sum((x[2:(n-1)] - mu[2:(n-1)])^2)
- 2 * rho * sum((x[1:(n-1)] - mu[1:(n-1)]) * (x[2:n] - mu[2:n]))))
}
par <- list()
par$sigma <- matrix(nrow = warmup + 1, ncol = 2)
par$sigma[1,] <- start.vals$sigma
par$l <- matrix(nrow = warmup + 1, ncol = 2)
par$l[1,] <- start.vals$l
par$cp <- matrix(nrow = warmup + 1, ncol = 1)
par$cp[1,] <- start.vals$cp
par$beta <- matrix(nrow = warmup + 1, ncol = 1)
par$beta[1,] <- start.vals$beta
par$intercept <- matrix(nrow = warmup + 1, ncol = 1)
par$intercept[1,] <- start.vals$intercept
interval <- range(data$x)
sigma <- start.vals$sigma
l <- start.vals$l
cp <- start.vals$cp
beta <- start.vals$beta
intercept <- start.vals$intercept
accept <- list()
accept$gp_par <- matrix(data = c(0,0), nrow = 1, ncol = 2)
accept$cp <- 0
for(i in 1:(warmup))
{
xrange <- matrix(nrow = 2, ncol = 2)
xrange[1,] <- c(interval[1], cp[1])
xrange[2,] <- c(cp[1], interval[2])
for(j in 1:2)
{
if(j == 2)
{
prop <- as.numeric(mvtnorm::rmvnorm(n = 1, mean = c(sigma[j], l[j], beta, intercept), sigma = prop_var[[j]]))
}
if(j == 1)
{
prop <- as.numeric(mvtnorm::rmvnorm(n = 1, mean = c(sigma[j], l[j]), sigma = prop_var[[j]]))
}
if(verbose == TRUE)
{
print(paste("iteration: ",i))
print(paste(j,"-th GP parameter proposal: ", prop))
}
if(j == 2)
{
if(any(prop[1:2] <= 0) || prop[3] <= 0)
{
next
}
}
if(j == 1)
{
if(any(prop <= 0))
{
next
}
}
temp_dat <- data[data$x <= xrange[j,2] & data$x > xrange[j,1], ]$y
if(j == 2)
{
med <- median(data$x)
mu <- ((data[data$x <= xrange[j,2] & data$x > xrange[j,1], ]$x - med)/(xrange[2,2] - xrange[1,1])) * beta[1] + intercept
prop_mu <- ((data[data$x <= xrange[j,2] & data$x > xrange[j,1], ]$x - med) / (xrange[2,2] - xrange[1,1])) * prop[3] + prop[4]
log_accept_ratio <- lognormal_ou_pdf(x = temp_dat, mu = prop_mu, sigma = prop[1], l = prop[2]) +
dgamma(x = prop[2], shape = 3, rate = 5, log = TRUE) +
dnorm(x = prop[3], mean = 0, sd = 10, log = TRUE) +
dnorm(x = prop[4], mean = 0, sd = 10, log = TRUE) +
dnorm(x = prop[1], mean = 0, sd = 1, log = TRUE) -
(lognormal_ou_pdf(x = temp_dat, mu = mu, sigma = sigma[j], l = l[j]) +
dgamma(x = l[j], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[j], mean = 0, sd = 1, log = TRUE) +
dnorm(x = beta, mean = 0, sd = 10, log = TRUE) +
dnorm(x = intercept, mean = 0, sd = 10, log = TRUE))
if(log(runif(n = 1, min = 0, max = 1)) <= log_accept_ratio)
{
sigma[j] <- prop[1]
l[j] <- prop[2]
beta <- prop[3]
intercept <- prop[4]
}
}
if(j == 1)
{
log_accept_ratio <- lognormal_ou_pdf(x = temp_dat, mu = rep(0, times = length(temp_dat)), sigma = prop[1], l = prop[2]) +
dgamma(x = prop[2], shape = 3, rate = 5, log = TRUE) +
dnorm(x = prop[1], mean = 0, sd = 1, log = TRUE) -
(lognormal_ou_pdf(x = temp_dat, mu = rep(0, times = length(temp_dat)), sigma = sigma[j], l = l[j]) +
dgamma(x = l[j], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[j], mean = 0, sd = 1, log = TRUE))
if(log(runif(n = 1, min = 0, max = 1)) <= log_accept_ratio)
{
sigma[j] <- prop[1]
l[j] <- prop[2]
}
}
}
par$sigma[i + 1,] <- sigma
par$l[i + 1,] <- l
par$beta[i + 1,] <- beta
par$intercept[i + 1,] <- intercept
prop <- as.numeric(rnorm(n = 1, mean = cp, sd = sqrt(cp_prop_var)))
if(verbose == TRUE)
{
print(paste(i,"-th CP proposal: ", prop))
}
if(prop <= tol_edge + interval[1] || prop >= -tol_edge + interval[2])
{
par$cp[i + 1,] <- cp
}
else{
temp_dat1 <- data[data$x <= xrange[1,2] & data$x > xrange[1,1], ]$y
temp_dat2 <- data[data$x <= xrange[2,2] & data$x > xrange[2,1], ]$y
prop_temp_dat1 <- data[data$x <= prop & data$x > interval[1], ]$y
prop_temp_dat2 <- data[data$x < interval[2] & data$x > prop, ]$y
med2 <- median(data$x)
mu2 <- ((data[data$x <= xrange[2,2] & data$x > xrange[2,1], ]$x - med2) / (xrange[2,2] - xrange[1,1])) * beta[1] + intercept
mu1 <- rep(0, times = length(temp_dat1))
prop_med2 <- median(data$x)
prop_mu2 <- ((data[data$x > prop & data$x <= interval[2], ]$x - prop_med2) / (xrange[2,2] - xrange[1,1])) * beta[1] + intercept
prop_mu1 <- rep(0, times = length(prop_temp_dat1))
log_accept_ratio <- lognormal_ou_pdf(x = prop_temp_dat1, mu = prop_mu1, sigma = sigma[1], l = l[1]) +
lognormal_ou_pdf(x = prop_temp_dat2, mu = prop_mu2, sigma = sigma[2], l = l[2]) -
(lognormal_ou_pdf(x = temp_dat1, mu = mu1, sigma = sigma[1], l = l[1]) +
lognormal_ou_pdf(x = temp_dat2, mu = mu2, sigma = sigma[2], l = l[2]))
if(log(runif(n = 1, min = 0, max = 1)) <= log_accept_ratio)
{
cp <- prop
}
}
par$cp[i + 1,] <- cp
}
prop_var[[2]] <- 2.4^2 * var(cbind(par$sigma[round(warmup/2):warmup,2], par$l[round(warmup/2):warmup,2], par$beta[round(warmup/2):warmup,1], par$intercept[round(warmup/2):warmup,1])) / 4 + 1e-1 * diag(4)
prop_var[[1]] <- 2.4^2 * var(cbind(par$sigma[round(warmup/2):warmup,1], par$l[round(warmup/2):warmup,1])) / 2 + 1e-1 * diag(2)
cp_prop_var <- 2.4^2 * var(par$cp[round(warmup/2):warmup,]) + 1
lp <- numeric()
lpost <- numeric()
par <- list()
par$sigma <- matrix(nrow = iter + 1, ncol = 2)
par$sigma[1,] <- sigma
par$l <- matrix(nrow = iter + 1, ncol = 2)
par$l[1,] <- l
par$beta <- matrix(nrow = iter + 1, ncol = 1)
par$beta[1,] <- beta
par$intercept <- matrix(nrow = iter + 1, ncol = 1)
par$intercept[1,] <- intercept
par$cp <- matrix(nrow = iter + 1, ncol = 1)
par$cp[1,] <- cp
for(i in 1:(iter))
{
xrange <- matrix(nrow = 2, ncol = 2)
xrange[1,] <- c(interval[1], cp[1])
xrange[2,] <- c(cp[1], interval[2])
for(j in 1:2)
{
if(j == 2)
{
prop <- as.numeric(mvtnorm::rmvnorm(n = 1, mean = c(sigma[j], l[j], beta, intercept), sigma = prop_var[[j]]))
}
if(j == 1)
{
prop <- as.numeric(mvtnorm::rmvnorm(n = 1, mean = c(sigma[j], l[j]), sigma = prop_var[[j]]))
}
if(verbose == TRUE)
{
print(paste("iteration: ",i))
print(paste(j,"-th GP parameter proposal: ", prop))
}
if(j == 2)
{
if(any(prop[1:2] <= 0) || prop[3] <= 0)
{
next
}
}
if(j == 1)
{
if(any(prop <= 0))
{
next
}
}
temp_dat <- data[data$x <= xrange[j,2] & data$x > xrange[j,1], ]$y
if(j == 2)
{
med <- median(data$x)
mu <- ((data[data$x <= xrange[j,2] & data$x > xrange[j,1], ]$x - med)/(xrange[2,2] - xrange[1,1])) * beta[1] + intercept
prop_mu <- ((data[data$x <= xrange[j,2] & data$x > xrange[j,1], ]$x - med) / (xrange[2,2] - xrange[1,1])) * prop[3] + prop[4]
log_accept_ratio <- lognormal_ou_pdf(x = temp_dat, mu = prop_mu, sigma = prop[1], l = prop[2]) +
dgamma(x = prop[2], shape = 3, rate = 5, log = TRUE) +
dnorm(x = prop[3], mean = 0, sd = 10, log = TRUE) +
dnorm(x = prop[4], mean = 0, sd = 10, log = TRUE) +
dnorm(x = prop[1], mean = 0, sd = 1, log = TRUE) -
(lognormal_ou_pdf(x = temp_dat, mu = mu, sigma = sigma[j], l = l[j]) +
dgamma(x = l[j], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[j], mean = 0, sd = 1, log = TRUE) +
dnorm(x = beta, mean = 0, sd = 10, log = TRUE) +
dnorm(x = intercept, mean = 0, sd = 10, log = TRUE))
if(log(runif(n = 1, min = 0, max = 1)) <= log_accept_ratio)
{
accept$gp_par[1,j] <- accept$gp_par[1,j] + 1/iter
sigma[j] <- prop[1]
l[j] <- prop[2]
beta <- prop[3]
intercept <- prop[4]
}
}
if(j == 1)
{
log_accept_ratio <- lognormal_ou_pdf(x = temp_dat, mu = rep(0, times = length(temp_dat)), sigma = prop[1], l = prop[2]) +
dgamma(x = prop[2], shape = 3, rate = 5, log = TRUE) +
dnorm(x = prop[1], mean = 0, sd = 1, log = TRUE) -
(lognormal_ou_pdf(x = temp_dat, mu = rep(0, times = length(temp_dat)), sigma = sigma[j], l = l[j]) +
dgamma(x = l[j], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[j], mean = 0, sd = 1, log = TRUE))
if(log(runif(n = 1, min = 0, max = 1)) <= log_accept_ratio)
{
accept$gp_par[1,j] <- accept$gp_par[1,j] + 1/iter
sigma[j] <- prop[1]
l[j] <- prop[2]
}
}
}
par$sigma[i + 1,] <- sigma
par$l[i + 1,] <- l
par$beta[i + 1,] <- beta
par$intercept[i + 1,] <- intercept
prop <- as.numeric(rnorm(n = 1, mean = cp, sd = sqrt(cp_prop_var)))
if(verbose == TRUE)
{
print(paste(i,"-th CP proposal: ", prop))
}
if(prop <= tol_edge + interval[1] || prop >= -tol_edge + interval[2])
{
par$cp[i + 1,] <- cp
lp[i] <- (lognormal_ou_pdf(x = temp_dat1, mu = rep(0, times = length(temp_dat1)), sigma = sigma[1], l = l[1]) +
lognormal_ou_pdf(x = temp_dat2, mu = rep(0, times = length(temp_dat2)), sigma = sigma[2], l = l[2]))
lpost[i] <- lp[i] + dgamma(x = l[1], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[1], mean = 0, sd = 1, log = TRUE) +
dnorm(x = beta, mean = 0, sd = 10, log = TRUE) +
dnorm(x = intercept, mean = 0, sd = 10, log = TRUE) +
dgamma(x = l[2], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[2], mean = 0, sd = 1, log = TRUE)
}
else{
temp_dat1 <- data[data$x <= xrange[1,2] & data$x > xrange[1,1], ]$y
temp_dat2 <- data[data$x <= xrange[2,2] & data$x > xrange[2,1], ]$y
prop_temp_dat1 <- data[data$x <= prop & data$x > interval[1], ]$y
prop_temp_dat2 <- data[data$x < interval[2] & data$x > prop, ]$y
med2 <- median(data$x)
mu2 <- ((data[data$x <= xrange[2,2] & data$x > xrange[2,1], ]$x - med2) / (xrange[2,2] - xrange[1,1])) * beta[1] + intercept
mu1 <- rep(0, times = length(temp_dat1))
prop_med2 <- median(data$x)
prop_mu2 <- ((data[data$x > prop & data$x <= interval[2], ]$x - prop_med2) / (xrange[2,2] - xrange[1,1])) * beta[1] + intercept
prop_mu1 <- rep(0, times = length(prop_temp_dat1))
log_accept_ratio <- lognormal_ou_pdf(x = prop_temp_dat1, mu = prop_mu1, sigma = sigma[1], l = l[1]) +
lognormal_ou_pdf(x = prop_temp_dat2, mu = prop_mu2, sigma = sigma[2], l = l[2]) -
(lognormal_ou_pdf(x = temp_dat1, mu = mu1, sigma = sigma[1], l = l[1]) +
lognormal_ou_pdf(x = temp_dat2, mu = mu2, sigma = sigma[2], l = l[2]))
lp[i] <- (lognormal_ou_pdf(x = temp_dat1, mu = rep(0, times = length(temp_dat1)), sigma = sigma[1], l = l[1]) +
lognormal_ou_pdf(x = temp_dat2, mu = rep(0, times = length(temp_dat2)), sigma = sigma[2], l = l[2]))
lpost[i] <- lp[i] + dgamma(x = l[1], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[1], mean = 0, sd = 1, log = TRUE) +
dnorm(x = beta, mean = 0, sd = 10, log = TRUE) +
dnorm(x = intercept, mean = 0, sd = 10, log = TRUE) +
dgamma(x = l[2], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[2], mean = 0, sd = 1, log = TRUE)
if(log(runif(n = 1, min = 0, max = 1)) <= log_accept_ratio)
{
cp <- prop
accept$cp <- accept$cp + 1/iter
lp[i] <- lognormal_ou_pdf(x = prop_temp_dat1, mu = rep(0, times = length(prop_temp_dat1)), sigma = sigma[1], l = l[1]) +
lognormal_ou_pdf(x = prop_temp_dat2, mu = rep(0, times = length(prop_temp_dat2)), sigma = sigma[2], l = l[2])
lpost[i] <- lp[i] + dgamma(x = l[1], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[1], mean = 0, sd = 1, log = TRUE) +
dnorm(x = beta, mean = 0, sd = 10, log = TRUE) +
dnorm(x = intercept, mean = 0, sd = 10, log = TRUE) +
dgamma(x = l[2], shape = 3, rate = 5, log = TRUE) +
dnorm(x = sigma[2], mean = 0, sd = 1, log = TRUE)
}
}
par$cp[i + 1,] <- cp
}
return(list("parameters" = par, "accept" = accept, "lp" = lp, "lpost" = lpost,"gp_prop_var" = prop_var, "cp_prop_var" = cp_prop_var))
}
|
`cusp.subspacerss` <-
function(predictors, dependents)
{
X <- predictors
Y <- dependents
qx <- if(is.qr(X)) {X} else {qr(X)}
qy <- if(is.qr(Y)) {Y} else {qr(Y)}
dx <- qx$rank
dy <- qy$rank
Qx <- qr.Q(qx)[,1:dx, drop=FALSE]
Qy <- qr.Q(qy)[,1:dy, drop=FALSE]
z <- svd(crossprod(Qx, Qy), nu=0)
Ry <- qr.R(qy)[1:dy, 1:dy, drop=FALSE]
rss <- (1-z$d^2) * colSums((t(Ry) %*% z$v)^2)
list(rss = rss, cor=z$d)
}
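# Illustrative usage sketch (simulated data, not from the cusp package): the
# helper above returns, for each canonical direction, the residual sum of
# squares left after projecting the dependent variables onto the predictor
# subspace, together with the canonical correlations.
set.seed(1)
X <- cbind(1, rnorm(50))
Y <- cbind(rnorm(50), 0.5 * rnorm(50))
cusp.subspacerss(predictors = X, dependents = Y)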
|
setMethodS3("writeLocusData", "CBS", function(fit, name=getSampleName(fit), tags=NULL, ext="tsv", path=NULL, sep="\t", nbrOfDecimals=4L, addHeader=TRUE, createdBy=NULL, overwrite=FALSE, skip=FALSE, ...) {
name <- Arguments$getCharacter(name)
tags <- Arguments$getCharacters(tags)
ext <- Arguments$getCharacter(ext)
path <- Arguments$getWritablePath(path)
nbrOfDecimals <- Arguments$getInteger(nbrOfDecimals)
fullname <- paste(c(name, tags), collapse=",")
filename <- sprintf("%s.%s", fullname, ext)
pathname <- Arguments$getWritablePathname(filename, path=path, mustNotExist=(!overwrite && !skip))
if (isFile(pathname)) {
if (skip) {
return(pathname)
}
file.remove(pathname)
}
pathnameT <- pushTemporaryFile(pathname)
data <- getLocusData(fit, ...)
if (!is.null(nbrOfDecimals)) {
cols <- colnames(data)
for (key in cols) {
values <- data[[key]]
if (is.double(values)) {
values <- round(values, digits=nbrOfDecimals)
data[[key]] <- values
}
}
}
if (addHeader) {
sigmaDelta <- estimateStandardDeviation(fit, method="diff")
createdOn <- format(Sys.time(), format="%Y-%m-%d %H:%M:%S %Z")
hdr <- c(
name=name,
tags=tags,
fullname=fullname,
segmentationMethod=sprintf("segment() of %s", attr(fit, "pkgDetails")),
nbrOfLoci=nbrOfLoci(fit),
nbrOfSegments=nbrOfSegments(fit),
joinSegments=fit$params$joinSegments,
signalType=getSignalType(fit),
sigmaDelta=sprintf("%.4f", sigmaDelta),
createdBy=createdBy,
createdOn=createdOn,
nbrOfDecimals=nbrOfDecimals,
nbrOfColumns=ncol(data),
columnNames=paste(colnames(data), collapse=", "),
columnClasses=paste(sapply(data, FUN=function(x) class(x)[1]), collapse=", ")
)
bfr <- paste("
cat(file=pathnameT, bfr, sep="\n")
}
write.table(file=pathnameT, data, append=TRUE, quote=FALSE, sep=sep,
row.names=FALSE, col.names=TRUE)
pathname <- popTemporaryFile(pathnameT)
pathname
}, protected=TRUE)
setMethodS3("writeSegments", "CBS", function(fit, name=getSampleName(fit), tags=NULL, ext="tsv", path=NULL, addHeader=TRUE, createdBy=NULL, sep="\t", nbrOfDecimals=4L, splitters=FALSE, overwrite=FALSE, skip=FALSE, ...) {
name <- Arguments$getCharacter(name)
tags <- Arguments$getCharacters(tags)
ext <- Arguments$getCharacter(ext)
path <- Arguments$getWritablePath(path)
nbrOfDecimals <- Arguments$getInteger(nbrOfDecimals)
fullname <- paste(c(name, tags), collapse=",")
filename <- sprintf("%s.%s", fullname, ext)
pathname <- Arguments$getWritablePathname(filename, path=path, mustNotExist=(!overwrite && !skip))
if (isFile(pathname)) {
if (skip) {
return(pathname)
}
file.remove(pathname)
}
pathnameT <- pushTemporaryFile(pathname)
sampleName <- getSampleName(fit)
data <- getSegments(fit, ..., splitters=splitters)
if (!is.null(nbrOfDecimals)) {
cols <- tolower(colnames(data))
isInt <- (regexpr("chromosome|start|end|nbrofloci", cols) != -1)
cols <- which(isInt)
for (cc in cols) {
values <- data[[cc]]
if (is.double(values)) {
values <- round(values, digits=0)
data[[cc]] <- values
}
}
cols <- tolower(colnames(data))
isInt <- (regexpr("chromosome|start|end|nbrofloci", cols) != -1)
isLog <- (regexpr("call", cols) != -1)
isDbl <- (!isInt & !isLog)
cols <- which(isDbl)
for (kk in cols) {
values <- data[[kk]]
if (is.double(values)) {
values <- round(values, digits=nbrOfDecimals)
data[[kk]] <- values
}
}
}
if (addHeader) {
sigmaDelta <- estimateStandardDeviation(fit, method="diff")
createdOn <- format(Sys.time(), format="%Y-%m-%d %H:%M:%S %Z")
hdr <- c(
name=name,
tags=tags,
fullname=fullname,
segmentationMethod=sprintf("segment() of %s", attr(fit, "pkgDetails")),
nbrOfLoci=nbrOfLoci(fit),
nbrOfSegments=nbrOfSegments(fit),
joinSegments=fit$params$joinSegments,
signalType=getSignalType(fit),
sigmaDelta=sprintf("%.4f", sigmaDelta),
createdBy=createdBy,
createdOn=createdOn,
nbrOfDecimals=nbrOfDecimals,
nbrOfColumns=ncol(data),
columnNames=paste(colnames(data), collapse=", "),
columnClasses=paste(sapply(data, FUN=function(x) class(x)[1]), collapse=", ")
)
bfr <- paste("
cat(file=pathnameT, bfr, sep="\n")
}
write.table(file=pathnameT, data, append=TRUE, quote=FALSE, sep=sep,
row.names=FALSE, col.names=TRUE)
pathname <- popTemporaryFile(pathnameT)
pathname
})
|
Y_to_E<-function(N, NE, directed, Y)
{
E<-matrix(NaN,NE,2)
ans<-.C("Y_to_E", NAOK=TRUE, N=as.integer(N),
directed=as.integer(directed), Y=as.numeric(t(Y)), E=as.integer(t(E)))
return(t(matrix(ans$E,2)))
}
Y_to_nonE<-function(N, NnonE, directed, Y)
{
nonE<-matrix(0, NnonE, 2)
ans<-.C("Y_to_nonE", NAOK=TRUE, N=as.integer(N), directed=as.integer(directed),
Y=as.numeric(t(Y)), nonE=as.integer(t(nonE)))
return(t(matrix(ans$nonE,2)))
}
Y_to_M<-function(N, NM, directed, Y)
{
M<-matrix(0, NM, 2)
ans<-.C("Y_to_M", NAOK=TRUE, N=as.integer(N), directed=as.integer(directed),
Y=as.numeric(t(Y)), M=as.integer(t(M)))
return(t(matrix(ans$M,2)))
}
E_to_Y<-function(N, NE, directed, E)
{
Y<-matrix(0,N,N)
ans<-.C("E_to_Y", NAOK=TRUE, N=as.integer(N), NE=as.integer(NE),
directed=as.integer(directed), E=as.integer(t(E)), Y=as.numeric(t(Y)))
return(matrix(ans$Y,N))
}
hops_to_hopslist<-function(hops,diam,N)
{
hopslist<-matrix(0,N,1+diam+N)
for (i in 1:N)
{
tmp <- sort(hops[i,], index.return = TRUE)
for (h in 0:diam)
hopslist[i,1+h]<-sum(tmp$x==h)
hopslist[i,(2+diam):ncol(hopslist)]<-tmp$ix
}
return(hopslist)
}
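# Minimal sketch with a hand-made hop-count matrix for a 3-node path graph
# (hypothetical data): each row of the result stores, per node, the number of
# nodes at hop distance 0..diam followed by the node indices ordered by
# increasing distance.
hops <- rbind(c(0, 1, 2),
              c(1, 0, 1),
              c(2, 1, 0))
hops_to_hopslist(hops, diam = 2, N = 3)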
|
library(reticulate)
eval <- tryCatch({
config <- py_config()
numeric_version(config$version) >= "3.8" && py_numpy_available()
}, error = function(e) FALSE)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
eval = eval
)
library(reticulate)
if (TRUE) {
cat("This is one expression. \n")
cat("This is another expression. \n")
}
library(reticulate)
l <- r_to_py(list(1, 2, 3))
it <- as_iterator(l)
iter_next(it)
iter_next(it)
iter_next(it)
iter_next(it, completed = "StopIteration")
my_function <- function(name = "World") {
cat("Hello", name, "\n")
}
my_function()
my_function("Friend")
library(reticulate)
py$a_strict_Python_function(3)
py$a_strict_Python_function(3L)
py$a_strict_Python_function(as.integer(3))
|
setMethodS3("calculateResidualSet", "ProbeLevelModel", function(this, units=NULL, force=FALSE, ..., verbose=FALSE) {
qsort <- function(x) {
sort.int(x, index.return=TRUE, method="quick")
}
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
ces <- getChipEffectSet(this)
if (inherits(ces, "CnChipEffectSet")) {
if (ces$combineAlleles) {
throw("calculateResidualSet() does not yet support chip effects for which allele A and allele B have been combined.")
}
}
paf <- getProbeAffinityFile(this)
nbrOfArrays <- length(ces)
verbose && enter(verbose, "Calculating PLM residuals")
ds <- getDataSet(this)
if (is.null(ds)) {
throw("No data set specified for PLM: ", getFullName(this))
}
calculateEps <- getCalculateResidualsFunction(this)
cdf <- getCdf(ds)
if (is.null(units)) {
nbrOfUnits <- nbrOfUnits(cdf)
} else {
nbrOfUnits <- length(units)
}
verbose && printf(verbose, "Number of units: %d\n", nbrOfUnits)
cdfData <- NULL
chipType <- getChipType(cdf)
key <- list(method="calculateResidualSet", class=class(this)[1],
chipType=chipType, params=getParameters(this),
units=units)
dirs <- c("aroma.affymetrix", chipType)
if (!force) {
cdfData <- loadCache(key, dirs=dirs)
if (!is.null(cdfData)) {
names(cdfData) <- gsub("cells2", "ceCells", names(cdfData), fixed=TRUE)
verbose && cat(verbose, "Found indices cached on file")
}
}
if (is.null(cdfData)) {
units0 <- units
if (is.null(units)) {
units <- seq_len(nbrOfUnits(cdf))
}
nbrOfUnits <- length(units)
unitChunks <- splitInChunks(units, chunkSize=100e3)
cdfData <- list(unitGroupSizes=NULL, cells=NULL, ceCells=NULL)
for (kk in seq_along(unitChunks)) {
verbose && enter(verbose, sprintf("Chunk
units <- unitChunks[[kk]]
verbose && enter(verbose, "Retrieving CDF cell indices")
cdfUnits <- getCellIndices(this, units=units, verbose=less(verbose))
names(cdfUnits) <- NULL;
gc <- gc()
verbose && exit(verbose)
verbose && enter(verbose, "Calculate group sizes")
unitGroupSizes <- .applyCdfGroups(cdfUnits, lapply, FUN=function(group) {
length(.subset2(group, 1))
})
unitGroupSizes <- unlist(unitGroupSizes, use.names=FALSE)
verbose && str(verbose, unitGroupSizes)
gc <- gc()
verbose && print(verbose, gc)
verbose && exit(verbose)
cells <- unlist(cdfUnits, use.names=FALSE)
cdfUnits <- NULL
gc <- gc()
verbose && enter(verbose, "Retrieving CDF cell indices for chip effects")
cdfUnits <- getCellIndices(ces, units=units, verbose=less(verbose))
ceCells <- unlist(cdfUnits, use.names=FALSE)
verbose && exit(verbose)
cdfUnits <- NULL;
cdfData$unitGroupSizes <- c(cdfData$unitGroupSizes, unitGroupSizes)
cdfData$cells <- c(cdfData$cells, cells)
cdfData$ceCells <- c(cdfData$ceCells, ceCells)
verbose && str(verbose, cdfData)
unitGroupSizes <- cells <- ceCells <- NULL
gc <- gc()
verbose && print(verbose, gc)
verbose && exit(verbose)
}
gc <- gc()
verbose && print(verbose, gc)
units <- units0
units0 <- NULL
verbose && enter(verbose, "Saving to file cache")
saveCache(cdfData, key=key, dirs=dirs)
gc <- gc()
verbose && print(verbose, gc)
verbose && exit(verbose)
}
verbose && cat(verbose, "CDF related data cached on file:")
unitGroupSizes <- cdfData$unitGroupSizes
verbose && cat(verbose, "unitGroupSizes:")
verbose && str(verbose, unitGroupSizes)
cells <- cdfData$cells
verbose && cat(verbose, "cells:")
verbose && str(verbose, cells)
ceCells <- cdfData$ceCells
verbose && cat(verbose, "ceCells:")
verbose && str(verbose, ceCells)
cdfData <- NULL
gc <- gc()
verbose && print(verbose, gc)
if (!identical(length(unitGroupSizes), length(ceCells))) {
throw("Internal error: 'unitGroupSizes' and 'ceCells' are of different lengths: ", length(unitGroupSizes), " != ", length(ceCells))
}
o <- qsort(cells)
cells <- o$x
o <- o$ix
oinv <- qsort(o)$ix
gc <- gc()
verbose && print(verbose, gc)
path <- getPath(this)
phi <- NULL
for (kk in seq_along(ds)) {
df <- ds[[kk]]
cef <- ces[[kk]]
verbose && enter(verbose, sprintf("Array
filename <- sprintf("%s,residuals.CEL", getFullName(df))
pathname <- Arguments$getWritablePathname(filename, path=path)
pathname <- AffymetrixFile$renameToUpperCaseExt(pathname)
verbose && cat(verbose, "Pathname: ", pathname)
if (!force && isFile(pathname)) {
verbose && cat(verbose, "Already calculated.")
verbose && exit(verbose)
next
}
verbose && enter(verbose, "Retrieving probe intensity data")
y <- getData(df, indices=cells, fields="intensities")$intensities[oinv]
verbose && exit(verbose)
if (is.null(phi)) {
verbose && enter(verbose, "Retrieving probe-affinity estimates")
phi <- getData(paf, indices=cells, fields="intensities")$intensities[oinv]
verbose && exit(verbose)
}
if (length(y) != length(phi)) {
throw("Internal error: 'y' and 'phi' differ in lengths: ",
length(y), " != ", length(phi))
}
verbose && enter(verbose, "Retrieving chip-effect estimates")
theta <- getData(cef, indices=ceCells, fields="intensities")$intensities
theta <- rep(theta, times=unitGroupSizes)
verbose && exit(verbose)
if (length(theta) != length(phi)) {
throw("Internal error: 'theta' and 'phi' differ in lengths: ",
length(theta), " != ", length(phi))
}
verbose && enter(verbose, "Calculating residuals")
yhat <- phi * theta
if (length(yhat) != length(y)) {
throw("Internal error: 'yhat' and 'y' differ in lengths: ",
length(yhat), " != ", length(y))
}
eps <- calculateEps(y, yhat);
verbose && str(verbose, eps)
if (length(eps) != length(y)) {
throw("Internal error: 'eps' and 'y' differ in lengths: ",
length(eps), " != ", length(y))
}
verbose && exit(verbose)
y <- yhat <- theta <- NULL
eps <- eps[o]
gc <- gc()
verbose && print(verbose, gc)
verbose && enter(verbose, "Storing residuals")
isFile <- (force && isFile(pathname))
pathnameT <- pushTemporaryFile(pathname, isFile=isFile, verbose=verbose)
tryCatch({
verbose && enter(verbose, "Creating empty CEL file for results, if missing")
createFrom(df, filename=pathnameT, path=NULL,
methods="create", clear=TRUE, verbose=less(verbose))
verbose && exit(verbose)
verbose && enter(verbose, "Writing residuals")
.updateCel(pathnameT, indices=cells, intensities=eps)
verbose && exit(verbose)
}, interrupt = function(intr) {
verbose && print(verbose, intr)
file.remove(pathnameT)
}, error = function(ex) {
verbose && print(verbose, ex)
file.remove(pathnameT)
})
popTemporaryFile(pathnameT, verbose=verbose)
dfZ <- getChecksumFile(pathname)
verbose && exit(verbose)
eps <- NULL
gc <- gc()
verbose && print(verbose, gc)
verbose && exit(verbose)
}
cells <- phi <- unitGroupSizes <- NULL
gc <- gc()
verbose && print(verbose, gc)
cdf <- getCdf(ds)
rs <- ResidualSet$byPath(path, cdf=cdf, ...)
verbose && exit(verbose)
invisible(rs)
}, protected=TRUE)
setMethodS3("getCalculateResidualsFunction", "ProbeLevelModel", function(static, ...) {
function(y, yhat) {
y-yhat
}
}, static=TRUE, protected=TRUE)
|
"ma_r_ad.int_uvdrr" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
decimals <- x$decimals
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
ad_obj_x <- prepare_ad_int(ad_obj = ad_obj_x, residual_ads = residual_ads, decimals = decimals)
ad_obj_y <- prepare_ad_int(ad_obj = ad_obj_y, residual_ads = residual_ads, decimals = decimals)
if(!correct_rxx) ad_obj_x$qxa_irr <- ad_obj_x$qxi_irr <- ad_obj_x$qxa_drr <- ad_obj_x$qxi_drr <- data.frame(Value = 1, Weight = 1, stringsAsFactors = FALSE)
if(!correct_ryy) ad_obj_y$qxa_irr <- ad_obj_y$qxi_irr <- ad_obj_y$qxa_drr <- ad_obj_y$qxi_drr <- data.frame(Value = 1, Weight = 1, stringsAsFactors = FALSE)
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_qxa <- wt_mean(x = .ad_obj_x$qxa_drr$Value, wt = .ad_obj_x$qxa_drr$Weight)
.mean_ux <- wt_mean(x = .ad_obj_x$ux$Value, wt = .ad_obj_x$ux$Weight)
.mean_qyi <- wt_mean(x = .ad_obj_y$qxi_irr$Value, wt = .ad_obj_y$qxi_irr$Weight)
.mean_qya <- NULL
for(i in 1:length(mean_rxyi)) .mean_qya[i] <- wt_mean(x = estimate_ryya(ryyi = .ad_obj_y$qxi_irr$Value^2, rxyi = mean_rxyi[i], ux = .mean_ux)^.5, wt = .ad_obj_y$qxi_irr$Weight)
ad_list <- list(.qxa = .ad_obj_x$qxa_drr,
.qyi = .ad_obj_y$qxi_irr,
.ux = .ad_obj_x$ux)
art_grid <- create_ad_array(ad_list = ad_list, name_vec = names(ad_list))
.qxa <- art_grid$.qxa
.qyi <- art_grid$.qyi
.ux <- art_grid$.ux
wt_vec <- art_grid$wt
mean_rtpa <- .correct_r_uvdrr(rxyi = mean_rxyi, qxa = .mean_qxa, qyi = .mean_qyi, ux = .mean_ux)
ci_tp <- .correct_r_uvdrr(rxyi = ci_xy_i, qxa = .mean_qxa, qyi = .mean_qyi, ux = .mean_ux)
var_art <- apply(t(mean_rtpa), 2, function(x){
wt_var(x = .attenuate_r_uvdrr(rtpa = x, qxa = .qxa, qyi = .qyi, ux = .ux), wt = wt_vec, unbiased = var_unbiased)
})
var_pre <- var_e + var_art
var_res <- var_r - var_pre
var_rho_tp <- estimate_var_rho_int_uvdrr(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qxa = .mean_qxa, mean_qyi = .mean_qyi,
mean_ux = .mean_ux, var_res = var_res)
.mean_rxpa <- mean_rtpa * .mean_qxa
.ci_xp <- ci_tp * .mean_qxa
.var_rho_xp <- var_rho_tp * .mean_qxa^2
.mean_rtya <- mean_rtpa * .mean_qya
.ci_ty <- ci_tp * .mean_qya
.var_rho_ty <- var_rho_tp * .mean_qya^2
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
var_r_tp <- estimate_var_rho_int_uvdrr(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qxa = .mean_qxa, mean_qyi = .mean_qyi,
mean_ux = .mean_ux, var_res = var_r)
var_e_tp <- estimate_var_rho_int_uvdrr(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qxa = .mean_qxa, mean_qyi = .mean_qyi,
mean_ux = .mean_ux, var_res = var_e)
var_art_tp <- estimate_var_rho_int_uvdrr(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qxa = .mean_qxa, mean_qyi = .mean_qyi,
mean_ux = .mean_ux, var_res = var_art)
var_pre_tp <- estimate_var_rho_int_uvdrr(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qxa = .mean_qxa, mean_qyi = .mean_qyi,
mean_ux = .mean_ux, var_res = var_pre)
se_r_tp <- estimate_var_rho_int_uvdrr(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qxa = .mean_qxa, mean_qyi = .mean_qyi,
mean_ux = .mean_ux, var_res = se_r^2)^.5
.var_r_xp <- var_r_tp * .mean_qxa^2
.var_e_xp <- var_e_tp * .mean_qxa^2
.var_art_xp <- var_art_tp * .mean_qxa^2
.var_pre_xp <- var_pre_tp * .mean_qxa^2
.se_r_xp <- se_r_tp * .mean_qxa
.var_r_ty <- var_r_tp * .mean_qya^2
.var_e_ty <- var_e_tp * .mean_qya^2
.var_art_ty <- var_art_tp * .mean_qya^2
.var_pre_ty <- var_pre_tp * .mean_qya^2
.se_r_ty <- se_r_tp * .mean_qya
if(flip_xy){
correct_meas_y <- !(all(.qxa == 1))
correct_meas_x <- !(all(.qyi == 1))
correct_drr <- !(all(.ux == 1))
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_x <- !(all(.qxa == 1))
correct_meas_y <- !(all(.qyi == 1))
correct_drr <- !(all(.ux == 1))
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
"ma_r_ad.tsa_uvdrr" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
if(!correct_rxx){
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"mean"] <- 1
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var"] <- 0
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var_res"] <- 0
}
if(!correct_ryy){
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"mean"] <- 1
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var"] <- 0
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var_res"] <- 0
}
var_label <- ifelse(residual_ads, "var_res", "var")
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_qxa <- .ad_obj_x["qxa_drr", "mean"]
.var_qxa <- .ad_obj_x["qxa_drr", var_label]
.mean_qyi <- .ad_obj_y["qxi_irr", "mean"]
.var_qyi <- .ad_obj_y["qxi_irr", var_label]
.mean_ux <- .ad_obj_x["ux", "mean"]
.var_ux <- .ad_obj_x["ux", var_label]
.mean_qya <- estimate_ryya(ryyi = .mean_qyi^2, rxyi = mean_rxyi, ux = .mean_ux)^.5
mean_rtpa <- .correct_r_uvdrr(rxyi = mean_rxyi, qxa = .mean_qxa, qyi = .mean_qyi, ux = .mean_ux)
ci_tp <- .correct_r_uvdrr(rxyi = ci_xy_i, qxa = .mean_qxa, qyi = .mean_qyi, ux = .mean_ux)
var_mat_tp <- estimate_var_rho_tsa_uvdrr(mean_rtpa = mean_rtpa, var_rxyi = var_r, var_e = var_e,
mean_ux = .mean_ux, mean_qxa = .mean_qxa, mean_qyi = .mean_qyi,
var_ux = .var_ux, var_qxa = .var_qxa, var_qyi = .var_qyi, show_variance_warnings = FALSE)
.mean_rxpa <- mean_rtpa * .mean_qxa
.ci_xp <- ci_tp * .mean_qxa
.mean_rtya <- mean_rtpa * .mean_qya
.ci_ty <- ci_tp * .mean_qya
var_art <- var_mat_tp$var_art
var_pre <- var_mat_tp$var_pre
var_res <- var_mat_tp$var_res
var_rho_tp <- var_mat_tp$var_rho
.var_rho_xp <- var_rho_tp * .mean_qxa^2
.var_rho_ty <- var_rho_tp * .mean_qya^2
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
var_r_tp <- estimate_var_tsa_uvdrr(mean_rtpa = mean_rtpa, var = var_r,
mean_ux = .mean_ux, mean_qxa = .mean_qxa, mean_qyi = .mean_qyi)
var_e_tp <- estimate_var_tsa_uvdrr(mean_rtpa = mean_rtpa, var = var_e,
mean_ux = .mean_ux, mean_qxa = .mean_qxa, mean_qyi = .mean_qyi)
var_art_tp <- estimate_var_tsa_uvdrr(mean_rtpa = mean_rtpa, var = var_art,
mean_ux = .mean_ux, mean_qxa = .mean_qxa, mean_qyi = .mean_qyi)
var_pre_tp <- estimate_var_tsa_uvdrr(mean_rtpa = mean_rtpa, var = var_pre,
mean_ux = .mean_ux, mean_qxa = .mean_qxa, mean_qyi = .mean_qyi)
se_r_tp <- estimate_var_tsa_uvdrr(mean_rtpa = mean_rtpa, var = se_r^2,
mean_ux = .mean_ux, mean_qxa = .mean_qxa, mean_qyi = .mean_qyi)^.5
.var_r_xp <- var_r_tp * .mean_qxa^2
.var_e_xp <- var_e_tp * .mean_qxa^2
.var_art_xp <- var_art_tp * .mean_qxa^2
.var_pre_xp <- var_pre_tp * .mean_qxa^2
.se_r_xp <- se_r_tp * .mean_qxa
.var_r_ty <- var_r_tp * .mean_qya^2
.var_e_ty <- var_e_tp * .mean_qya^2
.var_art_ty <- var_art_tp * .mean_qya^2
.var_pre_ty <- var_pre_tp * .mean_qya^2
.se_r_ty <- se_r_tp * .mean_qya
if(flip_xy){
correct_meas_x <- .mean_qxa != 1
correct_meas_y <- .mean_qyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_y <- .mean_qxa != 1
correct_meas_x <- .mean_qyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
|
load(file = "helper_data.rda")
df3 <- df2
df3$dat[1, 1:2] <- 8
test_that("Input validation", {
expect_error(checkValue(df1, value = 3:4),
"'value' needs to be of length 1.")
expect_error(checkValue(df1, vars = 1, value = 3),
"'vars' needs to be a character of at least length 1.")
expect_error(checkValue(df1, vars = "lala", value = 3),
"The following 'vars' are not variables in the GADSdat: lala")
})
test_that("Value checks raise no false alarms", {
expect_equal(checkValue(df1, value = 4), integer(0))
expect_equal(checkValue(df2, value = -1), integer(0))
})
test_that("Value occurences reported", {
expect_equal(checkValue(df1, value = 1),
c(ID1 = 1L))
expect_equal(checkValue(df3, value = 8),
c(ID1 = 1L, V2 = 2L))
})
test_that("Value checks for variable subset", {
expect_equal(checkValue(df1, vars = "V1", value = 1),
integer())
expect_equal(checkValue(df3, vars = "V2", value = 8),
c(V2 = 2))
})
test_that("Value checks for NA", {
df5 <- df1
df5$dat[1:2, "V1"] <- NA
expect_equal(checkValue(df5, value = NA),
c(V1 = 2L))
expect_equal(checkValue(df1, value = NA),
integer())
})
|
cor_diss <- function(Xr, Xu = NULL, ws = NULL, center = TRUE, scale = FALSE) {
if (!ncol(Xr) >= 2) {
stop("For correlation dissimilarity the number of variables must be larger than 1")
}
if (!is.null(Xu)) {
if (ncol(Xu) != ncol(Xr)) {
stop("The number of columns (variables) in Xr must be equal to the number of columns (variables) in Xu")
}
if (sum(is.na(Xu)) > 0) {
stop("Input data contains missing values")
}
}
if (sum(is.na(Xr)) > 0) {
stop("Matrices with missing values are not accepted")
}
if (!is.logical(center)) {
stop("'center' argument must be logical")
}
if (!is.logical(scale)) {
stop("'scale' argument must be logical")
}
if (center | scale) {
X <- rbind(Xr, Xu)
if (center) {
X <- sweep(x = X, MARGIN = 2, FUN = "-", STATS = colMeans(X))
}
if (scale) {
X <- sweep(x = X, MARGIN = 2, FUN = "/", STATS = get_col_sds(X))
}
if (!is.null(Xu)) {
Xu <- X[(nrow(X) - nrow(Xu) + 1):nrow(X), , drop = FALSE]
Xr <- X[1:(nrow(X) - nrow(Xu)), ]
} else {
Xr <- X
}
rm(X)
}
if (!is.null(ws)) {
if (length(ws) != 1 || ws < 3) {
stop("'ws' must be a single odd value larger than 2")
}
if ((ws %% 2) == 0) {
stop("'ws' must be an odd value")
}
if (ws >= ncol(Xr)) {
stop("'ws' must lower than the number of columns (variables) in Xr")
}
if (!is.null(Xu)) {
rslt <- moving_cor_diss(Xu, Xr, ws)
colnames(rslt) <- paste("Xu", 1:nrow(Xu), sep = "_")
rownames(rslt) <- paste("Xr", 1:nrow(Xr), sep = "_")
} else {
rslt <- moving_cor_diss(Xr, Xr, ws)
rownames(rslt) <- colnames(rslt) <- paste("Xr", 1:nrow(Xr), sep = "_")
}
} else {
if (!is.null(Xu)) {
rslt <- fast_diss(Xu, Xr, "cor")
colnames(rslt) <- paste("Xu", 1:nrow(Xu), sep = "_")
rownames(rslt) <- paste("Xr", 1:nrow(Xr), sep = "_")
} else {
rslt <- fast_diss(Xr, Xr, "cor")
rownames(rslt) <- colnames(rslt) <- paste("Xr", 1:nrow(Xr), sep = "_")
}
}
rslt[rslt < 1e-15] <- 0
rslt
}
|
print.aodml <- function(x, ...)
print(
list(
call = x$call,
b = x$b, phi = x$phi, phi.scale = x$phi.scale,
varparam = x$varparam, logL = x$logL,
iterations = x$iterations, code = x$code
)
)
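# Quick check of the print method with a mock list carrying the components it
# reports (not a fitted aodml object; values are made up).
print.aodml(list(call = quote(aodml(y ~ x, data = d)), b = c(0.2, 1.1),
                 phi = 0.05, phi.scale = "identity", varparam = diag(2),
                 logL = -12.3, iterations = 8, code = 0))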
|
fluidRow(
box(
title = i18n$t("現在の感染状況"),
width = 6,
height = "550px",
icon = icon("現在の感染状況"),
sidebar = boxSidebar(
id = "CurrentBoxtableOfEachPrefecturesBoxSidebar",
width = 100,
icon = icon("info-circle"),
i18n$t("現在の感染状況")
),
echarts4rOutput("currentActive", height = "550px") %>% withSpinner(proxy.height = "550px")
)
)
|
rshift <- function(a) {
n <- length(a)
return(c(a[n], a[1:(n - 1)]))
}
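# Example: rshift() rotates a vector one position to the right, wrapping the
# last element around to the front.
rshift(1:5)          # 5 1 2 3 4
rshift(c("a", "b"))  # "b" "a"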
|
sen2r_getElements <- function(
s2_names,
naming_convention,
format = "data.table",
abort = TRUE
) {
list_regex <- list(
"sen2r" = list(
"regex" = "^S2([AB])([12][AC])\\_([0-9]{8})\\_([0-9]{3})\\_([^\\_\\.]*)\\_([^\\_\\.]+)\\_([126]0)\\.?([^\\_]*)$",
"elements" = c("mission","level","sensing_date","id_orbit","extent_name","prod_type","res","file_ext"),
"date_format" = "%Y%m%d"
),
"sen2r_new" = list(
"regex" = "^S2\\_([0-9]{8})\\_([0-9]{3})\\_([^\\_\\.]*)\\_([AB])\\_([^\\_\\.]+)\\.?([^\\_]*)$",
"elements" = c("sensing_date","id_orbit","extent_name","mission","prod_type","file_ext"),
"date_format" = "%Y%m%d"
)
)
if (!format %in% c("list", "data.frame", "data.table")) {
print_message(
type="warning",
"Argument must be one between 'data.frame' and 'list'.",
"Returnig a list.")
format <- "list"
}
if (is.null(s2_names)) {
return(invisible(NULL))
}
s2_names <- basename(s2_names)
if (missing(naming_convention) || is.null(naming_convention)) {
regex_match <- sapply(list_regex, function(x){sum(grepl(x$regex,s2_names))})
if (regex_match[["sen2r"]] == 0 & regex_match[["sen2r_new"]] > 0) {
naming_convention <- "sen2r_new"
} else {
naming_convention <- "sen2r"
}
}
if (inherits(naming_convention, "character")) {
if (naming_convention[1] %in% c("sen2r", "sen2r_new")) {
fs2nc_regex <- list_regex[[naming_convention[1]]]
} else {
print_message(
type = "error",
"The argument 'naming_convention' is not recognised."
)
}
} else if (
inherits(naming_convention, "list") &&
all(c("regex", "elements", "date_format") %in% names(naming_convention))
) {
fs2nc_regex <- naming_convention
} else {
print_message(
type = "error",
"The argument 'naming_convention' is not recognised."
)
}
metadata <- data.frame(
"type" = rep(NA, length(s2_names))
)
for (sel_el in fs2nc_regex$elements) {
metadata[,sel_el] <- gsub(
fs2nc_regex$regex,
paste0("\\",which(fs2nc_regex$elements==sel_el)),
s2_names
)
}
metadata[,"sensing_date"] <- as.Date(
metadata[,"sensing_date"],
format = fs2nc_regex$date_format
)
if (nrow(metadata)>0) {
if (!is.null(metadata$res) && all(grepl("[126]0", metadata[,"res"]))) {
metadata[,"res"] <- paste0(metadata[,"res"],"m")
}
metadata$type <- ifelse(
!grepl(fs2nc_regex$regex,s2_names), "unrecognised",
ifelse(
grepl("[0-9]{2}[A-Z]{3}[a-z]?",metadata$extent_name), "tile",
ifelse(
metadata$extent_name=="", "merged", "clipped"
)
)
)
} else {
metadata$type <- as.character(metadata$type)
}
if (sum(metadata$type=="unrecognised") > 0) {
print_message(
type = if(abort==TRUE){"error"}else{"warning"},
"\"",paste(s2_names[metadata$type=="unrecognised"], collapse="\", \""),
"\" were not recognised."
)
metadata[metadata$type=="unrecognised",2:(length(fs2nc_regex$elements)+1)] <- NA
}
if (format == "data.table") {
return(data.table(metadata))
} else if (format == "data.frame") {
return(metadata)
} else if (format == "list") {
meta_list <- lapply(seq_along(s2_names), function(i) {
l <- as.list(metadata[i,])
l$sensing_date <- as.character(l$sensing_date)
l[l==""|is.na(l)] <- NULL
l
})
names(meta_list) <- s2_names
if (length(meta_list)==1) {
return(meta_list[[1]])
} else {
return(meta_list)
}
}
}
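# Hedged example with a hypothetical file name that follows the sen2r naming
# convention; with format = "data.frame" and a recognised name, only this
# function and base R are needed (no data.table, no error/warning path).
sen2r_getElements("S2A2A_20190723_022_Barbellino_BOA_10.tif", format = "data.frame")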
|
modelFAMT <-
function(data,x=1,test=x[1],nbf=NULL,maxnbfactors=8,min.err=1e-03) {
if (class(data)[1]!="FAMTdata") stop("Class of data should be FAMTdata")
if (!is.null(nbf)) optimnbfactors = nbf
if (is.null(nbf)) optimnbfactors = nbfactors(data,x,test,pvalues=NULL,maxnbfactors,min.err)$optimalnbfactors
pval = raw.pvalues(data,x,test)
if (optimnbfactors==0) {
adjdata = data
adjpval = pval
fa = NULL
}
if (optimnbfactors>0) {
fa = emfa(data,nbf=optimnbfactors,x=x,test=test,pvalues=NULL,min.err=min.err)
rdata = residualsFAMT(data,x,test,pvalues=NULL)$residuals
stdev = apply(rdata,2,sd)
adjdata = data
adjdata$expression = sweep(data$expression,1,FUN="/",STATS=stdev)-fa$B%*%t(fa$Factors)
adjpval = raw.pvalues(adjdata,x,test)
fa = emfa(data,nbf=optimnbfactors,x=x,test=test,pvalues=adjpval$pval,min.err=min.err)
adjdata$expression = sweep(data$expression,1,FUN="/",STATS=stdev)-fa$B%*%t(fa$Factors)
adjpval = raw.pvalues(adjdata,x,test)
}
idcovar=data$idcovar
res = list(adjpval=adjpval$pval,adjtest=adjpval$test,adjdata=adjdata,FA=fa,pval=pval$pval,x=x,test=test,
nbf=optimnbfactors, idcovar=idcovar)
class(res) = c("FAMTmodel","list")
return(res)
}
|
setMethod("hist", signature(x = "missing_variable"), def =
function(x, ...) {
y <- x@data
NAs <- is.na(x)
h_all <- hist(y, plot = FALSE)
plot(h_all, border = "lightgray", main = "", xlab = if(x@done) "Completed" else "Observed", axes = FALSE,
mgp = c(2, 1, 0), tcl = .05, col = if(x@done) "lightgray" else "blue", freq = TRUE, ...)
axis(1, lwd = 0)
axis(2)
if(x@done) {
h_obs <- hist(y[!NAs], breaks = h_all$breaks, plot = FALSE)
h_miss <- hist(y[NAs], breaks = h_all$breaks, plot = FALSE)
segments(h_obs$breaks[1], 0, y1 = h_obs$counts[1], col = "blue")
segments(h_miss$breaks[1], 0, y1 = h_miss$counts[1], col = "red")
segments(h_obs$breaks[1], y0 = h_obs$counts[1], x1 = h_obs$breaks[2], col = "blue")
segments(h_miss$breaks[1], y0 = h_miss$counts[1], x1 = h_miss$breaks[2], col = "red")
for(i in 2:(length(h_obs$breaks)-1)) {
segments(x0 = h_obs$breaks[i], y0 = h_obs$counts[i-1], y1 = h_obs$counts[i], col = "blue")
segments(x0 = h_miss$breaks[i], y0 = h_miss$counts[i-1], y1 = h_miss$counts[i], col = "red")
segments(x0 = h_obs$breaks[i], y0 = h_obs$counts[i], x1 = h_obs$breaks[i+1], col = "blue")
segments(x0 = h_miss$breaks[i], y0 = h_miss$counts[i], x1 = h_miss$breaks[i+1], col = "red")
}
segments(x0 = h_obs$breaks[i+1], y0 = h_obs$counts[i], y1 = 0, col = "blue")
segments(x0 = h_miss$breaks[i+1], y0 = h_miss$counts[i], y1 = 0, col = "red")
if(.MI_DEBUG) stopifnot(all(h_all$counts == (h_obs$counts + h_miss$counts)))
}
return(invisible(NULL))
})
setMethod("hist", signature(x = "semi-continuous"), def =
function(x, ...) {
con <- complete(x@indicator, 0L) == 0
y <- x@data[con]
NAs <- is.na(x)[con]
h_all <- hist(y, plot = FALSE)
plot(h_all, freq = TRUE, border = "lightgray", main = "", xlab = if(x@done) "Completed" else "Observed", axes = FALSE,
mgp = c(2, 1, 0), tcl = .05, col = if(x@done) "lightgray" else "blue", xlim = range(x@data, na.rm = TRUE), ...)
axis(1, lwd = 0)
axis(2)
if(x@done) {
h_obs <- hist(y[!NAs], breaks = h_all$breaks, plot = FALSE)
h_miss <- hist(y[NAs], breaks = h_all$breaks, plot = FALSE)
segments(h_obs$breaks[1], 0, y1 = h_obs$counts[1], col = "blue")
segments(h_miss$breaks[1], 0, y1 = h_miss$counts[1], col = "red")
segments(h_obs$breaks[1], y0 = h_obs$counts[1], x1 = h_obs$breaks[2], col = "blue")
segments(h_miss$breaks[1], y0 = h_miss$counts[1], x1 = h_miss$breaks[2], col = "red")
for(i in 2:(length(h_obs$breaks)-1)) {
segments(x0 = h_obs$breaks[i], y0 = h_obs$counts[i-1], y1 = h_obs$counts[i], col = "blue")
segments(x0 = h_miss$breaks[i], y0 = h_miss$counts[i-1], y1 = h_miss$counts[i], col = "red")
segments(x0 = h_obs$breaks[i], y0 = h_obs$counts[i], x1 = h_obs$breaks[i+1], col = "blue")
segments(x0 = h_miss$breaks[i], y0 = h_miss$counts[i], x1 = h_miss$breaks[i+1], col = "red")
}
segments(x0 = h_obs$breaks[i+1], y0 = h_obs$counts[i], y1 = 0, col = "blue")
segments(x0 = h_miss$breaks[i+1], y0 = h_miss$counts[i], y1 = 0, col = "red")
NAs <- is.na(x)[!con]
tab <- table(x@data[!con], NAs)
for(i in 1:NROW(tab)) {
segments(x0 = as.numeric(rownames(tab)[i]), y0 = 0, y1 = sum(tab[i,]), col = "lightgray", lty = "dashed")
segments(x0 = as.numeric(rownames(tab)[i]), y0 = 0, y1 = tab[i,1], col = "blue", lty = "dashed")
if(ncol(tab) == 2) segments(x0 = as.numeric(rownames(tab)[i]), y0 = 0, y1 = tab[i,2], col = "red", lty = "dashed")
}
if(.MI_DEBUG) stopifnot(all(h_all$counts == (h_obs$counts + h_miss$counts)))
}
else {
tab <- table(x@data[!con])
for(i in 1:NCOL(tab)) segments(x0 = as.numeric(names(tab)[i]), y0 = 0, y1 = tab[i], col = "blue", lty = "dashed")
}
return(invisible(NULL))
})
setMethod("hist", signature(x = "categorical"), def =
function(x, ...) {
y <- x@data
values <- sort(unique(y[!is.na(y)]))
breaks <- c(sapply(values, FUN = function(x) c(x - .25, x + .25)))
NAs <- is.na(x)
h_all <- hist(y, breaks, plot = FALSE)
plot(h_all, border = "lightgray", axes = FALSE, main = "", xlab = if(x@done) "Completed" else "Observed",
mgp = c(2, 1, 0), tcl = .05, col = if(x@done) "lightgray" else "blue", freq = TRUE, ylim = range(h_all$counts, na.rm = TRUE), ...)
axis(1, at = values, labels = levels(x@raw_data), lwd = 0)
axis(2)
if(x@done) {
h_obs <- hist(y[!NAs], breaks, plot = FALSE)
h_miss <- hist(y[NAs], breaks, plot = FALSE)
counts_obs <- h_obs$counts
counts_miss <- h_miss$counts
segments(breaks[1], 0, y1 = counts_obs[1], col = "blue")
segments(breaks[1], 0, y1 = counts_miss[1], col = "red")
if(counts_obs[1]) segments(breaks[1], y0 = counts_obs[1], x1 = breaks[2], col = "blue")
if(counts_miss[1]) segments(breaks[1], y0 = counts_miss[1], x1 = breaks[2], col = "red")
for(i in 2:(length(breaks)-1)) {
segments(x0 = breaks[i], y0 = counts_obs[i-1], y1 = counts_obs[i], col = "blue")
segments(x0 = breaks[i], y0 = counts_miss[i-1], y1 = counts_miss[i], col = "red")
if(counts_obs[i]) segments(x0 = breaks[i], y0 = counts_obs[i], x1 = breaks[i+1], col = "blue")
if(counts_miss[i]) segments(x0 = breaks[i], y0 = counts_miss[i], x1 = breaks[i+1], col = "red")
}
segments(x0 = breaks[i+1], y0 = counts_obs[i], y1 = 0, col = "blue")
segments(x0 = breaks[i+1], y0 = counts_miss[i], y1 = 0, col = "red")
if(.MI_DEBUG) stopifnot(all(h_all$counts == (h_obs$counts + h_miss$counts)))
}
return(invisible(NULL))
})
setMethod("hist", signature(x = "binary"), def =
function(x, ...) {
y <- x@data
if(max(y, na.rm = TRUE) > 1) y <- y - 1L
values <- 0:1
breaks <- c(-.25, .25, .75, 1.25)
NAs <- is.na(x)
h_all <- hist(y, breaks, plot = FALSE)
plot(h_all, border = "lightgray", axes = FALSE, main = "", xlab = if(x@done) "Completed" else "Observed",
mgp = c(2, 1, 0), tcl = .05, col = if(x@done) "lightgray" else "blue", freq = TRUE, ylim = range(h_all$counts, na.rm = TRUE), ...)
axis(1, at = values, lwd = 0)
axis(2)
if(x@done) {
h_obs <- hist(y[!NAs], breaks, plot = FALSE)
h_miss <- hist(y[NAs], breaks, plot = FALSE)
counts_obs <- h_obs$counts
counts_miss <- h_miss$counts
segments(breaks[1], 0, y1 = counts_obs[1], col = "blue")
segments(breaks[1], 0, y1 = counts_miss[1], col = "red")
if(counts_obs[1]) segments(breaks[1], y0 = counts_obs[1], x1 = breaks[2], col = "blue")
if(counts_miss[1]) segments(breaks[1], y0 = counts_miss[1], x1 = breaks[2], col = "red")
for(i in 2:(length(breaks)-1)) {
segments(x0 = breaks[i], y0 = counts_obs[i-1], y1 = counts_obs[i], col = "blue")
segments(x0 = breaks[i], y0 = counts_miss[i-1], y1 = counts_miss[i], col = "red")
if(counts_obs[i]) segments(x0 = breaks[i], y0 = counts_obs[i], x1 = breaks[i+1], col = "blue")
if(counts_miss[i]) segments(x0 = breaks[i], y0 = counts_miss[i], x1 = breaks[i+1], col = "red")
}
segments(x0 = breaks[i+1], y0 = counts_obs[i], y1 = 0, col = "blue")
segments(x0 = breaks[i+1], y0 = counts_miss[i], y1 = 0, col = "red")
if(.MI_DEBUG) stopifnot(all(h_all$counts == (h_obs$counts + h_miss$counts)))
}
return(invisible(NULL))
})
setMethod("hist", signature(x = "missing_data.frame"), def =
function(x, ask = TRUE, ...) {
k <- sum(!x@no_missing)
if (.Device != "null device" && x@done) {
oldask <- grDevices::devAskNewPage(ask = ask)
if (!oldask) on.exit(grDevices::devAskNewPage(oldask), add = TRUE)
op <- options(device.ask.default = TRUE)
on.exit(options(op), add = TRUE)
}
par(mfrow = n2mfrow(k))
for(i in 1:x@DIM[2]) {
if(x@no_missing[i]) next
hist(x@variables[[i]])
header <- x@variables[[i]]@variable_name
if(is(x@variables[[i]], "continuous")) {
trans <- .show_helper(x@variables[[i]])$transformation[1]
header <- paste("\n", header, " (", trans, ")", sep = "")
}
title(main = header)
}
return(invisible(NULL))
})
setMethod("hist", signature(x = "mdf_list"), def =
function(x, ask = TRUE, ...) {
if (.Device != "null device") {
oldask <- grDevices::devAskNewPage(ask = ask)
if (!oldask) on.exit(grDevices::devAskNewPage(oldask), add = TRUE)
op <- options(device.ask.default = ask)
on.exit(options(op), add = TRUE)
}
sapply(x, FUN = hist, ...)
return(invisible(NULL))
})
setMethod("hist", signature(x = "mi"), def =
function(x, m = 1:length(x), ask = TRUE, ...) {
for(i in m) hist(x@data[[i]], ask = ask, ...)
return(invisible(NULL))
})
setMethod("hist", signature(x = "mi_list"), def =
function(x, m = 1:length(x), ask = TRUE, ...) {
if (.Device != "null device") {
oldask <- grDevices::devAskNewPage(ask = ask)
if (!oldask) on.exit(grDevices::devAskNewPage(oldask), add = TRUE)
op <- options(device.ask.default = ask)
on.exit(options(op), add = TRUE)
}
sapply(x, FUN = hist, m = m, ask = ask, ...)
return(invisible(NULL))
})
|
testthat::test_that("Test regressionModelMetrics", {
set.seed(111)
mod <- lm(formula = wt ~ ., data = mtcars)
predictions <- predict(mod, mtcars[,-6])
actuals <- mtcars[,6]
res <- regressionModelMetrics(actuals = actuals, predictions = predictions, model = mod)
expect_type(res, 'list')
expect_named(res)
expect_equal(names(res), c("AIC", "BIC", "MAE", "MSE", "RMSE", "MAPE", "Corelation", "r.squared", 'adj.r.squared'))
})
|
getTreeSpecies <- function(species){
conn <- try(makeConnection(), T)
if ('try-error' %in% class(conn)){
stop("Invalid database connection. Please use setDB() to connect to a valid DB", call. = FALSE)
}
table <- RSQLite::dbGetQuery(conn, "SELECT * FROM TREESPECIES")
RSQLite::dbDisconnect(conn)
if(!is.null(species)){
if (species %in% table[["species"]]){
species_id <- table[table$species==species,]$species_id
}else if(species %in% table[["species_id"]]){
species_id <- table[table$species_id==species,]$species_id
}else{
stop("Invalid tree species", call. = FALSE)
}
}else{
stop("Invalid tree species", call. = FALSE)
}
return(species_id)
}
|
library(dplyr)
library(stringr)
library(statar)
context("sumup")
test_that("sum_up", {
a <- cars %>% sum_up(speed)
expect_equal(nrow(cars %>% sum_up(speed)), 1)
expect_equal(nrow(cars %>% group_by(ok = speed %/% 10) %>% sum_up(dist)), 3)
})
|
randtest.amova <- function(xtest, nrepet = 99, ...) {
if (!inherits(xtest, "amova")) stop("Object of class 'amova' expected for xtest")
if (nrepet <= 1) stop("Non convenient nrepet")
distances <- as.matrix(xtest$distances) / 2
samples <- as.matrix(xtest$samples)
structures <- xtest$structures
ddl <- xtest$results$Df
ddl[1:(length(ddl) - 1)] <- ddl[(length(ddl) - 1):1]
sigma <- xtest$componentsofcovariance$Sigma
lesss <- xtest$results$"Sum Sq"
if (is.null(structures)) {
structures <- cbind.data.frame(rep(1, nrow(samples)))
indic <- 0
}
else {
for (i in 1:ncol(structures)) {
structures[, i] <- factor(as.numeric(structures[, i]))
}
indic <- 1
}
if (indic != 0) {
longueurresult <- nrepet * (length(sigma) - 1)
res <- testamova(distances, nrow(distances), nrow(distances), samples, nrow(samples), ncol(samples), structures, nrow(structures), ncol(structures), indic, sum(samples), nrepet, lesss[length(lesss)] / sum(samples), ddl, longueurresult)
restests <- matrix(res, nrepet, length(sigma) - 1, byrow = TRUE)
alts <- rep("greater", length(names(structures)) + 1)
permutationtests <- as.krandtest(sim=restests,obs=sigma[(length(sigma) - 1):1],names = paste("Variations", c("within samples", "between samples", paste("between", names(structures)))), alter=c("less", alts), call = match.call(), ...)
}
else {
longueurresult <- nrepet * (length(sigma) - 2)
res <- testamova(distances, nrow(distances), nrow(distances), samples, nrow(samples), ncol(samples), structures, nrow(structures), ncol(structures), indic, sum(samples), nrepet, lesss[length(lesss)] / sum(samples), ddl, longueurresult)
permutationtests <- as.randtest(sim = res, obs = sigma[1], ...)
}
return(permutationtests)
}
|
writeBiclusterResults=function(fileName, bicResult, bicName, geneNames, arrayNames, append=FALSE, delimiter=" ")
{
write(bicName, file=fileName, append=append)
for(i in 1:bicResult@Number)
{
listar=row(matrix(bicResult@RowxNumber[,i]))[bicResult@RowxNumber[,i]==T]
listac=row(matrix(bicResult@NumberxCol[i,]))[bicResult@NumberxCol[i,]==T]
write(c(length(listar), length(listac)), file=fileName, ncolumns=2, append=TRUE, sep =delimiter)
write(geneNames[listar], file=fileName, ncolumns=length(listar), append=TRUE, sep =delimiter)
write(arrayNames[listac], file=fileName, ncolumns=length(listac), append=TRUE, sep =delimiter)
}
}
|
abundtest <- function (prabobj, teststat = "distratio", tuning = 0.25,
times = 1000, p.nb = NULL,
prange = c(0, 1), nperp = 4, step = 0.1, step2 = 0.01,
twostep = TRUE, species.fixed=TRUE, prab01=NULL,
groupvector=NULL,
sarestimate=prab.sarestimate(prabobj),
dist = prabobj$distance,
n.species = prabobj$n.species)
{
if (is.null(prab01))
prab01 <- prabinit(prabmatrix=toprab(prabobj),rows.are.species=FALSE,
distance="none",neighborhood=prabobj$nb)
if (is.null(p.nb) & prabobj$spatial){
ac <- autoconst(prab01, twostep = twostep, prange = prange,
nperp = nperp, step1 = step, step2 = step2,
species.fixed = species.fixed)
p.nb <- ac$pd
}
if (is.null(p.nb))
p.nb <- 1
statres <- rep(0, times)
if (teststat=="groups"){
groupvector <- as.factor(groupvector)
ng <- length(levels(groupvector))
lg <- levels(groupvector)
nsg <- numeric(0)
for (i in 1:ng) nsg[i] <- sum(groupvector==lg[i])
pa <- pb <- rep(1,ng)
groupinfo <- list(lg=lg,ng=ng,nsg=nsg)
statreslist <- list(overall=numeric(0),mean=numeric(0),
gr=matrix(0,nrow=ng,ncol=times))
}
else
groupinfo <- NULL
for (i in 1:times) {
cat("Simulation run ", i)
if (is.null(sarestimate) || teststat == "inclusions")
mat <- randpop.nb(neighbors=prabobj$nb,
p.nb = p.nb, n.species = prabobj$n.species,
vector.species = prab01$regperspec,
species.fixed = species.fixed,
pdf.regions =
prab01$specperreg/sum(prab01$specperreg),
count = FALSE)
else
mat <- regpop.sar(prabobj, prab01, sarestimate,
p.nb, count = FALSE)
if (teststat != "inclusions"){
if (dist == "jaccard")
distm <- jaccard(mat)
if (dist == "kulczynski")
distm <- kulczynski(mat)
if (dist == "qkulczynski")
distm <- qkulczynski(mat)
if (dist == "logkulczynski")
distm <- qkulczynski(mat,log.distance=TRUE)
}
else statres[i] <- incmatrix(mat)$ninc
if (teststat == "isovertice") {
test <- homogen.test(distm, ne = tuning)
statres[i] <- test$iv
}
if (teststat == "lcomponent")
statres[i] <- lcomponent(distm, ne = tuning)$lc
if (teststat == "mean")
statres[i] <- mean(as.dist(distm))
if (teststat == "distratio")
statres[i] <- distratio(distm, prop = tuning)$dr
if (teststat == "nn")
statres[i] <- nn(distm, ne = tuning)
if (teststat == "groups"){
slist <- specgroups(distm, groupvector, groupinfo)
statreslist$overall[i] <- slist$overall
statreslist$mean[i] <- mean(as.dist(distm))
statreslist$gr[,i] <- slist$gr
cat(" statistics value=", statreslist$overall[i], "\n")
}
else
cat(" statistics value=", statres[i], "\n")
}
regmat <- prabobj$prab
if (teststat != "inclusions") {
if (dist==prabobj$distance)
distm <- prabobj$distmat
else{
if (dist == "jaccard")
distm <- jaccard(regmat)
if (dist == "kulczynski")
distm <- kulczynski(regmat)
if (dist == "qkulczynski")
distm <- qkulczynski(regmat)
if (dist == "logkulczynski")
distm <- qkulczynski(regmat,log.distance=TRUE)
}
}
else {
regmat <- prab01$prab
test <- incmatrix(regmat)$ninc
p.above <- (1 + sum(statres >= test))/(1 + times)
p.below <- (1 + sum(statres <= test))/(1 + times)
datac <- test
tuning <- NA
}
if (teststat == "mean"){
test <- mean(as.dist(distm))
p.above <- (1 + sum(statres >= test))/(1 + times)
p.below <- (1 + sum(statres <= test))/(1 + times)
datac <- test
tuning <- NA
}
if (teststat == "isovertice") {
test <- homogen.test(distm, ne = tuning)
p.above <- (1 + sum(statres >= test$iv))/(1 + times)
p.below <- (1 + sum(statres <= test$iv))/(1 + times)
pb <- min(p.above, p.below) * 2
p.above <- max(p.above, p.below)
p.below <- pb
datac <- test$iv
tuning <- test$ne
}
if (teststat == "lcomponent") {
test <- lcomponent(distm, ne = tuning)
p.above <- (1 + sum(statres >= test$lc))/(1 + times)
p.below <- (1 + sum(statres <= test$lc))/(1 + times)
datac <- test$lc
tuning <- test$ne
}
if (teststat == "nn") {
test <- nn(distm, ne = tuning)
p.above <- (1 + sum(statres >= test))/(1 + times)
p.below <- (1 + sum(statres <= test))/(1 + times)
datac <- test
}
if (teststat == "distratio") {
test <- distratio(distm, prop = tuning)
p.above <- (1 + sum(statres >= test$dr))/(1 + times)
p.below <- (1 + sum(statres <= test$dr))/(1 + times)
datac <- test$dr
tuning <- test$prop
}
if (teststat=="groups"){
test <- specgroups(distm, groupvector, groupinfo)
testm <- mean(as.dist(distm))
p.above <- (1 + sum(statreslist$overall >= test$overall))/(1 + times)
p.below <- (1 + sum(statreslist$overall <= test$overall))/(1 + times)
p.m.above <- (1 + sum(statreslist$mean >= testm))/(1 + times)
p.m.below <- (1 + sum(statreslist$mean <= testm))/(1 + times)
for (i in 1:ng){
pa[i] <- (1 + sum(statreslist$gr[i,] >= test$gr[i]))/(1 + times)
pb[i] <- (1 + sum(statreslist$gr[i,] <= test$gr[i]))/(1 + times)
}
datac <- test
tuning <- NA
groupinfo$testm <- testm
groupinfo$pa <- pa
groupinfo$pb <- pb
groupinfo$pma <- p.m.above
groupinfo$pmb <- p.m.below
cat("Data value: ", datac$overall, "\n")
}
else
cat("Data value: ", datac, "\n")
if (!prabobj$spatial || is.null(sarestimate)) sarlambda <- NULL
else sarlambda <- sarestimate$lambda*sarestimate$nbweight
if (teststat=="groups")
results <- statreslist
else
results=statres
out <- list(results = results, p.above = p.above, p.below = p.below,
datac = datac, tuning = tuning, distance=dist, times=times,
teststat=teststat, pd=p.nb,
abund=!is.null(sarestimate),
sarlambda=sarlambda, sarestimate=sarestimate,
groupinfo=groupinfo)
class(out) <- "prabtest"
out
}
toprab <- function(prabobj)
prabobj$prab>0
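# toprab() reduces an abundance matrix to presence/absence; quick check with a
# mock object that only carries the $prab component the function uses.
toprab(list(prab = matrix(c(0, 2, 5, 0, 1, 0), nrow = 2)))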
build.nblist <- function(prabobj,prab01=NULL,style="C"){
if (is.null(prab01))
prab01 <- prabinit(prabmatrix=toprab(prabobj),rows.are.species=FALSE,
distance="none")
nblist <- list()
q <- 1
ijsum <- 0
for (i in 1:prabobj$n.species){
iregs <- (1:prabobj$n.regions)[prab01$prab[,i]]
for (j in 1:prab01$regperspec[i]){
nblist[[q]] <-
(1:length(iregs))[iregs %in% prabobj$nb[[iregs[j]]]]+ijsum
q <- q+1
}
ijsum <- ijsum+prab01$regperspec[i]
}
nblist <- lapply(nblist,as.integer)
nblist[sapply(nblist, length) == 0L] <- 0L
class(nblist) <- "nb"
out <- spdep::nb2listw(nblist,style=style,zero.policy=TRUE)
invisible(out)
}
prab.sarestimate <- function(abmat, prab01=NULL,sarmethod="eigen",
weightstyle="C",
quiet=TRUE, sar=TRUE,
add.lmobject=TRUE){
if (is.null(prab01))
prab01 <- prabinit(prabmatrix=toprab(abmat),rows.are.species=FALSE,
distance="none")
logabund <- log(abmat$prab[prab01$prab[,1],1])
species <- rep(1,sum(prab01$prab[,1]))
region <- (1:abmat$n.regions)[prab01$prab[,1]]
for (j in 2:abmat$n.species){
logabund <- c(logabund,log(abmat$prab[prab01$prab[,j],j]))
species <- c(species,rep(j,sum(prab01$prab[,j])))
region <- c(region,(1:abmat$n.regions)[prab01$prab[,j]])
}
species <- as.factor(species)
region <- as.factor(region)
abundreg <- data.frame(logabund,species,region,
row.names=sapply(1:length(species),toString))
if (sar){
nblistw <- build.nblist(abmat,prab01=prab01,style=weightstyle)
abundlm <- spatialreg::errorsarlm(logabund~region+species,data=abundreg,
listw=nblistw,quiet=quiet,zero.policy=TRUE,
method=sarmethod)
interc <- coef(abundlm)[2]
sigma <- sqrt(summary(abundlm)$s2)
regeffects <- c(0,coef(abundlm)[3:(abmat$n.regions+1)])
speffects <- c(0,coef(abundlm)[(abmat$n.regions+2):
(abmat$n.regions+abmat$n.species)])
lambda <- abundlm$lambda
nbweight <- mean(c(nblistw[[3]],recursive=TRUE))
if (!add.lmobject) abundlm <- NULL
out <- list(sar=sar,intercept=interc,sigma=sigma,regeffects=regeffects,
speffects=speffects,lambda=lambda,size=length(nblistw[[3]]),
nbweight=nbweight,lmobject=abundlm)
}
else{
abundlm <- lm(logabund~region+species,data=abundreg)
interc <- coef(abundlm)[1]
sigma <- summary(abundlm)$sigma
regeffects <- c(0,coef(abundlm)[2:abmat$n.regions])
speffects <- c(0,coef(abundlm)[(abmat$n.regions+1):
(abmat$n.regions+abmat$n.species-1)])
if (!add.lmobject) abundlm <- NULL
out <- list(sar=sar,intercept=interc,sigma=sigma,regeffects=regeffects,
speffects=speffects,lmobject=abundlm)
}
out
}
regpop.sar <- function(abmat, prab01=NULL,
sarestimate=prab.sarestimate(abmat),
p.nb=NULL,
vector.species=prab01$regperspec,
pdf.regions=prab01$specperreg/(sum(prab01$specperreg)),
count=FALSE){
if (is.null(prab01)){
prab01 <- prabinit(prabmatrix=toprab(abmat),rows.are.species=FALSE,
distance="none")
vector.species=prab01$regperspec
pdf.regions=prab01$specperreg/(sum(prab01$specperreg))
}
proble <- function(v,val) mean(v<=val, na.rm=TRUE)
logabund <- log(abmat$prab[prab01$prab[,1],1])
species <- rep(1,sum(prab01$prab[,1]))
region <- (1:abmat$n.regions)[prab01$prab[,1]]
for (j in 2:abmat$n.species){
logabund <- c(logabund,log(abmat$prab[prab01$prab[,j],j]))
species <- c(species,rep(j,sum(prab01$prab[,j])))
region <- c(region,(1:abmat$n.regions)[prab01$prab[,j]])
}
species <- as.factor(species)
region <- as.factor(region)
neighbors <- abmat$nb
m01 <- matrix(FALSE, ncol = abmat$n.species, nrow = abmat$n.regions)
out <- matrix(0, ncol = abmat$n.species, nrow = abmat$n.regions)
cdf.local <- cdf.regions <- c()
for (i in 1:abmat$n.regions) cdf.regions[i] <- sum(pdf.regions[1:i])
for (i in 1:abmat$n.species){
if (count)
cat("Species ", i, "\n")
spec.regind <- spec.neighb <- rep(FALSE, abmat$n.regions)
nsize <- vector.species[i]
if (is.null(p.nb)){
m01[,i] <- rep(FALSE,abmat$n.regions)
m01[sample(abmat$n.regions,nsize,prob=pdf.regions),i] <- rep(TRUE,nsize)
}
else{
r1 <- runif(1)
reg <- 1 + sum(r1 > cdf.regions)
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]]) spec.neighb[k] <- TRUE
m01[reg, i] <- TRUE
if (nsize > 1)
for (j in 2:nsize)
if (all(!spec.neighb) | all(pdf.regions[spec.neighb] <
1e-08) | all(spec.neighb | spec.regind) |
all(pdf.regions[!(spec.regind | spec.neighb)] < 1e-08)) {
nreg <- sum(!spec.regind)
pdf.local <- pdf.regions[!spec.regind]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg) cdf.local[l] <- sum(pdf.local[1:l])
r1 <- runif(1)
zz <- 1 + sum(r1 > cdf.local[1:nreg])
reg <- (1:abmat$n.regions)[!spec.regind][zz]
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]]) spec.neighb[k] <- !(spec.regind[k])
m01[reg, i] <- TRUE
}
else if (runif(1) < p.nb) {
regs <- !(spec.regind | spec.neighb)
nreg <- sum(regs)
pdf.local <- pdf.regions[regs]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg) cdf.local[l] <- sum(pdf.local[1:l])
r1 <- runif(1)
zz <- 1 + sum(r1 > cdf.local[1:nreg])
reg <- (1:abmat$n.regions)[regs][zz]
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]]) spec.neighb[k] <- !(spec.regind[k])
m01[reg, i] <- TRUE
}
else {
nreg <- sum(spec.neighb)
pdf.local <- pdf.regions[spec.neighb]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg) cdf.local[l] <- sum(pdf.local[1:l])
r1 <- runif(1)
zz <- 1 + sum(r1 > cdf.local[1:nreg])
reg <- (1:abmat$n.regions)[spec.neighb][zz]
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]]) spec.neighb[k] <- !(spec.regind[k])
m01[reg, i] <- TRUE
}
}
iregions <- (1:abmat$n.regions)[m01[,i]]
if (sarestimate$sar){
inbmatrix <- matrix(0,nrow=nsize,ncol=nsize)
for (j in 1:nsize)
inbmatrix[j,(1:nsize)[iregions %in%
abmat$nb[[iregions[j]]]]] <- 1
inbmatrix <- sarestimate$lambda*sarestimate$nbweight*inbmatrix
invmatrix <- solve(diag(nsize)-inbmatrix)
icov <- sarestimate$sigma^2*invmatrix %*% invmatrix
ierror <- mvtnorm::rmvnorm(1,sigma=icov)
}
else
ierror <- rnorm(nsize,sd=sarestimate$sigma)
for (j in 1:nsize){
abundmean <- sarestimate$intercept+sarestimate$speffects[i]+
sarestimate$regeffects[iregions[j]]
out[iregions[j],i] <- exp(abundmean+ierror[j])
}
}
out
}
specgroups <- function (distmat,groupvector, groupinfo)
{
distmat <- as.matrix(distmat)
nc <- ncol(distmat)
sgd <- mgd <- numeric(0)
sni <- 0
for (i in 1:groupinfo$ng){
gd <- distmat[groupvector==groupinfo$lg[i],
groupvector==groupinfo$lg[i]]
ni <- groupinfo$nsg[i]
ni <- ni*(ni-1)/2
sni <- sni+ni
sgd[i] <- sum(gd[upper.tri(gd)])
mgd[i] <- sgd[i]/ni
}
overall <- sum(sgd)/sni
out <- list(overall=overall,gr=mgd)
out
}
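# Hypothetical sketch: mean within-group dissimilarity for two groups of three
# species each, using a random distance matrix and a hand-built 'groupinfo'
# list in the shape the functions above expect (lg, ng, nsg).
set.seed(42)
d <- as.matrix(dist(matrix(rnorm(6 * 4), nrow = 6)))
gv <- factor(rep(c("A", "B"), each = 3))
ginfo <- list(lg = levels(gv), ng = nlevels(gv), nsg = as.numeric(table(gv)))
specgroups(d, gv, ginfo)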
|
Vm.spc <- function (obj, m, ...)
{
if (! inherits(obj, "spc")) stop("first argument must be object of class 'spc'")
m <- as.integer(m)
if ( any(is.na(m)) || any(m < 1) ) stop("second argument must be integer(s) >= 1")
idx <- match(m, obj$m)
Vm <- ifelse(is.na(idx), 0, obj$Vm[idx])
m.max <- attr(obj, "m.max")
if (m.max > 0) {
idx <- m > m.max
if (any(idx)) Vm[idx] <- NA
}
Vm
}
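# Hedged sketch using a hand-made stand-in for a zipfR 'spc' object (fields
# $m, $Vm plus an 'm.max' attribute); not a real frequency spectrum.
spc <- list(m = c(1, 2, 5), Vm = c(100, 40, 7))
class(spc) <- "spc"
attr(spc, "m.max") <- 0
Vm.spc(spc, m = c(1, 3, 5))   # 100 0 7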
|
require(bvpSolve)
require(rootSolve)
mathieu<- function(t, y, lambda = 15) {
list(c(y[2], -(lambda-10*cos(2*t)) * y[1]))
}
x = seq(0, pi, by = 0.01)
init <- c(1, 0)
sol <- bvpshoot(yini = init, yend = c(NA, 0), x = x,
func = mathieu, guess = NULL, extra = 15)
plot(sol[,1:2])
cost <- function(X) {
  sol <- bvptwp(yini = c(1, NA), yend = c(NA, 0),
                x = c(0, pi), parms = X,
                func = mathieu)
  return(sol[2, 3])
}
lam <- multiroot(f = cost, start = 15)
Sol<- bvptwp(yini = c(1,NA), yend = c(NA, 0),
x = x, parms = lam$root,
func = mathieu, atol = 1e-10)
lines(Sol, col = "red")
mathieu2<- function(t,y,parms)
{
list(c(y[2],
-(y[3]-10*cos(2*t))*y[1],
0 ))
}
init <- c(y = 1,dy = 0, lambda = NA)
sol1 <- bvpshoot(yini = init, yend = c(NA, 0, NA), x = x,
func = mathieu2, guess = 1)
plot(sol1)
jac <- function(x, y ,p) {
df <- matrix(nr = 3, nc = 3, 0)
df[1,2] <- 1
df[2,1] <- -(y[3]-10*cos(2*x))
df[2,3] <- -y[1]
df
}
xguess <- c(0, 1, 2*pi)
yguess <- matrix(nr = 3, rep(1, 9))
rownames(yguess) <- c("y", "dy", "lambda")
print(system.time(
sol1b <- bvptwp(yini = init, yend = c(NA, 0, NA), x = x,
func = mathieu2, jacfunc = jac, xguess = xguess,
yguess = yguess)
))
plot(sol1b, type="l",lwd=2)
xguess <- c(0,1,2*pi)
yguess <- matrix(nr=3,rep(17,9))
print(system.time(
sol2 <- bvpshoot(yini = init, yend = c(NA, 0, NA), x = x,
func = mathieu2, jacfunc =jac, guess = 17)
))
plot(sol2, type="l",lwd=2)
bound <- function(i,y,parms){
if (i ==1) return(y[1]-1)
if (i ==2) return(y[2])
if (i ==3) return(y[2])
}
print(system.time(
sol2b <- bvptwp(bound = bound, leftbc = 2,x=x,
func=mathieu2, jacfunc =jac, xguess = xguess,
yguess = yguess)
))
xguess <- c(0,1,2*pi)
yguess <- matrix(nr=3,rep(35,9))
print(system.time(
sol3 <- bvpshoot(bound = bound, leftbc = 2,x=x, atol=1e-9,
func=mathieu2, jacfunc =jac, guess=c(y=1,dy=0,lambda=35))
))
jacbound <- function(i,y,parms){
if (i ==1) return(c(1,0,0))
else return(c(0,1,0))
}
print(system.time(
sol3b <- bvptwp(bound = bound, jacbound = jacbound, leftbc = 2, x=x,
func=mathieu2, jacfunc =jac, xguess = xguess,
yguess = yguess)
))
xguess <- c(0,1,2*pi)
yguess <- matrix(nr=3,rep(105,9))
print(system.time(
sol4 <- bvpshoot(bound = bound, jacbound = jacbound, leftbc = 2,x=x,
func=mathieu2, jacfunc =jac, guess=c(y=1,dy=1,lam=105))
))
print(system.time(
sol4b <- bvptwp(bound = bound, jacbound = jacbound, leftbc = 2, x=x,
func=mathieu2, jacfunc =jac, xguess = xguess,
yguess = yguess)
))
par(mfrow=c(2,3))
plot(sol1,which="y", mfrow=NULL,type="l",lwd=2)
plot(sol2,which="y", mfrow=NULL,type="l",lwd=2)
plot(sol3,which="y", mfrow=NULL,type="l",lwd=2)
plot(sol1b,which="y", mfrow=NULL,type="l",lwd=2)
plot(sol2b,which=1, mfrow=NULL,type="l",lwd=2)
plot(sol3b,which=1, mfrow=NULL,type="l",lwd=2)
par(mfrow=c(1,1))
c(sol1[1,4],sol2[1,4],sol3[1,4],sol4[1,4])
c(sol1b[1,4],sol2b[1,4],sol3b[1,4],sol4b[1,4])
|
vkGetDbRegions <- function(
country_id,
q = NULL,
username = getOption("rvkstat.username"),
api_version = getOption("rvkstat.api_version"),
token_path = vkTokenPath(),
access_token = getOption("rvkstat.access_token")
)
{
if ( is.null(access_token) ) {
if ( Sys.getenv("RVK_API_TOKEN") != "" ) {
access_token <- Sys.getenv("RVK_API_TOKEN")
} else {
access_token <- vkAuth(username = username,
token_path = token_path)$access_token
}
}
  if ( inherits(access_token, "vk_auth") ) {
access_token <- access_token$access_token
}
  if(!is.null(q) && nchar(q) > 15){
    stop(paste0("In query ( argument q ) max length is 15 characters. You entered ", nchar(q), " characters!"))
}
result <- list()
offset <- 0
count <- 1000
last_iteration <- FALSE
while ( last_iteration == FALSE ) {
answer <- GET("https://api.vk.com/method/database.getRegions",
query = list(
country_id = country_id,
q = q,
offset = offset,
count = count,
access_token = access_token,
v = api_version
))
stop_for_status(answer)
dataRaw <- content(answer, "parsed", "application/json")
if(!is.null(dataRaw$error)){
stop(paste0("Error ", dataRaw$error$error_code," - ", dataRaw$error$error_msg))
}
result <- append(result, dataRaw$response$items)
if ( length( dataRaw$response$items ) < count ) {
last_iteration <- TRUE
}
offset <- offset + length( dataRaw$response$items )
Sys.sleep(0.5)
}
result <- bind_rows(result)
return(result)
}
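# Hypothetical call sketch: a valid VK API access token is required; the
# country_id and query below are placeholders for illustration only.
if (FALSE) {
  regions <- vkGetDbRegions(country_id = 1, q = "Moscow")
  head(regions)
}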
|
library(RProtoBuf)
isProto3 <- (RProtoBuf:::getProtobufLibVersion() >= 3000000)
if (!isProto3) exit_file("Need Proto3 for this test.")
if( !exists("SearchRequest", "RProtoBuf:DescriptorPool")) {
unitest.proto.file <- system.file("tinytest", "data", "proto3.proto", package="RProtoBuf")
readProtoFiles(file = unitest.proto.file)
}
q <- new(SearchRequest, query="abc", page_number=42L, result_per_page=77L)
expect_equal(q$query, "abc", msg="proto3 string")
expect_equal(q$page_number, 42L, msg="proto3 int")
expect_equal(q$result_per_page, 77L, msg="proto3 int again")
|
T3func <-
function(X,n,m,p,r1,r2,r3,start,conv,A,B,C,H){
X=as.matrix(X)
cputime=system.time({
ss=sum(X^2)
dys=0
if (start==0){
cat("Rational ORTHONORMALIZED start",fill=TRUE)
EIG=eigen(X%*%t(X))
A=EIG$vectors[,1:r1]
Z=permnew(X,n,m,p)
EIG=eigen(Z%*%t(Z))
B=EIG$vectors[,1:r2]
Z=permnew(Z,m,p,n)
EIG=eigen(Z%*%t(Z))
C=EIG$vectors[,1:r3]
}
if (start==1){
cat("Random ORTHONORMALIZED starts",fill=TRUE)
if (n>=r1){
A=orth(matrix(runif(n*r1,0,1),n,r1)-.5)
} else{
A=orth(matrix(runif(r1*r1,0,1),r1,r1)-.5)
A=A[1:n,]
}
if (m>=r2){
B=orth(matrix(runif(m*r2,0,1),m,r2)-.5)
} else{
B=orth(matrix(runif(r2*r2,0,1),r2,r2)-.5)
B=B[1:m,]
}
if (p>=r3){
C=orth(matrix(runif(p*r3,0,1),p,r3)-.5)
} else{
C=orth(matrix(runif(r3*r3,0,1),r3,r3)-.5)
C=C[1:p,]
}
}
if (start!=2){
Z=permnew(t(A)%*%X,r1,m,p)
Z=permnew(t(B)%*%Z,r2,p,r1)
H=permnew(t(C)%*%Z,r3,r1,r2)
}
if (start==2){
Z=B%*%permnew(A%*%H,n,r2,r3)
Z=C%*%permnew(Z,m,r3,n)
Z=permnew(Z,p,n,m)
f=sum((X-Z)^2)
} else{
f=ss-sum(H^2)
}
cat(paste("Tucker3 function value at start is ",f),fill=TRUE)
iter=0
fold=f+2*conv*f
while (fold-f>f*conv){
iter=iter+1
fold=f
Z=permnew(X,n,m,p)
Z=permnew(t(B)%*%Z,r2,p,n)
Z=permnew(t(C)%*%Z,r3,n,r2)
A=qr.Q(qr(Z%*%(t(Z)%*%A)),complete=FALSE)
Z=permnew(X,n,m,p)
Z=permnew(Z,m,p,n)
Z=permnew(t(C)%*%Z,r3,n,m)
Z=permnew(t(A)%*%Z,r1,m,r3)
B=qr.Q(qr(Z%*%(t(Z)%*%B)),complete=FALSE)
Z=permnew(t(A)%*%X,r1,m,p)
Z=permnew(t(B)%*%Z,r2,p,r1)
C=qr.Q(qr(Z%*%(t(Z)%*%C)),complete=FALSE)
Z=permnew(t(A)%*%X,r1,m,p)
Z=permnew(t(B)%*%Z,r2,p,r1)
H=permnew(t(C)%*%Z,r3,r1,r2)
f=ss-sum(H^2)
if ((iter%%10)==0){
cat(paste("Tucker3 function value after iteration ",iter," is ",f),fill=TRUE)
}
}
})
ss=sum(X^2)
fp=100*(ss-f)/ss
La=H%*%t(H)
Y=permnew(H,r1,r2,r3)
Lb=Y%*%t(Y)
Y=permnew(Y,r2,r3,r1)
Lc=Y%*%t(Y)
cat(paste("Tucker3 function value is",f,"after",iter,"iterations", sep=" "),fill=TRUE)
cat(paste("Fit percentage is",fp,"%",sep=" "),fill=TRUE)
cat(paste("Procedure used",(round(cputime[1],2)),"seconds", sep=" "),fill=TRUE)
out=list()
out$A=A
out$B=B
out$C=C
out$H=H
out$f=f
out$fp=fp
out$iter=iter
out$cputime=cputime[1]
out$La=La
out$Lb=Lb
out$Lc=Lc
return(out)
}
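# Usage sketch: T3func() expects an n x (m*p) matricised three-way array and
# relies on permnew() and orth(); here they are assumed to come from the
# ThreeWay package.
if (requireNamespace("ThreeWay", quietly = TRUE)) {
  permnew <- ThreeWay::permnew
  orth <- ThreeWay::orth
  n <- 6; m <- 5; p <- 4
  Xwide <- matrix(rnorm(n * m * p), n, m * p)
  fit <- T3func(Xwide, n, m, p, r1 = 2, r2 = 2, r3 = 2,
                start = 0, conv = 1e-6, A = NULL, B = NULL, C = NULL, H = NULL)
  fit$fp
}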
|
library(act)
examplecorpus@transcripts[[1]]@tiers$name[order(examplecorpus@transcripts[[1]]@tiers$position)]
examplecorpus@transcripts[[2]]@tiers$name[order(examplecorpus@transcripts[[2]]@tiers$position)]
sortVector <- c(examplecorpus@transcripts[[1]]@tiers$name,
examplecorpus@transcripts[[2]]@tiers$name)
sortVector <- sortVector[length(sortVector):1]
examplecorpus <- act::tiers_sort(x=examplecorpus,
sortVector=sortVector)
examplecorpus@transcripts[[1]]@tiers$name[order(examplecorpus@transcripts[[1]]@tiers$position)]
examplecorpus@transcripts[[2]]@tiers$name[order(examplecorpus@transcripts[[2]]@tiers$position)]
examplecorpus <- act::tiers_sort(x=examplecorpus,
sortVector=sortVector,
addMissingTiers=TRUE)
examplecorpus@transcripts[[1]]@tiers$name[order(examplecorpus@transcripts[[1]]@tiers$position)]
examplecorpus@transcripts[[2]]@tiers$name[order(examplecorpus@transcripts[[2]]@tiers$position)]
for (t in examplecorpus@transcripts) {
sortVector <- c(t@tiers$name, "newTier")
examplecorpus <- act::tiers_sort(x=examplecorpus,
sortVector=sortVector,
filterTranscriptNames=t@name,
addMissingTiers=TRUE)
}
examplecorpus@transcripts[[1]]@tiers
|
ProcTraj <-
function(lat = 51.5, lon = -45.1,
hour.interval = 1, name = "london",
start.hour = "00:00", end.hour="23:00",
met,
out,
hours = 12, height = 100,
hy.path, ID = 1,
dates, script.name="test",
add.new.column = F, new.column.name, new.column.value,
tz="GMT", clean.files=TRUE ) {
wd <- getwd()
script.extension <- ".sh"
OS <- "unix"
if(.Platform$OS.type == "windows"){
script.extension <- ".bat"
OS <- "windows"
}
hy.split.wd <- file.path(hy.path, "working" )
hy.split.wd <- normalizePath(hy.split.wd)
setwd(hy.split.wd)
folder.name = paste( "process_", ID, sep="")
process.working.dir <- file.path(hy.split.wd, folder.name)
dir.create(process.working.dir, showWarnings = FALSE)
process.working.dir <- normalizePath(process.working.dir)
setwd(process.working.dir)
hy.split.exec.dir <- file.path(hy.path, "exec", "hyts_std")
bdyfiles.path <- file.path(hy.path, "bdyfiles")
symb.link.files <- list.files(path = bdyfiles.path)
for( i in 1:length(symb.link.files) ){
from <- normalizePath( file.path(bdyfiles.path, symb.link.files[[i]] ) )
to <- file.path(process.working.dir, symb.link.files[[i]])
file.copy( from, to)
}
control.file.number <- 1
script.name <- paste(script.name, "_", ID, script.extension, sep="")
dates.and.times <-
laply( .data = dates,
.fun=function(d) {
start.day <- paste(d, start.hour, sep=" ")
end.day <- paste(d, end.hour, sep=" ")
posix.date <- seq(as.POSIXct(start.day, tz), as.POSIXct(end.day, tz), by = paste(hour.interval, "hour", sep=" "))
as.character(posix.date)
})
hour.interval <- paste( hour.interval, "hour", sep=" ")
for (i in 1:length(dates.and.times)) {
control.file <- "CONTROL"
date <- as.POSIXct(dates.and.times[i], tz=tz)
control.file.extension <- paste(as.character(ID), "_", control.file.number, sep="")
control.file <- paste(control.file, control.file.extension, sep=".")
year <- format(date, "%y")
Year <- format(date, "%Y")
month <- format(date, "%m")
day <- format(date, "%d")
hour <- format(date, "%H")
script.file <- file(script.name, "w")
if(OS == "unix"){
cat("
}
line <- paste("echo", year, month, day, hour, ">", control.file, sep=" ")
cat( line, file = script.file, sep = "\n")
line <- paste("echo 1 >>", control.file, sep=" " )
cat(line, file = script.file, sep="\n")
line <- paste("echo", lat, lon, height, ">>", control.file, sep=" ")
cat(line, file = script.file, sep="\n")
line <- paste("echo", hours, ">>", control.file, sep=" ")
cat(line, file = script.file, sep="\n")
line <- paste("echo 0 >> ", control.file, "\n",
"echo 10000.0 >> ", control.file, "\n",
"echo 3 >> ", control.file, "\n",
sep="")
cat(line, file = script.file, sep="")
months <- as.numeric(unique(format(date, "%m")))
months <- c(months, months + 1:2)
months <- months - 1
months <- months[months <= 12]
if (length(months) == 2) {
months <- c(min(months) - 1, months)
}
for (i in 1:3) {
AddMetFiles(months[i], Year, met, script.file, control.file)
}
line <- paste("echo ./ >>", control.file, sep=" ")
cat(line, file = script.file, sep="\n")
line <- paste("echo tdump", "_", ID, "_", year, month, day, hour,
" >> ", control.file, sep = "")
cat(line, file = script.file, sep="\n")
line <- paste(hy.split.exec.dir, control.file.extension, sep=" ")
cat(line, file = script.file, sep="\n")
close(script.file)
if(OS == "unix"){
system(paste0("sh ", script.name))
} else
{
system(paste0(script.name))
}
control.file.number <- control.file.number + 1
}
traj <- ReadFiles(process.working.dir, ID, dates.and.times, tz)
if (add.new.column == T){
if( !missing(new.column.name) & !missing(new.column.value) ){
traj[new.column.name] <- new.column.value
} else {
stop("Parameters 'new.column.name' and 'new.column.value' are not defined.")
}
}
if( !missing(out) ) {
file.name <- paste(out, name, Year, ".RData", sep = "")
save(traj, file = file.name)
}
setwd(hy.split.wd)
if(clean.files == T){
unlink(folder.name, recursive = TRUE)
}
setwd(wd)
traj
}
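# Hypothetical call sketch: a local HYSPLIT installation plus the AddMetFiles()
# and ReadFiles() helpers are required; all paths below are placeholders.
if (FALSE) {
  traj <- ProcTraj(lat = 51.5, lon = -0.1, name = "london",
                   met = "~/TrajData/", out = "~/TrajProc/",
                   hours = 96, height = 10,
                   hy.path = "~/hysplit/", dates = "2012-01-01")
}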
|
"ironsup"
|
`smooth_terms` <- function(obj, ...) {
UseMethod("smooth_terms")
}
`smooth_terms.gam` <- function(object, ...) {
lapply(object[["smooth"]], `[[`, "term")
}
`smooth_terms.gamm` <- function(object, ...) {
smooth_terms(object[["gam"]], ...)
}
`smooth_terms.mgcv.smooth` <- function(object, ...) {
object[["term"]]
}
`smooth_terms.fs.interaction` <- function(object, ...) {
object[["term"]]
}
`smooth_dim` <- function(object) {
UseMethod("smooth_dim")
}
`smooth_dim.gam` <- function(object) {
vapply(object[["smooth"]], FUN = `[[`, FUN.VALUE = integer(1), "dim")
}
`smooth_dim.gamm` <- function(object) {
smooth_dim(object[["gam"]])
}
`smooth_dim.mgcv.smooth` <- function(object) {
object[["dim"]]
}
`select_terms` <- function(object, terms) {
TERMS <- unlist(smooth_terms(object))
terms <- if (missing(terms)) {
TERMS
} else {
want <- terms %in% TERMS
if (any(!want)) {
msg <- paste("Terms:",
paste(terms[!want], collapse = ", "),
"not found in `object`")
message(msg)
}
terms[want]
}
terms
}
`select_smooth` <- function(object, smooth) {
SMOOTHS <- smooths(object)
if (missing(smooth)) {
stop("'smooth' must be supplied; eg. `smooth = 's(x2)'`")
}
if (length(smooth) > 1L) {
message(paste("Multiple smooths supplied. Using only first:", smooth[1]))
smooth <- smooth[1]
}
want <- grep(smooth, SMOOTHS, fixed = TRUE)
SMOOTHS[want]
}
`smooths` <- function(object) {
vapply(object[["smooth"]], FUN = `[[`, FUN.VALUE = character(1), "label")
}
`smooth_variable` <- function(smooth) {
check_is_mgcv_smooth(smooth)
smooth[["term"]]
}
`smooth_factor_variable` <- function(smooth) {
check_is_mgcv_smooth(smooth)
smooth[["fterm"]]
}
`smooth_label` <- function(smooth) {
check_is_mgcv_smooth(smooth)
smooth[["label"]]
}
`is_mgcv_smooth` <- function(smooth) {
inherits(smooth, "mgcv.smooth")
}
`is_mrf_smooth` <- function(smooth) {
    inherits(smooth, what = "mrf.smooth")
}
`check_is_mgcv_smooth` <- function(smooth) {
out <- is_mgcv_smooth(smooth)
if (identical(out, FALSE)) {
stop("Object passed to 'smooth' is not a 'mgcv.smooth'.")
}
invisible(out)
}
`is.gamm` <- function(object) {
inherits(object, "gamm")
}
`is.gam` <- function(object) {
inherits(object, "gam")
}
`get_smooth` <- function(object, term) {
if (is.gamm(object)) {
object <- object[["gam"]]
}
smooth <- object[["smooth"]][which_smooth(object, term)]
if (identical(length(smooth), 1L)) {
smooth <- smooth[[1L]]
}
smooth
}
`old_get_smooth` <- function(object, term) {
if (is.gamm(object)) {
object <- object[["gam"]]
}
smooth <- object[["smooth"]][old_which_smooth(object, term)]
if (identical(length(smooth), 1L)) {
smooth <- smooth[[1L]]
}
smooth
}
`get_smooths_by_id` <- function(object, id) {
if (is.gamm(object)) {
object <- object[["gam"]]
}
object[["smooth"]][id]
}
`get_by_smooth` <- function(object, term, level) {
if (is.gamm(object)) {
object <- object[["gam"]]
}
take <- old_which_smooth(object, term)
S <- object[["smooth"]][take]
is_by <- vapply(S, is_factor_by_smooth, logical(1L))
if (any(is_by)) {
if (missing(level)) {
stop("No value provided for argument 'level':\n Getting a factor by-variable smooth requires a 'level' be supplied.")
}
level <- as.character(level)
levs <- vapply(S, `[[`, character(1L), "by.level")
take <- match(level, levs)
if (is.na(take)) {
msg <- paste0("Invalid 'level' for smooth '", term, "'. Possible levels are:\n")
msg <- paste(msg, paste(strwrap(paste0(shQuote(levs), collapse = ", "),
prefix = " ", initial = ""),
collapse = "\n"))
stop(msg)
}
S <- S[[take]]
} else {
stop("The requested smooth '", term, "' is not a by smooth.")
}
S
}
`which_smooths` <- function(object, ...) {
UseMethod("which_smooths")
}
`which_smooths.default` <- function(object, ...) {
stop("Don't know how to identify smooths for <", class(object)[[1L]], ">",
call. = FALSE)
}
`which_smooths.gam` <- function(object, terms, ...) {
ids <- unique(unlist(lapply(terms, function(x, object) { which_smooth(object, x) },
object = object)))
if (identical(length(ids), 0L)) {
stop("None of the terms matched a smooth.")
}
ids
}
`which_smooths.bam` <- function(object, terms, ...) {
ids <- unique(unlist(lapply(terms, function(x, object) { which_smooth(object, x) },
object = object)))
if (identical(length(ids), 0L)) {
stop("None of the terms matched a smooth.")
}
ids
}
`which_smooths.gamm` <- function(object, terms, ...) {
ids <- unique(unlist(lapply(terms, function(x, object) { which_smooth(object, x) },
object = object[["gam"]])))
if (identical(length(ids), 0L)) {
stop("None of the terms matched a smooth.")
}
ids
}
`which_smooth` <- function(object, term) {
if (is.gamm(object)) {
object <- object[["gam"]]
}
smooths <- smooths(object)
which(term == smooths)
}
`old_which_smooth` <- function(object, term) {
if (is.gamm(object)) {
object <- object[["gam"]]
}
smooths <- smooths(object)
grep(term, smooths, fixed = TRUE)
}
`n_smooths` <- function(object) {
UseMethod("n_smooths")
}
`n_smooths.default` <- function(object) {
if (!is.null(object[["smooth"]])) {
return(length(object[["smooth"]]))
}
stop("Don't know how to identify smooths for <", class(object)[[1L]], ">",
call. = FALSE)
}
`n_smooths.gam` <- function(object) {
length(object[["smooth"]])
}
`n_smooths.gamm` <- function(object) {
length(object[["gam"]][["smooth"]])
}
`n_smooths.bam` <- function(object) {
length(object[["smooth"]])
}
`get_vcov` <- function(object, unconditional = FALSE, frequentist = FALSE,
term = NULL, by_level = NULL) {
V <- if (frequentist) {
object$Ve
} else if (unconditional) {
if (is.null(object$Vc)) {
warning("Covariance corrected for smoothness uncertainty not available.\nUsing uncorrected covariance.")
object$Vp
} else {
object$Vc
}
} else {
object$Vp
}
if (!is.null(term)) {
if (length(term) > 1L) {
message("Supplied more than 1 'term'; using only the first")
term <- term[1L]
}
term <- select_smooth(object, term)
smooth <- get_smooth(object, term)
start <- smooth$first.para
end <- smooth$last.para
para.seq <- start:end
V <- V[para.seq, para.seq, drop = FALSE]
}
V
}
`has_s` <- function(terms) {
grepl("^s\\(.+\\)$", terms)
}
`add_s` <- function(terms) {
take <- ! has_s(terms)
terms[take] <- paste("s(", terms[take], ")", sep = "")
terms
}
`is_re_smooth` <- function(smooth) {
check_is_mgcv_smooth(smooth)
inherits(smooth, "random.effect")
}
`is_fs_smooth` <- function(smooth) {
check_is_mgcv_smooth(smooth)
inherits(smooth, "fs.interaction")
}
`fix_offset` <- function(model, newdata, offset_val = NULL) {
m.terms <- names(newdata)
p.terms <- attr(terms(model[["pred.formula"]]), "term.labels")
tt <- terms(model)
resp <- names(attr(tt, "dataClasses"))[attr(tt, "response")]
Y <- m.terms == resp
if (any(Y)) {
m.terms <- m.terms[!Y]
}
off <- is_offset(m.terms)
if (any(off)) {
ind <- m.terms %in% p.terms
off_var <- grep(p.terms[!ind], m.terms[off])
if (any(off_var)) {
names(newdata)[which(names(newdata) %in% m.terms)][off] <- p.terms[!ind][off_var]
}
if (!is.null(offset_val)) {
newdata[, p.terms[!ind][off_var]] <- offset_val
}
}
newdata
}
`is_offset` <- function(terms) {
grepl("offset\\(", terms)
}
`parametric_terms` <- function(model, ...) {
UseMethod("parametric_terms")
}
`parametric_terms.default` <- function(model, ...) {
stop("Don't know how to identify parametric terms from <",
class(model)[[1L]], ">", call. = FALSE)
}
`parametric_terms.gam` <- function(model, ...) {
tt <- model$pterms
if (is.list(tt)) {
labs <- unlist(lapply(tt, function(x) labels(delete.response(x))))
names(labs) <- unlist(lapply(seq_along(labs),
function(i, labs) {
if (i > 1L) {
paste0(labs[[i]], ".", i-1)
} else {
labs[[i]]}
}, labs))
labs
} else {
        if (length(attr(tt, "term.labels")) > 0L) {
tt <- delete.response(tt)
labs <- labels(tt)
names(labs) <- labs
} else {
labs <- character(0)
}
}
labs
}
`by_smooth_failure` <- function(object) {
msg <- paste("Hmm, something went wrong identifying the requested smooth. Found:\n",
paste(vapply(object, FUN = smooth_label,
FUN.VALUE = character(1L)),
collapse = ', '),
"\nNot all of these are 'by' variable smooths. Contact Maintainer.")
msg
}
`rep_first_factor_value` <- function(f, n) {
stopifnot(is.factor(f))
levs <- levels(f)
factor(rep(levs[1L], length.out = n), levels = levs)
}
`seq_min_max` <- function(x, n) {
if (is.factor(x)) {
factor(levels(x), levels = levels(x))
} else {
seq(from = min(x, na.rm = TRUE), to = max(x, na.rm = TRUE),
length.out = n)
}
}
`seq_min_max_eps` <- function(x, n, order,
type = c("forward", "backward", "central"), eps) {
minx <- min(x, na.rm = TRUE)
maxx <- max(x, na.rm = TRUE)
heps <- eps / 2
deps <- eps * 2
type <- match.arg(type)
if (isTRUE(all.equal(order, 1L))) {
minx <- switch(type,
forward = minx,
backward = minx + eps,
central = minx + heps)
maxx <- switch(type,
forward = maxx - eps,
backward = maxx,
central = maxx - heps)
} else {
minx <- switch(type,
forward = minx,
backward = minx + deps,
central = minx + eps)
maxx <- switch(type,
forward = maxx - deps,
backward = maxx,
central = maxx - eps)
}
seq(from = minx, to = maxx, length.out = n)
}
`data_class` <- function(df) {
vapply(df, data.class, character(1L))
}
`factor_var_names` <- function(df) {
ind <- is_factor_var(df)
result <- if (any(ind)) {
names(df)[ind]
} else {
NULL
}
result
}
`is_factor_var` <- function(df) {
result <- vapply(df, is.factor, logical(1L))
result
}
`is_numeric_var` <- function(df) {
result <- vapply(df, is.numeric, logical(1L))
result
}
`shift_values` <- function(df, h, i, FUN = '+') {
FUN <- match.fun(FUN)
result <- df
if (any(i)) {
result[, !i] <- FUN(result[, !i], h)
} else {
result <- FUN(result, h)
}
result
}
`coverage_normal` <- function(level) {
if (level <= 0 || level >= 1 ) {
stop("Invalid 'level': must be 0 < level < 1", call. = FALSE)
}
qnorm((1 - level) / 2, lower.tail = FALSE)
}
`coverage_t` <- function(level, df) {
if (level <= 0 || level >= 1 ) {
stop("Invalid 'level': must be 0 < level < 1", call. = FALSE)
}
qt((1 - level) / 2, df = df, lower.tail = FALSE)
}
`get_family_rd` <- function(object) {
if (inherits(object, "glm")) {
fam <- family(object)
} else {
fam <- object[["family"]]
}
fam <- fix.family.rd(fam)
if (is.null(fam[["rd"]])) {
stop("Don't yet know how to simulate from family <",
fam[["family"]], ">", call. = FALSE)
}
fam[["rd"]]
}
`check_user_select_smooths` <- function(smooths, select = NULL,
partial_match = FALSE,
model_name = NULL) {
lenSmo <- length(smooths)
select <- if (!is.null(select)) {
lenSel <- length(select)
if (is.numeric(select)) {
if (lenSmo < lenSel) {
stop("Trying to select more smooths than are in the model.")
}
if (any(select > lenSmo)) {
stop("One or more indices in 'select' > than the number of smooths in the model.")
}
l <- rep(FALSE, lenSmo)
l[select] <- TRUE
l
} else if (is.character(select)) {
take <- if (isTRUE(partial_match)) {
if (length(select) != 1L) {
stop("When 'partial_match' is 'TRUE', 'select' must be a single string")
}
grepl(select, smooths, fixed = TRUE)
} else {
smooths %in% select
}
if (sum(take) < length(select)) {
if (all(!take)) {
stop("Failed to match any smooths in model",
ifelse(is.null(model_name), "",
paste0(" ", model_name)),
".\nTry with 'partial_match = TRUE'?",
call. = FALSE)
} else {
stop("Some smooths in 'select' were not found in model ",
ifelse(is.null(model_name), "", model_name),
":\n\t",
paste(select[!select %in% smooths], collapse = ", "),
call. = FALSE)
}
}
take
} else if (is.logical(select)) {
if (lenSmo != lenSel) {
stop("When 'select' is a logical vector, 'length(select)' must equal\nthe number of smooths in the model.")
}
select
} else {
stop("'select' is not numeric, character, or logical.")
}
} else {
rep(TRUE, lenSmo)
}
select
}
`smooth_coefs` <- function(smooth) {
if(!is_mgcv_smooth(smooth)) {
stop("Not an mgcv smooth object")
}
start <- smooth[["first.para"]]
end <- smooth[["last.para"]]
seq(from = start, to = end, by = 1L)
}
`load_mgcv` <- function() {
res <- suppressWarnings(requireNamespace("mgcv", quietly = TRUE))
if (!res) {
stop("Unable to load mgcv. Is it installed?", .call = FALSE)
}
attached <- "package:mgcv" %in% search()
if(!attached) {
suppressPackageStartupMessages(attachNamespace("mgcv"))
}
invisible(res)
}
`is_gamm4` <- function(object) {
out <- FALSE
if (!inherits(object, "list")) {
return(out)
}
nms <- names(object)
if (! all(c("gam","mer") %in% nms)) {
return(out)
}
if (! (inherits(object[["mer"]], "lmerMod") &&
inherits(object[["gam"]], "gam"))) {
return(out)
}
out <- TRUE
out
}
`is_gamm` <- function(object) {
inherits(object, "gamm")
}
`is_factor_term` <- function(object, term, ...) {
UseMethod("is_factor_term", object)
}
`is_factor_term.terms` <- function(object, term, ...) {
if (missing(term)) {
stop("Argument 'term' must be provided.")
}
facs <- attr(object, "factors")
out <- if (term %in% colnames(facs)) {
facs <- facs[, term, drop = FALSE]
take <- rownames(facs)[as.logical(facs)]
data_types <- attr(object, 'dataClasses')[take]
all(data_types %in% c("factor", "character"))
} else {
NULL
}
out
}
`is_factor_term.gam` <- function(object, term, ...) {
object <- terms(object)
is_factor_term(object, term, ...)
}
`is_factor_term.bam` <- function(object, term, ...) {
object <- terms(object)
is_factor_term(object, term, ...)
}
`is_factor_term.gamm` <- function(object, term, ...) {
object <- terms(object$gam)
is_factor_term(object, term, ...)
}
`is_factor_term.list` <- function(object, term, ...) {
if (!is_gamm4(object)) {
if (all(vapply(object, inherits, logical(1), "terms"))) {
out <- any(unlist(lapply(object, is_factor_term, term)))
} else {
stop("Don't know how to handle generic list objects.")
}
} else {
object <- terms(object$gam)
out <- is_factor_term(object, term, ...)
}
out
}
`term_variables` <- function(object, term, ...) {
UseMethod("term_variables")
}
`term_variables.terms` <- function(object, term, ...) {
if (missing(term)) {
stop("'term' must be supplied.")
}
facs <- attr(object, "factors")[ , term]
names(facs)[as.logical(facs)]
}
`term_variables.gam` <- function(object, term, ...) {
object <- terms(object)
term_variables(object, term = term, ...)
}
`term_variables.bam` <- function(object, term, ...) {
object <- terms(object)
term_variables(object, term, ...)
}
`mgcv_by_smooth_labels` <- function(smooth, by_var, level) {
paste0(smooth, ":", by_var, level)
}
vars_from_label <- function(label) {
if (length(label) > 1) {
label <- rep(label, length.out = 1)
warning("'label' must be a length 1 vector; using 'label[1]' only.")
}
vars <- gsub("^[[:alnum:]]{1,2}\\.?[[:digit:]]*\\(([[:graph:]]+)\\):?(.*)$",
"\\1",
label)
vec_c(strsplit(vars, ",")[[1L]])
}
`transform_fun` <- function(object, fun = NULL , ...) {
UseMethod("transform_fun")
}
`transform_fun.evaluated_smooth` <- function(object, fun = NULL, ...) {
if (!is.null(fun)) {
fun <- match.fun(fun)
object[["est"]] <- fun(object[["est"]])
if (!is.null(object[["upper"]])) {
object[["upper"]] <- fun(object[["upper"]])
}
if (!is.null(object[["lower"]])) {
object[["lower"]] <- fun(object[["lower"]])
}
}
object
}
`transform_fun.smooth_estimates` <- function(object, fun = NULL, ...) {
if (!is.null(fun)) {
fun <- match.fun(fun)
object[["est"]] <- fun(object[["est"]])
if (!is.null(object[["upper"]])) {
object[["upper"]] <- fun(object[["upper"]])
}
if (!is.null(object[["lower"]])) {
object[["lower"]] <- fun(object[["lower"]])
}
}
object
}
`transform_fun.mgcv_smooth` <- function(object, fun = NULL, ...) {
if (!is.null(fun)) {
fun <- match.fun(fun)
object <- mutate(object,
across(all_of(c("est", "lower_ci", "upper_ci")),
.fns = fun))
}
object
}
`transform_fun.evaluated_parametric_term` <- function(object, fun = NULL, ...) {
if (!is.null(fun)) {
fun <- match.fun(fun)
object <- mutate(object,
across(all_of(c("est", "lower", "upper")),
.fns = fun))
}
object
}
`transform_fun.parametric_effects` <- function(object, fun = NULL, ...) {
if (!is.null(fun)) {
fun <- match.fun(fun)
object <- mutate(object,
across(any_of(c("partial")),
.fns = fun))
}
object
}
`transform_fun.tbl_df` <- function(object, fun = NULL, column = NULL, ...) {
if (is.null(column)) {
stop("'column' to modify must be supplied.")
}
if (!is.null(fun)) {
fun <- match.fun(fun)
object <- mutate(object,
across(all_of(column), .fns = fun))
}
object
}
`norm_minus_one_to_one` <- function(x) {
abs_x <- abs(x)
sign_x <- sign(x)
    minx <- 0
    maxx <- max(abs_x, na.rm = TRUE)
    abs_x <- (abs_x - minx) / (maxx - minx)
abs_x * sign_x
}
`delete_response` <- function(model, data = NULL, model_frame = TRUE) {
if (is.null(data)) {
if (is.null(model[["model"]])) {
stop("`data` must be supplied if not available from 'model'")
} else {
data <- model[["model"]]
}
}
tt <- terms(model[["pred.formula"]])
tt <- delete.response(tt)
out <- model.frame(tt, data = data)
if(identical(model_frame, FALSE)) {
attr(out, "terms") <- NULL
}
out
}
`term_names` <- function(object, ...) {
UseMethod("term_names")
}
`term_names.gam` <- function(object, ...) {
tt <- object[["pred.formula"]]
if (is.null(tt)) {
stop("`object` does not contain `pred.formula`; is this is fitted GAM?",
call. = FALSE)
}
tt <- terms(tt)
attr(tt, "term.labels")
}
`term_names.mgcv.smooth` <- function(object, ...) {
tt <- object[["term"]]
if (is.null(tt)) {
stop("`object` does not contain `term`; is this is an {mgcv} smooth?",
call. = FALSE)
}
if (is_by_smooth(object)) {
tt <- append(tt, by_variable(object))
}
tt
}
`term_names.gamm` <- function(object, ...) {
object <- object[["gam"]]
term_names(object)
}
`terms_in_smooth` <- function(smooth) {
check_is_mgcv_smooth(smooth)
sm_terms <- smooth[["term"]]
sm_by <- by_variable(smooth)
if (sm_by == "NA") {
sm_by <- NULL
}
c(sm_terms, sm_by)
}
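# Usage sketch for two of the self-contained helpers above:
seq_min_max(c(0.2, 1.5, 3.7), n = 5)                        # even grid over the range
check_user_select_smooths(smooths = c("s(x0)", "s(x1)", "s(x2)"),
                          select = "s(x1)")                 # logical vector picking s(x1)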
|
dbSendQuery_SQLiteConnection_character <- function(conn, statement, params = NULL, ...) {
statement <- enc2utf8(statement)
if (!is.null(conn@ref$result)) {
warning("Closing open result set, pending rows", call. = FALSE)
dbClearResult(conn@ref$result)
stopifnot(is.null(conn@ref$result))
}
rs <- new("SQLiteResult",
sql = statement,
ptr = result_create(conn@ptr, statement),
conn = conn,
bigint = conn@bigint
)
on.exit(dbClearResult(rs), add = TRUE)
if (!is.null(params)) {
dbBind(rs, params)
}
on.exit(NULL, add = FALSE)
conn@ref$result <- rs
rs
}
setMethod("dbSendQuery", c("SQLiteConnection", "character"), dbSendQuery_SQLiteConnection_character)
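# Usage sketch via the generic (standard DBI workflow with an in-memory db):
if (requireNamespace("RSQLite", quietly = TRUE)) {
  con <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
  res <- DBI::dbSendQuery(con, "SELECT 1 AS x")
  print(DBI::dbFetch(res))
  DBI::dbClearResult(res)
  DBI::dbDisconnect(con)
}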
|
data("dataLatentIV")
context("Runability - latentIV - Runability")
test_that("Works with intercept", {
expect_silent(latentIV(formula = y~P, data = dataLatentIV, verbose=FALSE))
})
test_that("Verbose produces output", {
expect_message(latentIV(formula = y~P, data = dataLatentIV, verbose=TRUE),
regexp = "No start parameters were given. The linear model")
})
test_that("Works without intercept", {
expect_silent(res.no.i <- latentIV(formula = y~P-1, data = dataLatentIV, verbose=FALSE))
expect_false("(Intercept)" %in% coef(res.no.i))
expect_false("(Intercept)" %in% rownames(coef(suppressWarnings(summary(res.no.i)))))
expect_silent(res.w.i <- latentIV(formula = y~P, data = dataLatentIV, verbose=FALSE))
expect_false(isTRUE(all.equal(coef(res.no.i)["P"], coef(res.w.i)["P"])))
expect_false(isTRUE(all.equal(coef(res.no.i), coef(res.w.i))))
})
test_that("Works with start.params given", {
expect_silent(latentIV(formula = y~P, start.params = c("(Intercept)"=2.5, P=-0.5), data = dataLatentIV, verbose=FALSE))
})
test_that("Works with start.params and transformations", {
expect_silent(latentIV(formula = y~I(P+1), start.params = c("(Intercept)"=2.5, "I(P + 1)"=-0.5), data = dataLatentIV, verbose=FALSE))
})
test_that("Same results with start.params swapped", {
expect_silent(res.lat.1 <- latentIV(formula = y~P, start.params = c("(Intercept)"=2.5, P=-0.5), data = dataLatentIV, verbose=FALSE))
expect_silent(res.lat.2 <- latentIV(formula = y~P, start.params = c(P=-0.5, "(Intercept)"=2.5), data = dataLatentIV, verbose=FALSE))
expect_identical(coef(res.lat.1), coef(res.lat.2))
})
test_that("Fails graciously for bad start.params", {
expect_error(latentIV(formula = y~P, start.params = c("(Intercept)"=10e99, P=10e99), data = dataLatentIV, verbose=FALSE),
regexp = "Failed to optimize the log-likelihood function with error")
})
test_that("Works with function in lhs", {
expect_silent(latentIV(formula = I(y+1)~P, data = dataLatentIV, verbose = FALSE))
})
test_that("Works with all endo transformed", {
expect_silent(latentIV(formula = y~I(P/2), data = dataLatentIV, verbose = FALSE))
})
test_that("Works with proper optimx.args", {
expect_silent(latentIV(optimx.args = list(itnmax = 1000), formula = y~P, data = dataLatentIV, verbose = FALSE))
expect_silent(latentIV(optimx.args = list(itnmax = 1000, control=list(kkttol=0.01)), formula = y~P, data = dataLatentIV, verbose = FALSE))
})
test_that("Summary prints about SE unavailable", {
expect_silent(res.latent <- latentIV(formula = y~P, start.params = c("(Intercept)"=1, P=9999), verbose = FALSE,data = dataLatentIV))
expect_warning(res.sum <- summary(res.latent), regexp = "For some parameters the standard error could not be calculated.")
expect_output(print(res.sum), all = FALSE,
regexp = "because the Std. Errors are unavailable")
expect_true(anyNA(coef(res.sum)))
})
test_that("Stops if lm fails for start",{
expect_error(latentIV(y~K, data=data.frame(y=1:100, K=rep(2, 100))),
regexp = "The start parameters could not be derived by fitting a linear model")
})
|
join_rows <- function(x_key, y_key, type = c("inner", "left", "right", "full"), na_equal = TRUE, error_call = caller_env()) {
type <- arg_match(type)
y_split <- vec_group_loc(y_key)
tryCatch(
matches <- vec_match(x_key, y_split$key, na_equal = na_equal),
vctrs_error_incompatible_type = function(cnd) {
rx <- "^[^$]+[$]"
x_name <- sub(rx, "", cnd$x_arg)
y_name <- sub(rx, "", cnd$y_arg)
bullets <- c(
glue("Can't join on `x${x_name}` x `y${y_name}` because of incompatible types."),
i = glue("`x${x_name}` is of type <{x_type}>>.", x_type = vec_ptype_full(cnd$x)),
i = glue("`y${y_name}` is of type <{y_type}>>.", y_type = vec_ptype_full(cnd$y))
)
abort(bullets, call = error_call)
}
)
y_loc <- y_split$loc[matches]
if (type == "left" || type == "full") {
if (anyNA(matches)) {
y_loc <- vec_assign(y_loc, vec_equal_na(matches), list(NA_integer_))
}
}
x_loc <- seq_len(vec_size(x_key))
x_loc <- rep(x_loc, lengths(y_loc))
y_loc <- index_flatten(y_loc)
y_extra <- integer()
if (type == "right" || type == "full") {
miss_x <- !vec_in(y_key, x_key, na_equal = na_equal)
if (!na_equal) {
miss_x[is.na(miss_x)] <- TRUE
}
if (any(miss_x)) {
y_extra <- seq_len(vec_size(y_key))[miss_x]
}
}
list(x = x_loc, y = y_loc, y_extra = y_extra)
}
index_flatten <- function(x) {
unlist(x, recursive = FALSE, use.names = FALSE)
}
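# Usage sketch, assuming vctrs, rlang and glue are attached (they provide
# vec_group_loc(), vec_match(), arg_match(), glue() etc. used above):
if (requireNamespace("vctrs", quietly = TRUE)) {
  library(vctrs); library(rlang); library(glue)
  join_rows(c(1, 2, 3), c(2, 3, 3, 4), type = "left")
}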
|
expected <- eval(parse(text="structure(list(Y = NULL, B = NULL, V = NULL, N = NULL), .Names = c(\"Y\", \"B\", \"V\", \"N\"), terms = quote(Y ~ B + V + N + V:N), row.names = 2:72, class = \"data.frame\")"));
test(id=0, code={
argv <- eval(parse(text="list(structure(list(Y = c(130L, 157L, 174L, 117L, 114L, 161L, 141L, 105L, 140L, 118L, 156L, 61L, 91L, 97L, 100L, 70L, 108L, 126L, 149L, 96L, 124L, 121L, 144L, 68L, 64L, 112L, 86L, 60L, 102L, 89L, 96L, 89L, 129L, 132L, 124L, 74L, 89L, 81L, 122L, 64L, 103L, 132L, 133L, 70L, 89L, 104L, 117L, 62L, 90L, 100L, 116L, 80L, 82L, 94L, 126L, 63L, 70L, 109L, 99L, 53L, 74L, 118L, 113L, 89L, 82L, 86L, 104L, 97L, 99L, 119L, 121L), B = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L), .Label = c(\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\"), class = \"factor\"), V = structure(c(3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L), .Label = c(\"Golden.rain\", \"Marvellous\", \"Victory\"), class = \"factor\"), N = structure(c(2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L), .Label = c(\"0.0cwt\", \"0.2cwt\", \"0.4cwt\", \"0.6cwt\"), class = \"factor\")), .Names = c(\"Y\", \"B\", \"V\", \"N\"), terms = quote(Y ~ B + V + N + V:N), row.names = 2:72, class = \"data.frame\"), structure(list(Y = NULL, B = NULL, V = NULL, N = NULL), .Names = c(\"Y\", \"B\", \"V\", \"N\"), terms = quote(Y ~ B + V + N + V:N), row.names = 2:72, class = \"data.frame\"))"));
.Internal(`copyDFattr`(argv[[1]], argv[[2]]));
}, o=expected);
|
list_to_dfr <- function(lst) {
col_names <- character()
invisible(lapply(lst, function(x) {col_names <<- union(col_names, names(x))}))
df <- data.frame(matrix(NA, nrow = length(lst), ncol = length(col_names)))
colnames(df) <- col_names
counter <- 1
f <- function(x) {
for (field in names(x)) {
      df[counter, field] <<- x[[field]]
}
counter <<- counter + 1
}
invisible(lapply(lst, f))
return(df)
}
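# Usage sketch: rows are filled by field name, missing fields remain NA.
list_to_dfr(list(list(a = 1, b = 2), list(b = 3, c = 4)))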
|
"PointsUpdatemp" <-
function(X,coeff,nbrs,newnbrs,index,remove,pointsin,weights,lengths){
r<-which(pointsin==remove);
N<-length(pointsin)
pos<-NULL
for (i in 1:length(nbrs)){
pos[i]<-min(which(newnbrs==nbrs[i]))
}
if ((r>=2)&(r<=(N-1))){
lengths[index]<-as.row(lengths[index])
weights<-as.row(weights)
lengths[index]<-lengths[index]+lengths[r]*weights[pos]
}
else{
if(r==1){
lengths[2]<-lengths[2]+lengths[1]
}
if(r==N){
lengths[N-1]<-lengths[N-1]+lengths[N]
}
}
alpha<-matrix(0,1,length(nbrs))
if (length(nbrs)>=2){
alpha<-lengths[r]*lengths[index]/(sum(lengths[index]^2))
for (i in 1:length(nbrs)){
coeff[[pointsin[index][i]]]<-coeff[[pointsin[index][i]]]+alpha[i]*coeff[[remove]]
}
}
else{
q<-which(pointsin==nbrs)
alpha<-lengths[r]/lengths[q]
coeff[[pointsin[q]]]<-coeff[[pointsin[q]]]+alpha*coeff[[remove]]
}
return(list(coeff=coeff,lengths=lengths,r=r,N=N,weights=weights,alpha=alpha))
}
|
ok_proj6 <- function() {
FALSE
}
|
TaskGeneratorMoons = R6Class("TaskGeneratorMoons",
inherit = TaskGenerator,
public = list(
initialize = function() {
ps = ps(
sigma = p_dbl(0, default = 1, tags = "required")
)
ps$values = list(sigma = 1)
super$initialize(id = "moons", task_type = "classif", param_set = ps,
man = "mlr3::mlr_task_generators_moons")
},
plot = function(n = 200L, pch = 19L, ...) {
tab = private$.generate_obj(n)
plot(tab$x1, tab$x2, pch = pch, col = tab$y)
}
),
private = list(
.generate_obj = function(n) {
sigma = self$param_set$values$sigma
n1 = n %/% 2L
n2 = n - n1
mu = c(rep(-2.5, n1), rep(2.5, n2))
x = c(runif(n1, 0, pi), runif(n2, pi, 2 * pi))
data.table(
y = factor(rep(c("A", "B"), c(n1, n2)), levels = c("A", "B")),
x1 = 5 * cos(x) + rnorm(n, mean = mu, sd = sigma),
x2 = 10 * sin(x) + rnorm(n, mean = mu, sd = sigma)
)
},
.generate = function(n) {
tab = private$.generate_obj(n)
TaskClassif$new(sprintf("%s_%i", self$id, n), tab, target = "y")
}
)
)
mlr_task_generators$add("moons", TaskGeneratorMoons)
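# Usage sketch via the mlr3 dictionary entry registered above:
if (requireNamespace("mlr3", quietly = TRUE)) {
  library(mlr3)
  gen <- tgen("moons")
  gen$param_set$values$sigma <- 0.5
  task <- gen$generate(100)
  task
}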
|
test_that("can flatten input", {
expect_equal(rray_flatten(1:5), new_array(1:5))
x <- matrix(1:6, 2)
expect_equal(rray_flatten(x), new_array(as.vector(x)))
x <- array(1:8, c(2, 2, 2))
expect_equal(rray_flatten(as.vector(x)), new_array(as.vector(x)))
})
test_that("rray class is kept", {
expect_equal(rray_flatten(rray(1)), rray(1))
})
test_that("can keep names with 1D objects", {
x <- rray(1, dim_names = list("foo"))
expect_equal(rray_dim_names(rray_flatten(x)), rray_dim_names(x))
})
test_that("can keep names with higher dim objects", {
x <- rray(1:2, c(2, 1), dim_names = list(c("foo", "foofy"), "bar"))
expect_equal(rray_dim_names(rray_flatten(x)), list(c("foo", "foofy")))
x_t <- t(x)
expect_equal(rray_dim_names(rray_flatten(x_t)), list(NULL))
})
test_that("can flatten NULL", {
expect_equal(rray_flatten(NULL), NULL)
})
test_that("can flatten 0 length input", {
expect_equal(rray_flatten(numeric()), new_array(numeric()))
})
|
library(testthat)
update_expectation <- FALSE
test_that("Smoke Test", {
testthat::skip_on_cran()
start_clean_result <- REDCapR:::clean_start_simple(batch=TRUE)
project <- start_clean_result$redcap_project
})
test_that("read-insert-and-update", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/test-project/read-insert-and-update.R"
start_clean_result <- REDCapR:::clean_start_simple(batch=TRUE)
project <- start_clean_result$redcap_project
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
returned_object1 <- project$read(raw_or_label="raw"),
regexp = expected_outcome_message
)
returned_object1$data$bmi <- NULL
returned_object1$data$age <- NULL
returned_object1$data$address <- 1000 + seq_len(nrow(returned_object1$data))
returned_object1$data$telephone <- sprintf("(405) 321-%1$i%1$i%1$i%1$i", seq_len(nrow(returned_object1$data)))
project$write(ds=returned_object1$data)
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
returned_object2 <- project$read(raw_or_label="raw"),
regexp = expected_outcome_message
)
returned_object2$data$bmi <- NULL
returned_object2$data$age <- NULL
if (update_expectation) save_expected(returned_object2$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object2$data, expected=expected_data_frame, label="The returned data.frame should be correct")
expect_equal(returned_object2$status_code, expected="200")
expect_true(returned_object2$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object2$fields_collapsed=="", "A subset of fields was not requested.")
expect_match(returned_object2$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object2$success)
})
|
pcor_sum <- function(..., iter = NULL, relations){
collect_objects <- list(...)
groups <- length(collect_objects)
partial_sum_i <- list()
if(is.null(iter)){
iter <- collect_objects[[1]]$iter
}
count_sums <- strsplit(relations, "\\;")[[1]]
n_sums <- length(count_sums)
remove_space <- gsub("[[:space:]]", "", count_sums)
remove_plus <- gsub("\\+", replacement = " ", remove_space)
each_sum <- strsplit(remove_plus, split = "[[:space:]]")
if (n_sums > 2) {
stop("there is only support for 'at most' two sums")
}
if (groups == 1) {
if(!all(c("estimate", "default") %in% class(collect_objects[[1]]))){
stop("the object must be of class 'estimate'")
}
samps <- posterior_samples(collect_objects[[1]])[1:iter,]
if (n_sums == 1) {
sums <- lapply(1:1, function(x) {
sum_i <- eval(parse(text = paste0("samps[,'",
each_sum[[x]], "']",
collapse = "+")))
})
names(sums) <- remove_space
diff <- NULL
} else {
sums <- lapply(1:2, function(x) {
sum_i <- eval(parse(text = paste0(
"samps[,'",
each_sum[[x]], "']",
collapse = "+"
)))
})
diff <- sums[[1]] - sums[[2]]
names(sums) <- remove_space
}
} else if (groups == 2) {
if (!all(c("estimate", "default") %in% class(collect_objects[[1]]))) {
stop("the object must be of class 'estimate'")
}
if (!all(c("estimate", "default") %in% class(collect_objects[[2]]))) {
stop("the object must be of class 'estimate'")
}
if (n_sums > 1) {
stop("only one sum can be specified when there are two groups")
}
sums <- lapply(1:2, function(g) {
samps <- posterior_samples(collect_objects[[g]])[1:iter, ]
sapply(1:1, function(x) {
eval(parse(text = paste0(
"samps[,'",
each_sum[[x]], "']",
collapse = "+"
)))
})
})
names(sums) <- paste0("g", 1:2, ": ", remove_space)
diff <- sums[[1]] - sums[[2]]
} else{
stop("too many groups. only two is currently support")
}
partial_sum_i <- list(post_diff = diff,
post_sums = sums,
n_sums = n_sums,
iter = iter)
returned_object <- partial_sum_i
class(returned_object) <- c("BGGM", "pcor_sum")
return(returned_object)
}
print_pcor_sum <- function(x, cred = 0.95, row_names = TRUE){
cat("BGGM: Bayesian Gaussian Graphical Models \n")
cat("--- \n")
cat("Network Stats: Posterior Sum\n")
cat("Posterior Samples:", x$iter, "\n")
cat("--- \n")
cat("Estimates \n\n")
lb <- (1 - cred) / 2
ub <- 1 - lb
if(is.null(x$post_diff)){
cat("Sum:", "\n")
res <- round(
data.frame(Post.mean = mean(x$post_sums[[1]]),
Post.sd = sd(x$post_sums[[1]]),
Cred.lb = quantile(x$post_sums[[1]], probs = lb),
Cred.ub = quantile(x$post_sums[[1]], probs = ub)
), 3)
if(isTRUE(row_names)){
rownames(res) <- names(x$post_sums)
} else {
rownames(res) <- NULL
}
print(res, row.names = row_names)
} else {
cat("Sum:", "\n")
dat_i <- list()
for(i in 1:2){
dat_i[[i]] <- round(
data.frame(Post.mean = mean(x$post_sums[[i]]),
Post.sd = sd(x$post_sums[[i]]),
Cred.lb = quantile(x$post_sums[[i]], probs = lb),
Cred.ub = quantile(x$post_sums[[i]], probs = ub)
), 3)
}
diff_res <- round(
data.frame(Post.mean = mean(x$post_diff),
Post.sd = sd(x$post_diff),
Cred.lb = quantile(x$post_diff, probs = lb),
Cred.ub = quantile(x$post_diff, probs = ub),
Prob.greater = mean(x$post_diff > 0),
Prob.less = mean(x$post_diff < 0)
), 3)
res <- do.call(rbind.data.frame, dat_i)
if(isTRUE(row_names)){
rownames(res) <- names(x$post_sums)
} else {
rownames(res) <- NULL
}
rownames(diff_res) <- NULL
print(res, row.names = row_names)
cat("--- \n\n")
cat("Difference:\n")
cat(paste(names(x$post_sums)[1]), "-", paste(names(x$post_sums)[2]), "\n\n")
print(diff_res, row.names = FALSE)
cat("--- \n")
}
}
plot.pcor_sum <- function(x,
fill = "
...){
if(is.null( x$post_diff)){
g1 <- ggplot(data.frame(x = x$post_sums[[1]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[1])
if(length( x$post_sums) == 2){
g2 <- ggplot(data.frame(x = x$post_sums[[2]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[2])
list(g1 = g1, g2 = g2)
} else {
list(g1 = g1)
}
} else {
g1 <- ggplot(data.frame(x = x$post_sums[[1]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[1])
g2 <- ggplot(data.frame(x = x$post_sums[[2]]),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab(names(x$post_sums)[2])
diff <- ggplot(data.frame(x = x$post_diff),
aes(x = x)) +
geom_histogram(color = "white",
fill = fill) +
xlab("Difference")
suppressWarnings( list(g1 = g1, g2 = g2, diff = diff))
}
}
|
MI.random.times <-
function (time.points) {
indFixed <- object$derivForm$indFixed
indRandom <- object$derivForm$indRandom
t.max <- if (is.null(tt <- attr(time.points, "t.max"))) max(obs.times) else tt
max.visits <- if (is.null(tt <- attr(time.points, "max.visits"))) max(ni) * 5 else tt
id.GK <- rep(TRUE, length(object$x$id.GK))
y.missO <- y
logT.missO <- logT
d.missO <- d
X.missO <- X
Z.missO <- Z
idT.missO <- object$x$idT
if (parameterization %in% c("value", "both")) {
Xtime.missO <- Xtime
Ztime.missO <- Ztime
WintF.vl.missO <- WintF.vl
Ws.intF.vl.missO <- Ws.intF.vl
}
if (parameterization %in% c("slope", "both")) {
Xtime.deriv.missO <- Xtime.deriv
Ztime.deriv.missO <- Ztime.deriv
WintF.sl.missO <- WintF.sl
Ws.intF.sl.missO <- Ws.intF.sl
}
if (method %in% c("weibull-PH-GH", "weibull-AFT-GH")) {
P.missO <- P
log.st.missO <- log.st
if (parameterization %in% c("value", "both")) {
Xs.missO <- Xs
Zs.missO <- Zs
}
if (parameterization %in% c("slope", "both")) {
Xs.deriv.missO <- Xs.deriv
Zs.deriv.missO <- Zs.deriv
}
}
if (method == "spline-PH-GH") {
P.missO <- P
if (parameterization %in% c("value", "both")) {
Xs.missO <- Xs
Zs.missO <- Zs
}
if (parameterization %in% c("slope", "both")) {
Xs.deriv.missO <- Xs.deriv
Zs.deriv.missO <- Zs.deriv
}
W2s.missO <- W2s
W2.missO <- W2
}
if (method == "piecewise-PH-GH") {
st.missO <- st
ind.D.missO <- ind.D
ind.K.missO <- ind.K
wkP.missO <- wkP
if (parameterization %in% c("value", "both")) {
Xs.missO <- Xs
Zs.missO <- Zs
}
if (parameterization %in% c("slope", "both")) {
Xs.deriv.missO <- Xs.deriv
Zs.deriv.missO <- Zs.deriv
}
}
WW.missO <- WW
n.missO <- length(unique(idT.missO))
id.miss <- id3.miss <- id
D <- object$coefficients$D
diag.D <- ncz != ncol(D)
list.thetas <- if (object$method == "weibull-PH-GH" || object$method == "weibull-AFT-GH") {
list(betas = object$coefficients$betas,
log.sigma = log(object$coefficients$sigma),
gammas = object$coefficients$gammas,
alpha = object$coefficients$alpha,
Dalpha = object$coefficients$Dalpha,
log.sigma.t = log(object$coefficients$sigma.t),
D = if (diag.D) log(D) else chol.transf(D))
} else if (object$method == "spline-PH-GH") {
list(betas = object$coefficients$betas,
log.sigma = log(object$coefficients$sigma),
gammas = object$coefficients$gammas,
alpha = object$coefficients$alpha,
Dalpha = object$coefficients$Dalpha,
gammas.bs = object$coefficients$gammas.bs,
D = if (diag.D) log(D) else chol.transf(D))
} else if (object$method == "piecewise-PH-GH") {
list(betas = object$coefficients$betas,
log.sigma = log(object$coefficients$sigma),
gammas = object$coefficients$gammas,
alpha = object$coefficients$alpha,
Dalpha = object$coefficients$Dalpha,
log.xi = log(object$coefficients$xi),
D = if (diag.D) log(D) else chol.transf(D))
}
if (!is.null(object$scaleWB))
list.thetas$log.sigma.t <- NULL
list.thetas <- list.thetas[!sapply(list.thetas, is.null)]
thetas <- unlist(as.relistable(list.thetas))
V.thetas <- vcov(object)
EBs <- ranef(object, postVar = TRUE)
Var <- attr(EBs, "postVar")
EBs <- proposed.b <- EBs
U <- time.points$y[, 1]
X.vs <- time.points$x
ncx.vs <- ncol(X.vs)
id.onevisit <- as.vector(which(tapply(id, id, length) == 1))
id.mrvisits <- as.vector(which(tapply(id, id, length) > 1))
ev.vs <- tapply(id, id, length) - 1
ev.vs <- as.vector(ev.vs[ev.vs > 0])
id.vs.fl <- rep(id.mrvisits, ev.vs)
n.vs.one <- length(id.onevisit)
n.vs.more <- length(id.mrvisits)
n.vs <- n.vs.one + n.vs.more
thetas.vs <- c(time.points$coefficients$betas,
log(time.points$coefficients$scale),
log(time.points$coefficients$shape),
log(time.points$coefficients$var.frailty))
Var.vs <- vcov(time.points)
p.vs <- length(thetas.vs)
current.b <- b.new <- EBs
environment(posterior.b) <- environment()
fitted.valsM.lis <- resid.valsM.lis <- vector("list", M)
old <- options(warn = (-1))
on.exit(options(old))
for (m in 1:M) {
curr.y <- tapply(object$y$y, object$id, function (x) x[length(x)])
new.visit <- last.visit <- tapply(obs.times, object$id, function (x) x[length(x)])
thetas.new <- mvrnorm(1, thetas, V.thetas)
thetas.new <- relist(thetas.new, skeleton = list.thetas)
betas.new <- thetas.new$betas
sigma.new <- exp(thetas.new$log.sigma)
gammas.new <- thetas.new$gammas
alpha.new <- thetas.new$alpha
Dalpha.new <- thetas.new$Dalpha
D.new <- thetas.new$D
D.new <- if (diag.D) exp(D.new) else chol.transf(D.new)
if (object$method == "weibull-PH-GH" || object$method == "weibull-AFT-GH")
sigma.t.new <- if (is.null(object$scaleWB)) {
exp(thetas.new$log.sigma.t)
} else {
object$scaleWB
}
if (object$method == "spline-PH-GH")
gammas.bs.new <- thetas.new$gammas.bs
if (object$method == "piecewise-PH-GH")
xi.new <- exp(thetas.new$log.xi); Q <- object$x$Q
thetas.vs.new <- mvrnorm(1, thetas.vs, Var.vs)
betas.vs.new <- thetas.vs.new[seq_len(ncx.vs)]
scale.vs.new <- exp(thetas.vs.new[ncx.vs + 1])
shape.vs.new <- exp(thetas.vs.new[ncx.vs + 2])
var.fr.new <- exp(thetas.vs.new[ncx.vs + 3])
eta.yx <- as.vector(X.missO %*% betas.new)
eta.yxT <- as.vector(Xtime.missO %*% betas.new)
eta.tw <- as.vector(WW.missO %*% gammas.new)
dmvt.current <- dmvt.proposed <- numeric(n.missO)
for (i in seq_len(n.missO)) {
proposed.b[i, ] <- rmvt(1, EBs[i, ], Var[[i]], 4)
tt <- dmvt(rbind(current.b[i, ], proposed.b[i, ]), EBs[i, ], Var[[i]], 4, TRUE)
dmvt.current[i] <- tt[1]
dmvt.proposed[i] <- tt[2]
}
a <- pmin(exp(posterior.b(proposed.b) + dmvt.current - posterior.b(current.b) - dmvt.proposed), 1)
ind <- runif(n.missO) <= a
b.new[ind, ] <- proposed.b[ind, ]
current.b <- b.new
omega.new <- numeric(n)
omega.new[id.onevisit] <- rgamma(n.vs.one, 1/var.fr.new, 1/var.fr.new)
exp.eta.vs <- exp(X.vs %*% betas.vs.new)
omega.new[id.mrvisits] <- rgamma(n.vs.more, 1/var.fr.new + ev.vs,
1/var.fr.new + scale.vs.new * tapply(c(U^shape.vs.new * exp.eta.vs), id.vs.fl, sum))
fitted.valsM <- resid.valsM <- Visit.Times <- matrix(as.numeric(NA), n, max.visits)
Z.missM.lis <- vector("list", max.visits)
ii <- 1
while (any(new.visit[!is.na(new.visit)] < t.max)) {
data.vs <- time.points$data[!duplicated(time.points$data[[time.points$nam.id]]), ]
if (!is.null(nam <- attr(time.points, "prev.y")))
data.vs[[nam]] <- curr.y
mf.vs <- model.frame(time.points$terms, data = data.vs, na.action = NULL)
X.vs.new <- model.matrix(formula(time.points), mf.vs)[, -1, drop = FALSE]
mu.vs <- c(log(scale.vs.new) + X.vs.new %*% betas.vs.new) + log(omega.new) / shape.vs.new
u.new <- rweibull(n, shape.vs.new, 1 / exp(mu.vs))
Visit.Times[, ii] <- new.visit <- last.visit + u.new
ind.tmax <- new.visit > t.max
dataM <- object$data.id
dataM[object$timeVar] <- pmax(new.visit - object$y$lag, 0)
mfX <- model.frame(object$termsYx, data = dataM, na.action = NULL)
mfZ <- model.frame(object$termsYz, data = dataM, na.action = NULL)
X.missM <- model.matrix(object$formYx, mfX)
Z.missM.lis[[ii]] <- Z.missM <- model.matrix(object$formYz, mfZ)
fitted.valsM[, ii] <- if (type == "Marginal" || type == "stand-Marginal") {
as.vector(X.missM %*% object$coefficients$betas)
} else {
as.vector(X.missM %*% object$coefficients$betas + rowSums(Z.missM * b.new))
}
mu <- as.vector(X.missM %*% betas.new + rowSums(Z.missM * b.new))
y.new <- rnorm(n, mu, sigma.new)
resid.valsM[, ii] <- y.new - fitted.valsM[, ii]
Visit.Times[ind.tmax, ii] <- fitted.valsM[ind.tmax, ii] <- resid.valsM[ind.tmax, ii] <- as.numeric(NA)
curr.y <- y.new
last.visit <- new.visit
ii <- ii + 1
if (ii > max.visits)
break
}
na.ind <- colSums(is.na(fitted.valsM)) != n
Visit.Times <- Visit.Times[, na.ind]
fitted.valsM <- fitted.valsM[, na.ind]
resid.valsM <- resid.valsM[, na.ind]
Z.missM <- do.call(rbind, Z.missM.lis[na.ind])
id2.miss <- rep(1:n, ncol(resid.valsM))
if (type == "stand-Subject")
resid.valsM <- resid.valsM / object$coefficients$sigma
if (type == "stand-Marginal") {
resid.valsM <- unlist(lapply(split(cbind(Z.missM, c(resid.valsM)), id2.miss), function (y) {
M <- matrix(y, ncol = ncz + 1)
z <- M[, - (ncz + 1), drop = FALSE]
res <- M[, ncz + 1]
nz <- nrow(M)
result <- rep(as.numeric(NA), nz)
na.ind <- !is.na(res)
if (all(!na.ind)) {
result
} else {
out <- z[na.ind, , drop = FALSE] %*% D %*% t(z[na.ind, , drop = FALSE])
diag(out) <- diag(out) + object$coefficients$sigma^2
result[na.ind] <- solve(chol(out)) %*% res[na.ind]
result
}
}))
}
fitted.valsM.lis[[m]] <- fitted.valsM
resid.valsM.lis[[m]] <- resid.valsM
}
names(resid.vals) <- names(fitted.vals) <- names(y)
names(fitted.valsM.lis) <- names(resid.valsM.lis) <- paste("m", seq_len(M), sep = "")
fitted.valsM.lis <- lapply(fitted.valsM.lis, function (x) {
dimnames(x) <- list(1:n, paste("time", seq_len(ncol(x)), sep = ""))
x
})
resid.valsM.lis <- if (type == "stand-Marginal") {
resid.valsM.lis
} else {
lapply(resid.valsM.lis, function (x) {
dimnames(x) <- list(1:n, paste("time", seq_len(ncol(x)), sep = ""))
x
})
}
list("fitted.values" = fitted.vals, "residuals" = resid.vals, "fitted.valsM" = fitted.valsM.lis,
"mean.resid.valsM" = NULL, "resid.valsM" = resid.valsM.lis,
"dataM" = NULL)
}
|
skip_if_no_numpy <- function() {
have_numpy <- reticulate::py_module_available("numpy")
if (!have_numpy)
testthat::skip("numpy not available for testing")
}
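# Usage sketch inside a test block: the test is skipped cleanly when numpy
# is not importable from the active Python environment.
testthat::test_that("numpy round trip", {
  skip_if_no_numpy()
  np <- reticulate::import("numpy")
  testthat::expect_equal(np$sum(c(1, 2, 3)), 6)
})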
|
hetcalc <- function(TE, seTE,
method.tau, method.tau.ci,
TE.tau, level.hetstats, subgroup, control,
id = NULL) {
Ccalc <- function(x) {
res <- (sum(x, na.rm = TRUE) -
sum(x^2, na.rm = TRUE) /
sum(x, na.rm = TRUE))
res
}
by <- !missing(subgroup)
sel.noInf <- !is.infinite(TE) & !is.infinite(seTE)
TE <- TE[sel.noInf]
seTE <- seTE[sel.noInf]
if (!is.null(id))
id <- id[sel.noInf]
if (by)
subgroup <- subgroup[sel.noInf]
sel.noNA <- !(is.na(TE) | is.na(seTE))
TE <- TE[sel.noNA]
seTE <- seTE[sel.noNA]
if (!is.null(id))
id <- id[sel.noNA]
if (by)
subgroup <- subgroup[sel.noNA]
noHet <- all(!sel.noNA) || sum(sel.noNA) < 2
allNA <- all(!sel.noNA)
if (!(is.null(TE.tau)) & method.tau == "DL") {
w.fixed <- 1 / seTE^2
w.fixed[is.na(w.fixed)] <- 0
Q <- sum(w.fixed * (TE - TE.tau)^2, na.rm = TRUE)
df.Q <- sum(!is.na(seTE)) - 1
pval.Q <- pvalQ(Q, df.Q)
if (df.Q == 0)
tau2 <- NA
else if (round(Q, digits = 18) <= df.Q)
tau2 <- 0
else
tau2 <- (Q - df.Q) / Ccalc(w.fixed)
se.tau2 <- lower.tau2 <- upper.tau2 <- NA
tau <- sqrt(tau2)
lower.tau <- upper.tau <- NA
sign.lower.tau <- sign.upper.tau <- method.tau.ci <- ""
}
else {
if (noHet) {
if (allNA)
Q <- NA
else
Q <- 0
df.Q <- 0
pval.Q <- pvalQ(Q, df.Q)
tau2 <- NA
se.tau2 <- lower.tau2 <- upper.tau2 <- NA
tau <- sqrt(tau2)
lower.tau <- upper.tau <- NA
sign.lower.tau <- sign.upper.tau <- method.tau.ci <- ""
}
else {
if (is.null(id)) {
mf0 <- runNN(rma.uni,
list(yi = TE, sei = seTE, method = method.tau,
control = control))
tau2 <- mf0$tau2
se.tau2 <- mf0$se.tau2
}
else {
idx <- seq_along(TE)
mf0 <-
runNN(rma.mv,
list(yi = TE, V = seTE^2, method = method.tau,
random = as.call(~ 1 | id / idx),
control = control,
data = data.frame(id, idx)),
warn = FALSE)
tau2 <- mf0$sigma2
se.tau2 <- NA
}
tau <- sqrt(tau2)
Q <- mf0$QE
df.Q <- mf0$k - mf0$p
pval.Q <- pvalQ(Q, df.Q)
if (df.Q < 2)
method.tau.ci <- ""
else if (!is.null(id) & method.tau.ci != "")
method.tau.ci <- "PL"
if (method.tau.ci == "BJ")
ci0 <-
confint.rma.uni(
runNN(rma.uni,
list(yi = TE, sei = seTE, weights = 1 / seTE^2,
method = "GENQ", control = control)))
else if (method.tau.ci == "J")
ci0 <-
confint.rma.uni(
runNN(rma.uni,
list(yi = TE, sei = seTE, weights = 1 / seTE,
method = "GENQ", control = control)))
else if (method.tau.ci == "QP")
ci0 <- confint.rma.uni(mf0)
else if (method.tau.ci == "PL")
ci0 <- confint.rma.mv(mf0)
}
}
useFE <- FALSE
if (by) {
if (is.numeric(subgroup))
subgroup <- as.factor(subgroup)
if (is.null(id)) {
if (length(unique(subgroup)) == 1)
mf1 <-
runNN(rma.uni,
list(yi = TE, sei = seTE, method = method.tau,
control = control))
else {
mf1 <-
try(runNN(rma.uni,
list(yi = TE, sei = seTE, method = method.tau,
mods = as.call(~ subgroup), control = control,
data = data.frame(TE, seTE, subgroup))),
silent = TRUE)
if ("try-error" %in% class(mf1))
if (grepl(paste0("Number of parameters to be estimated is ",
"larger than the number of observations"),
mf1)) {
useFE <- TRUE
mf1 <-
runNN(rma.uni,
list(yi = TE, sei = seTE, method = "FE",
mods = as.call(~ subgroup), control = control,
data = data.frame(TE, seTE, subgroup)))
}
else
stop(mf1)
}
tau2.resid <- mf1$tau2
se.tau2.resid <- mf1$se.tau2
}
else {
idx <- seq_along(TE)
if (length(unique(subgroup)) == 1)
mf1 <-
runNN(rma.mv,
list(yi = TE, V = seTE^2, method = method.tau,
random = as.call(~ 1 | id / idx),
control = control,
data = data.frame(id, idx)),
warn = FALSE)
else {
mf1 <-
try(
runNN(rma.mv,
list(yi = TE, V = seTE^2, method = method.tau,
random = as.call(~ 1 | id / idx),
mods = as.call(~ subgroup), control = control,
data = data.frame(TE, seTE, subgroup, id, idx)),
warn = FALSE),
silent = TRUE)
if ("try-error" %in% class(mf1))
if (grepl(paste0("Number of parameters to be estimated is ",
"larger than the number of observations"),
mf1)) {
useFE <- TRUE
mf1 <-
runNN(rma.mv,
list(yi = TE, V = seTE^2,
method = "FE",
random = as.call(~ 1 | id / idx),
mods = as.call(~ subgroup), control = control,
data = data.frame(TE, seTE, subgroup, id, idx)),
warn = FALSE)
}
else
stop(mf1)
}
tau2.resid <- mf1$sigma2
se.tau2.resid <- NA
}
tau.resid <- sqrt(tau2.resid)
Q.resid <- mf1$QE
df.Q.resid <- mf1$k - mf1$p
pval.Q.resid <- pvalQ(Q.resid, df.Q.resid)
if (df.Q < 2 || useFE)
method.tau.ci <- ""
else if (!is.null(id) & method.tau.ci != "")
method.tau.ci <- "PL"
if (method.tau.ci == "BJ")
ci1 <-
confint.rma.uni(
runNN(rma.uni,
list(yi = TE, sei = seTE, weights = 1 / seTE^2,
method = "GENQ",
mods = as.call(~ subgroup), control = control,
data = data.frame(TE, seTE, subgroup))))
else if (method.tau.ci == "J")
ci1 <-
confint.rma.uni(
runNN(rma.uni,
list(yi = TE, sei = seTE, weights = 1 / seTE,
method = "GENQ",
mods = as.call(~ subgroup), control = control,
data = data.frame(TE, seTE, subgroup))))
else if (method.tau.ci == "QP")
ci1 <- confint.rma.uni(mf1)
else if (method.tau.ci == "PL")
ci1 <- confint.rma.mv(mf1)
}
H <- calcH(Q, df.Q, level.hetstats)
I2 <- isquared(Q, df.Q, level.hetstats)
if (by) {
H.resid <- calcH(Q.resid, df.Q.resid, level.hetstats)
I2.resid <- isquared(Q.resid, df.Q.resid, level.hetstats)
}
if (method.tau.ci %in% c("QP", "BJ", "J")) {
lower.tau2 <- ci0$random["tau^2", "ci.lb"]
upper.tau2 <- ci0$random["tau^2", "ci.ub"]
lower.tau <- ci0$random["tau", "ci.lb"]
upper.tau <- ci0$random["tau", "ci.ub"]
sign.lower.tau <- ci0$lb.sign
sign.upper.tau <- ci0$ub.sign
}
else if (method.tau.ci == "PL") {
if (any(names(ci0) == "random")) {
lower.tau2 <- c(NA, ci0$random["sigma^2.2", "ci.lb"])
upper.tau2 <- c(NA, ci0$random["sigma^2.2", "ci.ub"])
lower.tau <- c(NA, ci0$random["sigma.2", "ci.lb"])
upper.tau <- c(NA, ci0$random["sigma.2", "ci.ub"])
sign.lower.tau <- c("", ci0$lb.sign)
sign.upper.tau <- c("", ci0$ub.sign)
}
else {
lower.tau2 <- c(ci0[[1]]$random["sigma^2.1", "ci.lb"],
ci0[[2]]$random["sigma^2.2", "ci.lb"])
upper.tau2 <- c(ci0[[1]]$random["sigma^2.1", "ci.ub"],
ci0[[2]]$random["sigma^2.2", "ci.ub"])
lower.tau <- c(ci0[[1]]$random["sigma.1", "ci.lb"],
ci0[[2]]$random["sigma.2", "ci.lb"])
upper.tau <- c(ci0[[1]]$random["sigma.1", "ci.ub"],
ci0[[2]]$random["sigma.2", "ci.ub"])
sign.lower.tau <- c(ci0[[1]]$lb.sign, ci0[[2]]$lb.sign)
sign.upper.tau <- c(ci0[[1]]$ub.sign, ci0[[2]]$ub.sign)
}
}
else {
lower.tau2 <- upper.tau2 <- lower.tau <- upper.tau <- NA
sign.lower.tau <- sign.upper.tau <- ""
}
if (by) {
if (method.tau.ci %in% c("QP", "BJ", "J")) {
lower.tau2.resid <- ci1$random["tau^2", "ci.lb"]
upper.tau2.resid <- ci1$random["tau^2", "ci.ub"]
lower.tau.resid <- ci1$random["tau", "ci.lb"]
upper.tau.resid <- ci1$random["tau", "ci.ub"]
sign.lower.tau.resid <- ci1$lb.sign
sign.upper.tau.resid <- ci1$ub.sign
}
else if (method.tau.ci == "PL") {
if (any(names(ci0) == "random")) {
lower.tau2.resid <- c(NA, ci0$random["sigma^2.2", "ci.lb"])
upper.tau2.resid <- c(NA, ci0$random["sigma^2.2", "ci.ub"])
lower.tau.resid <- c(NA, ci0$random["sigma.2", "ci.lb"])
upper.tau.resid <- c(NA, ci0$random["sigma.2", "ci.ub"])
}
else {
lower.tau2.resid <- c(ci1[[1]]$random["sigma^2.1", "ci.lb"],
ci1[[2]]$random["sigma^2.2", "ci.lb"])
upper.tau2.resid <- c(ci1[[1]]$random["sigma^2.1", "ci.ub"],
ci1[[2]]$random["sigma^2.2", "ci.ub"])
lower.tau.resid <- c(ci1[[1]]$random["sigma.1", "ci.lb"],
ci1[[2]]$random["sigma.2", "ci.lb"])
upper.tau.resid <- c(ci1[[1]]$random["sigma.1", "ci.ub"],
ci1[[2]]$random["sigma.2", "ci.ub"])
}
}
else {
lower.tau2.resid <- upper.tau2.resid <-
lower.tau.resid <- upper.tau.resid <- NA
}
}
res <- list(tau2 = tau2,
se.tau2 = se.tau2,
lower.tau2 = lower.tau2,
upper.tau2 = upper.tau2,
tau = tau,
lower.tau = lower.tau,
upper.tau = upper.tau,
method.tau.ci = method.tau.ci,
sign.lower.tau = sign.lower.tau,
sign.upper.tau = sign.upper.tau,
Q = Q,
df.Q = df.Q,
pval.Q = pval.Q,
H = H$TE,
lower.H = H$lower,
upper.H = H$upper,
I2 = I2$TE,
lower.I2 = I2$lower,
upper.I2 = I2$upper,
tau2.resid = if (by) tau2.resid else NA,
se.tau2.resid = if (by) se.tau2.resid else NA,
lower.tau2.resid = if (by) lower.tau2.resid else NA,
upper.tau2.resid = if (by) upper.tau2.resid else NA,
tau.resid = if (by) tau.resid else NA,
lower.tau.resid = if (by) lower.tau.resid else NA,
upper.tau.resid = if (by) upper.tau.resid else NA,
Q.resid = if (by) Q.resid else NA,
df.Q.resid = if (by) df.Q.resid else NA,
pval.Q.resid = if (by) pval.Q.resid else NA,
H.resid = if (by) H.resid$TE else NA,
lower.H.resid = if (by) H.resid$lower else NA,
upper.H.resid = if (by) H.resid$upper else NA,
I2.resid = if (by) I2.resid$TE else NA,
lower.I2.resid = if (by) I2.resid$lower else NA,
upper.I2.resid = if (by) I2.resid$upper else NA
)
res
}
|
d <- data.frame(
trt = factor(c(1, 1, 2, 2)),
resp = c(1, 5, 3, 4),
group = factor(c(1, 2, 1, 2)),
upper = c(1.1, 5.3, 3.3, 4.2),
lower = c(0.8, 4.6, 2.4, 3.6)
)
p <- ggplot(d, aes(trt, resp)) +
geom_crossbar(aes(ymin = lower, ymax = upper), width = 0.2)
test_that("Basic geom_crossbar() works", {
l <- plotly_build(p)$x
expect_length(l$data, 5)
})
p <- ggplot(d, aes(trt, resp, color = group, linetype = group)) +
geom_crossbar(aes(ymin = lower, ymax = upper), width = 0.2) +
scale_colour_manual(values = c("red", "purple"))
test_that("geom_crossbar() with aesthetics", {
l <- plotly_build(p)$x
expect_length(l$data, 6)
colors <- vapply(l$data, function(x) x$line$color, character(1))
dashes <- vapply(l$data, function(x) x$line$dash, character(1))
expect_equivalent(
unique(colors), toRGB(c("red", "purple"))
)
expect_equivalent(
unique(dashes), lty2dash(1:2)
)
})
|
context("Evaluation")
library("testthat")
library("pROC")
library("AUC")
library("scoring")
library("Metrics")
library("PRROC")
test_that("evaluatePlp", {
eval <- evaluatePlp(prediction = plpResult$prediction, plpData = plpData)
testthat::expect_equal(class(eval), 'plpEvaluation')
testthat::expect_equal(names(eval), c('evaluationStatistics', 'thresholdSummary', 'demographicSummary', 'calibrationSummary', 'predictionDistribution') )
})
test_that("AUROC", {
Eprediction <- data.frame(value= runif(100), outcomeCount = round(runif(100)))
attr(Eprediction, "metaData") <- list(predictionType = "binary")
proc.auc <- pROC::roc(Eprediction$outcomeCount, Eprediction$value, algorithm = 3,
direction="<")
auc.auc <- AUC::auc(AUC::roc(Eprediction$value, factor(Eprediction$outcomeCount)))
tolerance <- 0.001
expect_equal(as.numeric(proc.auc$auc), auc.auc, tolerance = tolerance)
plpAUC <- computeAuc(Eprediction, confidenceInterval = FALSE)
expect_equal(as.numeric(proc.auc$auc), plpAUC, tolerance = tolerance)
plpAUCdf <- computeAucFromDataFrames(prediction = Eprediction$value,
status = Eprediction$outcomeCount,
modelType = "logistic")
expect_equal(as.numeric(proc.auc$auc), plpAUCdf, tolerance = tolerance)
})
test_that("AUPRC", {
Eprediction <- data.frame(value= runif(100), outcomeCount = round(runif(100)))
positive <- Eprediction$value[Eprediction$outcomeCount == 1]
negative <- Eprediction$value[Eprediction$outcomeCount == 0]
pr <- PRROC::pr.curve(scores.class0 = positive, scores.class1 = negative)
auprc <- pr$auc.integral
expect_gte(auprc, 0)
expect_lte(auprc, 1)
})
test_that("Brierscore", {
Eprediction <- data.frame(value= runif(100), outcomeCount = round(runif(100)))
Eprediction$dummy <- 1
brier.scoring <- scoring::brierscore(outcomeCount ~ value, data=Eprediction, group='dummy')$brieravg
brier.plp <- brierScore(Eprediction)$brier
expect_that(as.double(brier.scoring), equals(brier.plp))
})
test_that("Average precision", {
Eprediction <- data.frame(value= runif(100), outcomeCount = round(runif(100)))
aveP.metrics <- Metrics::apk(nrow(Eprediction),
which(Eprediction$outcomeCount==1), (1:nrow(Eprediction))[order(-Eprediction$value)])
aveP.plp <- averagePrecision(Eprediction)
expect_that(as.double(aveP.metrics), equals(aveP.plp))
})
test_that("f1Score", {
expect_that(f1Score(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(f1Score(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(f1Score(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(f1Score(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(f1Score(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(f1Score(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(f1Score(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(f1Score(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(f1Score(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(f1Score(TP=10,TN=3,FN=5,FP=5), equals(0.6666667,tolerance = 0.0001) )
})
test_that("accuracy", {
expect_that(accuracy(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(accuracy(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(accuracy(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(accuracy(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(accuracy(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(accuracy(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(accuracy(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(accuracy(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(accuracy(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(accuracy(TP=10,TN=3,FN=5,FP=5), equals(13/23, tolerance = 0.0001))
})
test_that("sensitivity", {
expect_that(sensitivity(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(sensitivity(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(sensitivity(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(sensitivity(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(sensitivity(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(sensitivity(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(sensitivity(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(sensitivity(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(sensitivity(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(sensitivity(TP=10,TN=3,FN=5,FP=5), equals(10/(10+5),tolerance = 0.0001))
})
test_that("falseNegativeRate", {
expect_that(falseNegativeRate(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(falseNegativeRate(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(falseNegativeRate(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(falseNegativeRate(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(falseNegativeRate(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(falseNegativeRate(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(falseNegativeRate(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(falseNegativeRate(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(falseNegativeRate(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(falseNegativeRate(TP=10,TN=3,FN=5,FP=5), equals(5/(10+5), tolerance = 0.0001))
})
test_that("falsePositiveRate", {
expect_that(falsePositiveRate(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(falsePositiveRate(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(falsePositiveRate(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(falsePositiveRate(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(falsePositiveRate(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(falsePositiveRate(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(falsePositiveRate(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(falsePositiveRate(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(falsePositiveRate(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(falsePositiveRate(TP=10,TN=3,FN=5,FP=5), equals(5/(5+3), tolerance = 0.0001))
})
test_that("specificity", {
expect_that(specificity(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(specificity(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(specificity(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(specificity(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(specificity(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(specificity(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(specificity(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(specificity(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(specificity(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(specificity(TP=10,TN=3,FN=5,FP=5), equals(3/(5+3), tolerance = 0.0001))
})
test_that("positivePredictiveValue", {
expect_that(positivePredictiveValue(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(positivePredictiveValue(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(positivePredictiveValue(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(positivePredictiveValue(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(positivePredictiveValue(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(positivePredictiveValue(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(positivePredictiveValue(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(positivePredictiveValue(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(positivePredictiveValue(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(positivePredictiveValue(TP=10,TN=3,FN=5,FP=5), equals(10/(10+5), tolerance = 0.0001))
})
test_that("falseDiscoveryRate", {
expect_that(falseDiscoveryRate(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(falseDiscoveryRate(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(falseDiscoveryRate(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(falseDiscoveryRate(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(falseDiscoveryRate(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(falseDiscoveryRate(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(falseDiscoveryRate(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(falseDiscoveryRate(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(falseDiscoveryRate(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(falseDiscoveryRate(TP=10,TN=3,FN=5,FP=5), equals(5/(10+5), tolerance = 0.0001))
})
test_that("negativePredictiveValue", {
expect_that(negativePredictiveValue(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(negativePredictiveValue(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(negativePredictiveValue(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(negativePredictiveValue(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(negativePredictiveValue(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(negativePredictiveValue(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(negativePredictiveValue(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(negativePredictiveValue(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(negativePredictiveValue(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(negativePredictiveValue(TP=10,TN=3,FN=5,FP=5), equals(3/(5+3), tolerance = 0.0001))
})
test_that("falseOmissionRate", {
expect_that(falseOmissionRate(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(falseOmissionRate(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(falseOmissionRate(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(falseOmissionRate(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(falseOmissionRate(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(falseOmissionRate(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(falseOmissionRate(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(falseOmissionRate(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(falseOmissionRate(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(falseOmissionRate(TP=10,TN=3,FN=5,FP=5), equals(5/(5+3), tolerance = 0.0001))
})
test_that("negativeLikelihoodRatio", {
expect_that(negativeLikelihoodRatio(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(negativeLikelihoodRatio(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(negativeLikelihoodRatio(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(negativeLikelihoodRatio(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(negativeLikelihoodRatio(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(negativeLikelihoodRatio(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(negativeLikelihoodRatio(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(negativeLikelihoodRatio(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(negativeLikelihoodRatio(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(negativeLikelihoodRatio(TP=10,TN=3,FN=5,FP=5), equals((5/(10+5))/(3/(5+3)), tolerance = 0.0001))
})
test_that("positiveLikelihoodRatio", {
expect_that(positiveLikelihoodRatio(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(positiveLikelihoodRatio(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(positiveLikelihoodRatio(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(positiveLikelihoodRatio(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(positiveLikelihoodRatio(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(positiveLikelihoodRatio(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(positiveLikelihoodRatio(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(positiveLikelihoodRatio(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(positiveLikelihoodRatio(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(positiveLikelihoodRatio(TP=10,TN=3,FN=5,FP=5), equals((10/(10+5))/(5/(5+3)), tolerance = 0.0001))
})
test_that("diagnosticOddsRatio", {
expect_that(diagnosticOddsRatio(TP=0,TN=0,FN=0,FP=0), equals(NaN))
expect_that(diagnosticOddsRatio(TP=-1,TN=0,FN=0,FP=0), throws_error())
expect_that(diagnosticOddsRatio(TP=1,TN=-1,FN=0,FP=0), throws_error())
expect_that(diagnosticOddsRatio(TP=1,TN=3,FN=-1,FP=0), throws_error())
expect_that(diagnosticOddsRatio(TP=1,TN=1,FN=5,FP=-1), throws_error())
expect_that(diagnosticOddsRatio(TP=NULL,TN=0,FN=0,FP=0), throws_error())
expect_that(diagnosticOddsRatio(TP=1,TN=NULL,FN=0,FP=0), throws_error())
expect_that(diagnosticOddsRatio(TP=1,TN=3,FN=NULL,FP=0), throws_error())
expect_that(diagnosticOddsRatio(TP=1,TN=1,FN=5,FP=NULL), throws_error())
expect_that(diagnosticOddsRatio(TP=10,TN=3,FN=5,FP=5), equals(((10/(10+5))/(5/(5+3)))/((5/(10+5))/(3/(5+3))), tolerance = 0.0001))
})
test_that("getPredictionDistribution", {
Eprediction <- data.frame(value= runif(100), outcomeCount =round(runif(100)))
predSum <- getPredictionDistribution(Eprediction)
expect_that(nrow(predSum ), equals(2))
expect_that(ncol(predSum ), equals(11))
})
test_that("getCalibration", {
Eprediction <- data.frame(rowId=1:100, value= runif(100), outcomeCount =round(runif(100)))
attr(Eprediction, "metaData")$predictionType <- "binary"
calib <- getCalibration(Eprediction)
expect_that(nrow(calib ), equals(10))
expect_that(ncol(calib ), equals(11))
})
test_that("getThresholdSummary", {
Eprediction <- data.frame(value= runif(100), outcomeCount =round(runif(100)))
thresSum <- getThresholdSummary(Eprediction)
expect_that(nrow(thresSum), equals(length(unique(Eprediction$value))))
expect_that(ncol(thresSum), equals(23))
expect_that(thresSum$truePositiveCount+thresSum$falseNegativeCount,
equals(rep(sum(Eprediction$outcomeCount),length(thresSum$truePositiveCount))))
expect_that(thresSum$truePositiveCount+thresSum$falsePositiveCount+
thresSum$trueNegativeCount+thresSum$falseNegativeCount,
equals(rep(nrow(Eprediction),length(thresSum$truePositiveCount))))
})
test_that("Calibration", {
Eprediction <- data.frame(rowId=1:100,
value= c(rep(0,50),rep(1,50)),
outcomeCount =c(rep(0,50),rep(1,50)))
calibrationTest1 <- calibrationLine(Eprediction,numberOfStrata=2)
expect_that(calibrationTest1$lm['Intercept'], is_equivalent_to(0))
expect_that(calibrationTest1$lm['Gradient'], is_equivalent_to(1))
expect_that(nrow(calibrationTest1$aggregateLmData)==2, equals(T))
Eprediction2 <- data.frame(rowId=1:100,
value= c(0.1+runif(50)*0.9,runif(50)*0.6),
outcomeCount =c(rep(1,50),rep(0,50)))
hs.exist2 <- ResourceSelection::hoslem.test(Eprediction2$outcomeCount,
Eprediction2$value, g=10)
calibrationTest2 <- calibrationLine(Eprediction2,numberOfStrata=10)
expect_that(calibrationTest2$hosmerlemeshow['Xsquared'],
is_equivalent_to(hs.exist2$statistic))
expect_that(calibrationTest2$hosmerlemeshow['df'],
is_equivalent_to(hs.exist2$parameter))
expect_that(calibrationTest2$hosmerlemeshow['pvalue'],
is_equivalent_to(hs.exist2$p.value))
})
|
testGvsGH <- function(x, nsim, verbose = 'vv') {
t0 <- Sys.time()
LLR <- rep(0, nsim)
vmessage(verbose, 2, TRUE, 'Fitting g distribution to data')
depo <- fitG(x, verbose = FALSE)
mleG <- stats::coef(depo)[3]
maxG <- depo$loglik
vmessage(verbose, 2, TRUE, 'Fitting g-and-h distribution to data')
depo <- fitGH(x, method = 'mle', verbose = FALSE)
mleGH <- stats::coef(depo)[3:4]
maxGH <- depo$loglik
observed_LLR <- pmax(2 * (maxGH - maxG), 0)
vmessage(verbose, 2, TRUE, 'Running simulations')
if (verbose %in% c('v', 'vv', 'vvv')) {
pb <- utils::txtProgressBar(min = 0, max = nsim, style = 3)
}
for (i in seq_len(nsim)) {
xsim <- rgh(n = length(x), a = 0, b = 1, g = mleGH[1], h =0)
depo <- fitG(xsim, verbose = FALSE)
maxG <- depo$loglik
depo <- fitGH(xsim, method = 'mle', verbose = FALSE)
maxGH <- depo$loglik
LLR[i] <- 2 * (maxGH - maxG)
if (verbose %in% c('v', 'vv', 'vvv')) { utils::setTxtProgressBar(pb, i) }
}
if (verbose %in% c('v', 'vv', 'vvv')) { close(pb) }
vmessage(verbose, 2, TRUE, 'Done!')
list(
call = match.call(),
n = length(x),
nsim = nsim,
statistic = observed_LLR,
LLR = LLR,
p.value = mean(LLR > observed_LLR),
CIp.value = suppressWarnings(
stats::prop.test(sum(LLR > observed_LLR), nsim)$conf.int
),
time = Sys.time() - t0
) %>%
structure(class = 'testGvsGH') %>%
return()
}
print.testGvsGH <- function(x, ...) {
cat("\nSimulated LLR of g vs Tukey's g-and-h distribution test\n")
cat('\nCall:\n')
print(x$call)
cat('\nStatistic: ', x$statistic, ', Estimated p-value: ', x$p.value,sep = '')
cat('\nApproximate 95% C.I. of p-value: ')
cat('(', paste0(signif(x$CIp.value, 4), collapse = ', '), ')', '\n', sep = '')
cat('\nSummary statistics of the simulated log-likelihood ratios:\n')
print(summary(x$LLR))
cat('\n',
'Fitting method: Maximum Likelihood\n',
'Number of simulations: ', x$nsim, ', ',
'Computation time: ', signif(x$time, 3), ' ', units(x$time), '\n',
'Observations: ', x$n, ', degrees of freedom: ', 1, '\n', sep = ''
)
invisible(x)
}
summary.testGvsGH <- function(object, ...) {
print(object, ...)
}
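# Hypothetical usage sketch (not run): assumes fitG(), fitGH(), rgh() and
# vmessage() from the same package are available on the search path.
# set.seed(1)
# out <- testGvsGH(x = rnorm(200), nsim = 99, verbose = 'v')
# print(out)
# summary(out)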
|
samplePriorSeparationLimits <- function(n, prior, mean.limits, rate.limits, sample.sd=TRUE){
pars <- prior$pars
mu <- matrix(nrow=n, ncol=pars$r)
if(pars$den.mu == "unif"){
for(i in 1:pars$r){ mu[,i] <- stats::runif(n=n, min=mean.limits[1], max=mean.limits[2]) }
} else{
for(i in 1:pars$r){
for(j in 1:n){
repeat{
mu[j,i] <- stats::rnorm(n=1, mean=pars$par.mu[i,1], sd=pars$par.mu[i,2])
if( mu[j,i] > mean.limits[1] && mu[j,i] < mean.limits[2] ) break
}
}
}
}
sample_vcv <- function(){
if(pars$unif.corr == TRUE){
if( pars$p == 1){
vcv <- riwish(v=pars$r + 1, S=diag(nrow=pars$r))
} else{
vcv <- list()
for(i in 1:pars$p){ vcv[[i]] <- riwish(v=pars$r + 1, S=diag(nrow=pars$r) ) }
}
} else{
if( is.matrix(pars$Sigma) ){
vcv <- riwish(v=pars$nu, S=pars$Sigma)
}
if( is.list(pars$Sigma) ){
vcv <- list()
for(i in 1:pars$p){ vcv[[i]] <- riwish(v=pars$nu[i], S=pars$Sigma[[i]]) }
}
if( !is.matrix(pars$Sigma) && !is.list(pars$Sigma) ) stop("Error. Check if the parameter 'Sigma' in function 'make.prior.zhang' is of class 'matrix' or class 'list'. Check if length of 'Sigma', if a list, is equal to 'p'.")
}
return(vcv)
}
sample_sd <- function(){
if(pars$den.sd == "unif"){
if(pars$p == 1){
sd <- stats::runif(n=pars$r, min=pars$par.sd[1], max=pars$par.sd[2])
} else{
sd <- list()
for(i in 1:pars$p){
sd[[i]] <- stats::runif(n=pars$r, min=pars$par.sd[i,1], max=pars$par.sd[i,2])
}
}
} else{
if(pars$p == 1){
sd <- stats::rlnorm(n=pars$r, meanlog=pars$par.sd[1], sdlog=pars$par.sd[2])
} else{
sd <- list()
for(i in 1:pars$p){
sd[[i]] <- stats::rlnorm(n=pars$r, meanlog=pars$par.sd[i,1], sdlog=pars$par.sd[i,2])
}
}
}
return(sd)
}
check_limit <- function(X){
mm <- c( X[ upper.tri(X, diag=TRUE) ] )
low <- mm > rate.limits[1]
high <- mm < rate.limits[2]
if( sum(low) == length(mm) && sum(high) == length(mm) ){
return(TRUE)
} else{
return(FALSE)
}
}
if( sample.sd == TRUE ){
prior.samples <- list()
for( i in 1:n ){
repeat{
vcv <- sample_vcv()
sd <- sample_sd()
corr <- decompose.cov(vcv)$r
vv <- (sd)^2
prior.samples[[i]] <- rebuild.cov(r=corr, v=vv)
if( check_limit( prior.samples[[i]] ) ) break
}
}
} else{
prior.samples <- list()
for( i in 1:n ){
repeat{
prior.samples[[i]] <- sample_vcv()
if( check_limit( prior.samples[[i]] ) ) break
}
}
}
out <- list( mu=mu, matrix=prior.samples )
return( out )
}
|
context("Test of ODEsobol.ODEnetwork() (and plotting)")
masses <- c(1, 1)
dampers <- diag(c(1, 1))
springs <- diag(c(1, 1))
springs[1, 2] <- 1
distances <- diag(c(0, 2))
distances[1, 2] <- 1
lfonet <- ODEnetwork(masses, dampers, springs,
cartesian = TRUE, distances = distances)
lfonet <- setState(lfonet, c(0.5, 1), c(0, 0))
LFObinf <- rep(0.001, 3)
LFObsup <- c(6, 6, 3)
LFOtimes1 <- seq(0.1, 20, by = 5)
LFOtimes2 <- 10
set.seed(2015)
LFOres1 <- suppressWarnings(
ODEsobol(mod = lfonet,
pars = c("k.1", "k.2", "k.1.2"),
times = LFOtimes1,
n = 10,
rfuncs = c("runif", "rnorm", "rexp"),
rargs = c("min = 0.001, max = 6",
"mean = 3, sd = 0.5",
"rate = 1 / 3"),
sobol_method = "Martinez",
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
)
set.seed(2015)
LFOres2 <- suppressWarnings(
ODEsobol(mod = lfonet,
pars = c("k.1", "k.2", "k.1.2"),
times = LFOtimes2,
n = 10,
rfuncs = c("runif", "rnorm", "rexp"),
rargs = c("min = 0.001, max = 6",
"mean = 3, sd = 0.5",
"rate = 1 / 3"),
sobol_method = "Martinez",
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
)
set.seed(2015)
LFOres3 <- suppressWarnings(
ODEsobol(mod = lfonet,
pars = "k.1",
times = LFOtimes2,
n = 10,
rfuncs = "runif",
rargs = "min = 0.001, max = 6",
sobol_method = "Martinez",
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
)
set.seed(2015)
LFOres_parallel <- suppressWarnings(
ODEsobol(mod = lfonet,
pars = c("k.1", "k.2", "k.1.2"),
times = LFOtimes1,
n = 10,
rfuncs = c("runif", "rnorm", "rexp"),
rargs = c("min = 0.001, max = 6",
"mean = 3, sd = 0.5",
"rate = 1 / 3"),
sobol_method = "Martinez",
ode_method = "adams",
parallel_eval = TRUE,
parallel_eval_ncores = 2)
)
set.seed(2015)
LFOres_jansen <- suppressWarnings(
ODEsobol(mod = lfonet,
pars = c("k.1", "k.2", "k.1.2"),
times = LFOtimes1,
n = 10,
rfuncs = c("runif", "rnorm", "rexp"),
rargs = c("min = 0.001, max = 6",
"mean = 3, sd = 0.5",
"rate = 1 / 3"),
sobol_method = "Jansen",
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
)
test_that("Result type is correct", {
expect_true(is.list(LFOres1))
expect_equal(class(LFOres1), "ODEsobol")
expect_equal(attr(LFOres1, "sobol_method"), "Martinez")
expect_equal(length(LFOres1), 4)
expect_equal(names(LFOres1), c("x.1", "v.1", "x.2", "v.2"))
expect_true(is.list(LFOres1$"x.1"))
expect_true(is.list(LFOres1$"v.1"))
expect_true(is.list(LFOres1$"x.2"))
expect_true(is.list(LFOres1$"v.2"))
expect_equal(length(LFOres1$"x.1"), 2)
expect_equal(length(LFOres1$"v.1"), 2)
expect_equal(length(LFOres1$"x.2"), 2)
expect_equal(length(LFOres1$"v.2"), 2)
expect_equal(dim(LFOres1$"x.1"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres1$"x.1"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres1$"v.1"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres1$"v.1"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres1$"x.2"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres1$"x.2"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres1$"v.2"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres1$"v.2"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_true(is.list(LFOres2))
expect_equal(class(LFOres2), "ODEsobol")
expect_equal(attr(LFOres2, "sobol_method"), "Martinez")
expect_equal(length(LFOres2), 4)
expect_equal(names(LFOres2), c("x.1", "v.1", "x.2", "v.2"))
expect_true(is.list(LFOres2$"x.1"))
expect_true(is.list(LFOres2$"v.1"))
expect_true(is.list(LFOres2$"x.2"))
expect_true(is.list(LFOres2$"v.2"))
expect_equal(length(LFOres2$"x.1"), 2)
expect_equal(length(LFOres2$"v.1"), 2)
expect_equal(length(LFOres2$"x.2"), 2)
expect_equal(length(LFOres2$"v.2"), 2)
expect_equal(dim(LFOres2$"x.1"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_equal(dim(LFOres2$"x.1"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_equal(dim(LFOres2$"v.1"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_equal(dim(LFOres2$"v.1"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_equal(dim(LFOres2$"x.2"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_equal(dim(LFOres2$"x.2"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_equal(dim(LFOres2$"v.2"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_equal(dim(LFOres2$"v.2"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes2)))
expect_true(is.list(LFOres3))
expect_equal(class(LFOres3), "ODEsobol")
expect_equal(attr(LFOres3, "sobol_method"), "Martinez")
expect_equal(length(LFOres3), 4)
expect_equal(names(LFOres3), c("x.1", "v.1", "x.2", "v.2"))
expect_true(is.list(LFOres3$"x.1"))
expect_true(is.list(LFOres3$"v.1"))
expect_true(is.list(LFOres3$"x.2"))
expect_true(is.list(LFOres3$"v.2"))
expect_equal(length(LFOres3$"x.1"), 2)
expect_equal(length(LFOres3$"v.1"), 2)
expect_equal(length(LFOres3$"x.2"), 2)
expect_equal(length(LFOres3$"v.2"), 2)
expect_equal(dim(LFOres3$"x.1"$S),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(dim(LFOres3$"x.1"$T),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(dim(LFOres3$"v.1"$S),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(dim(LFOres3$"v.1"$T),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(dim(LFOres3$"x.2"$S),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(dim(LFOres3$"x.2"$T),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(dim(LFOres3$"v.2"$S),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(dim(LFOres3$"v.2"$T),
c(1 + length(c("k.1")), length(LFOtimes2)))
expect_equal(LFOres_parallel, LFOres1)
expect_true(is.list(LFOres_jansen))
expect_equal(class(LFOres_jansen), "ODEsobol")
expect_equal(attr(LFOres_jansen, "sobol_method"), "Jansen")
expect_equal(length(LFOres_jansen), 4)
expect_equal(names(LFOres_jansen), c("x.1", "v.1", "x.2", "v.2"))
expect_true(is.list(LFOres_jansen$"x.1"))
expect_true(is.list(LFOres_jansen$"v.1"))
expect_true(is.list(LFOres_jansen$"x.2"))
expect_true(is.list(LFOres_jansen$"v.2"))
expect_equal(length(LFOres_jansen$"x.1"), 2)
expect_equal(length(LFOres_jansen$"v.1"), 2)
expect_equal(length(LFOres_jansen$"x.2"), 2)
expect_equal(length(LFOres_jansen$"v.2"), 2)
expect_equal(dim(LFOres_jansen$"x.1"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres_jansen$"x.1"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres_jansen$"v.1"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres_jansen$"v.1"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres_jansen$"x.2"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres_jansen$"x.2"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres_jansen$"v.2"$S),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
expect_equal(dim(LFOres_jansen$"v.2"$T),
c(1 + length(c("k.1", "k.2", "k.1.2")), length(LFOtimes1)))
})
test_that("Errors and warnings are thrown", {
set.seed(2015)
expect_error(ODEsobol(mod = lfonet,
pars = c("k.1", "k.2", "k.1.2"),
times = LFOtimes1,
n = 1,
rfuncs = c("runif", "rnorm", "rexp"),
rargs = c("min = 0.001, max = 6",
"mean = 3, sd = 0.5",
"rate = 1 / 3"),
sobol_method = "Martinez",
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA))
})
test_that("Plots are generated", {
expect_true(plot(LFOres1))
expect_true(plot(LFOres2))
expect_true(plot(LFOres3))
expect_true(plot(LFOres_parallel))
expect_true(plot(LFOres_jansen))
expect_true(plot(LFOres1, state_plot = "x.2", main_title = "Hi!",
legendPos = "topleft", type = "b"))
my_cols <- c("firebrick", "chartreuse3", "dodgerblue")
expect_true(plot(LFOres1, state_plot = "x.2", colors_pars = my_cols))
expect_true(plot(LFOres1, state_plot = "x.2", cex.axis = 2, cex = 4,
main = "Small Title", cex.main = 0.5))
})
|
context("Functions output")
library(andurinha)
test_that("findPeaks return a list", {
expect_equal(class(findPeaks(andurinhaData)), "list")
expect_equal(class(findPeaks(andurinhaData, scale = FALSE, ndd = FALSE)), "list")
})
test_that("finPeaks return a list with the correct length", {
expect_equal(length(findPeaks(andurinhaData)), 4)
expect_equal(length(findPeaks(andurinhaData, scale = FALSE)), 3)
expect_equal(length(findPeaks(andurinhaData, ndd = FALSE)), 3)
expect_equal(length(findPeaks(andurinhaData, scale = FALSE, ndd = FALSE)), 2)
})
test_that("gOverview return a ggplot objetc", {
expect_equal(class(gOverview(andurinhaData)), c("gg", "ggplot"))
expect_equal(class(gOverview(findPeaks(andurinhaData)$dataZ,
findPeaks(andurinhaData)$secondDerivative)), c("gg", "ggplot"))
})
test_that("plotPeaks return a ggplot objetc", {
expect_equal(class(plotPeaks(findPeaks(andurinhaData)[[4]]$WN,
findPeaks(andurinhaData)$dataZ,
findPeaks(andurinhaData)$secondDerivative)), c("gg", "ggplot"))
expect_equal(class(plotPeaks(findPeaks(andurinhaData, ndd = FALSE)[[3]]$WN,
findPeaks(andurinhaData)$dataZ)), c("gg", "ggplot"))
})
|
polynomial.roots <- function( m.r )
{
matrices <- jacobi.matrices( m.r )
n <- length( matrices )
eigen.list <- lapply( matrices, eigen )
    roots <- vector( mode = "list", length = n + 1 )
j <- 1
while ( j <= n ) {
roots[[j+1]] <- eigen.list[[j]]$values
j <- j + 1
}
return( roots )
}
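# Hypothetical usage sketch (not run), assuming the companion helpers
# jacobi.matrices(), legendre.recurrences() and
# monic.polynomial.recurrences() from the same orthopolynom-style package:
# r <- legendre.recurrences( 4, normalized = TRUE )
# m.r <- monic.polynomial.recurrences( r )
# polynomial.roots( m.r )[[5]]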
|
eps <- function(file="Rplot%03d.eps", width=7, height=7, horizontal=FALSE, paper="special", onefile=FALSE, ...) {
onefile <- FALSE
postscript(file=file, width=width, height=height, horizontal=horizontal, paper=paper, onefile=onefile, ...)
}
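# Minimal usage sketch of eps(): open an EPS device writing to a temporary
# file (so the example leaves no artefacts behind), draw, then close it.
f <- tempfile(fileext = ".eps")
eps(file = f)
plot(1:10, (1:10)^2, type = "b", main = "eps() example")
invisible(dev.off())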
|
superpc.plotcv <- function (object,
cv.type=c("full", "preval"),
smooth=TRUE,
smooth.df=10,
call.win.metafile=FALSE, ...) {
cv.type <- match.arg(cv.type)
if(cv.type == "full"){
scor <- object$scor
smooth <- FALSE
} else {
scor <- object$scor.preval
}
k <- nrow(scor)
if (smooth) {
for (j in 1:nrow(scor)) {
if(is.null(smooth.df)){
om <- !is.na(scor[j, ])
junk <- smooth.spline(object$th[om], scor[j,om ])
scor[j,om] <- predict(junk,object$th[om])$y
}
if(!is.null(smooth.df)){
om <- !is.na(scor[j, ])
junk <- smooth.spline(object$th[om], scor[j,om ], df=smooth.df)
scor[j,om] <- predict(junk,object$th[om])$y
}
}
}
if (object$type == "survival") {
if (cv.type == "full") {
ymax <- max(object$scor.upper[!is.na(object$scor.upper)], qchisq(0.95, nrow(scor)))
}
if (cv.type == "preval") {
ymax <- max(scor[!is.na(scor)], qchisq(0.95, nrow(scor)))
}
}
if (object$type == "regression") {
n.mean <- 0
for(i in 1:object$n.fold){
n.mean <- n.mean + length(object$folds[[i]]) / object$n.fold
}
denom.df <- n.mean -1 - nrow(scor)
if (cv.type == "full") {
ymax <- max(object$scor.upper[!is.na(object$scor.upper)], qf(0.95, nrow(scor), denom.df))
}
if (cv.type == "preval") {
ymax <- max(scor[!is.na(scor)], qf(0.95, nrow(scor), denom.df))
}
}
if (call.win.metafile) {
dev.new()
}
ylab <- "Likelihood ratio test statistic"
matplot(object$th, t(scor), xlab="Threshold", ylab=ylab, ylim=c(0, ymax), lty=rep(1,k))
matlines(object$th, t(scor), lty=rep(1,k), ...)
for (j in 1:k) {
if (object$type == "survival") {
abline(h=qchisq(0.95, j), lty=2, col=j)
}
if (object$type == "regression") {
abline(h=qf(0.95, j, denom.df), lty=2, col=j)
}
if (cv.type == "full") {
delta <- ((-1)^j) * diff(object$th)[1] / 4
error.bars(object$th+delta*(j>1),
t(object$scor.lower[j,]),
t(object$scor.upper[j,]), lty=2, col=j)
}
}
if (call.win.metafile) {
dev.off()
}
return(TRUE)
}
error.bars <- function(x, upper, lower, width=0.005, ...) {
xlim <- range(x)
barw <- diff(xlim) * width
segments(x, upper, x, lower, ...)
segments(x - barw, upper, x + barw, upper, ...)
segments(x - barw, lower, x + barw, lower, ...)
range(upper, lower)
}
jitter <- function(x) {
return(x + 0.03 * abs(x) * sign(rnorm(length(x))))
}
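# Illustrative sketch of the error.bars() helper defined above (assumption:
# it is meant for adding vertical error bars to an existing plot); guarded so
# it only draws in an interactive session.
if (interactive()) {
  x <- 1:5
  est <- c(1.2, 1.8, 2.1, 2.6, 3.0)
  se <- rep(0.3, 5)
  plot(x, est, ylim = range(est - se, est + se), pch = 19)
  error.bars(x, est + se, est - se, col = "grey40")
}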
|
ICFTs <- function (design, digits = 3, resk.only = TRUE,
kmin = NULL, kmax = ncol(design), detail = FALSE, with.blocks = FALSE,
conc = TRUE)
{
if ("design" %in% class(design)) {
fn <- names(factor.names(design))
if (with.blocks)
fn <- c(fn, design.info(design)$block.name)
design <- design[, fn]
nfac <- length(fn)
}
else {
nfac <- ncol(design)
fn <- 1:nfac
}
nlev <- levels.no(design)
dfs <- nlev - 1
if (!is.data.frame(design)) design <- as.data.frame(design)
for (i in 1:nfac){
design[[i]] <- factor(design[[i]])
contrasts(design[[i]]) <- contr.XuWuPoly(nlev[i])
}
ks <- which(round(GWLP(design, kmax = kmax)[-1], 8) > 0)
N <- nrow(design)
if (length(ks) == 0) {
hilf <- list(list(ICFT = cbind(IC = 0, frequency = sum(nlev) -
kmax), IC1 = 0))
names(hilf) <- kmax
return(hilf)
}
k <- min(ks)
if (k < 2)
stop("resolution of design must be at least 2")
kminset <- FALSE
if (is.null(kmin))
kmin <- k
else {
if (!kmin == min(ks)) kminset <- TRUE
redu <- ks[ks >= kmin]
message(paste("Check sets of sizes ", paste(redu, collapse = ",")))
if (length(redu) == 0)
return()
kmin <- min(redu)
k <- kmin
ks <- redu
}
if (k >= kmin) {
k <- kmin
ns <- choose(nfac, k)
auswahl <- 1:ns
selproj <- sel <- nchoosek(nfac, k)
GWLPs <- round(apply(selproj, 2, function(obj) GWLP(design[,
obj])[-1]), 4)
selproj <- apply(selproj, 2, function(obj) paste(obj,
collapse = ":"))
names(auswahl) <- selproj
if (resk.only) {
reskproj <- apply(GWLPs, 2, function(obj) all(obj[-k] ==
0))
if (all(!reskproj)) {
message("no projections with resolution ",
k, " or higher")
return()
}}
berechn <- lapply(auswahl, function(obj) {
hilf2 <- design[, sel[, obj]]
mmX <- model.matrix(formula(substitute(~.^km1,
list(km1 = k))), data = hilf2)
mmX <- mmX[,-(1:(ncol(mmX) - prod(dfs[sel[, obj]])))]
hilfc <- svd(mmX)
hilf2 <- table(round(hilfc$d^2,6))
cumcounts <- cumsum(rev(hilf2))
from <- c(1, cumcounts[-length(hilf2)]+1)
hilf2 <- rep(0, prod(dfs[sel[, obj]]))
if (conc)
hilf2[from] <- sapply(1:length(from),
function(obj) sum((hilfc$d^2*colMeans(hilfc$u)^2)[from[obj]:cumcounts[obj]])
)
else {
for (i in 1:length(from)){
bereich <- from[i]:cumcounts[i]
hilf2[bereich] <- mean((hilfc$d^2*colMeans(hilfc$u)^2)[bereich])
}}
list(hilf2, hilfc$d^2, colMeans(hilfc$u)^2)
})
ICs <- lapply(berechn, function(obj) obj[[1]])
sv2s <- lapply(berechn, function(obj) obj[[2]])
mean.u2s <- lapply(berechn, function(obj) obj[[3]])
rund <- lapply(ICs, function(obj) round(obj,digits))
ICFT <- table(unlist(rund))
ICFT <- cbind(IC = as.numeric(names(ICFT)), frequency = ICFT)
rownames(ICFT) <- rep("", nrow(ICFT))
aus <- list(ICFT = ICFT)
if (detail)
aus <- c(aus, list(ICs = rund, sv2s = sv2s, mean.u2s = mean.u2s ))
if (!resk.only || kminset){
aus <- list(aus); names(aus) <- k
if (!resk.only){
ks <- kmin:kmax
if (length(ks)>1){
for (k in (kmin+1):kmax){
ns <- choose(nfac, k)
auswahl <- 1:ns
selproj <- sel <- nchoosek(nfac, k)
GWLPs <- round(apply(selproj, 2, function(obj) GWLP(design[,
obj])[-1]), 4)
selproj <- apply(selproj, 2, function(obj) paste(obj,
collapse = ":"))
names(auswahl) <- selproj
berechn <- lapply(auswahl, function(obj) {
hilf2 <- design[, sel[, obj]]
mmX <- model.matrix(formula(substitute(~.^km1,
list(km1 = k))), data = hilf2)
mmX <- mmX[,-(1:(ncol(mmX) - prod(dfs[sel[, obj]])))]
hilfc <- svd(mmX)
hilf2 <- table(round(hilfc$d^2,6))
cumcounts <- cumsum(rev(hilf2))
from <- c(1, cumcounts[-length(hilf2)]+1)
hilf2 <- rep(0, prod(dfs[sel[, obj]]))
if (conc)
hilf2[from] <- sapply(1:length(from),
function(obj) sum((hilfc$d^2*colMeans(hilfc$u)^2)[from[obj]:cumcounts[obj]])
)
else {
for (i in 1:length(from)){
bereich <- from[i]:cumcounts[i]
hilf2[bereich] <- mean((hilfc$d^2*colMeans(hilfc$u)^2)[bereich])
}}
list(hilf2, hilfc$d^2, colMeans(hilfc$u)^2)
})
ICs <- lapply(berechn, function(obj) obj[[1]])
sv2s <- lapply(berechn, function(obj) obj[[2]])
mean.u2s <- lapply(berechn, function(obj) obj[[3]])
rund <- lapply(ICs, function(obj) round(obj,digits))
ICFT <- table(unlist(rund))
ICFT <- cbind(IC = as.numeric(names(ICFT)), frequency = ICFT)
rownames(ICFT) <- rep("", nrow(ICFT))
ausn <- list(ICFT = ICFT)
if (detail)
ausn <- c(ausn, list(ICs = rund, sv2s = sv2s, mean.u2s = mean.u2s ))
ausn <- list(ausn)
names(ausn) <- k
aus <- c(aus, ausn)
}
}
}}
}
else aus <- list(ICFT = NULL)
aus
}
|
library(glmnet)
linear.predictor = function(X, b_0, b_x) {
return(b_0 + X %*% b_x)
}
glmnet.loss = function(G, Y, b_0, b_x, lambda, penalty.factor, family="gaussian") {
n = dim(G)[1]
xbeta = linear.predictor(G, b_0, b_x)
penalty_loss = lambda * (abs(b_x) %*% penalty.factor)[1,1]
if (family == "gaussian"){
loss = sum((Y - xbeta)^2) / (2 * n)
}
if (family == "binomial"){
loss = sum(log(1 + exp(xbeta)) - Y * xbeta) / n
}
return(loss + penalty_loss)
}
grid_size = 10
grid = 10^seq(-4, log10(1), length.out=grid_size)
grid = rev(grid)
max_iterations = 20000
tol = 1e-5
for (family in c("gaussian", "binomial")){
for (seed in 1:20) {
if (seed <= 5) {
sample_size = 200
p = 50
n_confounders = NULL
} else if (seed <= 10) {
sample_size = 200
p = 50
n_confounders = 2
} else if (seed <= 15) {
sample_size = 100
p = 500
n_confounders = 5
} else {
sample_size = 200
p = 500
n_confounders = 10
}
cat("-", seed, family, "\n")
data = data.gen(sample_size=sample_size, p=p,
n_g_non_zero=10, n_gxe_non_zero=4,
seed=seed,
family=family,
n_confounders=n_confounders,
normalize=TRUE)
file_name = paste0("tests/testthat/testdata/compare_with_glmnet/", seed, "_", family, "_data.rds")
saveRDS(data, file_name)
start = Sys.time()
fit = hierNetGxE.fit(G=data$G_train, E=rep(0, sample_size),
Y=data$Y_train, C=data$C_train,
tolerance=tol, grid=grid, family=family,
normalize=FALSE,
max_iterations=max_iterations)
cat("-- hierNetGxE.fit done in ", Sys.time() - start, " seconds. num not converged ", sum(1 - fit$has_converged), "\n")
glmnet_X = cbind(data$G_train, data$C_train)
penalty.factor = rep(0, ncol(glmnet_X))
penalty.factor[1:ncol(data$G_train)] = 1
glmnet_fit = glmnet(x=glmnet_X, y=data$Y_train,
lambda=grid, thresh=1e-10,
intercept=TRUE, standardize.response=FALSE,
standardize=FALSE,
family=family,
penalty.factor=penalty.factor)
objective_value = c()
for (i in 1:grid_size) {
cur_objective_value = glmnet.loss(glmnet_X, data$Y_train,
glmnet_fit$a0[i], glmnet_fit$beta[,i],
glmnet_fit$lambda[i],
penalty.factor=penalty.factor,
family=family)
objective_value = c(objective_value, cur_objective_value)
}
cat("-- max difference in loss", max(fit$objective_value - rep(objective_value, rep(grid_size, grid_size))), "\n")
file_name = paste0("tests/testthat/testdata/compare_with_glmnet/", seed, "_", family, "_glmnet_results.rds")
saveRDS(list(objective_value=objective_value), file_name)
}
}
|
affil_df = function(
affil_id = NULL,
affil_name = NULL,
api_key = NULL,
verbose = TRUE,
facets = NULL,
sort = "document-count",
...){
L = affil_data(
affil_id = affil_id,
affil_name = affil_name,
verbose = verbose,
facets = facets,
sort = sort,
... = ...)
df = L$df
return(df)
}
affil_data = function(
affil_id = NULL,
affil_name = NULL,
api_key = NULL,
verbose = TRUE,
facets = NULL,
sort = "document-count",
...){
if (is.null(affil_id)) {
res = process_affiliation_name(
affil_id = affil_id,
affil_name = affil_name,
api_key = api_key, verbose = verbose
)
affil_id = res$affil_id
}
affil_id = gsub("AFFILIATION_ID:", "", affil_id, fixed = TRUE)
entries = author_search_by_affil(
affil_id = affil_id,
verbose = verbose,
facets = facets,
sort = sort,
...)
total_results = entries$total_results
facets = entries$facets
entries = entries$entries
df = gen_entries_to_df(
entries = entries)
df$df$affil_id = affil_id
L = list(entries = entries, df = df)
L$total_results = total_results
L$facets = facets
return(L)
}
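# Hypothetical usage sketch (not run): both helpers query the Scopus API and
# therefore require a valid Elsevier API key to be configured beforehand.
# df <- affil_df(affil_name = "Harvard University", verbose = FALSE)
# head(df)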
|
write.nexus.data <-
function(x, file, format = "dna", datablock = TRUE,
interleaved = TRUE, charsperline = NULL,
gap = NULL, missing = NULL)
{
format <- match.arg(toupper(format), c("DNA", "PROTEIN", "STANDARD", "CONTINUOUS"))
if (inherits(x, "DNAbin") && format != "DNA") {
format <- "DNA"
warning("object 'x' is of class DNAbin: format forced to DNA")
}
if (inherits(x, "AAbin") && format != "PROTEIN") {
format <- "PROTEIN"
warning("object 'x' is of class AAbin: format forced to PROTEIN")
}
indent <- " "
maxtax <- 5
defcharsperline <- 80
defgap <- "-"
defmissing <- "?"
if (is.matrix(x)) {
if (inherits(x, "DNAbin")) x <- as.list(x) else {
xbak <- x
x <- vector("list", nrow(xbak))
for (i in seq_along(x)) x[[i]] <- xbak[i, ]
names(x) <- rownames(xbak)
rm(xbak)
}
}
ntax <- length(x)
nchars <- length(x[[1]])
zz <- file(file, "w")
if (is.null(names(x))) names(x) <- as.character(1:ntax)
fcat <- function(..., file = zz)
cat(..., file = file, sep = "", append = TRUE)
find.max.length <- function(x) max(nchar(x))
print.matrix <- function(x, dindent = " ", collapse = "") {
Names <- names(x)
printlength <- find.max.length(Names) + 2
if (!interleaved) {
for (i in seq_along(x)) {
sequence <- paste(x[[i]], collapse = collapse)
taxon <- Names[i]
thestring <- sprintf("%-*s%s%s", printlength, taxon, dindent, sequence)
fcat(indent, indent, thestring, "\n")
}
} else {
ntimes <- ceiling(nchars/charsperline)
start <- 1
end <- charsperline
for (j in seq_len(ntimes)) {
for (i in seq_along(x)) {
sequence <- paste(x[[i]][start:end], collapse = collapse)
taxon <- Names[i]
thestring <- sprintf("%-*s%s%s", printlength, taxon, dindent, sequence)
fcat(indent, indent, thestring, "\n")
}
if (j < ntimes) fcat("\n")
start <- start + charsperline
end <- end + charsperline
if (end > nchars) end <- nchars
}
}
}
if (inherits(x, "DNAbin") || inherits(x, "AAbin")) x <- as.character(x)
fcat("
NCHAR <- paste("NCHAR=", nchars, sep = "")
NTAX <- paste0("NTAX=", ntax)
DATATYPE <- paste0("DATATYPE=", format)
if (is.null(charsperline)) {
if (nchars <= defcharsperline) {
charsperline <- nchars
interleaved <- FALSE
} else charsperline <- defcharsperline
}
if (is.null(missing)) missing <- defmissing
MISSING <- paste0("MISSING=", missing)
if (is.null(gap)) gap <- defgap
GAP <- paste0("GAP=", gap)
INTERLEAVE <- if (interleaved) "INTERLEAVE=YES" else "INTERLEAVE=NO"
if (datablock) {
fcat("BEGIN DATA;\n")
fcat(indent, "DIMENSIONS ", NTAX, " ", NCHAR, ";\n")
if(format != "STANDARD") {
fcat(indent, "FORMAT", " ", DATATYPE, " ", MISSING, " ", GAP, " ", INTERLEAVE, ";\n")
} else {
fcat(indent, "FORMAT", " ", DATATYPE, " ", MISSING, " ", GAP, " ", INTERLEAVE, " symbols=\"0123456789\";\n")
}
fcat(indent, "MATRIX\n")
if(format != "CONTINUOUS") {
print.matrix(x)
} else {
print.matrix(x, collapse = "\t")
}
fcat(indent, ";\nEND;\n\n")
} else {
fcat("BEGIN TAXA;\n")
fcat(indent, "DIMENSIONS", " ", NTAX, ";\n")
fcat(indent, "TAXLABELS\n")
fcat(indent, indent)
j <- 0
for (i in seq_len(ntax)) {
fcat(names(x[i]), " ")
j <- j + 1
if (j == maxtax) {
fcat("\n", indent, indent)
j <- 0
}
}
fcat("\n", indent, ";\n")
fcat("END;\n\nBEGIN CHARACTERS;\n")
fcat(indent, "DIMENSIONS", " ", NCHAR, ";\n")
if(format != "STANDARD") {
fcat(indent, "FORMAT", " ", MISSING, " ", GAP, " ", DATATYPE, " ", INTERLEAVE, ";\n")
} else {
fcat(indent, "FORMAT", " ", MISSING, " ", GAP, " ", DATATYPE, " ", INTERLEAVE, " symbols=\"0123456789\";\n")
}
fcat(indent,"MATRIX\n")
if(format != "CONTINUOUS") {
print.matrix(x)
} else {
print.matrix(x, collapse = "\t")
}
fcat(indent, ";\nEND;\n\n")
}
close(zz)
}
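# Minimal usage sketch: write a tiny two-taxon DNA alignment (a named list of
# character vectors) to a temporary NEXUS file using the default DNA format.
aln <- list(taxon_A = c("a", "c", "g", "t", "a", "c"),
            taxon_B = c("a", "c", "g", "-", "a", "c"))
write.nexus.data(aln, file = tempfile(fileext = ".nex"))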
|
context("test-symbols.R")
test_that("symbols work", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
syms <- fixer_symbols()
expect_equal(length(syms), 2)
expect_true("USD" %in% syms$name)
  expect_true(tibble::is_tibble(syms))
})
|
read.magpie <- function(file_name, file_folder = "", file_type = NULL, as.array = FALSE,
comment.char = "*", check.names = FALSE, ...) {
.buildFileName <- function(fileName, fileFolder) {
fileName <- paste0(fileFolder, fileName)
fileNameOut <- Sys.glob(fileName)
if (length(fileNameOut) > 1) {
fileNameOut <- fileNameOut[1]
warning("File name ", fileName, " is ambiguous, only first alternative is used!")
} else if (length(fileNameOut) == 0) {
stop("File ", fileName, " does not exist!")
}
return(fileNameOut)
}
fileName <- .buildFileName(file_name, file_folder)
.getFileType <- function(fileType, fileName) {
fileType <- ifelse(is.null(fileType), tail(strsplit(fileName, "\\.")[[1]], 1), fileType)
allowedTypes <- c("rds", "m", "mz", "csv", "cs2", "cs2b", "cs3", "cs4", "csvr", "cs2r", "cs3r",
"cs4r", "put", "asc", "nc")
if (!(fileType %in% allowedTypes)) stop("Unknown file type: ", fileType)
return(fileType)
}
fileType <- .getFileType(file_type, fileName)
if (fileType %in% c("m", "mz")) {
readMagpie <- readMagpieMZ(fileName, compressed = (fileType == "mz"))
} else if (fileType == "rds") {
readMagpie <- readRDS(fileName)
if (!is.magpie(readMagpie)) stop("File does not contain a magpie object!")
} else if (fileType == "cs3" | fileType == "cs3r") {
x <- read.csv(fileName, comment.char = comment.char, check.names = check.names, stringsAsFactors = TRUE)
datacols <- grep("^dummy\\.?[0-9]*$", colnames(x))
xdimnames <- lapply(x[datacols], function(x) return(as.character(unique(x))))
xdimnames[[length(xdimnames) + 1]] <- colnames(x)[-datacols]
names(xdimnames) <- NULL
tmparr <- array(NA, dim = sapply(xdimnames, length), dimnames = xdimnames)
for (i in xdimnames[[length(xdimnames)]]) {
j <- sapply(cbind(x[datacols], i), as.character)
.duplicates_check(j)
tmparr[j] <- x[, i]
}
readMagpie <- as.magpie(tmparr)
if (length(grep("^[A-Z]+_[0-9]+$", getCells(readMagpie))) == ncells(readMagpie)) {
getCells(readMagpie) <- sub("_", ".", getCells(readMagpie))
}
attr(readMagpie, "comment") <- .readComment(fileName, commentChar = comment.char)
} else if (fileType == "cs4" | fileType == "cs4r") {
x <- read.csv(fileName, comment.char = comment.char, header = FALSE,
check.names = check.names, stringsAsFactors = TRUE)
readMagpie <- as.magpie(x, tidy = TRUE)
attr(readMagpie, "comment") <- .readComment(fileName, commentChar = comment.char)
} else if (fileType %in% c("asc", "nc", "grd", "tif")) {
if (!requireNamespace("raster", quietly = TRUE)) stop("The package \"raster\" is required!")
if (fileType == "nc") {
if (!requireNamespace("ncdf4", quietly = TRUE)) {
stop("The package \"ncdf4\" is required!")
}
nc <- ncdf4::nc_open(fileName)
var <- names(nc[["var"]])
vdim <- vapply(nc[["var"]], function(x) return(x$ndim), integer(1))
var <- var[vdim > 0]
ncdf4::nc_close(nc)
tmp <- list()
for (v in var) {
warning <- capture.output(tmp[[v]] <- raster::brick(fileName, varname = v, ...))
if (length(warning) > 0) {
tmp[[v]] <- NULL
next
}
name <- sub("^X([0-9]*)$", "y\\1", names(tmp[[v]]), perl = TRUE)
if (length(name) == 1 && name == "layer") name <- "y0"
names(tmp[[v]]) <- paste0(name, "..", v)
}
readMagpie <- as.magpie(raster::brick(tmp))
} else {
readMagpie <- as.magpie(raster::brick(fileName, ...))
}
} else {
readMagpie <- readMagpieOther(fileName, fileType, comment.char = comment.char, check.names = check.names)
}
if (as.array) readMagpie <- as.array(readMagpie)[, , ]
return(readMagpie)
}
|
SL.npreg <- function(Y, X, newX, family = gaussian(),
obsWeights = rep(1, length(Y)),
rangeThresh = 1e-7, ...) {
options(np.messages = FALSE)
if (abs(diff(range(Y))) <= rangeThresh) {
thisMod <- glm(Y ~ 1, data = X)
} else {
bw <- np::npregbw(
stats::as.formula(
paste("Y ~", paste(names(X), collapse = "+"))
),
data = X,
ftol = 0.01, tol = 0.01, remin = FALSE
)
thisMod <- np::npreg(bw)
}
pred <- stats::predict(thisMod, newdata = newX)
fit <- list(object = thisMod)
class(fit) <- "SL.npreg"
out <- list(pred = pred, fit = fit)
return(out)
}
predict.SL.npreg <- function(object, newdata, ...) {
pred <- stats::predict(object = object$object, newdata = newdata)
pred
}
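# Hypothetical stand-alone sketch of SL.npreg() (assumes the 'np' package is
# installed); in practice the wrapper would be supplied to SuperLearner as a
# library entry rather than called directly.
if (requireNamespace("np", quietly = TRUE)) {
  set.seed(1)
  X <- data.frame(x1 = rnorm(50))
  Y <- sin(X$x1) + rnorm(50, sd = 0.1)
  fit <- SL.npreg(Y = Y, X = X, newX = X)
  head(fit$pred)
}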
|
setGeneric("getformula", function(x) standardGeneric("getformula"),
package = "xergm.common")
setGeneric("interpret", function(object, ...) standardGeneric("interpret"),
package = "xergm.common")
setGeneric("gof", function(object, ...) standardGeneric("gof"),
package = "xergm.common")
setGeneric("checkdegeneracy", function(object, ...)
standardGeneric("checkdegeneracy"), package = "xergm.common")
|
interactive_composite <- function(image, composite_image, operator = "atop", compose_args = "", resolution = 1, return_param = FALSE, scale)
{
image_original <- image
image <- image_convert(as.list(image)[[1]], format = "png")
composite_image_original <- composite_image
composite_image <- image_convert(as.list(composite_image)[[1]], format = "png")
iniv <- 0
initial <- image_composite(image, composite_image, operator = operator, offset = geometry_point(iniv, iniv), compose_args = compose_args)
is_missing_scale <- missing(scale)
iminfo <- image_info(image)
iminfo_composite <- image_info(composite_image)
range_x <- c(-iminfo_composite[["width"]], iminfo[["width"]])
range_y <- c(-iminfo_composite[["height"]], iminfo[["height"]])
length_slider <- as.integer(iminfo$width * 0.6)
if (length_slider < 200)
{
length_slider <- 200
}
text_label_x <- "x: "
text_label_y <- "y: "
quit_waiting <- !is.null(getOption("unit_test_magickGUI"))
temp <- tempfile(fileext = ".jpg")
on.exit(unlink(temp), add = TRUE)
if (!is_missing_scale)
{
image_write(image_scale(initial, scale), temp)
} else
{
image_write(initial, temp)
}
image_tcl <- tkimage.create("photo", "image_tcl", file = temp)
label_digits <- -as.integer(log(resolution, 10))
label_digits <- ifelse(label_digits > 0, label_digits, 0)
label_template <- sprintf("%%.%df", label_digits)
win1 <- tktoplevel()
on.exit(tkdestroy(win1), add = TRUE)
win1.frame1 <- tkframe(win1)
win1.frame2 <- tkframe(win1)
win1.im <- tklabel(win1, image = image_tcl)
win1.frame1.label <- tklabel(win1.frame1, text = sprintf("%s%s", text_label_x, sprintf(label_template, iniv)))
win1.frame2.label <- tklabel(win1.frame2, text = sprintf("%s%s", text_label_y, sprintf(label_template, iniv)))
slider_value_x <- tclVar(iniv)
slider_value_y <- tclVar(iniv)
command_slider_x <- function(...)
{
assign("slider_value_x", slider_value_x, inherits = TRUE)
}
command_slider_y <- function(...)
{
assign("slider_value_y", slider_value_y, inherits = TRUE)
}
win1.frame1.slider <- tkscale(win1.frame1, from = range_x[1], to = range_x[2], variable = slider_value_x, orient = "horizontal", length = length_slider, command = command_slider_x, resolution = resolution, showvalue = 0)
win1.frame2.slider <- tkscale(win1.frame2, from = range_y[1], to = range_y[2], variable = slider_value_y, orient = "horizontal", length = length_slider, command = command_slider_y, resolution = resolution, showvalue = 0)
temp_val <- iniv
update_image <- function()
{
temp_image <- image_composite(image, composite_image, operator = operator, offset = geometry_point(temp_val[1], temp_val[2]), compose_args = compose_args)
if (!is_missing_scale)
{
image_write(image_scale(temp_image, scale), temp)
} else
{
image_write(temp_image, temp)
}
image_tcl <- tkimage.create("photo", "image_tcl", file = temp)
tkconfigure(win1.im, image = image_tcl)
}
command_button <- function(...)
{
assign("quit_waiting", TRUE, inherits = TRUE)
}
win1.button <- tkbutton(win1, text = "OK", command = command_button)
tkpack(win1.im, side = "top")
tkpack(win1.frame1.label, side = "left", anchor = "c")
tkpack(win1.frame1.slider, side = "left", anchor = "c")
tkpack(win1.frame1, side = "top", anchor = "c")
tkpack(win1.frame2.label, side = "left", anchor = "c")
tkpack(win1.frame2.slider, side = "left", anchor = "c")
tkpack(win1.frame2, side = "top", anchor = "c")
tkpack(win1.button, side = "top", anchor = "c", pady = 20)
pre_slider_values <- c(as.numeric(tclvalue(slider_value_x)), as.numeric(tclvalue(slider_value_y)))
if (quit_waiting)
{
wait_test <- TRUE
while (wait_test)
{
wait_test <- FALSE
tryCatch({
tkwm.state(win1)
},
error = function(e) assign("wait_test", TRUE, inherits = TRUE)
)
}
wait_time_long()
tkdestroy(win1.button)
}
tkwm.state(win1, "normal")
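# Polling loop: keep checking the Tk window until it has been destroyed
# (tkwm.state() then errors, which flips quit_waiting and breaks the loop).
# On each pass, re-read both sliders and, when either offset has changed,
# refresh the x/y labels and rebuild the composited preview via update_image().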
while (TRUE)
{
tryCatch({
tkwm.state(win1)
},
error = function(e) assign("quit_waiting", TRUE, inherits = TRUE)
)
if (quit_waiting) break
temp_val <- c(as.numeric(tclvalue(slider_value_x)), as.numeric(tclvalue(slider_value_y)))
if (any(temp_val != pre_slider_values))
{
temp_label_x <- sprintf("%s%s", text_label_x, sprintf(label_template, temp_val[1]))
temp_label_y <- sprintf("%s%s", text_label_y, sprintf(label_template, temp_val[2]))
tkconfigure(win1.frame1.label, text = temp_label_x)
tkconfigure(win1.frame2.label, text = temp_label_y)
update_image()
pre_slider_values <- temp_val
}
}
val_res <- pre_slider_values
names(val_res) <- c("x", "y")
if (return_param)
{
return(geometry_point(val_res[1], val_res[2]))
}
return(image_composite(image_original, composite_image_original, operator = operator, offset = geometry_point(val_res[1], val_res[2]), compose_args = compose_args))
}
|
eblupFH <-
function(formula,vardir,method="REML",MAXITER=100,PRECISION=0.0001,B=0,data)
{
result <- list(eblup=NA,
fit=list(method=method, convergence=TRUE, iterations=0, estcoef=NA,
refvar=NA, goodness=NA)
)
if (method!="REML" & method!="ML" & method!="FH")
stop(" method=\"",method, "\" must be \"REML\", \"ML\" or \"FH\".")
namevar <- deparse(substitute(vardir))
if (!missing(data))
{
formuladata <- model.frame(formula,na.action = na.omit,data)
X <- model.matrix(formula,data)
vardir <- data[,namevar]
} else
{
formuladata <- model.frame(formula,na.action = na.omit)
X <- model.matrix(formula)
}
y <- formuladata[,1]
if (attr(attributes(formuladata)$terms,"response")==1)
textformula <- paste(formula[2],formula[1],formula[3])
else
textformula <- paste(formula[1],formula[2])
if (length(na.action(formuladata))>0)
stop("Argument formula=",textformula," contains NA values.")
if (any(is.na(vardir)))
stop("Argument vardir=",namevar," contains NA values.")
m<-length(y)
p<-dim(X)[2]
Xt<-t(X)
if (method=="ML") {
Aest.ML<-0
Aest.ML[1]<-median(vardir)
k<-0
diff<-PRECISION+1
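# Fisher scoring for the ML estimate of the random-effect variance A:
# s is the score and F the (expected) information for A; iterate until the
# relative change in A falls below PRECISION or MAXITER is reached.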
while ((diff>PRECISION)&(k<MAXITER))
{
k<-k+1
Vi<-1/(Aest.ML[k]+vardir)
XtVi<-t(Vi*X)
Q<-solve(XtVi%*%X)
P<-diag(Vi)-t(XtVi)%*%Q%*%XtVi
Py<-P%*%y
s<-(-0.5)*sum(Vi)+0.5*(t(Py)%*%Py)
F<-0.5*sum(Vi^2)
Aest.ML[k+1]<-Aest.ML[k]+s/F
diff<-abs((Aest.ML[k+1]-Aest.ML[k])/Aest.ML[k])
}
A.ML<-max(Aest.ML[k+1],0)
result$fit$iterations <- k
if(k>=MAXITER && diff>=PRECISION)
{
result$fit$convergence <- FALSE
return(result)
}
Vi<-1/(A.ML+vardir)
XtVi<-t(Vi*X)
Q<-solve(XtVi%*%X)
beta.ML<-Q%*%XtVi%*%y
varA<-1/F
std.errorbeta<-sqrt(diag(Q))
tvalue<-beta.ML/std.errorbeta
pvalue<-2*pnorm(abs(tvalue),lower.tail=FALSE)
Xbeta.ML<-X%*%beta.ML
resid<-y-Xbeta.ML
loglike<-(-0.5)*(sum(log(2*pi*(A.ML+vardir))+(resid^2)/(A.ML+vardir)))
AIC<-(-2)*loglike+2*(p+1)
BIC<-(-2)*loglike+(p+1)*log(m)
goodness<-c(loglike=loglike,AIC=AIC,BIC=BIC)
coef <- data.frame(beta=beta.ML,std.error=std.errorbeta,tvalue,pvalue)
variance <- A.ML
EBLUP <- Xbeta.ML+A.ML*Vi*resid
} else if (method=="REML") {
Aest.REML<-0
Aest.REML[1]<-median(vardir)
k<-0
diff<-PRECISION+1
while ((diff>PRECISION)&(k<MAXITER))
{
k<-k+1
Vi<-1/(Aest.REML[k]+vardir)
XtVi<-t(Vi*X)
Q<-solve(XtVi%*%X)
P<-diag(Vi)-t(XtVi)%*%Q%*%XtVi
Py<-P%*%y
s<-(-0.5)*sum(diag(P))+0.5*(t(Py)%*%Py)
F<-0.5*sum(diag(P%*%P))
Aest.REML[k+1]<-Aest.REML[k]+s/F
diff<-abs((Aest.REML[k+1]-Aest.REML[k])/Aest.REML[k])
}
A.REML<-max(Aest.REML[k+1],0)
result$fit$iterations <- k
if(k>=MAXITER && diff>=PRECISION)
{
result$fit$convergence <- FALSE
return(result)
}
Vi<-1/(A.REML+vardir)
XtVi<-t(Vi*X)
Q<-solve(XtVi%*%X)
beta.REML<-Q%*%XtVi%*%y
varA<-1/F
std.errorbeta<-sqrt(diag(Q))
tvalue<-beta.REML/std.errorbeta
pvalue<-2*pnorm(abs(tvalue),lower.tail=FALSE)
Xbeta.REML<-X%*%beta.REML
resid<-y-Xbeta.REML
loglike<-(-0.5)*(sum(log(2*pi*(A.REML+vardir))+(resid^2)/(A.REML+vardir)))
AIC<-(-2)*loglike+2*(p+1)
BIC<-(-2)*loglike+(p+1)*log(m)
goodness<-c(loglike=loglike,AIC=AIC,BIC=BIC)
coef <- data.frame(beta=beta.REML,std.error=std.errorbeta,tvalue,pvalue)
variance <- A.REML
EBLUP <- Xbeta.REML+A.REML*Vi*resid
} else
{
Aest.FH<-NULL
Aest.FH[1]<-median(vardir)
k<-0
diff<-PRECISION+1
while ((diff>PRECISION)&(k<MAXITER)){
k<-k+1
Vi<-1/(Aest.FH[k]+vardir)
XtVi<-t(Vi*X)
Q<-solve(XtVi%*%X)
betaaux<-Q%*%XtVi%*%y
resaux<-y-X%*%betaaux
s<-sum((resaux^2)*Vi)-(m-p)
F<-sum(Vi)
Aest.FH[k+1]<-Aest.FH[k]+s/F
diff<-abs((Aest.FH[k+1]-Aest.FH[k])/Aest.FH[k])
}
A.FH<-max(Aest.FH[k+1],0)
result$fit$iterations <- k
if(k>=MAXITER && diff>=PRECISION)
{
result$fit$convergence <- FALSE
return(result)
}
Vi<-1/(A.FH+vardir)
XtVi<-t(Vi*X)
Q<-solve(XtVi%*%X)
beta.FH<-Q%*%XtVi%*%y
varA<-1/F
varbeta<-diag(Q)
std.errorbeta<-sqrt(varbeta)
zvalue<-beta.FH/std.errorbeta
pvalue<-2*pnorm(abs(zvalue),lower.tail=FALSE)
Xbeta.FH<-X%*%beta.FH
resid<-y-Xbeta.FH
loglike<-(-0.5)*(sum(log(2*pi*(A.FH+vardir))+(resid^2)/(A.FH+vardir)))
AIC<-(-2)*loglike+2*(p+1)
BIC<-(-2)*loglike+(p+1)*log(m)
goodness<-c(loglike=loglike,AIC=AIC,BIC=BIC)
coef <- data.frame(beta=beta.FH,std.error=std.errorbeta,tvalue=zvalue,pvalue)
variance <- A.FH
EBLUP <- Xbeta.FH+A.FH*Vi*resid
}
result$fit$estcoef <- coef
result$fit$refvar <- variance
result$fit$goodness <- goodness
result$eblup <- EBLUP
min2loglike <- (-2)*loglike
KIC <- min2loglike + 3 * (p+1)
if (B>=1)
{
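# Parametric bootstrap: simulate B replicate responses from the fitted model,
# refit eblupFH() on each, and accumulate the terms needed for the
# bootstrap-based information criteria (AICc, AICb1, AICb2, KICc, KICb1, KICb2)
# appended to fit$goodness below. Non-convergent replicates are redrawn rather
# than counted.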
sigma2d <- vardir
lambdahat <- result$fit$refvar
betahat <- matrix(result$fit$estcoef[,"beta"],ncol=1)
D <- nrow(X)
B1hatast <- 0
B3ast <- 0
B5ast <- 0
sumlogf_ythetahatastb <- 0
sumlogf_yastbthetahatastb <- 0
Xbetahat <- X%*%betahat
b <- 1
while (b<=B)
{
uastb <- sqrt(lambdahat)*matrix(data=rnorm(D, mean=0, sd=1), nrow=D, ncol=1)
eastb <- sqrt(sigma2d)*matrix(data=rnorm(D, mean=0, sd=1), nrow=D, ncol=1)
yastb <- Xbetahat + uastb + eastb
resultb <- eblupFH(yastb~X-1,sigma2d,method=method,MAXITER=MAXITER,PRECISION=PRECISION)
if (resultb$fit$convergence==FALSE)
{
message <- paste("Bootstrap b=",b,": ",method," iteration does not converge.\n")
cat(message)
next
}else
{
betahatastb <- matrix(resultb$fit$estcoef[,"beta"],ncol=1)
lambdahatastb <- resultb$fit$refvar
Xbetahathatastb2 <- (X%*%(betahat-betahatastb))^2
yastbXbetahatastb2 <- (yastb-X%*%betahatastb)^2
lambdahatastbsigma2d <- lambdahatastb + sigma2d
lambdahatsigma2d <- lambdahat + sigma2d
B1ast <- sum((lambdahatsigma2d + Xbetahathatastb2 - yastbXbetahatastb2) / lambdahatastbsigma2d)
B1hatast <- B1hatast + B1ast
logf <- (-0.5)*sum( log(2*pi*lambdahatastbsigma2d) + ((y-X%*%betahatastb)^2)/lambdahatastbsigma2d )
sumlogf_ythetahatastb <- sumlogf_ythetahatastb + logf
sumlogf_yastbthetahatastb <- sumlogf_yastbthetahatastb + resultb$fit$goodness["loglike"]
B3ast <- B3ast + sum((lambdahatastbsigma2d + Xbetahathatastb2)/lambdahatsigma2d)
B5ast <- B5ast + sum(log(lambdahatastbsigma2d) + yastbXbetahatastb2/lambdahatastbsigma2d)
b <- b+1
}
}
B2ast <- sum(log(lambdahatsigma2d)) + B3ast/B - B5ast/B
AICc <- min2loglike + B1hatast/B
AICb1 <- as.vector(min2loglike -2/B*(sumlogf_ythetahatastb - sumlogf_yastbthetahatastb))
AICb2 <- as.vector(min2loglike -4/B*(sumlogf_ythetahatastb - result$fit$goodness["loglike"]*B))
KICc <- AICc + B2ast
KICb1 <- AICb1 + B2ast
KICb2 <- AICb2 + B2ast
result$fit$goodness <- c(result$fit$goodness,KIC=KIC,AICc=AICc,AICb1=AICb1,AICb2=AICb2,KICc=KICc,KICb1=KICb1,KICb2=KICb2,nBootstrap=B)
}
else
result$fit$goodness <- c(result$fit$goodness,KIC=KIC)
return(result)
}
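# Illustrative call, not part of the original source: fit the Fay-Herriot model
# defined above on simulated area-level data (all object names here are made up).
set.seed(1)
m_areas <- 30
x1 <- runif(m_areas)                             # area-level covariate
Dd <- runif(m_areas, 0.3, 0.7)                   # known sampling variances
y <- 1 + 2 * x1 + rnorm(m_areas, sd = sqrt(0.5)) + rnorm(m_areas, sd = sqrt(Dd))
fitFH <- eblupFH(y ~ x1, vardir = Dd, method = "REML")
fitFH$fit$refvar    # estimated variance of the area random effects
head(fitFH$eblup)   # EBLUP estimates of the area means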
|
afficheResult <-
function(x, noCycle, noEtape, nbSujets, nbVar){
cat("Cycle no.", noCycle, "\n")
cat("Step no.", noEtape, "\n")
cat("Number of observations : ", nbSujets, "\n")
cat("Coefficients : ", x[1:nbVar], "\n")
cat("Threshold=", x["seuil"], " Se=", x["Se"], "Sp=", x["Sp"], "AUC=", x["AUC"], "\n")
cat("=============================================================================\n")
}
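# Illustrative call with made-up values: x is expected to carry the fitted
# coefficients in its first nbVar positions plus named "seuil", "Se", "Sp" and
# "AUC" entries.
afficheResult(c(0.84, -0.32, seuil = 0.5, Se = 0.81, Sp = 0.76, AUC = 0.83),
              noCycle = 1, noEtape = 2, nbSujets = 120, nbVar = 2)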
|
text_drake_graph <- function(
...,
from = NULL,
mode = c("out", "in", "all"),
order = NULL,
subset = NULL,
targets_only = FALSE,
make_imports = TRUE,
from_scratch = FALSE,
group = NULL,
clusters = NULL,
show_output_files = TRUE,
nchar = 1L,
print = TRUE,
config = NULL
) {
}
text_drake_graph_impl <- function(
config,
from = NULL,
mode = c("out", "in", "all"),
order = NULL,
subset = NULL,
targets_only = FALSE,
make_imports = TRUE,
from_scratch = FALSE,
group = NULL,
clusters = NULL,
show_output_files = TRUE,
nchar = 1L,
print = TRUE
) {
assert_pkg("visNetwork")
graph_info <- drake_graph_info_impl(
config = config,
from = from,
mode = mode,
order = order,
subset = subset,
build_times = "none",
digits = 0,
targets_only = targets_only,
font_size = 20,
make_imports = make_imports,
from_scratch = from_scratch,
full_legend = FALSE,
group = group,
clusters = clusters,
show_output_files = show_output_files,
hover = FALSE
)
render_text_drake_graph(
graph_info = graph_info,
nchar = nchar,
print = print
)
}
body(text_drake_graph) <- config_util_body(text_drake_graph_impl)
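# The empty body of text_drake_graph() above is deliberate: the function only
# declares the user-facing arguments, and the body() assignment fills it in from
# text_drake_graph_impl() via drake's config_util_body() helper. A self-contained
# sketch of the same wrapper/implementation pattern, using made-up names:
impl_fn <- function(config, nchar = 1L) paste("render", nchar, "chars per node")
ui_fn <- function(..., nchar = 1L, config = NULL) {}
body(ui_fn) <- quote(impl_fn(config = config, nchar = nchar))
ui_fn(nchar = 2L)  # "render 2 chars per node"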
render_text_drake_graph <- function(graph_info, nchar = 1L, print = TRUE) {
assert_pkg("txtplot")
pch <- apply(
X = graph_info$nodes,
MARGIN = 1,
FUN = function(node) {
id <- redisplay_keys(node["id"])
id <- substr(x = id, start = 0L, stop = nchar)
id <- ifelse(nchar > 0, id, " ")
if (requireNamespace("crayon", quietly = TRUE)) {
cl <- gsub("000000", "666666", node["color"])
id <- crayon::make_style(cl, bg = nchar < 1L)(id)
}
id
}
)
x <- graph_info$nodes$x
y <- graph_info$nodes$y
txt <- utils::capture.output(
txtplot::txtplot(x = x, y = y, pch = pch)
)
txt <- txt[-c(1L, length(txt) - 1L, length(txt))]
txt <- gsub("\\+|\\|", "|", txt)
txt <- gsub("^[^\\|]*\\|", "", txt)
txt <- gsub("\\|", "", txt)
txt <- paste(txt, collapse = "\n")
if (print) {
message(txt)
}
invisible(txt)
}
|
library(testthat)
library(rray)
test_check("rray")
|
X <- toyModel("Tucker")
out1_1 <- NTD(X, rank=c(1,2,3), algorithm="Frobenius", num.iter=2)
out1_2 <- NTD(X, rank=c(1,2,3), algorithm="Frobenius", init="ALS", num.iter=2)
out1_3 <- NTD(X, rank=c(1,2,3), algorithm="Frobenius", init="Random", num.iter=2)
out2 <- NTD(X, rank=c(1,2,3), algorithm="KL", num.iter=2)
out3 <- NTD(X, rank=c(1,2,3), algorithm="IS", num.iter=2)
out4 <- NTD(X, rank=c(1,2,3), algorithm="Pearson", num.iter=2)
out5 <- NTD(X, rank=c(1,2,3), algorithm="Hellinger", num.iter=2)
out6 <- NTD(X, rank=c(1,2,3), algorithm="Neyman", num.iter=2)
out7 <- NTD(X, rank=c(1,2,3), algorithm="Alpha", num.iter=2)
out8 <- NTD(X, rank=c(1,2,3), algorithm="Beta", num.iter=2)
out9 <- NTD(X, rank=c(1,2,3), algorithm="HALS", num.iter=2)
out10 <- NTD(X, rank=c(1,2,3), algorithm="NMF",
init = "Random", nmf.algorithm="Projected", num.iter=2, num.iter2=2)
out_NTD2_1 <- NTD(X, rank=c(2,3), modes=1:2, algorithm="Frobenius", num.iter=2)
out_NTD2_2 <- NTD(X, rank=c(3,4), modes=2:3, algorithm="Frobenius", num.iter=2)
out_NTD2_3 <- NTD(X, rank=c(4,6), modes=c(1,3), algorithm="Frobenius", num.iter=2)
out_NTD1_1 <- NTD(X, rank=3, modes=1, algorithm="Frobenius", num.iter=2)
out_NTD1_2 <- NTD(X, rank=4, modes=2, algorithm="Frobenius", num.iter=2)
out_NTD1_3 <- NTD(X, rank=5, modes=3, algorithm="Frobenius", num.iter=2)
expect_equivalent(length(out1_1), 6)
expect_equivalent(length(out1_2), 6)
expect_equivalent(length(out1_3), 6)
expect_equivalent(length(out2), 6)
expect_equivalent(length(out3), 6)
expect_equivalent(length(out4), 6)
expect_equivalent(length(out5), 6)
expect_equivalent(length(out6), 6)
expect_equivalent(length(out7), 6)
expect_equivalent(length(out8), 6)
expect_equivalent(length(out9), 6)
expect_equivalent(length(out10), 6)
expect_equivalent(length(out_NTD2_1), 6)
expect_equivalent(length(out_NTD2_2), 6)
expect_equivalent(length(out_NTD2_3), 6)
expect_equivalent(length(out_NTD1_1), 6)
expect_equivalent(length(out_NTD1_2), 6)
expect_equivalent(length(out_NTD1_3), 6)
|
betaRegDisp <- function(y, x, xy.coords = NULL, ws = 3,
method.1 = "jaccard",
method.2 = "ruzicka",
method.3 = "ruzicka",
independent.data = FALSE,
illust.plot = FALSE){
y <- y[order(x, decreasing = FALSE), ]
if(!is.null(xy.coords))xy.coords <- xy.coords[order(x, decreasing = FALSE), ]
x <- x[order(x, decreasing = FALSE)]
N <- (ws)/2
is.even <- function(ee){ ee %% 2 == 0 }
if(is.even(ws)==FALSE){
N <- N - 0.5
}
size <- length(x)
SEQ <- 1:(size-ws+1)
if(is.even(ws)==FALSE){
SEQ <- (N+1):(size-N)
}
if (independent.data) SEQ <- seq (1,size-(ws-1),ws)
if (independent.data && is.even(ws)==FALSE) SEQ <- seq (N+1,size-N,ws)
n <- length(SEQ)
result <- matrix(0, n, 10)
colnames(result) <- c("grad", "mean.grad",
"mean.diss.pairs", "mean.diss.focal", "mean.dist.cent",
"SS.group", "SS.focal",
"beta.TOT","beta.NES","beta.TUR")
result[, "grad"] <- x[SEQ]
rownames(result) <- names(x)[SEQ]
disT <- vegdist(y, method = method.1)
if(!is.null(xy.coords)){
geo.dist<-matrix(0,n,2,
dimnames = list(NULL,c('mean.geodist','focal.geodist')))
}
count = 1
for(i in SEQ){
group <- rep("B", times = size)
if(is.even(ws)==FALSE){
sites <- (i-N):(i+N)
}
if(is.even(ws)==TRUE){
sites <- i:(i+ws-1)
}
sites <- c(i, sites[sites!=i])
result[count, "mean.grad"] <- mean(x[sites])
if(!is.null(xy.coords)){
d <- dist(xy.coords[sites,], method = "euclidean")
geo.dist[count,] <- c(mean(d), mean(d[1:(ws-1)]))
}
if(illust.plot == TRUE){
plot(1:size, x)
points(sites, x[sites], cex = 2, col = "blue")
points(i, x[i], cex = 3, col = "red", pch = 0)
}
group[sites] <- "A"
mat <- y[sites, ]
dis <- vegdist(mat, method = method.1)
result[count, "mean.diss.pairs"] <- mean(dis)
result[count, "mean.diss.focal"] <- mean(dis[1:(ws-1)])
mod <- betadisper(disT, group = group)
d <- mod$distances
result[count, "mean.dist.cent"] <- mean(d[group=="A"])
beta.b <- beta.multi.abund(x = mat, index.family = method.3)
nomes.m3 <- c("bray", "ruzicka")
method.3 <- nomes.m3[pmatch(method.3, nomes.m3)]
if(method.3 == "bray"){
result[count, "beta.TOT"] <- beta.b$beta.BRAY
result[count, "beta.TUR"] <- beta.b$beta.BRAY.BAL
result[count, "beta.NES"] <- beta.b$beta.BRAY.GRA
}
if(method.3 == "ruzicka"){
result[count, "beta.TOT"] <- beta.b$beta.RUZ
result[count, "beta.TUR"] <- beta.b$beta.RUZ.BAL
result[count, "beta.NES"] <- beta.b$beta.RUZ.GRA
}
res.SS <- beta.div(Y = mat, method = method.2, nperm = 0)
result[count, "SS.group"] <- res.SS$beta["SStotal"]
result[count, "SS.focal"] <- res.SS$LCBD[rownames(mat)[1]]*res.SS$beta["SStotal"]
count = count + 1
}
if(!is.null(xy.coords))result<-cbind(result,geo.dist)
return(result)
}
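# Illustrative call, not in the original source. betaRegDisp() relies on
# vegdist() and betadisper() from vegan, beta.multi.abund() from betapart and
# beta.div() from adespatial, so those packages must be installed and attached.
library(vegan)
library(betapart)
library(adespatial)
data(BCI)                         # 50 forest plots x 225 tree species (vegan)
grad <- seq_len(nrow(BCI))        # stand-in gradient: plot order
res <- betaRegDisp(y = BCI, x = grad, ws = 5)
head(res[, c("grad", "beta.TOT", "beta.TUR", "beta.NES")])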
|
library(Hmisc)
d1 = data.frame(drugId = c(11, 22, 33), drugInfo = c("$36.",
"2 for $11", "50% sale"), stringsAsFactors = FALSE)
d1$drugInfo = gsub("\\$", "\\\\$", d1$drugInfo)
d1$drugInfo = gsub("\\%", "\\\\%", d1$drugInfo)
d1
latex(d1, rowname = NULL, colheads = c("Drug ID", "Drug Price"),
file = "")
|
df <- data.frame(State = LETTERS[1:3],
Y = sample(1:10, 30, replace = TRUE),
X = rep(1:10, 3))
df
library(ggplot2)
ggplot(df, aes(X, Y)) +
geom_bar(stat = "identity", position = "dodge") +
facet_grid(State ~ .)
ggplot(df) +
geom_rect(aes(xmin = X - 0.4, xmax = X + 0.4, ymin = 0, ymax = Y)) +
facet_grid(State ~ .)
ggplot(df) +
geom_rect(aes(xmin = 0, xmax = Y, ymin = X - 0.4, ymax = X + 0.4)) +
geom_boxplot(aes(X, Y)) +
coord_flip() +
facet_grid(State ~ .)
ggplot(df) +
geom_rect(aes(xmin = 0, xmax = Y, ymin = X - 0.4, ymax = X + 0.4),
fill = "blue", color = "black") +
geom_boxplot(aes(X, Y), alpha = 0.7, fill = "salmon2") +
coord_flip() +
facet_grid(State ~ .) +
theme_classic() +
scale_y_continuous(breaks = 1:max(df$X))
ggplot(iris, aes(x = Sepal.Width)) +
geom_histogram(binwidth = 0.05) +
geom_boxplot(aes(x = 3, y = Sepal.Width))
library(gridExtra)
a <- ggplot(iris, aes(x = Sepal.Width)) +
geom_histogram(binwidth = 0.05)
b <- ggplot(iris, aes(x = "", y = Sepal.Width)) +
geom_boxplot() +
coord_flip()
grid.arrange(a,b,nrow=2)
a <- ggplot(mtcars, aes(x = mpg)) + geom_histogram(binwidth = 0.1)
b <- ggplot(mtcars, aes(x = "", y = mpg)) + geom_boxplot() + coord_flip()
grid.arrange(a,b,nrow=2)
my3cols <- c("
ggplot(mtcars, aes(x=cyl, y=mpg , group=gear)) + geom_dotplot(aes(color = gear, fill = gear), binaxis = 'y', stackdir = 'center') + scale_color_manual(values = my3cols) + scale_fill_manual(values = my3cols)
|
RNGkind("default")
rm(list=objects())
if(file.exists("./clinDR/inst/tests/extraGraphics")){
pvarA<-"./clinDR/inst/tests/extraGraphics"
} else pvarA<-NULL
source(file.path(pvarA,'test.densityplots.R'),echo=TRUE)
RNGkind("default")
rm(list=objects())
if(file.exists("./clinDR/inst/tests/extraGraphics")){
pvarA<-"./clinDR/inst/tests/extraGraphics"
} else pvarA<-NULL
source(file.path(pvarA,'test.plot.emaxsimBobj.R'),echo=TRUE)
RNGkind("default")
rm(list=objects())
if(file.exists("./clinDR/inst/tests/extraGraphics")){
pvarA<-"./clinDR/inst/tests/extraGraphics"
} else pvarA<-NULL
source(file.path(pvarA,'test.plot.emaxsimobj.R'),echo=TRUE)
RNGkind("default")
rm(list=objects())
if(file.exists("./clinDR/inst/tests/extraGraphics")){
pvarA<-"./clinDR/inst/tests/extraGraphics"
} else pvarA<-NULL
source(file.path(pvarA,'test.plot.fitEmax.R'),echo=TRUE)
RNGkind("default")
rm(list=objects())
if(file.exists("./clinDR/inst/tests/extraGraphics")){
pvarA<-"./clinDR/inst/tests/extraGraphics"
} else pvarA<-NULL
source(file.path(pvarA,'test.plot.fitEmaxB.R'),echo=TRUE)
RNGkind("default")
rm(list=objects())
if(file.exists("./clinDR/inst/tests/extraGraphics")){
pvarA<-"./clinDR/inst/tests/extraGraphics"
} else pvarA<-NULL
source(file.path(pvarA,'test.plotB.R'),echo=TRUE)
RNGkind("default")
rm(list=objects())
if(file.exists("./clinDR/inst/tests/extraGraphics")){
pvarA<-"./clinDR/inst/tests/extraGraphics"
} else pvarA<-NULL
source(file.path(pvarA,'test.plotD.R'),echo=TRUE)
|
context("DeduImpute")
test_that('deduImpute works for editarrays',{
E <- editmatrix(c(
"x1 + x2 == x3",
"x2 == x4",
"x5 + x6 + x7 == x8",
"x3 + x8 == x9",
"x9 - x10 == x11",
"x6 >= 0",
"x7 >= 0"
))
dat <- data.frame(
x1=c(145,145),
x2=c(NA,NA),
x3=c(155,155),
x4=c(NA,NA),
x5=c(NA, 86),
x6=c(NA,NA),
x7=c(NA,NA),
x8=c(86,86),
x9=c(NA,NA),
x10=c(217,217),
x11=c(NA,NA)
)
v <- deduImpute(E,dat)$corrected
expect_equal(v$x1,c(145,145))
expect_equal(v$x2,c(10,10))
expect_equal(v$x5,c(NA,86))
expect_equal(v$x6,c(NA,0))
})
test_that('deduImpute handles variables in records not in edits',{
E <- editmatrix(" x + y == z")
dat <- data.frame(x=1,y=NA,z=2,v=0)
v <- deduImpute(E,dat)$corrected
expect_equal(as.numeric(v[1,]),c(1,1,2,0))
})
context('Deductive imputation with solSpace and imputess')
test_that('solution space works for a simple equality',{
expect_equal(solSpace(editmatrix("x + y == z"),x=c(x=1,y=NA,z=3))$x0[1],2)
expect_equal(solSpace(editmatrix("x + y == z"),x=c(x=1,y=NA,z=3))$C[1],0)
})
test_that('solution space works with extra variables in record',{
expect_equal(solSpace(editmatrix("x + y == z"),x=c(x=1,y=NA,z=3,w=9))$x0[1],2)
expect_equal(solSpace(editmatrix("x + y == z"),x=c(x=1,y=NA,z=3,u=1,v=NA))$x0[1],2)
})
context('Deductive imputation with deductiveZeros')
test_that('deductiveZeros works for a simple equality',{
expect_equal(deductiveZeros(editmatrix(c("x + y == z","y>=0")),x=c(x=1,y=NA,z=1)),c(x=FALSE,y=TRUE,z=FALSE))
})
test_that('deductiveZeros works with variables in record not in editmatrix',{
expect_equal(deductiveZeros(editmatrix(c("x + y == z","y>=0")),x=c(x=1,y=NA,z=1,u=1,v=2)),c(x=FALSE,y=TRUE,z=FALSE,u=FALSE,v=FALSE))
expect_equal(deductiveZeros(editmatrix(c("x + y == z","y>=0")),x=c(x=1,y=NA,z=1,u=1,v=NA)),c(x=FALSE,y=TRUE,z=FALSE,u=FALSE,v=FALSE))
})
context('The deduImpute method for editset')
test_that('deduImpute.editset works for pure numeric',{
E <- editset(expression(x + y == z))
x <- data.frame(
x = NA,
y = 1,
z = 1)
expect_equal(deduImpute(E,x)$corrected$x,0)
})
test_that('deduImpute.editset works for pure categorical',{
E <- editset(expression(
A %in% c('a','b'),
B %in% c('c','d'),
if ( A == 'a' ) B == 'b')
)
x <- data.frame(
A = 'a',
B = NA)
expect_equal(deduImpute(E, x)$corrected$B,'b')
})
test_that('deduImpute.editset works for unconnected categorical and numerical',{
E <- editset(expression(
x + y == z,
A %in% c('a','b'),
B %in% c('c','d'),
if ( A == 'a' ) B == 'b')
)
x <- data.frame(
x = NA,
y = 1,
z = 1,
A = 'a',
B = NA)
v <- deduImpute(E,x)
expect_equal(v$corrected$B,'b')
expect_equal(v$corrected$x, 0)
expect_true(v$status$status == 'corrected')
})
test_that('deduImpute.editset works for connected numerical and categorical',{
E <- editset(expression(
x + y == z,
x >= 0,
A %in% c('a','b'),
B %in% c('c','d'),
if ( A == 'a' ) B == 'b',
if ( B == 'b' ) x > 0
))
x <- data.frame(
x = NA,
y = 1,
z = 1,
A = 'a',
B = NA
)
v <- deduImpute(E,x)
expect_equal(nrow(v$corrections),0)
expect_equal(v$corrected,x)
})
|
a = b = c = d = e = f = g <- 4
|
suppressMessages(library(rENA, quietly = T, verbose = F))
context("Test making R6 sets");
code_names <- c("Data", "Technical.Constraints","Performance.Parameters",
"Client.and.Consultant.Requests","Design.Reasoning","Collaboration")
test_that("Accumulate returns an R6", {
data(RS.data)
df.file <- RS.data
df.accum <- suppressWarnings(
ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = FALSE)
)
testthat::expect_is(df.accum, "ENAdata",
"Accumulation with as.list = FALSE did not return ENAdata"
)
})
test_that("Function params includes ... args", {
data(RS.data)
df.file <- RS.data
df.accum <- suppressWarnings(
ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = FALSE)
)
df.accum.grain <- suppressWarnings(
ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = FALSE, grainSize = 10)
)
testthat::expect_false("grainSize" %in% names(df.accum$function.params))
testthat::expect_true("grainSize" %in% names(df.accum.grain$function.params))
})
test_that("Old accum ignored meta.data", {
data(RS.data)
df.file <- RS.data
df.accum <- suppressWarnings(
ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = FALSE,
include.meta = FALSE
)
)
testthat::expect_equal(nrow(df.accum$metadata), 0)
})
test_that("Make.set returns an R6", {
data(RS.data)
df.file <- RS.data
df.accum <- suppressWarnings(
rENA:::ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = FALSE
)
)
df.set <- suppressWarnings(
rENA:::ena.make.set(df.accum, as.list = FALSE)
)
testthat::expect_is(df.set, "ENAset",
"Set with as.list = FALSE did not return ENAset")
df.accum2 <- rENA:::ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = T
)
error_set <- testthat::expect_error(
suppressWarnings(rENA:::ena.make.set(df.accum2, as.list = F)),
regexp = "Re-run the accumulation with as.list=FALSE"
)
error_set2 <- testthat::expect_warning(
rENA:::ena.make.set(df.accum, as.list = T),
regexp = "ENAdata objects will be deprecated"
)
})
test_that("Old sets are the same as the new ones", {
data(RS.data)
units.by <- c("UserName", "Condition")
conv.by <- c("Condition", "GroupName")
df.accum <- suppressWarnings(
rENA:::ena.accumulate.data.file(
RS.data, units.by = units.by,
conversations.by = conv.by,
codes = code_names, as.list = FALSE, window.size.back = 4
)
)
df.set <- suppressWarnings(
rENA:::ena.make.set(df.accum, as.list = FALSE)
)
new.set <- rENA:::ena.accumulate.data(
units = RS.data[, units.by],
conversation = RS.data[, conv.by],
metadata = RS.data[, code_names],
codes = RS.data[,code_names],
model = "EndPoint",
window.size.back = 4
) %>%
rENA:::ena.make.set()
testthat::expect_equivalent(df.set$points.rotated[1, ],
as.matrix(new.set$points)[1, ])
testthat::expect_equivalent(df.set$line.weights[1, ],
as.matrix(new.set$line.weights)[1, ])
})
test_that("Old R6 w custom rotation", {
data(RS.data)
df.file <- RS.data
df.accum <- suppressWarnings(
rENA:::ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = FALSE
)
)
df.set <- suppressWarnings(
rENA:::ena.make.set(df.accum, as.list = FALSE)
)
df.accum.2 <- suppressWarnings(
rENA:::ena.accumulate.data.file(
df.file, units.by = c("GroupName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names, as.list = FALSE
)
)
df.set.2 <- suppressWarnings(
rENA:::ena.make.set(df.accum.2, as.list = FALSE, rotation.set = df.set$rotation.set)
)
testthat::expect_equal(df.set$node.positions, df.set.2$node.positions)
testthat::expect_error(
suppressWarnings(rENA:::ena.make.set(df.accum.2, as.list = FALSE, rotation.set = -1)),
regexp = "Supplied rotation.set is not an instance of ENARotationSet"
)
testthat::expect_error(
suppressWarnings(rENA:::ena.make.set(df.accum.2, as.list = FALSE, rotation.by = "NOTHING")),
regexp = "Unable to find or create a rotation set"
)
testthat::expect_error(
suppressWarnings(rENA:::ena.make.set(df.accum.2, as.list = FALSE, node.position.method = function(set) {
return(list("failed" = NULL))
})),
regexp = "node position method didn't return back the expected objects"
)
testthat::expect_error(
suppressWarnings(rENA:::ena.make.set(df.accum.2, as.list = FALSE, rotation.set = -1)),
regexp = "Supplied rotation.set is not an instance of ENARotationSet"
)
rot.set <- list(
"rotation" = matrix(rep(0, choose(length(code_names),2) ^ 2 ), nrow = choose(length(code_names),2)),
"node.positions" = NULL
)
class(rot.set) <- c("ENARotationSet")
testthat::expect_error(
suppressWarnings(rENA:::ena.make.set(df.accum.2, as.list = FALSE, rotation.set = rot.set)),
regexp = "Unable to determine the node positions either by calculating"
)
})
test_that("Verify ENArotation set class", {
data(RS.data)
df.file <- RS.data
df.accum <- suppressWarnings(
rENA:::ena.accumulate.data.file(
df.file, units.by = c("UserName", "Condition"),
conversations.by = c("ActivityNumber", "GroupName"),
codes = code_names,
as.list = FALSE
)
)
df.set <- suppressWarnings(
rENA:::ena.make.set(df.accum, as.list = FALSE)
)
nodes <- df.set$node.positions
rownames(nodes) <- NULL
rotationSet = ENARotationSet$new(
rotation = df.set$rotation.set$rotation,
codes = df.set$codes,
node.positions = nodes,
eigenvalues = NULL
)
testthat::expect_true(all(rownames(rotationSet$node.positions) == df.set$codes))
})
|
summaryTotalDistance <- function(list, summary.df = NA) {
stimulus <- NULL
id_stim <- NULL
distance <- NULL
list <- purrr::map_if(list, is.data.frame, function(.x) {
total_distance <- .x %>%
select(distance) %>%
sum()
if (any(names(.x) == "id_stim")) {
out <- data.frame(id_stim = .x$id_stim[1],
total_distance = total_distance,
stringsAsFactors = FALSE)
} else{
out <- data.frame(id = .x$id[1],
total_distance = total_distance,
stringsAsFactors = FALSE)
}
if (any(!is.na(summary.df))){
if (any(names(.x) == "id_stim")) {
out <- inner_join(out, summary.df, by = "id_stim")
} else {
out <- inner_join(out, summary.df, by = "id")
}
} else {
if (any(names(.x) == "id_stim")) {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id_stim) %>%
slice(1)
out <- bind_cols(out, trial_cols)
} else {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id) %>%
slice(1)
out <- bind_cols(out, trial_cols)
}
}
return(out)
})
magrittr::extract(list, 1:(length(list) - 1)) %>%
bind_rows()
}
summaryNetDisplacement <- function(list, summary.df = NA) {
stimulus <- NULL
id_stim <- NULL
list <- purrr::map_if(list, is.data.frame, function(.x) {
net_displacement <- sqrt((.x$x[nrow(.x)] ^ 2) + (.x$y[nrow(.x)] ^ 2))
if (any(names(.x) == "id_stim")) {
out <- data.frame(id_stim = .x$id_stim[1],
net_displacement = net_displacement,
stringsAsFactors = FALSE)
} else{
out <- data.frame(id = .x$id[1],
net_displacement = net_displacement,
stringsAsFactors = FALSE)
}
if (any(!is.na(summary.df))){
if (any(names(.x) == "id_stim")) {
out <- inner_join(out, summary.df, by = "id_stim")
} else {
out <- inner_join(out, summary.df, by = "id")
}
} else {
if (any(names(.x) == "id_stim")) {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id_stim) %>%
slice(1)
out <- bind_cols(out, trial_cols)
} else {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id) %>%
slice(1)
out <- bind_cols(out, trial_cols)
}
}
return(out)
})
magrittr::extract(list, 1:(length(list) - 1)) %>%
bind_rows()
}
summaryTortuosity <- function(summary.df,
total.distance,
net.displacement,
inverse = FALSE) {
total.distance <- enquo(total.distance)
net.displacement <- enquo(net.displacement)
if (inverse == FALSE) {
summary.df <- summary.df %>%
mutate(tortuosity = !!net.displacement / !!total.distance)
}
else {
summary.df <- summary.df %>%
mutate(tortuosity = !!total.distance / !!net.displacement)
}
return(summary.df)
}
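# Illustrative call with toy values, not from the original source. Assumes dplyr
# is attached, since summaryTortuosity() uses enquo() and mutate() internally.
library(dplyr)
toy_summary <- data.frame(id = c("t1", "t2"),
                          total_distance = c(12.4, 9.8),
                          net_displacement = c(3.1, 7.5))
summaryTortuosity(summary.df = toy_summary,
                  total.distance = total_distance,
                  net.displacement = net_displacement)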
summaryAvgBearing <- function(list, summary.df = NA) {
stimulus <- NULL
id_stim <- NULL
out <- list %>% purrr::map_if(is.data.frame, function(.x) {
b <- .x[!is.na(.x$bearing), "bearing"]
r <- b * (pi / 180)
mean.r <- atan3((sum(sin(r))) / length(r),
(sum(cos(r))) / length(r))
mean.c <- mean.r * (180 / pi)
if (mean.c < 0) {
mean.c <- 360 + mean.c
}
rho <- sqrt(((sum(sin(r))) / length(r)) ^ 2 +
((sum(cos(r))) / length(r)) ^ 2)
if (any(names(.x) == "id_stim")) {
return(c(.x$id_stim[1], mean.c, rho))
} else {
return(c(.x$id[1], mean.c, rho))
}
})
out <- magrittr::extract(out, 1:(length(out) - 1))
out <- unlist(out)
id <- unname(out[seq(1, length(out), by = 3)])
circular.mean <-
unname(out[seq(2, (length(out) - 1), by = 3)])
circular.rho <- unname(out[seq(3, (length(out) - 0), by = 3)])
if (any(names(list[[1]]) == "id_stim")) {
out <- data.frame(
id_stim = id,
circular_mean = circular.mean,
circular_rho = circular.rho,
stringsAsFactors = FALSE
)
} else{
out <- data.frame(
id = id,
circular_mean = circular.mean,
circular_rho = circular.rho,
stringsAsFactors = FALSE
)
}
if (any(!is.na(summary.df))) {
if (any(names(list[[1]]) == "id_stim")) {
out <- inner_join(out, summary.df, by = "id_stim")
} else {
out <- inner_join(out, summary.df, by = "id")
}
} else {
if (any(names(list[[1]]) == "id_stim")) {
trial_cols <- list %>%
map_if(is.data.frame, function(.x) {
.x %>%
select(!!list$col.names, stimulus, -id_stim) %>%
slice(1)
})
trial_cols <- trial_cols %>%
  magrittr::extract(1:(length(.) - 1)) %>%
  bind_rows()
out <- bind_cols(out, trial_cols)
} else {
trial_cols <- list %>%
map_if(is.data.frame, function(.x) {
.x %>%
select(!!list$col.names, stimulus, -id) %>%
slice(1)
})
trial_cols <- trial_cols %>%
  magrittr::extract(1:(length(.) - 1)) %>%
  bind_rows()
out <- bind_cols(out, trial_cols)
}
}
return(out)
}
summaryAvgVelocity <- function(list, summary.df = NA) {
stimulus <- NULL
id_stim <- NULL
list <- list %>% purrr::map_if(is.data.frame, function(.x) {
out <- data.frame(id = .x$id[1],
avg.velocity = mean(.x$velocity, na.rm = TRUE))
if (any(names(.x) == "id_stim")) {
out <- data.frame(id_stim = .x$id_stim[1],
avg_velocity = out$avg.velocity,
stringsAsFactors = FALSE)
} else{
out <- data.frame(id = .x$id[1],
avg_velocity = out$avg.velocity,
stringsAsFactors = FALSE)
}
if (any(!is.na(summary.df))){
if (any(names(.x) == "id_stim")) {
out <- inner_join(out, summary.df, by = "id_stim")
} else {
out <- inner_join(out, summary.df, by = "id")
}
} else {
if (any(names(.x) == "id_stim")) {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id_stim) %>%
slice(1)
out <- bind_cols(out, trial_cols)
} else {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id) %>%
slice(1)
out <- bind_cols(out, trial_cols)
}
}
})
magrittr::extract(list, 1:(length(list) - 1)) %>%
bind_rows()
}
summaryStops <- function(list, summary.df = NA, stop.threshold = 0) {
stimulus <- NULL
id_stim <- NULL
list <- list %>% purrr::map_if(is.data.frame, function(.x) {
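# A "stop" is a run of consecutive rows with velocity <= stop.threshold.
# rle() collapses the thresholded velocity vector, so the zero-valued runs
# give the number of stops and their mean run length the average stop
# length (in rows of the trajectory).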
stops <- ifelse(.x$velocity <= stop.threshold, 0, .x$velocity)
stops <- rle(stops)
num_stops <- length(stops$lengths[stops$values == 0])
len_stops <- mean(stops$lengths[stops$values == 0])
out <- data.frame(id = .x$id[1],
number_stops = num_stops,
avg_length_stops = len_stops)
if (any(names(.x) == "id_stim")) {
out <- data.frame(id_stim = .x$id_stim[1],
number_stops = num_stops,
avg_length_stops = len_stops,
stringsAsFactors = FALSE)
} else{
out <- data.frame(id = .x$id[1],
number_stops = num_stops,
avg_length_stops = len_stops,
stringsAsFactors = FALSE)
}
if (any(!is.na(summary.df))){
if (any(names(.x) == "id_stim")) {
out <- inner_join(out, summary.df, by = "id_stim")
} else {
out <- inner_join(out, summary.df, by = "id")
}
} else {
if (any(names(.x) == "id_stim")) {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id_stim) %>%
slice(1)
out <- bind_cols(out, trial_cols)
} else {
trial_cols <- .x %>%
select(!!list$col.names, stimulus, -id) %>%
slice(1)
out <- bind_cols(out, trial_cols)
}
}
})
magrittr::extract(list, 1:(length(list) - 1)) %>%
bind_rows()
}
|
library(hamcrest)
expected <- c(0x1.22af67381fb8p-5 + 0x0p+0i, 0x1.22193386ea6p-5 + -0x1.1adb230dcef8p-9i,
0x1.20575bd7f218p-5 + -0x1.1a32d0e5597p-8i, 0x1.1d6c2944ab48p-5 + -0x1.a5a8530f4fep-8i,
0x1.195b675f5ad8p-5 + -0x1.17943cf34498p-7i, 0x1.142a5ed440bp-5 + -0x1.5b082bbbbd4p-7i,
0x1.0ddfcdef99ap-5 + -0x1.9ce046ae20dp-7i, 0x1.0683df132acp-5 + -0x1.dccf1382b3bp-7i,
0x1.fc403a52021p-6 + -0x1.0d44db6be13p-6i, 0x1.e97ecc47db1p-6 + -0x1.2ae42d3571f8p-6i,
0x1.d4dbb7422ebp-6 + -0x1.4723477f024p-6i, 0x1.be71a3814eap-6 + -0x1.61e1d4add4bp-6i,
0x1.a65d789c4c6p-6 + -0x1.7b018cfa6ce8p-6i, 0x1.8cbe34a72848p-6 + -0x1.92665f4d581p-6i,
0x1.71b4c06916d8p-6 + -0x1.a7f696ef04cp-6i, 0x1.5563c0e1ba4p-6 + -0x1.bb9afdd45f2p-6i,
0x1.37ef66602158p-6 + -0x1.cd3efb56c13p-6i, 0x1.197d3971dddp-6 + -0x1.dcd0af2adb1p-6i,
0x1.f467cbe77p-7 + -0x1.ea41086f94ap-6i, 0x1.b47609207cap-7 + -0x1.f583d8b37c2p-6i,
0x1.7375c5f599a8p-7 + -0x1.fe8fe2d51eep-6i, 0x1.31b8966f181p-7 + -0x1.02af72d4487p-5i,
0x1.df212909889p-8 + -0x1.04f6d12941cp-5i, 0x1.5a9fd99f3be8p-8 + -0x1.061def27b708p-5i,
0x1.ad21ad19dc6p-9 + -0x1.06263091c418p-5i, 0x1.4e584565c8cp-10 + -0x1.051276db4d4p-5i,
-0x1.6d886a637bp-11 + -0x1.02e71bb275ap-5i, -0x1.595108089a8p-9 + -0x1.ff53d2d4c62p-6i,
-0x1.28b891f33a4p-8 + -0x1.f6c422a6438p-6i, -0x1.a14e897e407p-8 + -0x1.ec30401901cp-6i,
-0x1.0aeecda9d888p-7 + -0x1.dfabdfc96c5p-6i, -0x1.42efcc4cad4p-7 + -0x1.d14d32ace4fp-6i,
-0x1.786ad590ecfp-7 + -0x1.c12cc379b52p-6i, -0x1.ab24814caadp-7 + -0x1.af6550acedep-6i,
-0x1.dae5bad4931p-7 + -0x1.9c13a365787p-6i, -0x1.03be0588c8c8p-6 + -0x1.87566350458p-6i,
-0x1.185cedd5ddap-6 + -0x1.714de7e607p-6i, -0x1.2b3b5972e5bp-6 + -0x1.5a1c073eea5p-6i,
-0x1.3c47b3e7783p-6 + -0x1.41e3e2c487dp-6i, -0x1.4b73273dcd7p-6 + -0x1.28c9b20d88bp-6i,
-0x1.58b1ae00c0ep-6 + -0x1.0ef28c31733p-6i, -0x1.63fa20ff72dp-6 + -0x1.e9085fcb0a8p-7i,
-0x1.6d4640c0b9fp-6 + -0x1.b349956b2d2p-7i, -0x1.7492ba96d33p-6 + -0x1.7cf57f5b0bp-7i,
-0x1.79df294a2ccp-6 + -0x1.4658db91b74p-7i, -0x1.7d2e1158a03p-6 + -0x1.0fbfecfddb4p-7i,
-0x1.7e84d8ccf78p-6 + -0x1.b2ec1307bacp-8i, -0x1.7debbab9255p-6 + -0x1.478a531c254p-8i,
-0x1.7b6db664199p-6 + -0x1.bbd5e766754p-9i, -0x1.77187a4295cp-6 + -0x1.da67813eb4p-10i,
-0x1.70fc4ad8bb4p-6 + -0x1.21dc6f15e4p-12i, -0x1.692be5a84cep-6 + 0x1.3c22bc91a1p-10i,
-0x1.5fbc6055ab8p-6 + 0x1.58bf363d2bcp-9i, -0x1.54c50433738p-6 + 0x1.05748b9fdbp-8i,
-0x1.485f266a3f6p-6 + 0x1.59dd2bfec18p-8i, -0x1.3aa5fcf771p-6 + 0x1.a937deb32d4p-8i,
-0x1.2bb670c2f92p-6 + 0x1.f32c49fbf8dp-8i, -0x1.1baeed10c16p-6 + 0x1.1bb5eaec613p-7i,
-0x1.0aaf2c96cf2p-6 + 0x1.3ad90d446b78p-7i, -0x1.f1b0090a4acp-7 + 0x1.56e29f6b1c4p-7i,
-0x1.cc965ba011p-7 + 0x1.6fbb22c5a918p-7i, -0x1.a6561a20cdcp-7 + 0x1.8550a4c50e08p-7i,
-0x1.7f34f2a1c4p-7 + 0x1.9796d40a9dap-7i, -0x1.57790440498p-7 + 0x1.a6870c7addbp-7i,
-0x1.2f686a85ecp-7 + 0x1.b2205a21933p-7i, -0x1.0748c8c355p-7 + 0x1.ba6772d7aabp-7i,
-0x1.bebdac355b8p-8 + 0x1.bf66a6a9a4cp-7i, -0x1.6fdbd5e5318p-8 + 0x1.c12dc70b5eep-7i,
-0x1.226f2114638p-8 + 0x1.bfd204f43d6p-7i, -0x1.adec4d9309p-9 + 0x1.bb6dc60cec4p-7i,
-0x1.1bd4650426p-9 + 0x1.b4207126076p-7i, -0x1.1ef9da6078p-10 + 0x1.aa0e323cdeap-7i,
-0x1.37d29d4bdp-14 + 0x1.9d5fb6613cp-7i, 0x1.d274611b7ep-11 + 0x1.8e41dfdc5ap-7i,
0x1.d5ba0ddb47p-10 + 0x1.7ce57306306p-7i, 0x1.5859d569658p-9 + 0x1.697ebc42a0ep-7i,
0x1.bc7ef07f288p-9 + 0x1.54452fabfa6p-7i, 0x1.0b65c853ffcp-8 + 0x1.3d7302fb7ap-7i,
0x1.3369cb1dbe4p-8 + 0x1.2544c24affcp-7i, 0x1.56202160ed8p-8 + 0x1.0bf8e055014p-7i,
0x1.736866f596cp-8 + 0x1.e39e85c13f8p-8i, 0x1.8b2d6a6e6acp-8 + 0x1.ae11981fc88p-8i,
0x1.9d65515ec7p-8 + 0x1.77cdc29a7ap-8i, 0x1.aa11a8d0b28p-8 + 0x1.4155e136e5p-8i,
0x1.b13f619924p-8 + 0x1.0b2bea20ba8p-8i, 0x1.b306b85765ap-8 + 0x1.ab9fed1a5ep-9i,
0x1.af8b090ba42p-8 + 0x1.437e9a5546p-9i, 0x1.a6fa8e5084ep-8 + 0x1.bdcdbef8a2p-10i,
0x1.998e0c63e1cp-8 + 0x1.fb096c8c7p-11i, 0x1.8788684b2bap-8 + 0x1.1f7bc5341p-12i,
0x1.71362b807e5p-8 + -0x1.86026fa75p-12i, 0x1.56ecf4b5d19p-8 + -0x1.fa286b5cap-11i,
0x1.390ad65bd8ep-8 + -0x1.8976817d52p-10i, 0x1.17f5a3b9a79p-8 + -0x1.02b2bdf8e1p-9i,
0x1.e8345b0281ap-9 + -0x1.37e29afdc3p-9i, 0x1.9bd6ddd6a14p-9 + -0x1.63d129e217p-9i,
0x1.4bc35902c26p-9 + -0x1.861a23f962p-9i, 0x1.f1e61767d6p-10 + -0x1.9e6fa328f3p-9i,
0x1.48cbee91224p-10 + -0x1.ac9af49b47p-9i, 0x1.3c835bd60ep-11 + -0x1.b07d42b488p-9i,
-0x1.753e7ad0cp-15 + -0x1.aa10145ed2p-9i, -0x1.65ce9e687ep-11 + -0x1.99659ffe68p-9i,
-0x1.556a30a1aap-10 + -0x1.7ea8f09b0ep-9i, -0x1.f12e4b5f18p-10 + -0x1.5a1ddc0c47p-9i,
-0x1.421dd987cdp-9 + -0x1.2c20c939acp-9i, -0x1.8655aa3c048p-9 + -0x1.ea4c8b8d3ap-10i,
-0x1.c455b2d58d8p-9 + -0x1.6b74d59256p-10i, -0x1.fb41e2e3dd8p-9 + -0x1.ba0041db18p-11i,
-0x1.15269116b9p-8 + -0x1.017e87ea88p-12i, -0x1.285db21b68cp-8 + 0x1.a349279858p-12i,
-0x1.36f1d3d272cp-8 + 0x1.1cd4d634fp-10i, -0x1.4098eec3988p-8 + 0x1.d9c3dded6cp-10i,
-0x1.45145a518b4p-8 + 0x1.4ecda9f9fbp-9i, -0x1.4431a2760ep-8 + 0x1.b31d84ebb1p-9i,
-0x1.3dcb4bb6e68p-8 + 0x1.0c5ab232964p-8i, -0x1.31c9838522p-8 + 0x1.3f37ae07bbcp-8i,
-0x1.2022bb4a728p-8 + 0x1.718f02fdfb8p-8i, -0x1.08dc2c7d85p-8 + 0x1.a2c77d942bcp-8i,
-0x1.d814885c02p-9 + 0x1.d246b4fb8fp-8i, -0x1.93a1e9279ep-9 + 0x1.ff7263912a8p-8i,
-0x1.44c7d694fap-9 + 0x1.14d8e55b35cp-7i, -0x1.d81aac06f2p-10 + 0x1.28379006648p-7i,
-0x1.14321e39eep-10 + 0x1.398c80fdcc6p-7i, -0x1.fb106d9e4p-13 + 0x1.4891f8a67ap-7i,
0x1.491790a6c8p-11 + 0x1.5505e6cb49cp-7i, 0x1.95917a8d1ap-10 + 0x1.5eaaa90732ap-7i,
0x1.48b77e38ap-9 + 0x1.6547c82bf34p-7i, 0x1.cad654f1aap-9 + 0x1.68aab363e13p-7i,
0x1.27e79580ecp-8 + 0x1.68a777c74dbp-7i, 0x1.6b19b734368p-8 + 0x1.651973193e98p-7i,
0x1.ae3f0d9e64p-8 + 0x1.5de4005c358p-7i, 0x1.f08b029e2dp-8 + 0x1.52f31cee6438p-7i,
0x1.189417f945p-7 + 0x1.443c04dd1df8p-7i, 0x1.379ce0d778p-7 + 0x1.31bdc521947p-7i,
0x1.54ee7b0229cp-7 + 0x1.1b81c27ceb1p-7i, 0x1.70155c8ca6p-7 + 0x1.019c33afa13p-7i,
0x1.889cadbc954p-7 + 0x1.c8591ba015ep-8i, 0x1.9e0f30b7b1cp-7 + 0x1.86bbc31a65p-8i,
0x1.aff834b9ac8p-7 + 0x1.3ece506dfdap-8i, 0x1.bde493cc13p-7 + 0x1.e22dfdc7304p-9i,
0x1.c763b9e292p-7 + 0x1.3c694a2b43cp-9i, 0x1.cc08b4273ep-7 + 0x1.1b7b839d8fp-10i,
0x1.cb6b472cc68p-7 + -0x1.419e5036fep-12i, 0x1.c5290aa7e18p-7 + -0x1.c7327fce5ep-10i,
0x1.b8e6893da28p-7 + -0x1.a26fe2d0a4p-9i, 0x1.a65062e265p-7 + -0x1.3142a847e48p-8i,
0x1.8d1c70342e8p-7 + -0x1.90bc6e0d7f8p-8i, 0x1.6d0ae52c42ep-7 + -0x1.ee5ea2d6148p-8i,
0x1.45e7717627cp-7 + -0x1.2466b47addep-7i, 0x1.178a5caea14p-7 + -0x1.4f4c70426d6p-7i,
0x1.c3b33986ad8p-8 + -0x1.771f498bbeep-7i, 0x1.4993c947e08p-8 + -0x1.9b1587c9546p-7i,
0x1.817ea5be898p-9 + -0x1.ba5ddae970ap-7i, 0x1.4b01c46882p-11 + -0x1.d42058ca8f6p-7i,
-0x1.f0ef48db9dp-10 + -0x1.e77f91e80eap-7i, -0x1.2fa20da6df8p-8 + -0x1.f399bcb3e3ap-7i,
-0x1.f039146503cp-8 + -0x1.f789f6efc6p-7i, -0x1.5e9fb3cf758p-7 + -0x1.f2699c3594cp-7i,
-0x1.cae51218536p-7 + -0x1.e351b0bc188p-7i, -0x1.1e30f9111dfp-6 + -0x1.c95c5f42becp-7i,
-0x1.593af434c0fp-6 + -0x1.a3a688ecb04p-7i, -0x1.963539925058p-6 + -0x1.715165b1e26p-7i,
-0x1.d4b95bdd9258p-6 + -0x1.318433eba3ep-7i, -0x1.0a2ad7375ee8p-5 + -0x1.c6dbeac3b04p-8i,
-0x1.2a46a0a912p-5 + -0x1.0c8e703d4acp-8i, -0x1.4a6bf6813d08p-5 + -0x1.953e93033cp-11i,
-0x1.6a51373390b4p-5 + 0x1.906b42737fp-9i, -0x1.89a749d08f6p-5 + 0x1.e5496836668p-8i,
-0x1.a819bd10dc9p-5 + 0x1.92d8815d114p-7i, -0x1.c54eef3a7728p-5 + 0x1.229909ed36ep-6i,
-0x1.e0e83f0c17f4p-5 + 0x1.850c3eba9f7p-6i, -0x1.fa8245d19168p-5 + 0x1.f0ee12648e5p-6i,
-0x1.08da8d5d249cp-4 + 0x1.332d3a51397p-5i, -0x1.130a4fc056d6p-4 + 0x1.72b013e724f8p-5i,
-0x1.1b986b33802ep-4 + 0x1.b6ffee4b9c2p-5i, -0x1.224b20c171fep-4 + 0x1.000ad5202448p-4i,
-0x1.26e725a2f782p-4 + 0x1.26f12320f99p-4i, -0x1.292fdc130794p-4 + 0x1.50273c4bc4fcp-4i,
-0x1.28e7909e73ap-4 + 0x1.7b9d27660f6cp-4i, -0x1.25cfbbd4bba4p-4 + 0x1.a93e69a045e8p-4i,
-0x1.1fa94839bbe6p-4 + 0x1.d8f1dcc03158p-4i, -0x1.1634dc51e278p-4 + 0x1.054cc480fd9ap-3i,
-0x1.0933289c9b12p-4 + 0x1.1f094181ab5ep-3i, -0x1.f0ca72953b78p-5 + 0x1.399a67055748p-3i,
-0x1.c71996efafb4p-5 + 0x1.54e9a17d0c18p-3i, -0x1.94d94b5585ap-5 + 0x1.70ddbe1da396p-3i,
-0x1.5991e6aa5fccp-5 + 0x1.8d5ae377655p-3i, -0x1.14cf4619c928p-5 + 0x1.aa428cd2c458p-3i,
-0x1.8c431cb3687p-6 + 0x1.c7738873d7e2p-3i, -0x1.b477c454cefp-7 + 0x1.e4c9f8e417dap-3i,
-0x1.2beeb982868p-10 + 0x1.010facafd2dep-2i, 0x1.95e0ea5775cp-7 + 0x1.0fa542c07a76p-2i,
0x1.bf801060cb8p-6 + 0x1.1e0fe2218221p-2i, 0x1.65c69d8495b8p-5 + 0x1.2c386b37c539p-2i,
0x1.f7d1988021dp-5 + 0x1.3a068445bdcc8p-2i, 0x1.4b0e5e0ea7c8p-4 + 0x1.4760a428945ap-2i,
0x1.a06c91be9d8p-4 + 0x1.542c1ee1f6df8p-2i, 0x1.fc163bb42028p-4 + 0x1.604d33f2f8f8p-2i,
0x1.2f0c09c965c6p-3 + 0x1.6ba71e8a74e58p-2i, 0x1.633c2967e418p-3 + 0x1.761c27878c0d8p-2i,
0x1.9a9b2af35d48p-3 + 0x1.7f8db94efe478p-2i, 0x1.d5252057a73p-3 + 0x1.87dc75701a9dp-2i,
0x1.096929948059p-2 + 0x1.8ee84c140e548p-2i, 0x1.29cb8d56ca46p-2 + 0x1.949095304e5c4p-2i,
0x1.4bb1da1ac24bp-2 + 0x1.98b42b72c9428p-2i, 0x1.6f120f0048fp-2 + 0x1.9b3188dc7aa58p-2i,
0x1.93dffa77effcp-2 + 0x1.9be6e4fce2e24p-2i, 0x1.ba0d2aa0b9a3p-2 + 0x1.9ab254becc518p-2i,
0x1.e188df48f5f4p-2 + 0x1.9771ebb4b05c4p-2i, 0x1.051ffed7e5b6p-1 + 0x1.9203ded0fa05cp-2i,
0x1.1a0e83121123p-1 + 0x1.8a46a87454448p-2i, 0x1.2f8405ce58af4p-1 + 0x1.80192dba2aaa8p-2i,
0x1.457356b1e8408p-1 + 0x1.735ae4e984d2p-2i, 0x1.5bce09783b388p-1 + 0x1.63ebfcee727a8p-2i,
0x1.7284750234cc8p-1 + 0x1.51ad85be5a5b8p-2i, 0x1.8985b39314a5p-1 + 0x1.3c819987a98fp-2i,
0x1.a0bfa445ba4ap-1 + 0x1.244b868ba03b8p-2i, 0x1.b81eedc3c8be8p-1 + 0x1.08eff97f4b778p-2i,
0x1.cf8f02474c254p-1 + 0x1.d4aa509e4cd1p-3i, 0x1.e6fa24ee83b1p-1 + 0x1.90c5fa3ebfdep-3i,
0x1.fe49706865a18p-1 + 0x1.460682c2c0658p-3i, 0x1.0ab26f7fabbdp+0 + 0x1.e88723579009p-4i,
0x1.1619aa0336efp+0 + 0x1.36b271477aacp-4i, 0x1.214d5356299d2p+0 + 0x1.d94241b4cb2cp-6i,
0x1.2c3fd71adb2f2p+0 + -0x1.634ae4dc0738p-6i, 0x1.36e327cfec77ap+0 + -0x1.36e0dc995396p-4i,
0x1.4128c65542108p+0 + -0x1.11fbf09a35eb8p-3i, 0x1.4b01ca23a7ee6p+0 + -0x1.9014e483589d8p-3i,
0x1.545eea3563edp+0 + -0x1.0adefe6c7f528p-2i, 0x1.5d30869d49a4ep+0 + -0x1.5179aadd5c8cp-2i,
0x1.6566b2c92c53p+0 + -0x1.9bd517ae3d6ep-2i, 0x1.6cf1406bd4272p+0 + -0x1.e9e856383f1cp-2i,
0x1.73bfcb09e4356p+0 + -0x1.1dd36523f8bd8p-1i, 0x1.79c1c4246540dp+0 + -0x1.488006bedd77cp-1i,
0x1.7ee67ffaf190fp+0 + -0x1.74efea832604cp-1i, 0x1.831d42dec64dp+0 + -0x1.a316eceb198d8p-1i,
0x1.86554f0f4968cp+0 + -0x1.d2e6e1c94388cp-1i, 0x1.887df317df9dp+0 + -0x1.0227c593d65d4p+0i,
0x1.898698a63dfd8p+0 + -0x1.1b9f48d2cac33p+0i, 0x1.895ed3cfb6189p+0 + -0x1.35cfbf333f097p+0i,
0x1.87f672bb53d55p+0 + -0x1.50addb5230a21p+0i, 0x1.853d8da601a1dp+0 + -0x1.6c2d3c7b04faap+0i,
0x1.812497354acd4p+0 + -0x1.88406e2cab34bp+0i, 0x1.7b9c6d0cbdcdcp+0 + -0x1.a4d8e8880acb4p+0i,
0x1.7496689961a47p+0 + -0x1.c1e711ae767f7p+0i, 0x1.6c0470062a8f3p+0 + -0x1.df5a401632709p+0i,
0x1.61d9074bdc5b2p+0 + -0x1.fd20bdda6a7f3p+0i, 0x1.5607614e53a3p+0 + -0x1.0d93e6859d7a8p+1i,
0x1.488370f8c489dp+0 + -0x1.1cadd680d5048p+1i, 0x1.3941fa4a1cccap+0 + -0x1.2bd3d05d527c4p+1i,
0x1.2838a34261dd5p+0 + -0x1.3afafb1da03b5p+1i, 0x1.155e04a199aeep+0 + -0x1.4a18077fd0e2ap+1i,
0x1.00a9ba688f4dcp+0 + -0x1.591f34ba8adcap+1i, 0x1.d428e81725d38p-1 + -0x1.680455bf7a337p+1i,
0x1.a330088e6220fp-1 + -0x1.76bad70282c8ap+1i, 0x1.6e5ee10d8b6e6p-1 + -0x1.8535c4c4952fep+1i,
0x1.35adff998d95p-1 + -0x1.9367d1e08f278p+1i, 0x1.f23124329b2d9p-2 + -0x1.a1435f1815edp+1i,
0x1.713903990d4f5p-2 + -0x1.aeba82dddcfbp+1i, 0x1.d0ea3597a8d24p-3 + -0x1.bbbf119a4f987p+1i,
0x1.5fb30d4878e44p-4 + -0x1.c842a66717521p+1i, -0x1.014a7c9628d5p-4 + -0x1.d436ac3d7d734p+1i,
-0x1.c06ae8654f22p-3 + -0x1.df8c67932a724p+1i, -0x1.87a1c7ce6f8b4p-2 + -0x1.ea3500604c1eep+1i,
-0x1.1b3b93ab9b763p-1 + -0x1.f4218c89b32c2p+1i, -0x1.76474159a2c6cp-1 + -0x1.fd431aa9023dp+1i,
-0x1.d4dd08e44667ap-1 + -0x1.02c55e964a83p+2i, -0x1.1b714f9f16bb8p+0 + -0x1.0674cae42a9a6p+2i,
-0x1.4e1d24dae3107p+0 + -0x1.09a8709822d28p+2i, -0x1.82616be23a856p+0 + -0x1.0c59018a91654p+2i,
-0x1.b82bcfb317c67p+0 + -0x1.0e7f492d89272p+2i, -0x1.ef683e069b384p+0 + -0x1.101432c0fda3p+2i,
-0x1.14007410fbc4p+1 + -0x1.1110cf9b62ac2p+2i, -0x1.30ef228342192p+1 + -0x1.116e5d8233aaap+2i,
-0x1.4e738a840c74ap+1 + -0x1.11264d0daf172p+2i, -0x1.6c803367eae2ep+1 + -0x1.10324812eddf1p+2i,
-0x1.8b06cee83968ep+1 + -0x1.0e8c380f5d1dcp+2i, -0x1.a9f83d180f28ap+1 + -0x1.0c2e4c9082c0bp+2i,
-0x1.c94491156a8b9p+1 + -0x1.09130192db95cp+2i, -0x1.e8db16777de9ep+1 + -0x1.053525d28af55p+2i,
-0x1.04552bbd352c7p+2 + -0x1.008fe10881fdcp+2i, -0x1.145011f40e93fp+2 + -0x1.f63d741d6c49dp+1i,
-0x1.2454cc5eb45f4p+2 + -0x1.e9bb39cbea0eep+1i, -0x1.34599444e9a41p+2 + -0x1.db91c12fbb94p+1i,
-0x1.44545243fad3p+2 + -0x1.cbba99df0eac2p+1i, -0x1.543aa335d1d25p+2 + -0x1.ba304006bc396p+1i,
-0x1.6401dd713a2b3p+2 + -0x1.a6ee2715985d6p+1i, -0x1.739f16614f6efp+2 + -0x1.91f0c3f496382p+1i,
-0x1.83072871bede8p+2 + -0x1.7b3596c1210cbp+1i, -0x1.922eb94d349ap+2 + -0x1.62bb33ff38d82p+1i,
-0x1.a10a406afc899p+2 + -0x1.48814d39162fbp+1i, -0x1.af8e0de89005ap+2 + -0x1.2c88b902586bep+1i,
-0x1.bdae51ab7b58bp+2 + -0x1.0ed37a5509c08p+1i, -0x1.cb5f22c7b9b8fp+2 + -0x1.dec98e7e2d9dfp+0i,
-0x1.d89487265bb17p+2 + -0x1.9c821dae64d3cp+0i, -0x1.e5427b68033e6p+2 + -0x1.56dbfce104e6p+0i,
-0x1.f15cfafe7a9b4p+2 + -0x1.0de50c09bfd1p+0i, -0x1.fcd8087866eabp+2 + -0x1.835b6f37a7c64p-1i,
-0x1.03d3dafceb841p+3 + -0x1.c923f82d507b8p-2i, -0x1.08e016ee1879ep+3 + -0x1.fcc73901bfeep-4i,
-0x1.0d8addb7e2f0dp+3 + 0x1.ad7ddf0f5d91p-3i, -0x1.11ce69ec8f039p+3 + 0x1.1c2198f0caf44p-1i,
-0x1.15a50f81ed261p+3 + 0x1.d272c87fd3a48p-1i, -0x1.190940526d818p+3 + 0x1.470996c9f0ad1p+0i,
-0x1.1bf590a2980ccp+3 + 0x1.a75eeac292706p+0i, -0x1.1e64bba798861p+3 + 0x1.050a30be0c5fp+1i,
-0x1.2051a80b7ca8dp+3 + 0x1.3781403a762dcp+1i, -0x1.21b76c6bb4a86p+3 + 0x1.6affae6d7c4b2p+1i,
-0x1.229153ce5a2bp+3 + 0x1.9f6f54870f5bep+1i, -0x1.22dae20ab8beep+3 + 0x1.d4b8e210e74aep+1i,
-0x1.228fd8218c3a8p+3 + 0x1.0561f2d7134a2p+2i, -0x1.21ac3881659bep+3 + 0x1.20bb6b6c281bp+2i,
-0x1.202c4b33a5f14p+3 + 0x1.3c5b904ba07eep+2i, -0x1.1e0ca1ee80906p+3 + 0x1.58349698883dcp+2i,
-0x1.1b4a1c087a86p+3 + 0x1.74383966f8481p+2i, -0x1.17e1ea49e4a8p+3 + 0x1.9057c085aa7c4p+2i,
-0x1.13d19298d81edp+3 + 0x1.ac8407bc6f8eep+2i, -0x1.0f16f37c4a8e4p+3 + 0x1.c8ad867c5f294p+2i,
-0x1.09b04772df4bcp+3 + 0x1.e4c457fe352b7p+2i, -0x1.039c281a2e33cp+3 + 0x1.005c21e575814p+3i,
-0x1.f9b322469d83ep+2 + 0x1.0e3c63551cccp+3i, -0x1.eacfc6231df28p+2 + 0x1.1bfa8df927e6ep+3i,
-0x1.da8dcb80b2c91p+2 + 0x1.298e2399d58ap+3i, -0x1.c8ed955ad80aap+2 + 0x1.36ee8f1dd1f06p+3i,
-0x1.b5f05e09467bcp+2 + 0x1.441329b3f3634p+3i, -0x1.a1983ad57f5a8p+2 + 0x1.50f340225e8eep+3i,
-0x1.8be81f0968dfap+2 + 0x1.5d861837daf6ap+3i, -0x1.74e3de726861p+2 + 0x1.69c2f65bf893ep+3i,
-0x1.5c902f54bdaeap+2 + 0x1.75a1233a827f8p+3i, -0x1.42f2abcb3ea06p+2 + 0x1.8117f1869779p+3i,
-0x1.2811d28ff1826p+2 + 0x1.8c1ec3d1a0758p+3i, -0x1.0bf5072a68ef5p+2 + 0x1.96ad12723f25ep+3i,
-0x1.dd49230256c59p+1 + 0x1.a0ba717732c8ap+3i, -0x1.a0533997b849ap+1 + 0x1.aa3e96a218aecp+3i,
-0x1.611c6bc893c46p+1 + 0x1.b3315f65d7ce8p+3i, -0x1.1fba91e9f1d88p+1 + 0x1.bb8ad6e473958p+3i,
-0x1.b88a7a10ce142p+0 + 0x1.c3433be7f2172p+3i, -0x1.2dab5f6c85802p+0 + 0x1.ca5306d1f4b25p+3i,
-0x1.3e1b56470260cp-1 + 0x1.d0b2ef7d988c1p+3i, -0x1.9d4e610940fcp-5 + 0x1.d65bf30f2db3ap+3i,
0x1.1104151168d9cp-1 + 0x1.db4759ad50a5ep+3i, 0x1.20f87f3a0ec9p+0 + 0x1.df6ebc1ef218ep+3i,
0x1.bc36a7cb46676p+0 + 0x1.e2cc0949dca94p+3i, 0x1.2cfbb4c90fa32p+1 + 0x1.e5598b8d4f218p+3i,
0x1.7cf97931e486ep+1 + 0x1.e711edf44cb2p+3i, 0x1.cdef6d6c28b8bp+1 + 0x1.e7f0413b528fdp+3i,
0x1.0fdb92b639a34p+2 + 0x1.e7f000a53409ap+3i, 0x1.39148819e15bcp+2 + 0x1.e70d169af33dcp+3i,
0x1.628e46233374p+2 + 0x1.e543e112852aep+3i, 0x1.8c33fe95b63b2p+2 + 0x1.e29135b88bce5p+3i,
0x1.b5f070f4883c2p+2 + 0x1.def265d930673p+3i, 0x1.dfadf638f046cp+2 + 0x1.da6542046aa3cp+3i,
0x1.04ab467d53dcp+3 + 0x1.d4e81d6a2774ep+3i, 0x1.1969f3016c60ep+3 + 0x1.ce79d0eaeb6b6p+3i,
0x1.2e07b8a132144p+3 + 0x1.c719bdd9b8b5ap+3i, 0x1.42793591cffeep+3 + 0x1.bec7d06c301a1p+3i,
0x1.56b2f517b1e43p+3 + 0x1.b58481d6165ep+3i, 0x1.6aa9767e6f334p+3 + 0x1.ab50da0d9c6c6p+3i,
0x1.7e513431afabbp+3 + 0x1.a02e713600122p+3i, 0x1.919eaaf278102p+3 + 0x1.941f70ae55257p+3i,
0x1.a486612429c9cp+3 + 0x1.872693c28346cp+3i, 0x1.b6fcee2c600f9p+3 + 0x1.794727fcc2f24p+3i,
0x1.c8f701e0b81a9p+3 + 0x1.6a850d1625265p+3i, 0x1.da696bfd784efp+3 + 0x1.5ae4b484f34cap+3i,
0x1.eb49239ff428bp+3 + 0x1.4a6b20a7f9213p+3i, 0x1.fb8b4ebf7839ap+3 + 0x1.391de38e0fdffp+3i,
0x1.0592a4cfbd659p+4 + 0x1.27031d5988d34p+3i, 0x1.0d06571ae15c6p+4 + 0x1.14217a3f5d468p+3i,
0x1.141badbf9784cp+4 + 0x1.0080302252a54p+3i, 0x1.1acdbe5ee352bp+4 + 0x1.d84df7951602fp+2i,
0x1.2117c84d5c307p+4 + 0x1.ae3c3b74883aap+2i, 0x1.26f538137c395p+4 + 0x1.82dcad41a26c6p+2i,
0x1.2c61aada98ac4p+4 + 0x1.5641c6da4bd32p+2i, 0x1.3158f1c3e596p+4 + 0x1.287ef20324f26p+2i,
0x1.35d71526eebc9p+4 + 0x1.f350fe2a6ca1p+1i, 0x1.39d857b4fa618p+4 + 0x1.93a735ec74ec3p+1i,
0x1.3d59397ed7062p+4 + 0x1.322c88c0f0226p+1i, 0x1.40567adaa3ef9p+4 + 0x1.9e1d01febacdcp+0i,
0x1.42cd1f2734b1ap+4 + 0x1.a9f077491c298p-1i, 0x1.44ba6f6ac2893p+4 + 0x1.296e7ab188c4p-5i,
0x1.461bfccab2afep+4 + -0x1.890c3be974064p-1i, 0x1.46efa2da4f17fp+4 + -0x1.9416f75d74da6p+0i,
0x1.473389be6710ap+4 + -0x1.327fc23b5f5cep+1i, 0x1.46e62823e726p+4 + -0x1.9b6b2d87cfbafp+1i,
0x1.4606450793186p+4 + -0x1.024c1df67c05p+2i, 0x1.4492f94d29ef1p+4 + -0x1.36e85f509da98p+2i,
0x1.428bb1245adb2p+4 + -0x1.6b6efcf4fe2d7p+2i, 0x1.3ff02d3a11da4p+4 + -0x1.9fc462ff20e83p+2i,
0x1.3cc083b4c495cp+4 + -0x1.d3ccd842b176p+2i, 0x1.38fd20fa8ae4dp+4 + -0x1.03b647a70173p+3i,
0x1.34a6c83ff2742p+4 + -0x1.1d43dbd248d68p+3i, 0x1.2fbe93dea24e4p+4 + -0x1.368147930f899p+3i,
0x1.2a45f5730940ap+4 + -0x1.4f60b9cb60c8ap+3i, 0x1.243eb5c0793f6p+4 + -0x1.67d47a1e0931cp+3i,
0x1.1daaf45b39defp+4 + -0x1.7fcef1c1bae3p+3i, 0x1.168d271844a3p+4 + -0x1.9742b452ac80ap+3i,
0x1.0ee8194287162p+4 + -0x1.ae22889c93edep+3i, 0x1.06beea95b04fcp+4 + -0x1.c4617156d51eep+3i,
0x1.fc2a1bfd731e6p+3 + -0x1.d9f2b5ccb9939p+3i, 0x1.e9dc904507985p+3 + -0x1.eec9ea6b85a65p+3i,
0x1.d69d5b561ce3ap+3 + -0x1.016d7c98230f8p+4i, 0x1.c27542b7eb7c6p+3 + -0x1.0b0d14f79d216p+4i,
0x1.ad6da3de22c4cp+3 + -0x1.143e15376682ep+4i, 0x1.97906f746ba94p+3 + -0x1.1cfb0b281181fp+4i,
0x1.80e8243e88654p+3 + -0x1.253ebf5fad3bp+4i, 0x1.697fc98fbae8ep+3 + -0x1.2d0438f2db38p+4i,
0x1.5162e95c6bd46p+3 + -0x1.3446c10c07a78p+4i, 0x1.389d89e95ad93p+3 + -0x1.3b01e65e039acp+4i,
0x1.1f3c271bee5e9p+3 + -0x1.4131806f556cp+4i, 0x1.054bab6f8344fp+3 + -0x1.46d1b2bbaa33p+4i,
0x1.d5b2d127cc9f9p+2 + -0x1.4bdeefa8ec129p+4i, 0x1.9fe61f72cdd5cp+2 + -0x1.5055fb4d9c09ep+4i,
0x1.694d531e6cc6cp+2 + -0x1.5433ee062acaap+4i, 0x1.32051bf20e3fbp+2 + -0x1.577636d72acb2p+4i,
0x1.f4556e047271fp+1 + -0x1.5a1a9d9a564fcp+4i, 0x1.83b7ba30db508p+1 + -0x1.5c1f44f486619p+4i,
0x1.126d6153e8076p+1 + -0x1.5d82ac12db83p+4i, 0x1.4166ada53184ap+0 + -0x1.5e43b02d7e578p+4i,
0x1.7638b7e413dd8p-2 + -0x1.5e618dce85444p+4i, -0x1.0c65fc784801ep-1 + -0x1.5ddbe1dbb438ep+4i,
-0x1.6960a0ad89515p+0 + -0x1.5cb2aa61f527ap+4i, -0x1.25bf2c82a3f3fp+1 + -0x1.5ae6472192326p+4i,
-0x1.9607f5d0541c3p+1 + -0x1.587779da66014p+4i, -0x1.02a6642c16b66p+2 + -0x1.55676657630b6p+4i,
-0x1.39a810e1abadbp+2 + -0x1.51b79238ffa99p+4i, -0x1.6fea847754c6cp+2 + -0x1.4d69e47e42709p+4i,
-0x1.a54f9ab6f986p+2 + -0x1.4880a4cc587e9p+4i, -0x1.d9b99b6a19518p+2 + -0x1.42fe7a74cddb8p+4i,
-0x1.0685a6e952986p+3 + -0x1.3ce66b3aaecacp+4i, -0x1.1f9405f572bf4p+3 + -0x1.363bd9d707adbp+4i,
-0x1.37f9eaaf66c8fp+3 + -0x1.2f02843d67d39p+4i, -0x1.4fa9b1189e7ccp+3 + -0x1.273e81a13a198p+4i,
-0x1.66961a7fead11p+3 + -0x1.1ef4403cf46c3p+4i, -0x1.7cb2566c1ff74p+3 + -0x1.162882dc4cf7p+4i,
-0x1.91f20b48f8b4ap+3 + -0x1.0ce05e2ad0e5bp+4i, -0x1.a6495ecfe804ep+3 + -0x1.032135c864fdfp+4i,
-0x1.b9acfe26ad8fdp+3 + -0x1.f1e1724ac79d9p+3i, -0x1.cc1225adadad5p+3 + -0x1.dca9c0506f624p+3i,
-0x1.dd6ea8783e912p+3 + -0x1.c6a7cf3cf04eap+3i, -0x1.edb8f7695190ep+3 + -0x1.afe89af77479ap+3i,
-0x1.fce827ef192c9p+3 + -0x1.987999d0d4665p+3i, -0x1.0579fd2c4539dp+4 + -0x1.8068b44653ec8p+3i,
-0x1.0bea6fdfed2e6p+4 + -0x1.67c43c6ea6af4p+3i, -0x1.11c1ffc2aec42p+4 + -0x1.4e9ae516bdc67p+3i,
-0x1.16fd9e2b3d3d7p+4 + -0x1.34fbb894201e6p+3i, -0x1.1b9a9c5e55b7p+4 + -0x1.1af60f52cb1b8p+3i,
-0x1.1f96ad986e9bcp+4 + -0x1.00998624d0fap+3i, -0x1.22efe8d7e61ccp+4 + -0x1.cbebe8b43c10cp+2i,
-0x1.25a4ca662b48p+4 + -0x1.9636c34dfa2adp+2i, -0x1.27b4351e89d1bp+4 + -0x1.6033f7c03c073p+2i,
-0x1.291d73716e35ep+4 + -0x1.2a0419260ea98p+2i, -0x1.29e0382326947p+4 + -0x1.e78fba07f20fdp+1i,
-0x1.29fc9ec55431cp+4 + -0x1.7b400c29fbf6ep+1i, -0x1.29732bea71ebcp+4 + -0x1.0f5a9df699e7ep+1i,
-0x1.2844cd1304f7ap+4 + -0x1.4841538cf9755p+0i, -0x1.2672d8543ebbap+4 + -0x1.ce9822751aa7p-2i,
-0x1.23ff0bb80880fp+4 + 0x1.7a706fc0ebd3p-2i, -0x1.20eb8c56a3cafp+4 + 0x1.2e060658eef16p+0i,
-0x1.1d3ae52a3c52ap+4 + 0x1.fa1a6c0ac4a18p+0i, -0x1.18f0059cfab9p+4 + 0x1.612f0f5adfd4p+1i,
-0x1.140e3fd258e66p+4 + 0x1.c32c2f19a0b79p+1i, -0x1.0e9946acaa8c8p+4 + 0x1.1164ca4259f79p+2i,
-0x1.08952b8ffd422p+4 + 0x1.3fe6e2d467767p+2i, -0x1.02065be3a4064p+4 + 0x1.6d007af29affep+2i,
-0x1.f5e33ca7e3048p+3 + 0x1.989695b76cfb1p+2i, -0x1.e6b81faba5fe2p+3 + 0x1.c28f31d971344p+2i,
-0x1.d69640dc5707ap+3 + 0x1.ead15b3f4934cp+2i, -0x1.c5891f7df5ce4p+3 + 0x1.08a29df4cfcb2p+3i,
-0x1.b39cd2b90f1dcp+3 + 0x1.1aea16137b4a6p+3i, -0x1.a0de01d9d62fap+3 + 0x1.2c346102c5e73p+3i,
-0x1.8d59dc2913fecp+3 + 0x1.3c776ffc09ddcp+3i, -0x1.791e10644926ap+3 + 0x1.4ba9e1514c443p+3i,
-0x1.6438c3daa7783p+3 + 0x1.59c306e2acabep+3i, -0x1.4eb88934cd2dp+3 + 0x1.66baec1e10067p+3i,
-0x1.38ac56ed6c436p+3 + 0x1.728a5b84921d8p+3i, -0x1.22237d8144e16p+3 + 0x1.7d2ae3b19c9bfp+3i,
-0x1.0b2d9d5d1170cp+3 + 0x1.8696dbdfd996ep+3i, -0x1.e7b539206c9ep+2 + 0x1.8ec967e891625p+3i,
-0x1.b87538966a86cp+2 + 0x1.95be7bba60283p+3i, -0x1.88bbdc621c8bbp+2 + 0x1.9b72de448ec21p+3i,
-0x1.58aa1305b6c99p+2 + 0x1.9fe42bd4bba42p+3i, -0x1.2861005a29398p+2 + 0x1.a310d7e4e3d5ep+3i,
-0x1.f003cef808a12p+1 + 0x1.a4f82e5840c5cp+3i, -0x1.8f5c2933e06c4p+1 + 0x1.a59a5425d5ee4p+3i,
-0x1.2f0d8d68139b7p+1 + 0x1.a4f8476ff07bp+3i, -0x1.9eb4656e2d8b8p+0 + 0x1.a313df08430b2p+3i,
-0x1.c20f7411711d6p-1 + 0x1.9fefc960b01cap+3i, -0x1.2cbb0d15ba668p-3 + 0x1.9b8f8ae93e33ep+3i,
0x1.2637e221b74cfp-1 + 0x1.95f77bdc193cep+3i, 0x1.489378440595fp+0 + 0x1.8f2cc578ec8efp+3i,
0x1.fa52b06f78031p+0 + 0x1.87355eb1482c6p+3i, 0x1.53efd62299afep+1 + 0x1.7e18084829f09p+3i,
0x1.a861b37cb15a1p+1 + 0x1.73dc486728228p+3i, 0x1.fa450a53e537dp+1 + 0x1.688a65ac1df5cp+3i,
0x1.24b0dc2759c42p+2 + 0x1.5c2b61b29bb24p+3i, 0x1.4ac0c253d0e6cp+2 + 0x1.4ec8f31cbb1c5p+3i,
0x1.6f38227248ff2p+2 + 0x1.406d7f1f552d4p+3i, 0x1.91fe001816a12p+2 + 0x1.31241295efc2cp+3i,
0x1.b2fa889e403f8p+2 + 0x1.20f85aa3108ccp+3i, 0x1.d217234b74e86p+2 + 0x1.0ff69ce1f5c93p+3i,
0x1.ef3e80ab455cep+2 + 0x1.fc575e5e0a3c4p+2i, 0x1.052e5483e6046p+3 + 0x1.d749de1919bbep+2i,
0x1.11af84fdc58cbp+3 + 0x1.b0e0715377f8ap+2i, 0x1.1d1a4188f512ap+3 + 0x1.8937bb23f179ep+2i,
0x1.2766b8b6b3166p+3 + 0x1.606d3615fb00dp+2i, 0x1.308ddd359b5p+3 + 0x1.369f2050bba84p+2i,
0x1.38896a9e70154p+3 + 0x1.0bec673697c94p+2i, 0x1.3f53e9b874c0ap+3 + 0x1.c0e925169dfbcp+1i,
0x1.44e8b43253976p+3 + 0x1.68af5e5e2f13fp+1i, 0x1.4943f7ccec29ap+3 + 0x1.0f6c72fadded4p+1i,
0x1.4c62b8f5cbb5ep+3 + 0x1.6ac41d7a6a61p+0i, 0x1.4e42d4cf77385p+3 + 0x1.6b4990d8558f8p-1i,
0x1.4ee302a6165fcp+3 + 0x0p+0i, 0x1.4e42d4cf7738p+3 + -0x1.6b4990d8558e8p-1i,
0x1.4c62b8f5cbb58p+3 + -0x1.6ac41d7a6a603p+0i, 0x1.4943f7ccec294p+3 + -0x1.0f6c72faddecdp+1i,
0x1.44e8b43253972p+3 + -0x1.68af5e5e2f136p+1i, 0x1.3f53e9b874c07p+3 + -0x1.c0e925169dfb9p+1i,
0x1.38896a9e7015p+3 + -0x1.0bec673697c9p+2i, 0x1.308ddd359b4fcp+3 + -0x1.369f2050bba7ep+2i,
0x1.2766b8b6b3162p+3 + -0x1.606d3615fb009p+2i, 0x1.1d1a4188f5128p+3 + -0x1.8937bb23f1798p+2i,
0x1.11af84fdc58cap+3 + -0x1.b0e0715377f86p+2i, 0x1.052e5483e6043p+3 + -0x1.d749de1919bb9p+2i,
0x1.ef3e80ab455ccp+2 + -0x1.fc575e5e0a3bep+2i, 0x1.d217234b74e84p+2 + -0x1.0ff69ce1f5c9p+3i,
0x1.b2fa889e403f4p+2 + -0x1.20f85aa3108c8p+3i, 0x1.91fe001816a1p+2 + -0x1.31241295efc29p+3i,
0x1.6f38227248fecp+2 + -0x1.406d7f1f552d2p+3i, 0x1.4ac0c253d0e6cp+2 + -0x1.4ec8f31cbb1c2p+3i,
0x1.24b0dc2759c42p+2 + -0x1.5c2b61b29bb1fp+3i, 0x1.fa450a53e537ap+1 + -0x1.688a65ac1df59p+3i,
0x1.a861b37cb15ap+1 + -0x1.73dc486728226p+3i, 0x1.53efd62299b03p+1 + -0x1.7e18084829f09p+3i,
0x1.fa52b06f7802fp+0 + -0x1.87355eb1482c4p+3i, 0x1.489378440595ap+0 + -0x1.8f2cc578ec8eep+3i,
0x1.2637e221b74cep-1 + -0x1.95f77bdc193cep+3i, -0x1.2cbb0d15ba628p-3 + -0x1.9b8f8ae93e33ep+3i,
-0x1.c20f7411711dp-1 + -0x1.9fefc960b01cap+3i, -0x1.9eb4656e2d8bap+0 + -0x1.a313df08430b1p+3i,
-0x1.2f0d8d68139b2p+1 + -0x1.a4f8476ff07bp+3i, -0x1.8f5c2933e06bep+1 + -0x1.a59a5425d5ee6p+3i,
-0x1.f003cef808a1p+1 + -0x1.a4f82e5840c5ep+3i, -0x1.2861005a29398p+2 + -0x1.a310d7e4e3d6p+3i,
-0x1.58aa1305b6c9ap+2 + -0x1.9fe42bd4bba42p+3i, -0x1.88bbdc621c8b8p+2 + -0x1.9b72de448ec1fp+3i,
-0x1.b87538966a86ap+2 + -0x1.95be7bba60284p+3i, -0x1.e7b539206c9ep+2 + -0x1.8ec967e891624p+3i,
-0x1.0b2d9d5d1170bp+3 + -0x1.8696dbdfd996ep+3i, -0x1.22237d8144e16p+3 + -0x1.7d2ae3b19c9c2p+3i,
-0x1.38ac56ed6c436p+3 + -0x1.728a5b84921d8p+3i, -0x1.4eb88934cd2d2p+3 + -0x1.66baec1e10068p+3i,
-0x1.6438c3daa7785p+3 + -0x1.59c306e2acac2p+3i, -0x1.791e10644926ap+3 + -0x1.4ba9e1514c446p+3i,
-0x1.8d59dc2913fecp+3 + -0x1.3c776ffc09dep+3i, -0x1.a0de01d9d62fcp+3 + -0x1.2c346102c5e74p+3i,
-0x1.b39cd2b90f1dep+3 + -0x1.1aea16137b4a9p+3i, -0x1.c5891f7df5ce6p+3 + -0x1.08a29df4cfcb5p+3i,
-0x1.d69640dc5707cp+3 + -0x1.ead15b3f49353p+2i, -0x1.e6b81faba5fe4p+3 + -0x1.c28f31d971349p+2i,
-0x1.f5e33ca7e304cp+3 + -0x1.989695b76cfb6p+2i, -0x1.02065be3a4065p+4 + -0x1.6d007af29b004p+2i,
-0x1.08952b8ffd424p+4 + -0x1.3fe6e2d46776dp+2i, -0x1.0e9946acaa8cap+4 + -0x1.1164ca4259f7dp+2i,
-0x1.140e3fd258e68p+4 + -0x1.c32c2f19a0b83p+1i, -0x1.18f0059cfab92p+4 + -0x1.612f0f5adfd4cp+1i,
-0x1.1d3ae52a3c52cp+4 + -0x1.fa1a6c0ac4a22p+0i, -0x1.20eb8c56a3cb2p+4 + -0x1.2e060658eef2p+0i,
-0x1.23ff0bb808812p+4 + -0x1.7a706fc0ebd6cp-2i, -0x1.2672d8543ebbep+4 + 0x1.ce9822751aa38p-2i,
-0x1.2844cd1304f7ep+4 + 0x1.4841538cf974dp+0i, -0x1.29732bea71ecp+4 + 0x1.0f5a9df699e8p+1i,
-0x1.29fc9ec55431fp+4 + 0x1.7b400c29fbf6cp+1i, -0x1.29e038232694ap+4 + 0x1.e78fba07f20fap+1i,
-0x1.291d73716e362p+4 + 0x1.2a0419260ea9ap+2i, -0x1.27b4351e89d1fp+4 + 0x1.6033f7c03c074p+2i,
-0x1.25a4ca662b481p+4 + 0x1.9636c34dfa2aep+2i, -0x1.22efe8d7e61c7p+4 + 0x1.cbebe8b43c108p+2i,
-0x1.1f96ad986e9b8p+4 + 0x1.00998624d0f9cp+3i, -0x1.1b9a9c5e55b6cp+4 + 0x1.1af60f52cb1b6p+3i,
-0x1.16fd9e2b3d3d4p+4 + 0x1.34fbb894201e2p+3i, -0x1.11c1ffc2aec4p+4 + 0x1.4e9ae516bdc64p+3i,
-0x1.0bea6fdfed2e3p+4 + 0x1.67c43c6ea6afp+3i, -0x1.0579fd2c4539ap+4 + 0x1.8068b44653ec4p+3i,
-0x1.fce827ef192c4p+3 + 0x1.987999d0d4663p+3i, -0x1.edb8f7695190ap+3 + 0x1.afe89af774796p+3i,
-0x1.dd6ea8783e91p+3 + 0x1.c6a7cf3cf04e8p+3i, -0x1.cc1225adadad2p+3 + 0x1.dca9c0506f622p+3i,
-0x1.b9acfe26ad8fap+3 + 0x1.f1e1724ac79d4p+3i, -0x1.a6495ecfe804ap+3 + 0x1.032135c864fddp+4i,
-0x1.91f20b48f8b48p+3 + 0x1.0ce05e2ad0e5ap+4i, -0x1.7cb2566c1ff73p+3 + 0x1.162882dc4cf6ep+4i,
-0x1.66961a7fead0ep+3 + 0x1.1ef4403cf46c2p+4i, -0x1.4fa9b1189e7ccp+3 + 0x1.273e81a13a196p+4i,
-0x1.37f9eaaf66c8ep+3 + 0x1.2f02843d67d37p+4i, -0x1.1f9405f572bf3p+3 + 0x1.363bd9d707ad9p+4i,
-0x1.0685a6e952986p+3 + 0x1.3ce66b3aaecacp+4i, -0x1.d9b99b6a19518p+2 + 0x1.42fe7a74cddb7p+4i,
-0x1.a54f9ab6f985ep+2 + 0x1.4880a4cc587e7p+4i, -0x1.6fea847754c6cp+2 + 0x1.4d69e47e4270ap+4i,
-0x1.39a810e1abadbp+2 + 0x1.51b79238ffa98p+4i, -0x1.02a6642c16b68p+2 + 0x1.55676657630b7p+4i,
-0x1.9607f5d0541c2p+1 + 0x1.587779da66013p+4i, -0x1.25bf2c82a3f4p+1 + 0x1.5ae6472192326p+4i,
-0x1.6960a0ad89522p+0 + 0x1.5cb2aa61f527ap+4i, -0x1.0c65fc784802ep-1 + 0x1.5ddbe1dbb438ep+4i,
0x1.7638b7e413de4p-2 + 0x1.5e618dce85444p+4i, 0x1.4166ada531846p+0 + 0x1.5e43b02d7e578p+4i,
0x1.126d6153e8076p+1 + 0x1.5d82ac12db831p+4i, 0x1.83b7ba30db502p+1 + 0x1.5c1f44f486618p+4i,
0x1.f4556e0472718p+1 + 0x1.5a1a9d9a564fcp+4i, 0x1.32051bf20e3fbp+2 + 0x1.577636d72acb2p+4i,
0x1.694d531e6cc6ap+2 + 0x1.5433ee062acabp+4i, 0x1.9fe61f72cdd58p+2 + 0x1.5055fb4d9c09ep+4i,
0x1.d5b2d127cc9f8p+2 + 0x1.4bdeefa8ec129p+4i, 0x1.054bab6f8344fp+3 + 0x1.46d1b2bbaa332p+4i,
0x1.1f3c271bee5e9p+3 + 0x1.4131806f556c2p+4i, 0x1.389d89e95ad94p+3 + 0x1.3b01e65e039aep+4i,
0x1.5162e95c6bd48p+3 + 0x1.3446c10c07a78p+4i, 0x1.697fc98fbae92p+3 + 0x1.2d0438f2db382p+4i,
0x1.80e8243e88654p+3 + 0x1.253ebf5fad3b2p+4i, 0x1.97906f746ba96p+3 + 0x1.1cfb0b281182p+4i,
0x1.ad6da3de22c4ep+3 + 0x1.143e15376683p+4i, 0x1.c27542b7eb7c9p+3 + 0x1.0b0d14f79d218p+4i,
0x1.d69d5b561ce3cp+3 + 0x1.016d7c98230fap+4i, 0x1.e9dc904507988p+3 + 0x1.eec9ea6b85a6ap+3i,
0x1.fc2a1bfd731e9p+3 + 0x1.d9f2b5ccb993ep+3i, 0x1.06beea95b04fep+4 + 0x1.c4617156d51f4p+3i,
0x1.0ee8194287164p+4 + 0x1.ae22889c93ee3p+3i, 0x1.168d271844a32p+4 + 0x1.9742b452ac81p+3i,
0x1.1daaf45b39df1p+4 + 0x1.7fcef1c1bae32p+3i, 0x1.243eb5c0793f8p+4 + 0x1.67d47a1e0932p+3i,
0x1.2a45f5730940dp+4 + 0x1.4f60b9cb60c8fp+3i, 0x1.2fbe93dea24e6p+4 + 0x1.368147930f89dp+3i,
0x1.34a6c83ff2746p+4 + 0x1.1d43dbd248d6cp+3i, 0x1.38fd20fa8ae51p+4 + 0x1.03b647a701732p+3i,
0x1.3cc083b4c496p+4 + 0x1.d3ccd842b1765p+2i, 0x1.3ff02d3a11da8p+4 + 0x1.9fc462ff20e8ap+2i,
0x1.428bb1245adb6p+4 + 0x1.6b6efcf4fe2dap+2i, 0x1.4492f94d29ef6p+4 + 0x1.36e85f509da9ap+2i,
0x1.4606450793186p+4 + 0x1.024c1df67c05p+2i, 0x1.46e62823e725cp+4 + 0x1.9b6b2d87cfbb2p+1i,
0x1.473389be67106p+4 + 0x1.327fc23b5f5d4p+1i, 0x1.46efa2da4f17bp+4 + 0x1.9416f75d74da7p+0i,
0x1.461bfccab2afap+4 + 0x1.890c3be97407ep-1i, 0x1.44ba6f6ac289p+4 + -0x1.296e7ab188a4p-5i,
0x1.42cd1f2734b18p+4 + -0x1.a9f077491c28cp-1i, 0x1.40567adaa3ef6p+4 + -0x1.9e1d01febaccap+0i,
0x1.3d59397ed706p+4 + -0x1.322c88c0f0221p+1i, 0x1.39d857b4fa614p+4 + -0x1.93a735ec74ecp+1i,
0x1.35d71526eebc7p+4 + -0x1.f350fe2a6ca0fp+1i, 0x1.3158f1c3e595dp+4 + -0x1.287ef20324f26p+2i,
0x1.2c61aada98ac1p+4 + -0x1.5641c6da4bd2ep+2i, 0x1.26f538137c393p+4 + -0x1.82dcad41a26c2p+2i,
0x1.2117c84d5c305p+4 + -0x1.ae3c3b74883a8p+2i, 0x1.1acdbe5ee3529p+4 + -0x1.d84df7951602ep+2i,
0x1.141badbf9784ap+4 + -0x1.0080302252a53p+3i, 0x1.0d06571ae15c6p+4 + -0x1.14217a3f5d465p+3i,
0x1.0592a4cfbd659p+4 + -0x1.27031d5988d2fp+3i, 0x1.fb8b4ebf7839ap+3 + -0x1.391de38e0fdfbp+3i,
0x1.eb49239ff428bp+3 + -0x1.4a6b20a7f9212p+3i, 0x1.da696bfd784efp+3 + -0x1.5ae4b484f34c8p+3i,
0x1.c8f701e0b81a8p+3 + -0x1.6a850d1625262p+3i, 0x1.b6fcee2c600f9p+3 + -0x1.794727fcc2f22p+3i,
0x1.a486612429c9cp+3 + -0x1.872693c28346ap+3i, 0x1.919eaaf278102p+3 + -0x1.941f70ae55256p+3i,
0x1.7e513431afabap+3 + -0x1.a02e713600122p+3i, 0x1.6aa9767e6f335p+3 + -0x1.ab50da0d9c6c8p+3i,
0x1.56b2f517b1e44p+3 + -0x1.b58481d6165e2p+3i, 0x1.42793591cffeep+3 + -0x1.bec7d06c301a1p+3i,
0x1.2e07b8a132144p+3 + -0x1.c719bdd9b8b5ap+3i, 0x1.1969f3016c60ep+3 + -0x1.ce79d0eaeb6b7p+3i,
0x1.04ab467d53dbep+3 + -0x1.d4e81d6a2774fp+3i, 0x1.dfadf638f046ep+2 + -0x1.da6542046aa3ap+3i,
0x1.b5f070f4883cap+2 + -0x1.def265d930674p+3i, 0x1.8c33fe95b63b3p+2 + -0x1.e29135b88bce6p+3i,
0x1.628e46233374p+2 + -0x1.e543e112852aep+3i, 0x1.39148819e15bfp+2 + -0x1.e70d169af33ddp+3i,
0x1.0fdb92b639a36p+2 + -0x1.e7f000a53409bp+3i, 0x1.cdef6d6c28b94p+1 + -0x1.e7f0413b528ffp+3i,
0x1.7cf97931e4874p+1 + -0x1.e711edf44cb23p+3i, 0x1.2cfbb4c90fa32p+1 + -0x1.e5598b8d4f21ap+3i,
0x1.bc36a7cb46676p+0 + -0x1.e2cc0949dca98p+3i, 0x1.20f87f3a0ec8cp+0 + -0x1.df6ebc1ef219p+3i,
0x1.1104151168d88p-1 + -0x1.db4759ad50a61p+3i, -0x1.9d4e6109410cp-5 + -0x1.d65bf30f2db3ep+3i,
-0x1.3e1b56470261cp-1 + -0x1.d0b2ef7d988c4p+3i, -0x1.2dab5f6c85816p+0 + -0x1.ca5306d1f4b29p+3i,
-0x1.b88a7a10ce152p+0 + -0x1.c3433be7f2174p+3i, -0x1.1fba91e9f1d86p+1 + -0x1.bb8ad6e47395cp+3i,
-0x1.611c6bc893c46p+1 + -0x1.b3315f65d7cecp+3i, -0x1.a0533997b849ap+1 + -0x1.aa3e96a218af1p+3i,
-0x1.dd49230256c5bp+1 + -0x1.a0ba717732c8fp+3i, -0x1.0bf5072a68ef8p+2 + -0x1.96ad12723f262p+3i,
-0x1.2811d28ff1828p+2 + -0x1.8c1ec3d1a075cp+3i, -0x1.42f2abcb3ea09p+2 + -0x1.8117f18697795p+3i,
-0x1.5c902f54bdaf2p+2 + -0x1.75a1233a827fcp+3i, -0x1.74e3de7268617p+2 + -0x1.69c2f65bf8942p+3i,
-0x1.8be81f0968e02p+2 + -0x1.5d861837daf6ep+3i, -0x1.a1983ad57f5b1p+2 + -0x1.50f340225e8f2p+3i,
-0x1.b5f05e09467c5p+2 + -0x1.441329b3f3638p+3i, -0x1.c8ed955ad80b2p+2 + -0x1.36ee8f1dd1f0ap+3i,
-0x1.da8dcb80b2c9ap+2 + -0x1.298e2399d58a4p+3i, -0x1.eacfc6231df33p+2 + -0x1.1bfa8df927e72p+3i,
-0x1.f9b322469d83ep+2 + -0x1.0e3c63551cccp+3i, -0x1.039c281a2e336p+3 + -0x1.005c21e575812p+3i,
-0x1.09b04772df4b6p+3 + -0x1.e4c457fe352b4p+2i, -0x1.0f16f37c4a8dfp+3 + -0x1.c8ad867c5f292p+2i,
-0x1.13d19298d81e9p+3 + -0x1.ac8407bc6f8ecp+2i, -0x1.17e1ea49e4a7dp+3 + -0x1.9057c085aa7c2p+2i,
-0x1.1b4a1c087a85cp+3 + -0x1.74383966f8482p+2i, -0x1.1e0ca1ee80902p+3 + -0x1.58349698883dcp+2i,
-0x1.202c4b33a5f12p+3 + -0x1.3c5b904ba07ecp+2i, -0x1.21ac3881659bap+3 + -0x1.20bb6b6c281adp+2i,
-0x1.228fd8218c3a6p+3 + -0x1.0561f2d7134ap+2i, -0x1.22dae20ab8bedp+3 + -0x1.d4b8e210e74a8p+1i,
-0x1.229153ce5a2aep+3 + -0x1.9f6f54870f5b9p+1i, -0x1.21b76c6bb4a84p+3 + -0x1.6affae6d7c4afp+1i,
-0x1.2051a80b7ca8bp+3 + -0x1.3781403a762d6p+1i, -0x1.1e64bba79886p+3 + -0x1.050a30be0c5edp+1i,
-0x1.1bf590a2980c9p+3 + -0x1.a75eeac29270bp+0i, -0x1.190940526d816p+3 + -0x1.470996c9f0addp+0i,
-0x1.15a50f81ed261p+3 + -0x1.d272c87fd3a74p-1i, -0x1.11ce69ec8f038p+3 + -0x1.1c2198f0caf74p-1i,
-0x1.0d8addb7e2f0ep+3 + -0x1.ad7ddf0f5d98p-3i, -0x1.08e016ee1879ep+3 + 0x1.fcc73901bfe2p-4i,
-0x1.03d3dafceb84p+3 + 0x1.c923f82d50798p-2i, -0x1.fcd8087866eaap+2 + 0x1.835b6f37a7c5p-1i,
-0x1.f15cfafe7a9b2p+2 + 0x1.0de50c09bfd0ep+0i, -0x1.e5427b68033e5p+2 + 0x1.56dbfce104e62p+0i,
-0x1.d89487265bb16p+2 + 0x1.9c821dae64d3ap+0i, -0x1.cb5f22c7b9b92p+2 + 0x1.dec98e7e2d9e8p+0i,
-0x1.bdae51ab7b58cp+2 + 0x1.0ed37a5509c0ap+1i, -0x1.af8e0de89005bp+2 + 0x1.2c88b902586c2p+1i,
-0x1.a10a406afc898p+2 + 0x1.48814d39163p+1i, -0x1.922eb94d349a2p+2 + 0x1.62bb33ff38d84p+1i,
-0x1.83072871bede8p+2 + 0x1.7b3596c1210cdp+1i, -0x1.739f16614f6f2p+2 + 0x1.91f0c3f49637fp+1i,
-0x1.6401dd713a2b6p+2 + 0x1.a6ee2715985d4p+1i, -0x1.543aa335d1d26p+2 + 0x1.ba304006bc396p+1i,
-0x1.44545243fad3p+2 + 0x1.cbba99df0eac1p+1i, -0x1.34599444e9a42p+2 + 0x1.db91c12fbb94p+1i,
-0x1.2454cc5eb45f7p+2 + 0x1.e9bb39cbea0ecp+1i, -0x1.145011f40e942p+2 + 0x1.f63d741d6c4ap+1i,
-0x1.04552bbd352c8p+2 + 0x1.008fe10881fddp+2i, -0x1.e8db16777dea2p+1 + 0x1.053525d28af58p+2i,
-0x1.c94491156a8bcp+1 + 0x1.09130192db96p+2i, -0x1.a9f83d180f288p+1 + 0x1.0c2e4c9082c0fp+2i,
-0x1.8b06cee83968ap+1 + 0x1.0e8c380f5d1ep+2i, -0x1.6c803367eae2cp+1 + 0x1.10324812eddf6p+2i,
-0x1.4e738a840c74ap+1 + 0x1.11264d0daf177p+2i, -0x1.30ef228342194p+1 + 0x1.116e5d8233aadp+2i,
-0x1.14007410fbc3ep+1 + 0x1.1110cf9b62ac6p+2i, -0x1.ef683e069b389p+0 + 0x1.101432c0fda34p+2i,
-0x1.b82bcfb317c6ap+0 + 0x1.0e7f492d89276p+2i, -0x1.82616be23a856p+0 + 0x1.0c59018a91658p+2i,
-0x1.4e1d24dae310bp+0 + 0x1.09a8709822d2cp+2i, -0x1.1b714f9f16bb7p+0 + 0x1.0674cae42a9acp+2i,
-0x1.d4dd08e446674p-1 + 0x1.02c55e964a833p+2i, -0x1.76474159a2c7p-1 + 0x1.fd431aa9023dap+1i,
-0x1.1b3b93ab9b752p-1 + 0x1.f4218c89b32ccp+1i, -0x1.87a1c7ce6f88ap-2 + 0x1.ea3500604c1f8p+1i,
-0x1.c06ae8654f208p-3 + 0x1.df8c67932a72bp+1i, -0x1.014a7c9628c6p-4 + 0x1.d436ac3d7d73cp+1i,
0x1.5fb30d4878fp-4 + 0x1.c842a66717526p+1i, 0x1.d0ea3597a8dap-3 + 0x1.bbbf119a4f98fp+1i,
0x1.713903990d532p-2 + 0x1.aeba82dddcfb6p+1i, 0x1.f23124329b2f3p-2 + 0x1.a1435f1815edap+1i,
0x1.35adff998d95p-1 + 0x1.9367d1e08f278p+1i, 0x1.6e5ee10d8b6dcp-1 + 0x1.8535c4c4952fap+1i,
0x1.a330088e621fep-1 + 0x1.76bad70282c8cp+1i, 0x1.d428e81725d27p-1 + 0x1.680455bf7a337p+1i,
0x1.00a9ba688f4d4p+0 + 0x1.591f34ba8adcdp+1i, 0x1.155e04a199aedp+0 + 0x1.4a18077fd0e2ep+1i,
0x1.2838a34261dc8p+0 + 0x1.3afafb1da03b6p+1i, 0x1.3941fa4a1ccc1p+0 + 0x1.2bd3d05d527c8p+1i,
0x1.488370f8c4896p+0 + 0x1.1cadd680d5046p+1i, 0x1.5607614e53a2ep+0 + 0x1.0d93e6859d7a8p+1i,
0x1.61d9074bdc5adp+0 + 0x1.fd20bdda6a7f8p+0i, 0x1.6c0470062a8f8p+0 + 0x1.df5a401632707p+0i,
0x1.7496689961a49p+0 + 0x1.c1e711ae767f6p+0i, 0x1.7b9c6d0cbdcdep+0 + 0x1.a4d8e8880acb2p+0i,
0x1.812497354acd5p+0 + 0x1.88406e2cab34cp+0i, 0x1.853d8da601a24p+0 + 0x1.6c2d3c7b04fafp+0i,
0x1.87f672bb53d4fp+0 + 0x1.50addb5230a26p+0i, 0x1.895ed3cfb6189p+0 + 0x1.35cfbf333f09ep+0i,
0x1.898698a63dfdcp+0 + 0x1.1b9f48d2cac46p+0i, 0x1.887df317df9ccp+0 + 0x1.0227c593d65e4p+0i,
0x1.86554f0f4968dp+0 + 0x1.d2e6e1c9438a4p-1i, 0x1.831d42dec64dcp+0 + 0x1.a316eceb198ep-1i,
0x1.7ee67ffaf1912p+0 + 0x1.74efea832606p-1i, 0x1.79c1c4246540cp+0 + 0x1.488006bedd79p-1i,
0x1.73bfcb09e4353p+0 + 0x1.1dd36523f8bd4p-1i, 0x1.6cf1406bd4275p+0 + 0x1.e9e856383f198p-2i,
0x1.6566b2c92c52fp+0 + 0x1.9bd517ae3d6fp-2i, 0x1.5d30869d49a59p+0 + 0x1.5179aadd5c8cp-2i,
0x1.545eea3563eep+0 + 0x1.0adefe6c7f52p-2i, 0x1.4b01ca23a7efp+0 + 0x1.9014e483589c8p-3i,
0x1.4128c6554210cp+0 + 0x1.11fbf09a35e58p-3i, 0x1.36e327cfec77cp+0 + 0x1.36e0dc995394p-4i,
0x1.2c3fd71adb2f8p+0 + 0x1.634ae4dc0738p-6i, 0x1.214d5356299dcp+0 + -0x1.d94241b4cb3cp-6i,
0x1.1619aa0336efcp+0 + -0x1.36b271477aaap-4i, 0x1.0ab26f7fabbd6p+0 + -0x1.e88723579008p-4i,
0x1.fe49706865a2p-1 + -0x1.460682c2c0658p-3i, 0x1.e6fa24ee83b18p-1 + -0x1.90c5fa3ebfe28p-3i,
0x1.cf8f02474c268p-1 + -0x1.d4aa509e4ccep-3i, 0x1.b81eedc3c8cp-1 + -0x1.08eff97f4b78p-2i,
0x1.a0bfa445ba49cp-1 + -0x1.244b868ba03ep-2i, 0x1.8985b39314a68p-1 + -0x1.3c819987a9908p-2i,
0x1.7284750234cdcp-1 + -0x1.51ad85be5a5d8p-2i, 0x1.5bce09783b398p-1 + -0x1.63ebfcee727b8p-2i,
0x1.457356b1e840cp-1 + -0x1.735ae4e984d38p-2i, 0x1.2f8405ce58afp-1 + -0x1.80192dba2aad8p-2i,
0x1.1a0e83121123p-1 + -0x1.8a46a8745447p-2i, 0x1.051ffed7e5b68p-1 + -0x1.9203ded0fa07cp-2i,
0x1.e188df48f5f5p-2 + -0x1.9771ebb4b05f8p-2i, 0x1.ba0d2aa0b9a6p-2 + -0x1.9ab254becc544p-2i,
0x1.93dffa77effep-2 + -0x1.9be6e4fce2e74p-2i, 0x1.6f120f0048f1p-2 + -0x1.9b3188dc7aa9p-2i,
0x1.4bb1da1ac24bp-2 + -0x1.98b42b72c947p-2i, 0x1.29cb8d56ca47p-2 + -0x1.949095304e61p-2i,
0x1.096929948059p-2 + -0x1.8ee84c140e574p-2i, 0x1.d5252057a732p-3 + -0x1.87dc75701aa1p-2i,
0x1.9a9b2af35d4ap-3 + -0x1.7f8db94efe4ap-2i, 0x1.633c2967e416p-3 + -0x1.761c27878c108p-2i,
0x1.2f0c09c965c6p-3 + -0x1.6ba71e8a74e4p-2i, 0x1.fc163bb42024p-4 + -0x1.604d33f2f8fap-2i,
0x1.a06c91be9d78p-4 + -0x1.542c1ee1f6e18p-2i, 0x1.4b0e5e0ea7c4p-4 + -0x1.4760a428945cp-2i,
0x1.f7d1988021c8p-5 + -0x1.3a068445bddp-2i, 0x1.65c69d8495cp-5 + -0x1.2c386b37c53bp-2i,
0x1.bf801060cb9p-6 + -0x1.1e0fe2218223p-2i, 0x1.95e0ea57759p-7 + -0x1.0fa542c07a78p-2i,
-0x1.2beeb98285p-10 + -0x1.010facafd2e1p-2i, -0x1.b477c454cedp-7 + -0x1.e4c9f8e417dep-3i,
-0x1.8c431cb3686p-6 + -0x1.c7738873d7e8p-3i, -0x1.14cf4619c93p-5 + -0x1.aa428cd2c45ep-3i,
-0x1.5991e6aa5fc4p-5 + -0x1.8d5ae3776556p-3i, -0x1.94d94b55858cp-5 + -0x1.70ddbe1da39cp-3i,
-0x1.c71996efafcp-5 + -0x1.54e9a17d0c1ep-3i, -0x1.f0ca72953b84p-5 + -0x1.399a67055748p-3i,
-0x1.0933289c9b0ap-4 + -0x1.1f094181ab64p-3i, -0x1.1634dc51e278p-4 + -0x1.054cc480fd9cp-3i,
-0x1.1fa94839bbeap-4 + -0x1.d8f1dcc03164p-4i, -0x1.25cfbbd4bba4p-4 + -0x1.a93e69a045ecp-4i,
-0x1.28e7909e7394p-4 + -0x1.7b9d27660f78p-4i, -0x1.292fdc130794p-4 + -0x1.50273c4bc5p-4i,
-0x1.26e725a2f782p-4 + -0x1.26f12320f99cp-4i, -0x1.224b20c17212p-4 + -0x1.000ad520245p-4i,
-0x1.1b986b338036p-4 + -0x1.b6ffee4b9c3p-5i, -0x1.130a4fc056dep-4 + -0x1.72b013e7252p-5i,
-0x1.08da8d5d24a2p-4 + -0x1.332d3a513978p-5i, -0x1.fa8245d1918p-5 + -0x1.f0ee12648e4p-6i,
-0x1.e0e83f0c1804p-5 + -0x1.850c3eba9f9p-6i, -0x1.c54eef3a7738p-5 + -0x1.229909ed36fp-6i,
-0x1.a819bd10dc88p-5 + -0x1.92d8815d112p-7i, -0x1.89a749d08f64p-5 + -0x1.e549683666p-8i,
-0x1.6a51373390bp-5 + -0x1.906b427381p-9i, -0x1.4a6bf6813d1p-5 + 0x1.953e93033ep-11i,
-0x1.2a46a0a91224p-5 + 0x1.0c8e703d4a4p-8i, -0x1.0a2ad7375eecp-5 + 0x1.c6dbeac3bp-8i,
-0x1.d4b95bdd9248p-6 + 0x1.318433eba3cp-7i, -0x1.96353992506p-6 + 0x1.715165b1e28p-7i,
-0x1.593af434c13p-6 + 0x1.a3a688ecb06p-7i, -0x1.1e30f9111e3p-6 + 0x1.c95c5f42bfp-7i,
-0x1.cae5121853ap-7 + 0x1.e351b0bc18ap-7i, -0x1.5e9fb3cf75ap-7 + 0x1.f2699c3594cp-7i,
-0x1.f0391465038p-8 + 0x1.f789f6efc62p-7i, -0x1.2fa20da6ep-8 + 0x1.f399bcb3e3ep-7i,
-0x1.f0ef48dbap-10 + 0x1.e77f91e80eap-7i, 0x1.4b01c4687ep-11 + 0x1.d42058ca8f8p-7i,
0x1.817ea5be88p-9 + 0x1.ba5ddae971p-7i, 0x1.4993c947ep-8 + 0x1.9b1587c9548p-7i,
0x1.c3b33986aep-8 + 0x1.771f498bbecp-7i, 0x1.178a5caea14p-7 + 0x1.4f4c70426d6p-7i,
0x1.45e7717627cp-7 + 0x1.2466b47addcp-7i, 0x1.6d0ae52c432p-7 + 0x1.ee5ea2d6148p-8i,
0x1.8d1c70342e8p-7 + 0x1.90bc6e0d7fcp-8i, 0x1.a65062e264cp-7 + 0x1.3142a847e5p-8i,
0x1.b8e6893da28p-7 + 0x1.a26fe2d0a58p-9i, 0x1.c5290aa7e18p-7 + 0x1.c7327fce5fp-10i,
0x1.cb6b472cc68p-7 + 0x1.419e503706p-12i, 0x1.cc08b4273d8p-7 + -0x1.1b7b839d8cp-10i,
0x1.c763b9e291cp-7 + -0x1.3c694a2b43p-9i, 0x1.bde493cc12cp-7 + -0x1.e22dfdc72dcp-9i,
0x1.aff834b9ac8p-7 + -0x1.3ece506dfdp-8i, 0x1.9e0f30b7b1cp-7 + -0x1.86bbc31a646p-8i,
0x1.889cadbc95p-7 + -0x1.c8591ba015cp-8i, 0x1.70155c8ca68p-7 + -0x1.019c33afa12p-7i,
0x1.54ee7b0229cp-7 + -0x1.1b81c27ceb2p-7i, 0x1.379ce0d778p-7 + -0x1.31bdc521947p-7i,
0x1.189417f944cp-7 + -0x1.443c04dd1e1p-7i, 0x1.f08b029e2ep-8 + -0x1.52f31cee63fp-7i,
0x1.ae3f0d9e64p-8 + -0x1.5de4005c35ap-7i, 0x1.6b19b734358p-8 + -0x1.651973193e7p-7i,
0x1.27e79580ecp-8 + -0x1.68a777c74d9p-7i, 0x1.cad654f1aap-9 + -0x1.68aab363e11p-7i,
0x1.48b77e38ap-9 + -0x1.6547c82bf32p-7i, 0x1.95917a8d16p-10 + -0x1.5eaaa90732ap-7i,
0x1.491790a6c4p-11 + -0x1.5505e6cb499p-7i, -0x1.fb106d9e2p-13 + -0x1.4891f8a67ap-7i,
-0x1.14321e39f2p-10 + -0x1.398c80fdcc6p-7i, -0x1.d81aac06f6p-10 + -0x1.28379006644p-7i,
-0x1.44c7d694fcp-9 + -0x1.14d8e55b36p-7i, -0x1.93a1e9279fp-9 + -0x1.ff7263912bp-8i,
-0x1.d814885c02p-9 + -0x1.d246b4fb8f4p-8i, -0x1.08dc2c7d85p-8 + -0x1.a2c77d942c4p-8i,
-0x1.2022bb4a73p-8 + -0x1.718f02fdfbcp-8i, -0x1.31c98385218p-8 + -0x1.3f37ae07bcp-8i,
-0x1.3dcb4bb6e7p-8 + -0x1.0c5ab232968p-8i, -0x1.4431a2760e8p-8 + -0x1.b31d84ebb1p-9i,
-0x1.45145a518bcp-8 + -0x1.4ecda9f9fbp-9i, -0x1.4098eec398cp-8 + -0x1.d9c3dded6ap-10i,
-0x1.36f1d3d2724p-8 + -0x1.1cd4d634eep-10i, -0x1.285db21b68cp-8 + -0x1.a34927984p-12i,
-0x1.15269116b9p-8 + 0x1.017e87ea8p-12i, -0x1.fb41e2e3dcp-9 + 0x1.ba0041db18p-11i,
-0x1.c455b2d58dp-9 + 0x1.6b74d5925ap-10i, -0x1.8655aa3c05p-9 + 0x1.ea4c8b8d3ap-10i,
-0x1.421dd987cep-9 + 0x1.2c20c939adp-9i, -0x1.f12e4b5f1ap-10 + 0x1.5a1ddc0c47p-9i,
-0x1.556a30a1adp-10 + 0x1.7ea8f09b0ep-9i, -0x1.65ce9e687dp-11 + 0x1.99659ffe69p-9i,
-0x1.753e7ad0bp-15 + 0x1.aa10145ed3p-9i, 0x1.3c835bd60ep-11 + 0x1.b07d42b488p-9i,
0x1.48cbee91208p-10 + 0x1.ac9af49b48p-9i, 0x1.f1e61767d44p-10 + 0x1.9e6fa328f4p-9i,
0x1.4bc35902c1ep-9 + 0x1.861a23f961p-9i, 0x1.9bd6ddd6a04p-9 + 0x1.63d129e217p-9i,
0x1.e8345b02834p-9 + 0x1.37e29afdc4p-9i, 0x1.17f5a3b9a79p-8 + 0x1.02b2bdf8e2p-9i,
0x1.390ad65bd83p-8 + 0x1.8976817d5p-10i, 0x1.56ecf4b5d16p-8 + 0x1.fa286b5cap-11i,
0x1.71362b807eap-8 + 0x1.86026fa758p-12i, 0x1.8788684b2c8p-8 + -0x1.1f7bc53408p-12i,
0x1.998e0c63e14p-8 + -0x1.fb096c8c7p-11i, 0x1.a6fa8e50848p-8 + -0x1.bdcdbef8a4p-10i,
0x1.af8b090ba3ap-8 + -0x1.437e9a5546p-9i, 0x1.b306b857658p-8 + -0x1.ab9fed1a5ep-9i,
0x1.b13f619923p-8 + -0x1.0b2bea20ba8p-8i, 0x1.aa11a8d0b1cp-8 + -0x1.4155e136e6p-8i,
0x1.9d65515ec68p-8 + -0x1.77cdc29a7ap-8i, 0x1.8b2d6a6e6bp-8 + -0x1.ae11981fc8p-8i,
0x1.736866f5968p-8 + -0x1.e39e85c14p-8i, 0x1.56202160ed4p-8 + -0x1.0bf8e05501cp-7i,
0x1.3369cb1dbe4p-8 + -0x1.2544c24bp-7i, 0x1.0b65c854004p-8 + -0x1.3d7302fb7ap-7i,
0x1.bc7ef07f29p-9 + -0x1.54452fabfacp-7i, 0x1.5859d56964p-9 + -0x1.697ebc42a12p-7i,
0x1.d5ba0ddb45p-10 + -0x1.7ce57306308p-7i, 0x1.d274611b7ep-11 + -0x1.8e41dfdc5a4p-7i,
-0x1.37d29d4bep-14 + -0x1.9d5fb6613c4p-7i, -0x1.1ef9da607cp-10 + -0x1.aa0e323cdeep-7i,
-0x1.1bd4650427p-9 + -0x1.b420712607p-7i, -0x1.adec4d930bp-9 + -0x1.bb6dc60cec4p-7i,
-0x1.226f211464p-8 + -0x1.bfd204f43d4p-7i, -0x1.6fdbd5e532p-8 + -0x1.c12dc70b5fp-7i,
-0x1.bebdac355c8p-8 + -0x1.bf66a6a9a4cp-7i, -0x1.0748c8c354cp-7 + -0x1.ba6772d7aafp-7i,
-0x1.2f686a85ecp-7 + -0x1.b2205a21932p-7i, -0x1.57790440498p-7 + -0x1.a6870c7add6p-7i,
-0x1.7f34f2a1c4p-7 + -0x1.9796d40a9d9p-7i, -0x1.a6561a20cd8p-7 + -0x1.8550a4c50dfp-7i,
-0x1.cc965ba0114p-7 + -0x1.6fbb22c5a8f8p-7i, -0x1.f1b0090a4bp-7 + -0x1.56e29f6b1c6p-7i,
-0x1.0aaf2c96cf2p-6 + -0x1.3ad90d446b48p-7i, -0x1.1baeed10c14p-6 + -0x1.1bb5eaec616p-7i,
-0x1.2bb670c2f94p-6 + -0x1.f32c49fbf84p-8i, -0x1.3aa5fcf7714p-6 + -0x1.a937deb32cep-8i,
-0x1.485f266a3f6p-6 + -0x1.59dd2bfec1p-8i, -0x1.54c5043373ap-6 + -0x1.05748b9fdabp-8i,
-0x1.5fbc6055ab8p-6 + -0x1.58bf363d2a4p-9i, -0x1.692be5a84ccp-6 + -0x1.3c22bc919fp-10i,
-0x1.70fc4ad8bb6p-6 + 0x1.21dc6f15ecp-12i, -0x1.77187a4295ep-6 + 0x1.da67813eb28p-10i,
-0x1.7b6db66419bp-6 + 0x1.bbd5e766758p-9i, -0x1.7debbab9254p-6 + 0x1.478a531c26p-8i,
-0x1.7e84d8ccf78p-6 + 0x1.b2ec1307baap-8i, -0x1.7d2e1158ap-6 + 0x1.0fbfecfddb2p-7i,
-0x1.79df294a2cdp-6 + 0x1.4658db91b7p-7i, -0x1.7492ba96d32p-6 + 0x1.7cf57f5b0b4p-7i,
-0x1.6d4640c0bap-6 + 0x1.b349956b2cep-7i, -0x1.63fa20ff72cp-6 + 0x1.e9085fcb0a4p-7i,
-0x1.58b1ae00c0cp-6 + 0x1.0ef28c31733p-6i, -0x1.4b73273dcd8p-6 + 0x1.28c9b20d89p-6i,
-0x1.3c47b3e7782p-6 + 0x1.41e3e2c488p-6i, -0x1.2b3b5972e5cp-6 + 0x1.5a1c073eea2p-6i,
-0x1.185cedd5ddap-6 + 0x1.714de7e607p-6i, -0x1.03be0588c8c8p-6 + 0x1.8756635045ap-6i,
-0x1.dae5bad4931p-7 + 0x1.9c13a36578ap-6i, -0x1.ab24814caaap-7 + 0x1.af6550acedep-6i,
-0x1.786ad590ecep-7 + 0x1.c12cc379b5p-6i, -0x1.42efcc4cad1p-7 + 0x1.d14d32ace51p-6i,
-0x1.0aeecda9d8dp-7 + 0x1.dfabdfc96c4p-6i, -0x1.a14e897e405p-8 + 0x1.ec30401901cp-6i,
-0x1.28b891f33a2p-8 + 0x1.f6c422a643bp-6i, -0x1.595108089ap-9 + 0x1.ff53d2d4c64p-6i,
-0x1.6d886a637c8p-11 + 0x1.02e71bb275ap-5i, 0x1.4e584565c64p-10 + 0x1.051276db4d28p-5i,
0x1.ad21ad19dd9p-9 + 0x1.06263091c43p-5i, 0x1.5a9fd99f3bfp-8 + 0x1.061def27b71p-5i,
0x1.df212909891p-8 + 0x1.04f6d12941dp-5i, 0x1.31b8966f1818p-7 + 0x1.02af72d4487p-5i,
0x1.7375c5f599fp-7 + 0x1.fe8fe2d51fp-6i, 0x1.b47609207cfp-7 + 0x1.f583d8b37c4p-6i,
0x1.f467cbe7701p-7 + 0x1.ea41086f94bp-6i, 0x1.197d3971ddd8p-6 + 0x1.dcd0af2adbp-6i,
0x1.37ef6660216p-6 + 0x1.cd3efb56c15p-6i, 0x1.5563c0e1ba58p-6 + 0x1.bb9afdd45f5p-6i,
0x1.71b4c06916e8p-6 + 0x1.a7f696ef04dp-6i, 0x1.8cbe34a72838p-6 + 0x1.92665f4d582p-6i,
0x1.a65d789c4c48p-6 + 0x1.7b018cfa6cep-6i, 0x1.be71a3814ebp-6 + 0x1.61e1d4add4cp-6i,
0x1.d4dbb7422ecp-6 + 0x1.4723477f0248p-6i, 0x1.e97ecc47db1p-6 + 0x1.2ae42d3571f8p-6i,
0x1.fc403a52021p-6 + 0x1.0d44db6be11p-6i, 0x1.0683df132ae8p-5 + 0x1.dccf1382b3dp-7i,
0x1.0ddfcdef99b8p-5 + 0x1.9ce046ae209p-7i, 0x1.142a5ed440b8p-5 + 0x1.5b082bbbbd68p-7i,
0x1.195b675f5aep-5 + 0x1.17943cf3449p-7i, 0x1.1d6c2944ab58p-5 + 0x1.a5a8530f5p-8i,
0x1.20575bd7f22p-5 + 0x1.1a32d0e559a8p-8i, 0x1.22193386ea5p-5 + 0x1.1adb230dcf48p-9i
)
assertThat(stats:::fft(z=c(0, 0.0903382508251652, -0.692612979277417, 2.22118689394031,
-3.63146376683691, 2.37397647099501, 2.17310165159549, -6.40442160751507,
6.55905914403848, -3.49594360553802, 0.842263577161454, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0))
, identicalTo( expected, tol = 1e-6 ) )
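# Usage sketch (not part of the generated test above): a quick standalone
# sanity check on stats::fft() -- the first (DC) component of the unnormalized
# DFT equals the sum of the real input.
z <- c(0, 0.0903382508251652, -0.692612979277417, 2.22118689394031)
all.equal(Re(fft(z)[1]), sum(z))  # TRUE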
|
checkTask = function(x, cl = "Task", allow.desc = FALSE, task.type = NULL, binary = FALSE, .var.name = "task") {
if (allow.desc) {
assert(.var.name = .var.name,
checkClass(x, classes = cl),
checkClass(x, "TaskDesc")
)
} else {
assertClass(x, classes = cl, .var.name = .var.name)
}
td = getTaskDesc(x)
if (!is.null(task.type) && td$type %nin% task.type) {
stopf("Task must be one of '%s', but is: '%s'", collapse(task.type), td$type)
}
if (binary && length(td$class.levels) != 2L) {
stopf("Task '%s' must be binary classification!", td$id)
}
}
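# Usage sketch (not part of the original source): the allow.desc branch above
# relies on checkmate::assert(), which passes when any one of the supplied
# check*() results succeeds (combine = "or" is the default). A minimal
# standalone illustration, assuming checkmate is installed:
if (FALSE) {
  library(checkmate)
  x <- data.frame(a = 1)
  assert(checkClass(x, "data.frame"), checkClass(x, "TaskDesc"))  # passes: first check succeeds
}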
|
"print.gvlma" <-
function(x, ...)
{
NextMethod("print", x,...)
display.gvlmatests(x)
}
|
require(quantstrat)
source(paste0(path.package("quantstrat"),"/demo/luxor.include.R"))
strategy(strategy.st, store=TRUE)
add.indicator(strategy.st, name = "SMA",
arguments = list(
x = quote(Cl(mktdata)[,1]),
n = .fast
),
label="nFast"
)
add.indicator(strategy.st, name="SMA",
arguments = list(
x = quote(Cl(mktdata)[,1]),
n = .slow
),
label="nSlow"
)
add.signal(strategy.st, name='sigCrossover',
arguments = list(
columns=c("nFast","nSlow"),
relationship="gte"
),
label='long'
)
add.signal(strategy.st, name='sigCrossover',
arguments = list(
columns=c("nFast","nSlow"),
relationship="lt"
),
label='short'
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='long' , sigval=TRUE,
replace=TRUE,
orderside='short',
ordertype='market',
TxnFees=.txnfees,
orderqty='all',
orderset='ocoshort'
),
type='exit',
timespan = .timespan,
label='Exit2LONG'
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='short', sigval=TRUE,
replace=TRUE,
orderside='long' ,
ordertype='market',
TxnFees=.txnfees,
orderqty='all',
orderset='ocolong'
),
type='exit',
timespan = .timespan,
label='Exit2SHORT'
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='long' , sigval=TRUE,
replace=FALSE,
orderside='long' ,
ordertype='stoplimit',
prefer='High',
threshold=.threshold,
TxnFees=0,
orderqty=+.orderqty,
osFUN=osMaxPos,
orderset='ocolong'
),
type='enter',
timespan = .timespan,
label='EnterLONG'
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='short', sigval=TRUE,
replace=FALSE,
orderside='short',
ordertype='stoplimit',
prefer='Low',
threshold=.threshold,
TxnFees=0,
orderqty=-.orderqty,
osFUN=osMaxPos,
orderset='ocoshort'
),
type='enter',
timespan = .timespan,
label='EnterSHORT'
)
add.distribution(strategy.st,
paramset.label = 'SMA',
component.type = 'indicator',
component.label = 'nFast',
variable = list(n = .FastSMA),
label = 'nFAST'
)
add.distribution(strategy.st,
paramset.label = 'SMA',
component.type = 'indicator',
component.label = 'nSlow',
variable = list(n = .SlowSMA),
label = 'nSLOW'
)
add.distribution.constraint(strategy.st,
paramset.label = 'SMA',
distribution.label.1 = 'nFAST',
distribution.label.2 = 'nSLOW',
operator = '<',
label = 'SMA'
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='long' , sigval=TRUE,
replace=FALSE,
orderside='long',
ordertype='stoplimit', tmult=TRUE, threshold=quote(.stoploss),
TxnFees=.txnfees,
orderqty='all',
orderset='ocolong'
),
type='chain', parent='EnterLONG',
label='StopLossLONG',
enabled=FALSE
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='short' , sigval=TRUE,
replace=FALSE,
orderside='short',
ordertype='stoplimit', tmult=TRUE, threshold=quote(.stoploss),
TxnFees=.txnfees,
orderqty='all',
orderset='ocoshort'
),
type='chain', parent='EnterSHORT',
label='StopLossSHORT',
enabled=FALSE
)
add.distribution(strategy.st,
paramset.label = 'StopLoss',
component.type = 'chain',
component.label = 'StopLossLONG',
variable = list(threshold = .StopLoss),
label = 'StopLossLONG'
)
add.distribution(strategy.st,
paramset.label = 'StopLoss',
component.type = 'chain',
component.label = 'StopLossSHORT',
variable = list(threshold = .StopLoss),
label = 'StopLossSHORT'
)
add.distribution.constraint(strategy.st,
paramset.label = 'StopLoss',
distribution.label.1 = 'StopLossLONG',
distribution.label.2 = 'StopLossSHORT',
operator = '==',
label = 'StopLoss'
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='long' , sigval=TRUE,
replace=FALSE,
orderside='long',
ordertype='stoptrailing', tmult=TRUE, threshold=quote(.stoptrailing),
TxnFees=.txnfees,
orderqty='all',
orderset='ocolong'
),
type='chain', parent='EnterLONG',
label='StopTrailingLONG',
enabled=FALSE
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='short' , sigval=TRUE,
replace=FALSE,
orderside='short',
ordertype='stoptrailing', tmult=TRUE, threshold=quote(.stoptrailing),
TxnFees=.txnfees,
orderqty='all',
orderset='ocoshort'
),
type='chain', parent='EnterSHORT',
label='StopTrailingSHORT',
enabled=FALSE
)
add.distribution(strategy.st,
paramset.label = 'StopTrailing',
component.type = 'chain',
component.label = 'StopTrailingLONG',
variable = list(threshold = .StopTrailing),
label = 'StopTrailingLONG'
)
add.distribution(strategy.st,
paramset.label = 'StopTrailing',
component.type = 'chain',
component.label = 'StopTrailingSHORT',
variable = list(threshold = .StopTrailing),
label = 'StopTrailingSHORT'
)
add.distribution.constraint(strategy.st,
paramset.label = 'StopTrailing',
distribution.label.1 = 'StopTrailingLONG',
distribution.label.2 = 'StopTrailingSHORT',
operator = '==',
label = 'StopTrailing'
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='long' , sigval=TRUE,
replace=FALSE,
orderside='long',
ordertype='limit', tmult=TRUE, threshold=quote(.takeprofit),
TxnFees=.txnfees,
orderqty='all',
orderset='ocolong'
),
type='chain', parent='EnterLONG',
label='TakeProfitLONG',
enabled=FALSE
)
add.rule(strategy.st, name = 'ruleSignal',
arguments=list(sigcol='short' , sigval=TRUE,
replace=FALSE,
orderside='short',
ordertype='limit', tmult=TRUE, threshold=quote(.takeprofit),
TxnFees=.txnfees,
orderqty='all',
orderset='ocoshort'
),
type='chain', parent='EnterSHORT',
label='TakeProfitSHORT',
enabled=FALSE
)
add.distribution(strategy.st,
paramset.label = 'TakeProfit',
component.type = 'chain',
component.label = 'TakeProfitLONG',
variable = list(threshold = .TakeProfit),
label = 'TakeProfitLONG'
)
add.distribution(strategy.st,
paramset.label = 'TakeProfit',
component.type = 'chain',
component.label = 'TakeProfitSHORT',
variable = list(threshold = .TakeProfit),
label = 'TakeProfitSHORT'
)
add.distribution.constraint(strategy.st,
paramset.label = 'TakeProfit',
distribution.label.1 = 'TakeProfitLONG',
distribution.label.2 = 'TakeProfitSHORT',
operator = '==',
label = 'TakeProfit'
)
add.distribution(strategy.st,
paramset.label = 'WFA',
component.type = 'indicator',
component.label = 'nFast',
variable = list(n = .FastWFA),
label = 'nFAST'
)
add.distribution(strategy.st,
paramset.label = 'WFA',
component.type = 'indicator',
component.label = 'nSlow',
variable = list(n = .SlowWFA),
label = 'nSLOW'
)
add.distribution.constraint(strategy.st,
paramset.label = 'WFA',
distribution.label.1 = 'nFAST',
distribution.label.2 = 'nSLOW',
operator = '<',
label = 'WFA'
)
save.strategy(strategy.st)
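# Usage sketch (not part of the original demo file): once the strategy is
# saved, the 'SMA' paramset defined above is typically explored with
# apply.paramset(); portfolio.st, account.st and .nsamples are assumed to be
# provided by luxor.include.R, as in the other luxor demos.
if (FALSE) {
  results <- apply.paramset(strategy.st, paramset.label = 'SMA',
                            portfolio.st = portfolio.st, account.st = account.st,
                            nsamples = .nsamples, verbose = TRUE)
}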
|
get_index_quo <- function(.tbl_time) {
if(!inherits(.tbl_time, "tbl_time")) glue_stop("Object is not of class `tbl_time`.")
index_quo <- attr(.tbl_time, "index_quo")
if(is.null(index_quo)) {
glue_stop("Attribute, `index_quo`, has been lost, ",
"but class is still `tbl_time`. This should not happen unless ",
"something has gone horribly wrong.")
}
index_quo
}
get_index_char <- function(.tbl_time) {
rlang::quo_name(get_index_quo(.tbl_time))
}
get_index_col <- function(.tbl_time) {
.tbl_time[[get_index_char(.tbl_time)]]
}
get_index_time_zone <- function(.tbl_time) {
if(!inherits(.tbl_time, "tbl_time")) glue_stop("Object is not of class `tbl_time`.")
index_time_zone <- attr(.tbl_time, "index_time_zone")
if(is.null(index_time_zone)) {
glue_stop("Attribute, `index_time_zone`, has been lost, ",
"but class is still `tbl_time`. This should not happen unless ",
"something has gone horribly wrong.")
}
index_time_zone
}
get_index_class <- function(.tbl_time) {
class(get_index_col(.tbl_time))[[1]]
}
get_.index_col <- function(.tbl_time) {
to_posixct_numeric(get_index_col(.tbl_time))
}
get_index_dispatcher <- function(.tbl_time) {
make_dummy_dispatch_obj(get_index_class(.tbl_time))
}
get_default_time_zone <- function() {
"UTC"
}
get_index_col_time_zone <- function(index) {
if(inherits(index, "POSIXct")) {
(attr(index, "tzone") %||% Sys.timezone()) %||% get_default_time_zone()
} else {
get_default_time_zone()
}
}
get_index_col_class <- function(index) {
class(index)[[1]]
}
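# Usage sketch (not part of the original source): get_index_col_time_zone()
# reads the "tzone" attribute that POSIXct vectors carry; non-POSIXct indexes
# take the "UTC" default branch above. A minimal base-R illustration:
idx <- as.POSIXct("2020-01-01 09:30:00", tz = "America/New_York")
attr(idx, "tzone")         # "America/New_York"
attr(Sys.Date(), "tzone")  # NULL -- a Date index would get the default zone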
|
tar_test("fst_tbl format", {
skip_if_not_installed("fst")
skip_if_not_installed("tibble")
envir <- new.env(parent = baseenv())
envir$f <- function() {
tibble::tibble(x = 1, y = 2)
}
x <- target_init(
name = "abc",
expr = quote(f()),
format = "fst_tbl"
)
store_update_stage_early(x$store, "abc", path_store_default())
builder_update_build(x, envir = envir)
builder_update_paths(x, path_store_default())
builder_update_object(x)
exp <- envir$f()
out <- tibble::as_tibble(fst::read_fst(x$store$file$path))
expect_equal(out, exp)
expect_equal(target_read_value(x)$object, exp)
expect_silent(target_validate(x))
})
tar_test("fst_tbl coercion", {
skip_if_not_installed("fst")
skip_if_not_installed("tibble")
envir <- new.env(parent = baseenv())
envir$f <- function() {
data.frame(x = 1, y = 2)
}
x <- target_init(
name = "abc",
expr = quote(f()),
format = "fst_tbl"
)
store_update_stage_early(x$store, "abc", path_store_default())
builder_update_build(x, envir)
expect_true(inherits(x$value$object, "tbl_df"))
builder_update_paths(x, path_store_default())
builder_update_object(x)
expect_true(inherits(target_read_value(x)$object, "tbl_df"))
})
tar_test("bad compression level throws error (unstructured resources)", {
skip_if_not_installed("fst")
skip_if_not_installed("tibble")
tar_script({
list(
tar_target(
abc,
data.frame(x = 1, y = 2),
format = "fst_tbl",
resources = list(compress = "bad")
)
)
})
expect_warning(
tar_target(
abc,
data.frame(x = 1, y = 2),
format = "fst_tbl",
resources = list(compress = "bad")
),
class = "tar_condition_deprecate"
)
suppressWarnings(
expect_error(
tar_make(callr_function = NULL),
class = "tar_condition_run"
)
)
})
tar_test("fst_tbl packages", {
skip_if_not_installed("fst")
skip_if_not_installed("tibble")
x <- tar_target(x, 1, format = "fst_tbl")
out <- sort(store_get_packages(x$store))
expect_equal(out, sort(c("fst", "tibble")))
})
tar_test("does not inherit from tar_external", {
skip_if_not_installed("fst")
skip_if_not_installed("tibble")
store <- tar_target(x, "x_value", format = "fst_tbl")$store
expect_false(inherits(store, "tar_external"))
})
tar_test("store_row_path()", {
skip_if_not_installed("fst")
skip_if_not_installed("tibble")
store <- tar_target(x, "x_value", format = "fst_tbl")$store
store$file$path <- "path"
expect_equal(store_row_path(store), NA_character_)
})
tar_test("store_path_from_record()", {
skip_if_not_installed("fst")
skip_if_not_installed("tibble")
store <- tar_target(x, "x_value", format = "fst_tbl")$store
record <- record_init(name = "x", path = "path", format = "fst_tbl")
expect_equal(
store_path_from_record(store, record, path_store_default()),
path_objects(path_store_default(), "x")
)
})
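# Usage sketch (not part of the original tests): in a user pipeline, the
# "fst_tbl" format stores a tibble via fst, along the lines of:
if (FALSE) {
  library(targets)
  tar_script(list(tar_target(dat, tibble::tibble(x = 1, y = 2), format = "fst_tbl")))
  tar_make()
  tar_read(dat)
}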
|
NULL
ml_is_set <- function(x, param, ...) {
UseMethod("ml_is_set")
}
ml_is_set.ml_pipeline_stage <- function(x, param, ...) {
jobj <- spark_jobj(x)
param_jobj <- jobj %>%
invoke(ml_map_param_names(param, direction = "rs"))
jobj %>%
invoke("isSet", param_jobj)
}
ml_is_set.spark_jobj <- function(x, param, ...) {
param_jobj <- x %>%
invoke(ml_map_param_names(param, direction = "rs"))
x %>%
invoke("isSet", param_jobj)
}
ml_param_map <- function(x, ...) {
x$param_map %||% stop("'x' does not have a param map")
}
ml_param <- function(x, param, allow_null = FALSE, ...) {
ml_param_map(x)[[param]] %||%
(if (allow_null) NULL else stop("param ", param, " not found"))
}
ml_params <- function(x, params = NULL, allow_null = FALSE, ...) {
params <- params %||% names(x$param_map)
params %>%
lapply(function(param) ml_param(x, param, allow_null)) %>%
rlang::set_names(unlist(params))
}
ml_set_param <- function(x, param, value, ...) {
setter <- param %>%
ml_map_param_names(direction = "rs") %>%
{
paste0(
"set",
toupper(substr(., 1, 1)),
substr(., 2, nchar(.))
)
}
spark_jobj(x) %>%
invoke(setter, value) %>%
ml_call_constructor()
}
ml_get_param_map <- function(jobj) {
sc <- spark_connection(jobj)
object <- if (spark_version(sc) < "2.0.0") {
"sparklyr.MLUtils"
} else {
"sparklyr.MLUtils2"
}
invoke_static(
sc,
object,
"getParamMap",
jobj
) %>%
ml_map_param_list_names()
}
ml_map_param_list_names <- function(x, direction = c("sr", "rs"), ...) {
direction <- rlang::arg_match(direction)
mapping <- if (identical(direction, "sr")) {
.globals$param_mapping_s_to_r
} else {
.globals$param_mapping_r_to_s
}
rlang::set_names(
x,
unname(
sapply(
names(x),
function(nm) rlang::env_get(mapping, nm, default = NULL, inherit = TRUE) %||% nm
)
)
)
}
ml_map_param_names <- function(x, direction = c("sr", "rs"), ...) {
direction <- rlang::arg_match(direction)
mapping <- if (identical(direction, "sr")) {
.globals$param_mapping_s_to_r
} else {
.globals$param_mapping_r_to_s
}
unname(
sapply(
x,
function(nm) rlang::env_get(mapping, nm, default = NULL, inherit = TRUE) %||% nm
)
)
}
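# Usage sketch (not part of the original source): ml_set_param() builds the
# Spark setter name by upper-casing the first letter of the Spark-side param
# name and prefixing "set". A minimal base-R illustration, assuming the param
# name "maxIter":
param <- "maxIter"
paste0("set", toupper(substr(param, 1, 1)), substr(param, 2, nchar(param)))  # "setMaxIter"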
|
OPC3d <- function (OPC_Output_Object,
binColors = hsv(h=(seq(10, 290, 40)/360), s=0.9, v=0.85),
patchOutline = FALSE, outlineColor = "black", maskDiscard = FALSE,
legend = TRUE, legendScale= 1, legendTextCol = "black",
legendLineCol = "black", leftOffset = 1, fieldofview = 0,
fileName = NA, binary = FALSE)
{
plyFile <- OPC_Output_Object$plyFile
bins <- plyFile$Directional_Bins
BinCount <- as.numeric(length(unique(plyFile$Directional_Bins)))
BlackPatch <- NULL
for (i in 1:BinCount) {
Bin <- which(bins == i)
bins[Bin] <- binColors[i]
if (maskDiscard == TRUE) {
if(OPC_Output_Object$Parameters$Minimum_Area==0){
PatchList <- unlist(OPC_Output_Object$Patches[i],
recursive = F)
SmallPatch <- names(which(lapply(PatchList, length) <
OPC_Output_Object$Parameters$Minimum_Faces))
Discarded <- as.numeric(unlist(PatchList[SmallPatch]))
BlackPatch <- c(BlackPatch, Discarded)
}
if(OPC_Output_Object$Parameters$Minimum_Area>0){
AreaList <- as.vector(OPC_Output_Object$Patch_Details[[i]][,2])
MinAreaPercentage <- sum(OPC_Output_Object$plyFile$Face_Areas)*
OPC_Output_Object$Parameters$Minimum_Area
SmallPatchList <- which(AreaList < MinAreaPercentage)
Discarded <- as.numeric(unlist(OPC_Output_Object$Patches[[i]][SmallPatchList]))
}
BlackPatch <- c(BlackPatch, Discarded)
}
}
colormatrix <- bins
if (maskDiscard == TRUE) {
    colormatrix[BlackPatch] <- "#000000"  # color literal truncated in the source; black assumed for discarded patches
}
open3d()
par3d(windowRect = c(100, 100, 900, 900))
if (patchOutline == TRUE) {
for (i in 1:BinCount) {
Orientation <- OPC_Output_Object$Patches[i]
PatchCount <- as.numeric(length(Orientation[[1]]))
for (j in 1:PatchCount) {
Patch <- Orientation[[1]][j]
Patch <- as.numeric(Patch[[1]])
Faces <- t(plyFile$it[, Patch])
fnum <- length(Faces[, 1])
vorder <- vector("list", fnum)
for (i in 1:fnum) {vorder[[i]] <- unlist(sort(Faces[i, ]))}
edges <- vector("list", fnum)
for (i in 1:fnum) {
Ordered <- vorder[[i]]
G1 <- Ordered[1]
G2 <- Ordered[2]
G3 <- Ordered[3]
ED1 <- paste(G1, G2, sep = "_")
ED2 <- paste(G1, G3, sep = "_")
ED3 <- paste(G2, G3, sep = "_")
edges[[i]] <- paste(ED1, ED2, ED3, sep = ",")
}
for (i in 1:fnum) {edges[[i]] <- unlist(strsplit(edges[[i]], ","))}
string <- unlist(edges)
edgeframe <- data.frame(names = string)
UniqueEdge <- aggregate(edgeframe, list(edgeframe$names), FUN = length)
PatchEdge <- subset(UniqueEdge, UniqueEdge$names == 1)
EdgeVerts <- as.numeric(unlist(strsplit(as.character(unlist(PatchEdge$Group.1)), "_")))
EdgeCoords <- plyFile$vb[1:3, EdgeVerts]
segments3d(t(EdgeCoords), color = outlineColor,
lwd = 1.25, shininess = 120)
}
}
}
shade3d(plyFile, meshColor='faces', color = colormatrix, shininess = 100)
if (legend == TRUE) {
if(legendScale <= 0){stop("legendScale must be a positive number")}
if(legendScale > 1.05){
warning("legendScale greater than 1.05 will restrict legend visibility")
}
    Fills <- rep("#FFFFFF", BinCount)  # color literal truncated in the source; placeholder white, overwritten in the loop below
for (i in 1:BinCount) {
Fills[i] <- binColors[i]
}
molaR_bgplot(OPC_Legend(binColors=Fills, binNumber = BinCount, maskDiscard = maskDiscard,
size = legendScale, textCol=legendTextCol, lineCol=legendLineCol))
}
if (leftOffset > 1) {warning("Left offset greater than 1 may restrict mesh visibility")}
if (leftOffset < -1) {warning("Left offset less than -1 may restrict mesh visibility")}
rgl.viewpoint(fov = fieldofview)
ZView <- par3d("observer")[3]
XView <- leftOffset * ZView *0.055
observer3d(XView, 0, ZView)
if(!is.na(fileName)){
if(!is.character(fileName)){stop("Enter a name for fileName")}
if(substr(fileName, nchar(fileName)-3, nchar(fileName))!=".ply"){
fileName <- paste(fileName, ".ply", sep="")
}
OutPly <- plyFile
NewVertList <- plyFile$vb[,plyFile$it[1:length(plyFile$it)]]
NewNormList <- plyFile$normals[,plyFile$it[1:length(plyFile$it)]]
NewFaceList <- matrix(1:ncol(NewVertList), nrow=3)
colormatrix <- matrix(rep(colormatrix, 3), nrow = 3, byrow = TRUE)
NewColorList <- colormatrix[1:length(colormatrix)]
OutPly$vb <- NewVertList
OutPly$it <- NewFaceList
OutPly$normals <- NewNormList
OutPly$material$color <- NewColorList
vcgPlyWrite(mesh=OutPly, filename = fileName, binary = binary)
if(binary==FALSE){
FileText <- readLines(con=paste(getwd(), "/", fileName, sep=""), warn = F)
NewCom <- paste("comment OPC plot generated in molaR",
packageVersion("molaR"), "for", R.version.string)
NewCom <- unlist(strsplit(NewCom, split='\n'))
NewOut <- c(FileText[1:3], NewCom, FileText[(4):length(FileText)])
writeLines(NewOut, con=paste(getwd(), "/", fileName, sep=""))
}
}
}
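# Usage sketch (not part of the original source): OPC3d() renders the list
# returned by molaR::OPC(). Assuming the package's bundled Tooth mesh:
if (FALSE) {
  library(molaR)
  data(Tooth)
  opc_out <- OPC(Tooth)
  OPC3d(opc_out, maskDiscard = TRUE)
}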
|