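# Unit tests (testthat) for the house price index series functions in the hpiR package:
# createSeries(), smoothSeries(), calcVolatility(), calcSeriesVolatility(), calcAccuracy(),
# calcInSampleError(), calcKFoldError(), buildForecastIDs(), calcForecastError() and calcRevision().
# The Seattle sales example data shipped with hpiR is used to build repeat-transaction and hedonic indexes.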
library(hpiR)
library(testthat)
sales <- get(data(seattle_sales))
sales_df <- dateToPeriod(trans_df = sales,
date = 'sale_date',
periodicity = 'monthly')
hed_df <- hedCreateTrans(trans_df = sales,
prop_id = 'pinx',
trans_id = 'sale_id',
price = 'sale_price',
date = 'sale_date',
periodicity = 'monthly')
rt_df <- rtCreateTrans(trans_df = sales_df,
prop_id = 'pinx',
trans_id = 'sale_id',
price = 'sale_price')
hed_index <- hedIndex(trans_df = hed_df,
estimator = 'weighted',
log_dep = FALSE,
dep_var = 'price',
ind_var = c('tot_sf', 'beds', 'baths'),
weights = runif(nrow(hed_df), 0, 1),
smooth = TRUE)
rt_index <- rtIndex(trans_df = rt_df,
estimator = 'base',
log_dep = TRUE,
periodicity = 'monthly',
smooth = TRUE)
context('createSeries()')
test_that('Index Series works', {
expect_is(hed_series <- createSeries(hpi_obj = hed_index,
train_period = 24),
'serieshpi')
expect_is(rt_series <- createSeries(hpi_obj = rt_index,
train_period = 24,
max_period = 50),
'serieshpi')
expect_true(length(rt_series$hpis) == 27)
})
test_that('Parameter arguments work',{
expect_true(length(hed_series <- createSeries(hpi_obj = hed_index,
train_period = 12,
max_period = 150)$hpis) == 73)
})
test_that('Bad arguments create errors',{
expect_error(hed_series <- createSeries(hpi_obj = hed_index$index,
train_period = 24,
max_period = 50))
expect_error(hed_series <- createSeries(hpi_obj = hed_index,
train_period = 'x',
max_period = 50))
expect_error(hed_series <- createSeries(hpi_obj = hed_index,
train_period = 99,
max_period = 50))
})
context('smoothSeries()')
rt_series <- createSeries(hpi_obj = rt_index, train_period = 24)
test_that('smoothSeries() works as intended', {
expect_is(rt_series <- smoothSeries(series_obj = rt_series,
order = 5),
'serieshpi')
expect_is(rt_series$hpis[[1]]$index$smooth, 'indexsmooth')
expect_is(rt_series$hpis[[1]]$index, 'hpiindex')
})
test_that('smoothSeries() breaks with bad arguments.',{
expect_error(rt_sseries <- smoothSeries(series_obj = rt_series[[1]],
order = 5))
expect_error(rt_sseries <- smoothSeries(series_obj = rt_series,
order = -1))
})
hed_series <- createSeries(hpi_obj = hed_index,
train_period = 24,
max_period = 30)
rt_series <- createSeries(hpi_obj = rt_index,
train_period = 24)
rt_series <- smoothSeries(rt_series)
context('calcVolatility()')
test_that('Volatility Function works with a variety of inputs',{
expect_is(index_vol <- calcVolatility(index = hed_index$index$value,
window = 3),
'indexvolatility')
expect_is(index_vol <- calcVolatility(index = hed_index$index,
window = 3),
'indexvolatility')
expect_is(index_vol <- calcVolatility(index = hed_index,
window = 3),
'indexvolatility')
})
test_that('Volatility Function works for smoothed indexes',{
expect_is(index_vol <- calcVolatility(index = hed_index$index$smooth,
window = 3),
'indexvolatility')
expect_is(index_vol <- calcVolatility(index = hed_index$index,
window = 3,
smooth = TRUE),
'indexvolatility')
ex_index <- hed_index
ex_index$index$smooth <- NULL
expect_error(calcVolatility(index = ex_index,
window = 3,
smooth = TRUE), 'No smoothed')
expect_error(calcVolatility(index = ex_index$index,
window = 3,
smooth = TRUE), 'No smoothed')
expect_is(index_vol <- calcVolatility(index = hed_index,
window = 3,
smooth = TRUE),
'indexvolatility')
})
test_that('Errors are given when index is bad',{
expect_error(index_vol <- calcVolatility(index = 'abc',
window = 3))
expect_error(index_vol <- calcVolatility(index = hed_index$index,
window = -1))
expect_error(index_vol <- calcVolatility(index = hed_index$index,
window = 'x'))
expect_error(index_vol <- calcVolatility(index = hed_index$index,
window = NA_integer_,
smooth = TRUE))
})
test_that('Returning in place works',{
expect_is(index_vol <- calcVolatility(index = hed_index$index$value,
window = 3,
in_place = TRUE),
'indexvolatility')
expect_is(hed_index$index <- calcVolatility(index = hed_index$index,
window = 3,
in_place = TRUE),
'hpiindex')
expect_is(hed_index$index <- calcVolatility(index = hed_index$index,
window = 3,
in_place = TRUE,
smooth = TRUE),
'hpiindex')
expect_is(hed_index$index$volatility_smooth, 'indexvolatility')
expect_is(hed_index <- calcVolatility(index = hed_index,
window = 3,
in_place = TRUE),
'hpi')
expect_is(hed_index <- calcVolatility(index = hed_index,
window = 3,
in_place = TRUE,
smooth = TRUE),
'hpi')
expect_is(hed_index$index$volatility_smooth, 'indexvolatility')
expect_is(hed_index <- calcVolatility(index = hed_index,
window = 3,
in_place = TRUE,
in_place_name = 'xxx'),
'hpi')
expect_is(hed_index$index$xxx, 'indexvolatility')
})
context('calcSeriesVolatility()')
test_that('Volatility Function works with a variety of inputs',{
expect_is(series_vol <- calcSeriesVolatility(series_obj = rt_series,
window = 3),
'serieshpi')
expect_is(series_vol <- calcSeriesVolatility(series_obj = rt_series,
window = 3,
smooth = TRUE),
'serieshpi')
expect_true('volatility' %in% names(series_vol))
})
test_that('Fails if bad arguments', {
expect_error(series_vol <- calcSeriesVolatility(series_obj = index,
window = 3))
})
context('calcAccuracy() before error functions')
test_that('bad arguments fail',{
expect_error(calcAccuracy(hpi_obj = 'xxx'))
expect_error(calcAccuracy(hpi_obj = hed_index$data))
expect_error(calcAccuracy(hpi_obj = hed_index,
test_type = 'rt'))
expect_error(calcAccuracy(hpi_obj = rt_index,
test_type = 'hed'))
expect_error(calcAccuracy(hpi_obj = hed_index,
test_type = 'rt',
pred_df = hed_index$data))
expect_error(calcAccuracy(hpi_obj = rt_index,
test_type = 'hed',
pred_df = rt_index$data))
expect_error(calcAccuracy(hpi_obj = rt_index,
test_type = 'rt',
test_method = 'x'))
expect_error(calcAccuracy(hpi_obj = rt_index,
test_type = 'x',
test_method = 'insample'))
})
context('calcInSampleError()')
test_that('in sample error fails with bad arguments',{
expect_error(rt_error <- calcInSampleError(pred_df = hed_index,
index = hed_index$index$value))
expect_error(rt_error <- calcInSampleError(pred_df = rt_index$data,
index = hed_index$index))
})
test_that('in sample error works',{
expect_is(rt_error <- calcInSampleError(pred_df = rt_index$data,
index = hed_index$index$value),
'hpiaccuracy')
expect_is(rt_error <- calcInSampleError(pred_df = rt_index$data,
index = hed_index$index$smooth),
'hpiaccuracy')
expect_is(rt_error <- calcInSampleError(pred_df = rt_index$data[1:4, ],
index = hed_index$index$value),
'hpiaccuracy')
expect_is(rt_error <- calcInSampleError(pred_df = rt_index$data[0, ],
index = hed_index$index$value),
'hpiaccuracy')
})
context('calcKFoldError()')
test_that('kFold error fails with bad arguments',{
expect_error(rt_error <- calcKFoldError(hpi_obj = hed_index$index,
pred_df = rt_index$data))
expect_error(rt_error <- calcKFoldError(hpi_obj = hed_index,
pred_df = rt_index))
expect_error(rt_error <- calcKFoldError(hpi_obj = hed_index,
pred_df = rt_index$data,
k = 'a'))
expect_error(rt_error <- calcKFoldError(hpi_obj = hed_index,
pred_df = rt_index$data,
seed = 'x'))
})
test_that('kfold works',{
expect_is(rt_error <- calcKFoldError(hpi_obj = hed_index,
pred_df = rt_index$data),
'hpiaccuracy')
expect_true(ncol(rt_error) == 6)
expect_is(rt_error <- calcKFoldError(hpi_obj = hed_index,
pred_df = rt_index$data,
smooth = TRUE),
'hpiaccuracy')
expect_is(rt_error <- calcKFoldError(hpi_obj = hed_index,
pred_df = rt_index$data[1:40, ]),
'hpiaccuracy')
expect_is(rt_error <- calcKFoldError(hpi_obj = hed_index,
pred_df = rt_index$data[0, ]),
'hpiaccuracy')
})
context('calcAccuracy() after error functions')
test_that('calcAccuracy works with insample errors',{
expect_is(rt_error <- calcAccuracy(hpi_obj = rt_index,
test_type = 'rt',
test_method = 'insample',
pred_df = rt_index$data),
'hpiaccuracy')
expect_true(ncol(rt_error) == 6)
expect_is(hed_index <- calcAccuracy(hpi_obj = hed_index,
test_type = 'rt',
test_method = 'insample',
pred_df = rt_index$data,
in_place = TRUE,
in_place_name ='acc'),
'hpi')
expect_is(hed_index$index$acc, 'hpiaccuracy')
expect_true(attr(hed_index$index$acc, 'test_method') == 'insample')
})
test_that('calcAccuracy works with kfold errors',{
expect_is(rt_error <- calcAccuracy(hpi_obj = rt_index,
test_type = 'rt',
test_method = 'kfold',
pred_df = rt_index$data),
'hpiaccuracy')
expect_true(ncol(rt_error) == 6)
expect_is(hed_index <- calcAccuracy(hpi_obj = hed_index,
test_type = 'rt',
test_method = 'kfold',
pred_df = rt_index$data,
in_place = TRUE,
in_place_name = 'errors'),
'hpi')
expect_is(hed_index$index$errors, 'hpiaccuracy')
expect_true(attr(hed_index$index$errors, 'test_method') == 'kfold')
})
context('calcSeriesAccuracy()')
test_that('calcSeriesAccuracy() fails with bad arguments',{
expect_error(rt_series <- calcSeriesAccuracy(series_obj = rt_series$data,
test_method = 'insample',
test_type = 'rt'))
expect_error(rt_series <- calcSeriesAccuracy(series_obj = rt_series,
test_method = 'xxx',
test_type = 'rt',
smooth = TRUE))
expect_error(rt_series <- calcSeriesAccuracy(series_obj = rt_series,
test_method = 'kfold',
test_type = 'rtx',
smooth = TRUE))
expect_error(hed_series <- calcSeriesAccuracy(series_obj = hed_series,
test_method = 'insample',
test_type = 'rt'))
})
test_that('calcSeriesAccuracy() insample works',{
expect_is(rt_series <- calcSeriesAccuracy(series_obj = rt_series,
test_method = 'insample',
test_type = 'rt',
smooth = TRUE,
in_place = TRUE),
'serieshpi')
expect_true('accuracy_smooth' %in% names(rt_series))
expect_is(rt_series$accuracy_smooth, 'seriesaccuracy')
expect_error(hed_series <- calcSeriesAccuracy(series_obj = hed_series,
test_method = 'insample',
test_type = 'rt',
smooth = TRUE,
pred_df = rt_series$data))
})
test_that('calcSeriesAccuracy() kfold works',{
expect_error(hed_series <- calcSeriesAccuracy(series_obj = hed_series,
test_method = 'kfold',
test_type = 'rt',
smooth = TRUE,
pred_df = rt_series$data))
})
test_that('calcSeriesAccuracy() summarize works',{
expect_true(nrow(calcSeriesAccuracy(series_obj = rt_series,
test_method = 'insample',
test_type = 'rt',
summarize = TRUE,
in_place = TRUE)$accuracy) == 5102)
})
context('buildForecastIDs()')
test_that('buildForecastIDs works', {
expect_true(length(is_data <- buildForecastIDs(time_cut = 33,
hpi_df = hed_index$data,
train = TRUE)) == 11863)
expect_true(length(is_data <- buildForecastIDs(time_cut = 33,
hpi_df = rt_index$data,
train = TRUE)) == 287)
expect_true(length(is_data <- buildForecastIDs(time_cut = 33,
hpi_df = hed_index$data,
forecast_length = 2,
train = FALSE)) == 960)
expect_true(length(is_data <- buildForecastIDs(time_cut = 33,
hpi_df = rt_index$data,
train = FALSE)) == 21)
})
test_that('buildForecastIDs() does not work with bad arguments',{
expect_error(is_data <- buildForecastIDs(time_cut = 33,
hpi_df = hed_index,
train = TRUE))
expect_error(is_data <- buildForecastIDs(time_cut = -1,
hpi_df = hed_index$data,
train = TRUE))
expect_error(is_data <- buildForecastIDs(time_cut = 33,
forecast_length = 'x',
hpi_df = hed_index$data,
train = TRUE))
})
context('calcForecastError()')
test_that('forecast fails with bad arguments',{
expect_error(rt_acc <- calcForecastError(is_obj = hed_index,
pred_df = rt_index$data))
expect_error(rt_acc <- calcForecastError(is_obj = hed_series,
pred_df = rt_index))
expect_error(hed_acc <- calcForecastError(is_obj = hed_series,
pred_df = rt_index$data,
smooth = TRUE))
expect_error(hed_acc <- calcForecastError(is_obj = hed_series,
pred_df = rt_index$data,
forecast_length = 'x'))
})
test_that('Forecast works',{
expect_is(hed_acc <- calcForecastError(is_obj = hed_series,
pred_df = rt_index$data),
'hpiaccuracy')
expect_is(hed_acc <- calcForecastError(is_obj = hed_series,
pred_df = rt_index$data,
forecast_length = 3),
'seriesaccuracy')
expect_is(rt_acc <- calcForecastError(is_obj = rt_series,
pred_df = rt_index$data,
smooth = TRUE),
'hpiaccuracy')
expect_is(rt_acc <- calcForecastError(is_obj = rt_series,
pred_df = rt_index$data[1:40, ]),
'hpiaccuracy')
expect_is(rt_acc <- calcForecastError(is_obj = rt_series,
pred_df = rt_index$data[0, ],
smooth=TRUE),
'hpiaccuracy')
})
test_that('calcSeriesAccuracy works with forecast',{
expect_is(rt_series <- calcSeriesAccuracy(series_obj = rt_series,
test_type = 'rt',
test_method = 'forecast',
pred_df = rt_series$data,
smooth = TRUE,
in_place = TRUE),
'serieshpi')
expect_is(rt_series$accuracy_smooth, 'seriesaccuracy')
})
context('calcRevision()')
test_that('calcRevision() works',{
expect_is(hed_rev <- calcRevision(series_obj = hed_series),
'seriesrevision')
expect_is(hed_series <- calcRevision(series_obj = hed_series,
in_place = TRUE),
'serieshpi')
expect_is(hed_series$revision, 'seriesrevision')
expect_is(rt_rev_s <- calcRevision(series_obj = rt_series,
smooth = TRUE),
'seriesrevision')
expect_is(rt_series <- calcRevision(series_obj = rt_series,
smooth = TRUE,
in_place = TRUE),
'serieshpi')
expect_is(rt_series$revision_smooth, 'seriesrevision')
})
test_that('Bad arguments create errors',{
expect_error(hed_rev <- calcRevision(series_obj = hed_series$data))
expect_error(hed_rev <- calcRevision(series_obj = hed_series,
smooth = TRUE))
}) |
"TrepDF" |
tripSplit <- function(
dataGroup, colony, innerBuff = NULL, returnBuff = NULL, duration = NULL,
gapLimit = NULL, nests=FALSE, rmNonTrip=FALSE, verbose=TRUE) {
if (is.null(gapLimit)) {gapLimit <- 365 * 24}
if (!"data.frame" %in% class(dataGroup)) {
stop("dataGroup must be data.frame")
}
if (is.null(duration)) {
message(
"No duration specified, trips splitting will be done using only innerBuff and
returnBuff.")
}
if (nrow(colony) > 1 & nests == FALSE) {
stop(
"colony object has multiple locations. Did you mean to set nests=TRUE")
}
dataGroup <- dataGroup %>%
mutate(DateTime = lubridate::ymd_hms(.data$DateTime)) %>%
mutate(tripID = .data$ID) %>%
arrange(.data$ID, .data$DateTime)
dup_check <- dataGroup %>%
group_by(.data$ID) %>%
mutate(duplicates = duplicated(.data$DateTime)) %>%
ungroup() %>%
dplyr::summarise(duplicates = sum(.data$duplicates))
if (dup_check$duplicates > 0) {message(
"WARNING:dataset may contain duplicated data, this will affect trip-splitting"
)}
DataGroup <- SpatialPointsDataFrame(
SpatialPoints(
data.frame(dataGroup$Longitude, dataGroup$Latitude),
proj4string=sp::CRS("+proj=longlat +datum=WGS84 +no_defs")
),
data = dataGroup, match.ID=FALSE)
DataGroup.Projected <- DataGroup
for(nid in seq_len(length(unique(dataGroup$ID)))) {
TrackIn <- base::subset(
DataGroup.Projected,
DataGroup.Projected$ID == unique(DataGroup.Projected$ID)[nid])
TrackOut <- splitSingleID(
Track=TrackIn, colony=colony,
innerBuff = innerBuff, returnBuff = returnBuff,
duration = duration, gapLimit = gapLimit, nests=nests, verbose = verbose)
if (nid == 1) {Trips <- TrackOut} else {
Trips <- maptools::spRbind(Trips, TrackOut)
}
}
if (rmNonTrip==TRUE) {
Trips <- Trips[Trips$tripID != "-1",]
}
return(Trips)
}
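# splitSingleID(): trip-splitting for a single individual's track (a SpatialPointsDataFrame).
# Distances to the colony are computed in metres (spDistsN1 * 1000), with innerBuff and
# returnBuff supplied in km and converted. tripID is set to "-1" for points at the colony and
# to "<ID>_<nn>" for successive trips; the Returns and StartsOut columns flag trips that never
# return to the colony or that begin away from it.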
splitSingleID <- function(
Track, colony, innerBuff = 15, returnBuff = 45, duration = 12,
gapLimit = gapLimit, nests = FALSE, verbose = verbose){
if (!"Latitude" %in% colnames(colony) | !"Longitude" %in% colnames(colony)){
stop("colony missing Latitude or Longitude field: add or rename.")}
if (nests == TRUE) {
if (!"ID" %in% names(colony)) stop("colony missing ID field")
nest <- colony[match(unique(Track$ID), colony$ID),]
colonyWGS <- SpatialPoints(
data.frame(nest$Longitude, nest$Latitude),
proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs")
)
} else {
colonyWGS <- SpatialPoints(
data.frame(colony$Longitude, colony$Latitude),
proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs")
)
}
Track$X <- Track@coords[,1]
Track$Y <- Track@coords[,2]
Track$Returns <- ""
Track$StartsOut <- ""
Track$tripID <- 0
Track$ColDist <- spDistsN1(Track, colonyWGS, longlat = TRUE) * 1000
steptime <- c(abs(as.numeric(
difftime(Track$DateTime[seq_len(nrow(Track)) - 1],
Track$DateTime[seq.int(2, nrow(Track))], units = "hours"))), NA)
Trip.Sequence <- 0
trip_dur <- 0
Max.Dist <- 0
returnBuff <- returnBuff * 1000
innerBuff <- innerBuff * 1000
if (is.null(duration)) {duration <- 0.0001}
i <- 0
while (i < base::nrow(Track)) {
i <- i + 1
if (Track$ColDist[i] < innerBuff) {Track$tripID[i] <- "-1"} else {
k <- i
Dist <- Track$ColDist[i]
if (i == nrow(Track)) {Track$tripID[i] <- "-1"
break
}
if (i > 1 & Track$tripID[i] == "-1") {i <- i - 1}
while ((Dist >= innerBuff)) {
if (k == nrow(Track) & Dist < returnBuff) {break} else {
if (k == nrow(Track)) {
if (verbose == TRUE) {
message(
paste("track ", Track$ID[1], Trip.Sequence + 1,
" does not return to the colony", sep = "")
)
}
Track$Returns[i:k] <- "No"
break
} else if (steptime[k] > gapLimit) {
if (Dist > returnBuff) {
Track$Returns[i:k] <- "No"
} else {Track$Returns[i:k] <- "Yes"}
break
}
}
k <- k + 1
Dist <- Track$ColDist[k]
}
trip_dur <- as.numeric(
difftime(Track$DateTime[k], Track$DateTime[i], units = "hours")
)
Max.Dist <- max(Track$ColDist[i:k])
if (trip_dur < duration | Max.Dist < innerBuff) {
Track$tripID[i:k] <- "-1"
i <- k
next
}
Trip.Sequence <- Trip.Sequence + 1
if (i == 1) {
if (verbose == TRUE) {
message(
paste0("track ", Track$ID[1], sprintf("%02d", Trip.Sequence),
" starts out on trip", sep = "")
)
}
Track$StartsOut[i:k] <- "Yes"
Track$tripID[i:k] <- paste(Track$ID[1],
sprintf("%02d", Trip.Sequence), sep = "_")
} else {
Track$tripID[i:k] <- paste(Track$ID[1],
sprintf("%02d", Trip.Sequence), sep = "_")
}
i <- k
}
}
Track$Returns <- ifelse(
Track$Returns != "No" & Track$tripID != "-1", "Yes", Track$Returns
)
return(Track)
} |
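# package_delete(): marks a CKAN dataset as deleted by issuing the 'package_delete' API action
# for the given id and returns the success flag parsed from the JSON response.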
package_delete <- function(id, url = get_default_url(),
key = get_default_key(), ...) {
id <- as.ckan_package(id, url = url, key = key)
tmp <- ckan_POST(url, 'package_delete', body = list(id = id$id), key = key,
opts = list(...))
jsonlite::fromJSON(tmp)$success
} |
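# getMinorMajorCopyNumbers(): parses copy-number region labels of the form "(C1,C2)" into a
# numeric matrix with columns C1 and C2 and one row per region (rows named by the input labels).
# Illustrative call (not from the source): getMinorMajorCopyNumbers(c("(1,1)", "(2,0)")) yields a
# 2 x 2 matrix with rows (1,1) -> 1, 1 and (2,0) -> 2, 0.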
getMinorMajorCopyNumbers <- function(region) {
pattern <- "\\(([0-9]+),([0-9]+)\\)"
C1 <- gsub(pattern, "\\1", region)
C2 <- gsub(pattern, "\\2", region)
mat <- cbind(C1=as.numeric(C1), C2=as.numeric(C2))
rownames(mat) <- region
mat
} |
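# run.pc.tsne(): runs Rtsne on the selected principal components (columns 'dims' of the pca.data
# slot) of an iCellR object, attaches the 2-D embedding to the object as tsne.data and, when
# add.3d = TRUE, also stores a 3-D embedding as tsne.data.3d.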
run.pc.tsne <- function (x = NULL,
dims = 1:10, my.seed = 0,add.3d = TRUE,
initial_dims = 50, perplexity = 30,
theta = 0.5, check_duplicates = FALSE, pca = TRUE, max_iter = 1000,
verbose = FALSE, is_distance = FALSE, Y_init = NULL,
pca_center = TRUE, pca_scale = FALSE,
stop_lying_iter = ifelse(is.null(Y_init), 250L, 0L),
mom_switch_iter = ifelse(is.null(Y_init), 250L, 0L), momentum = 0.5,
final_momentum = 0.8, eta = 200, exaggeration_factor = 12) {
if ("iCellR" != class(x)[1]) {
stop("x should be an object of class iCellR")
}
set.seed(my.seed)
DATA <- x@pca.data
TransPosed <- DATA[dims]
tsne <- Rtsne(TransPosed, dims = 2,
initial_dims = initial_dims, perplexity = perplexity,
theta = theta, check_duplicates = check_duplicates, pca = pca, max_iter = max_iter,
verbose = verbose, is_distance = is_distance, Y_init = Y_init,
pca_center = pca_center, pca_scale = pca_scale,
stop_lying_iter = stop_lying_iter,
mom_switch_iter = mom_switch_iter, momentum = momentum,
final_momentum = final_momentum, eta = eta, exaggeration_factor = exaggeration_factor)
tsne.data = as.data.frame(tsne$Y)
tsne.data = cbind(cells = row.names(TransPosed),tsne.data)
rownames(tsne.data) <- tsne.data$cells
tsne.data <- tsne.data[,-1]
attributes(x)$tsne.data <- tsne.data
if (add.3d == TRUE) {
tsne <- Rtsne(TransPosed, dims = 3,
initial_dims = initial_dims, perplexity = perplexity,
theta = theta, check_duplicates = check_duplicates, pca = pca, max_iter = max_iter,
verbose = verbose, is_distance = is_distance, Y_init = Y_init,
pca_center = pca_center, pca_scale = pca_scale,
stop_lying_iter = stop_lying_iter,
mom_switch_iter = mom_switch_iter, momentum = momentum,
final_momentum = final_momentum, eta = eta, exaggeration_factor = exaggeration_factor)
tsne.data = as.data.frame(tsne$Y)
tsne.data = cbind(cells = row.names(TransPosed),tsne.data)
rownames(tsne.data) <- tsne.data$cells
tsne.data <- tsne.data[,-1]
attributes(x)$tsne.data.3d <- tsne.data
}
return(x)
} |
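# cusumSq(): recursive CUSUM-of-squares stability test. Recursive residuals are obtained with
# recresid(), their cumulative squared sums are plotted against time, and the significance
# bounds are taken from the tabulated critical values below ('table': first column = sample
# size, first row = significance levels) at level alpha.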
cusumSq <- function(model, alpha = 0.05){
table <- matrix( c(NA, 0.1 , 0.05, 0.025, 0.01, 0.005,
1 , 0.4 , 0.45 , 0.475 , 0.49 , 0.495 ,
2 , 0.35044 , 0.44306 , 0.50855 , 0.56667 , 0.59596 ,
3 , 0.35477 , 0.41811 , 0.46702 , 0.53456 , 0.579 ,
4 , 0.33435 , 0.39075 , 0.44641 , 0.50495 , 0.5421 ,
5 , 0.31556 , 0.37359 , 0.42174 , 0.47692 , 0.51576 ,
6 , 0.30244 , 0.35522 , 0.40045 , 0.4544 , 0.48988 ,
7 , 0.28991 , 0.33905 , 0.38294 , 0.43337 , 0.46761 ,
8 , 0.27828 , 0.32538 , 0.36697 , 0.41522 , 0.44819 ,
9 , 0.26794 , 0.31325 , 0.35277 , 0.39922 , 0.43071 ,
10 , 0.25884 , 0.30221 , 0.34022 , 0.38481 , 0.41517 ,
11 , 0.25071 , 0.29227 , 0.32894 , 0.37187 , 0.40122 ,
12 , 0.24325 , 0.2833 , 0.31869 , 0.36019 , 0.38856 ,
13 , 0.23639 , 0.27515 , 0.30935 , 0.34954 , 0.37703 ,
14 , 0.2301 , 0.26767 , 0.30081 , 0.3398 , 0.36649 ,
15 , 0.2243 , 0.26077 , 0.29296 , 0.33083 , 0.35679 ,
16 , 0.21895 , 0.25439 , 0.2857 , 0.32256 , 0.34784 ,
17 , 0.21397 , 0.24847 , 0.27897 , 0.31489 , 0.33953 ,
18 , 0.20933 , 0.24296 , 0.2727 , 0.30775 , 0.33181 ,
19 , 0.20498 , 0.23781 , 0.26685 , 0.30108 , 0.32459 ,
20 , 0.20089 , 0.23298 , 0.26137 , 0.29484 , 0.31784 ,
21 , 0.19705 , 0.22844 , 0.25622 , 0.28898 , 0.31149 ,
22 , 0.19343 , 0.22416 , 0.25136 , 0.28346 , 0.30552 ,
23 , 0.19001 , 0.22012 , 0.24679 , 0.27825 , 0.29989 ,
24 , 0.18677 , 0.2163 , 0.24245 , 0.27333 , 0.29456 ,
25 , 0.18370 , 0.21268 , 0.23835 , 0.26866 , 0.28951 ,
26 , 0.18077 , 0.20924 , 0.23445 , 0.26423 , 0.28472 ,
27 , 0.17799 , 0.20596 , 0.23074 , 0.26001 , 0.28016 ,
28 , 0.17533 , 0.20283 , 0.22721 , 0.256 , 0.27582 ,
29 , 0.1728 , 0.19985 , 0.22383 , 0.25217 , 0.27168 ,
30 , 0.17037 , 0.197 , 0.22061 , 0.24851 , 0.26772 ,
31 , 0.16805 , 0.19427 , 0.21752 , 0.24501 , 0.26393 ,
32 , 0.16582 , 0.19166 , 0.21457 , 0.24165 , 0.2603 ,
33 , 0.16368 , 0.18915 , 0.21173 , 0.23843 , 0.25683 ,
34 , 0.16162 , 0.18674 , 0.20901 , 0.23534 , 0.25348 ,
35 , 0.15964 , 0.18442 , 0.20639 , 0.23237 , 0.25027 ,
36 , 0.15774 , 0.18218 , 0.20387 , 0.22951 , 0.24718 ,
37 , 0.1559 , 0.18003 , 0.20144 , 0.22676 , 0.24421 ,
38 , 0.15413 , 0.17796 , 0.1991 , 0.2241 , 0.24134 ,
39 , 0.15242 , 0.17595 , 0.19684 , 0.22154 , 0.23857 ,
40 , 0.15076 , 0.17402 , 0.19465 , 0.21906 , 0.23589 ,
42 , 0.14761 , 0.17034 , 0.1905 , 0.21436 , 0.23081 ,
43 , 0.14611 , 0.16858 , 0.18852 , 0.21212 , 0.22839 ,
44 , 0.14466 , 0.16688 , 0.18661 , 0.20995 , 0.22605 ,
45 , 0.14325 , 0.16524 , 0.18475 , 0.20785 , 0.22377 ,
46 , 0.14188 , 0.16364 , 0.18295 , 0.20581 , 0.22157 ,
47 , 0.14055 , 0.16208 , 0.1812 , 0.20383 , 0.21943 ,
48 , 0.13926 , 0.16058 , 0.1795 , 0.2019 , 0.21735 ,
49 , 0.138 , 0.15911 , 0.17785 , 0.20003 , 0.21534 ,
50 , 0.13678 , 0.15769 , 0.17624 , 0.19822 , 0.21337 ,
51 , 0.13559 , 0.1563 , 0.17468 , 0.19645 , 0.21146 ,
52 , 0.13443 , 0.15495 , 0.17316 , 0.19473 , 0.20961 ,
53 , 0.1333 , 0.15363 , 0.17168 , 0.19305 , 0.2078 ,
54 , 0.13221 , 0.15235 , 0.17024 , 0.19142 , 0.20604 ,
55 , 0.13113 , 0.1511 , 0.16884 , 0.18983 , 0.20432 ,
56 , 0.13009 , 0.14989 , 0.16746 , 0.18828 , 0.20265 ,
57 , 0.12907 , 0.1487 , 0.16613 , 0.18677 , 0.20101 ,
58 , 0.12807 , 0.14754 , 0.16482 , 0.18529 , 0.19942 ,
59 , 0.1271 , 0.14641 , 0.16355 , 0.18385 , 0.19786 ,
60 , 0.12615 , 0.1453 , 0.1623 , 0.18245 , 0.19635 ,
62 , 0.12431 , 0.14316 , 0.1599 , 0.17973 , 0.19341 ,
64 , 0.12255 , 0.14112 , 0.1576 , 0.17713 , 0.19061 ,
66 , 0.12087 , 0.13916 , 0.1554 , 0.17464 , 0.18792 ,
68 , 0.11926 , 0.13728 , 0.15329 , 0.17226 , 0.18535 ,
70 , 0.11771 , 0.13548 , 0.15127 , 0.16997 , 0.18288 ,
72 , 0.11622 , 0.13375 , 0.14932 , 0.16777 , 0.18051 ,
74 , 0.11479 , 0.13208 , 0.14745 , 0.16566 , 0.17823 ,
76 , 0.11341 , 0.13048 , 0.14565 , 0.16363 , 0.17604 ,
78 , 0.11208 , 0.12894 , 0.14392 , 0.16167 , 0.17392 ,
80 , 0.11079 , 0.12745 , 0.14224 , 0.15978 , 0.17188 ,
82 , 0.10955 , 0.12601 , 0.14063 , 0.15795 , 0.16992 ,
84 , 0.10835 , 0.12462 , 0.13907 , 0.15619 , 0.16802 ,
86 , 0.10719 , 0.12327 , 0.13756 , 0.15449 , 0.16618 ,
88 , 0.10607 , 0.12197 , 0.1361 , 0.15284 , 0.1644 ,
90 , 0.10499 , 0.12071 , 0.13468 , 0.15124 , 0.16268 ,
92 , 0.10393 , 0.11949 , 0.13331 , 0.1497 , 0.16101 ,
94 , 0.10291 , 0.11831 , 0.13198 , 0.1482 , 0.594 ,
96 , 0.10192 , 0.11716 , 0.1307 , 0.14674 , 0.15783 ,
98 , 0.10096 , 0.11604 , 0.12944 , 0.14533 , 0.15631 ,
100 , 0.10002 , 0.11496 , 0.12823 , 0.14396 , 0.15483 ), nrow = 80, ncol = 6, byrow = TRUE)
Wr <- recresid(model, data = model$model, type = "Rec-CUSUM")
k <- start(Wr)[1] - 1
t <- end(Wr)[1] + 1
if (((t-k) %% 2) != 0){
n1 <- min( 0.5 * (t-k) - 1.5, 100)
n2 <- min( 0.5 * (t-k) - 0.5, 100)
c01 <- table[which((table[,1] == n1)), which(table[1,] == alpha/2)]
c02 <- table[which((table[,1] == n2)), which(table[1,] == alpha/2)]
c0 <- mean(c(c01,c02))
} else {
n <- min ( 0.5 * (t-k)-1, 100)
c0 <- table[which((table[,1] == n)), which(table[1,] == alpha/2)]
}
WrSq <- array(NA, length(Wr))
lb <- array(NA, length(Wr))
ub <- array(NA, length(Wr))
for (i in 1:length(Wr)){
WrSq[i] <- sum(Wr[1:i]^2)/sum(Wr^2)
lb[i] <- -c0 + (start(Wr)[1] + i - 1 - k)/(t-k)
ub[i] <- c0 + (start(Wr)[1] + i - 1 - k)/(t-k)
}
plot(ts(WrSq, start = start(Wr)), ylim = c(min(lb)-0.1,max(ub)+0.1), ylab="CUSUM of squared residuals", main ="Recursive CUSUM of squares test")
lines(ts(lb, start = start(Wr)), col = "red")
lines(ts(ub, start = start(Wr)), col = "red")
abline(h=0)
}
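# appendList(): merges two named lists of lag indices to remove; entries whose names match are
# combined and sorted, and the remaining entries of list2 are appended (used below by
# ardlBound() to merge user-supplied 'remove' specifications with the internally built ones).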
appendList <- function(list1, list2){
for (i in names(list1)[which(names(list1) == names(list2))]){
list1[[i]] = sort(c(list1[[i]], list2[[i]]))
}
for (i in names(list2)[which(names(list2) != names(list1))]){
list1[[i]] = list2[[i]]
}
return(list1)
}
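# ardlBound(): ARDL bounds test for cointegration. When p is not supplied (or autoOrder = TRUE)
# lag orders are chosen via ardlBoundOrders(); the full and restricted ardlDlm models are then
# fitted for the chosen 'case' (1-5), residual diagnostics are printed (Breusch-Godfrey,
# Ljung-Box, Breusch-Pagan, Shapiro-Wilk), the bounds-test F statistic is obtained from a Wald
# test of the null against the full model (Newey-West HAC covariance if HAC = TRUE) and compared
# against the pssbounds() critical values. If ECM = TRUE an error-correction model is also built
# and, with stability = TRUE, CUSUM/MOSUM stability plots are produced.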
ardlBound <- function(data = NULL , formula = NULL , case = 3 , p = NULL , remove = NULL, autoOrder = FALSE ,
HAC = FALSE,
ic = c("AIC", "BIC", "MASE", "GMRAE") ,
max.p = 15, max.q = 15, ECM = TRUE, stability = TRUE){
if (is.null(data)) stop("Enter data by data argument.")
if (is.null(formula)) stop("A formula object showing the dependent and independent series must be entered.")
vars <- all.vars(formula)
NumVar <- length(vars)
if ( NumVar < ncol(data)){
data <- data[,vars]
}
if (is.null(p) | (autoOrder == TRUE) ){
cat(" ", "\n")
cat("Orders being calculated with max.p =", max.p , "and max.q =", max.q, "...\n\n")
orders <- ardlBoundOrders(data = data , formula = formula, ic = "AIC" , max.p = max.p, max.q = max.q )
cat("Autoregressive order:" , orders$q + 1, "and p-orders:" , unlist(orders$p) + 1 , "\n")
cat("------------------------------------------------------", "\n")
p <- data.frame(orders$q , orders$p ) + 1
}
if (!is.data.frame(p)){
p <- array(p , NumVar)
p <- data.frame(t(p))
colnames(p) <- vars
} else {
colnames(p) <- vars
if (length(p) < NumVar) stop("Enter an integer or an array of length equal to the number of variables in the formula!")
}
caseType <- switch(case, "no intercept, no trend", "restricted intercept, no trend (not supported)",
"unrestricted intercept, no trend", "unrestricted intercept, restricted trend (not supported)",
"unrestricted intercept, unrestricted trend")
diffData <- apply(data , 2 , diff)
colnames(diffData) <- paste0("d" , colnames(diffData))
data <- cbind(data[2:nrow(data),],diffData)
if (max(p[2:NumVar]) == 1){
max.p <- 2
} else {
max.p <- max(p[2:NumVar])
}
rem.p <- list()
for (i in 1:NumVar){
if (max.p > 2){
rem.p[[vars[i] ]] <- c(0,2:(max.p-1))
}else {
rem.p[[vars[i] ]] <- c(0)
}
}
if (sum((p[2:NumVar] - max.p) == 0) == (NumVar-1)){
removeP <- NULL
if (ECM == TRUE) {
remP <- list()
if ((max.p-1) >= 2){
remP[["ec"]] <- c(0, 2:(max.p-1))
} else if ((max.p-1) == 1){
remP[["ec"]] <- c(0)
}
removeP = list(p = remP )
}
} else {
remP <- list()
reduce <- which((p[2:NumVar] - max.p) != 0) + 1
for ( j in reduce){
remP[[paste0("d" ,vars[j])]] <- c((max.p-1):p[[j]])
rem.p[[paste0("d" ,vars[j])]] <- c((max.p-1):p[[j]])
}
if (ECM == TRUE) {
if ((max.p-1) >= 2){
remP[["ec"]] <- c(0, 2:(max.p-1))
} else if ((max.p-1) == 1){
remP[["ec"]] <- c(0)
}
}
removeP = list(p = remP )
}
removeP$p <- appendList(removeP$p, remove$p)
removeP$q <- remove$q
rem.p <- appendList(rem.p, remove$p)
removeP2 <- list(p = rem.p, q = remove$q )
formula1 <- as.formula(paste0("d" , vars[1] , " ~ " , paste(c(vars ,
paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
if (case == 1){
formula1 <- update(formula1, ~. -1)
formula2 <- as.formula(paste0("d" , vars[1] , " ~ - 1 + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
if (p[[vars[1]]] == 1){
p[[vars[1]]] <- 2
removeP <- appendList(removeP, list( q = c(1)))
removeP2$q <- c(1)
}
modelFull <- ardlDlm(formula = formula1, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP2)
modelNull <- ardlDlm(formula = formula2, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
if (length(modelFull$model$residuals) != length(modelNull$model$residuals)){
strt <- nrow(data) - length(modelNull$model$residuals)
modelNull <- ardlDlm(formula = formula2, data = data.frame(tail(data,(length(modelFull$model$residuals) + strt))) ,
p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
}
if ( ECM == TRUE){
mult <- as.vector(modelFull$model$coefficients[2:NumVar] / modelFull$model$coefficients[1])
ec <- data[,1] + as.vector(mult %*% t(data[,2:NumVar]))
data.ecm <- data.frame(cbind(data, ec))
formula3 <- as.formula(paste0("d" , vars[1] , " ~ - 1 + ec + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
modelECM <- ardlDlm(formula = formula3, data = data.ecm , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
ecm.beta <- modelECM$model$coefficients["ec.1"]
}
} else if (case == 2){
formula1 <- update(formula1, ~. +1)
formula2 <- as.formula(paste0("d" , vars[1] , " ~ - 1 + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
if (p[[vars[1]]] == 1){
p[[vars[1]]] <- 2
removeP <- appendList(removeP, list( q = c(1)))
removeP2$q <- c(1)
}
modelFull <- ardlDlm(formula = formula1, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP2)
modelNull <- ardlDlm(formula = formula2, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
if (length(modelFull$model$residuals) != length(modelNull$model$residuals)){
strt <- nrow(data) - length(modelNull$model$residuals)
modelNull <- ardlDlm(formula = formula2, data = data.frame(tail(data,(length(modelFull$model$residuals) + strt))) ,
p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
}
if ( ECM == TRUE){
mult <- as.vector(modelFull$model$coefficients[3:(NumVar+1)] / modelFull$model$coefficients[2])
ec <- data[,1] + as.vector(mult %*% t(data[,2:NumVar])) + (modelFull$model$coefficients[1] / modelFull$model$coefficients[2])
data.ecm <- data.frame(cbind(data, ec))
formula3 <- as.formula(paste0("d" , vars[1] , " ~ - 1 + ec + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
modelECM <- ardlDlm(formula = formula3, data = data.ecm , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
ecm.beta <- modelECM$model$coefficients["ec.1"]
}
} else if (case == 3){
formula2 <- as.formula(paste0("d" , vars[1] , " ~ +1 + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
if (p[[vars[1]]] == 1){
p[[vars[1]]] <- 2
removeP <- appendList(removeP, list( q = c(1)))
removeP2$q <- c(1)
}
modelFull <- ardlDlm(formula = formula1, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP2)
modelNull <- ardlDlm(formula = formula2, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
if (length(modelFull$model$residuals) != length(modelNull$model$residuals)){
strt <- nrow(data) - length(modelNull$model$residuals)
modelNull <- ardlDlm(formula = formula2, data = data.frame(tail(data,(length(modelFull$model$residuals) + strt))) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
}
if ( ECM == TRUE){
mult <- as.vector(modelFull$model$coefficients[3:(NumVar+1)] / modelFull$model$coefficients[2])
ec <- data[,1] + as.vector(mult %*% t(data[,2:NumVar]))
data.ecm <- data.frame(cbind(data, ec))
formula3 <- as.formula(paste0("d" , vars[1] , " ~ +1 + ec + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
modelECM <- ardlDlm(formula = formula3, data = data.ecm , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
ecm.beta <- modelECM$model$coefficients["ec.1"]
}
} else if (case == 4){
trend <- c(1:nrow(data))
data <- cbind(data , trend)
formula1 <- as.formula(paste0(formula1 , " + trend"))
formula2 <- as.formula(paste0("d" , vars[1] , " ~ " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
if (p[[vars[1]]] == 1){
p[[vars[1]]] <- 2
removeP <- appendList(removeP, list( q = c(1)))
removeP2$q <- c(1)
}
rem.p[["trend"]] <- c(1:(max.p-1))
modelFull <- ardlDlm(formula = formula1, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP2)
modelNull <- ardlDlm(formula = formula2, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
if (length(modelFull$model$residuals) != length(modelNull$model$residuals)){
strt <- nrow(data) - length(modelNull$model$residuals)
modelNull <- ardlDlm(formula = formula2, data = data.frame(tail(data,(length(modelFull$model$residuals) + strt))) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
}
if ( ECM == TRUE){
mult <- as.vector(modelFull$model$coefficients[3:(NumVar+1)] / modelFull$model$coefficients[2])
ec <- data[,1] + as.vector(mult %*% t(data[,2:NumVar])) + modelFull$model$coefficients["trend.t"] * trend / modelFull$model$coefficients[2]
data.ecm <- data.frame(cbind(data, ec))
formula3 <- as.formula(paste0("d" , vars[1] , " ~ ec + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") ))
modelECM <- ardlDlm(formula = formula3, data = data.ecm , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
ecm.beta <- modelECM$model$coefficients["ec.1"]
}
} else if (case == 5){
trend <- c(1:nrow(data))
data <- cbind(data , trend)
formula1 <- as.formula(paste0(formula1 , " + trend"))
formula2 <- as.formula(paste0("d" , vars[1] , " ~ " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") , " + trend" ))
if (p[[vars[1]]] == 1){
p[[vars[1]]] <- 2
removeP <- appendList(removeP, list( q = c(1)))
removeP2$q <- c(1)
}
removeP$p[["trend"]] <- c(1:(max.p-1))
removeP2$p[["trend"]] <- c(1:(max.p-1))
rem.p[["trend"]] <- c(1:(max.p-1))
modelFull <- ardlDlm(formula = formula1, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP2 )
modelNull <- ardlDlm(formula = formula2, data = data.frame(data) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
if (length(modelFull$model$residuals) != length(modelNull$model$residuals)){
strt <- nrow(data) - length(modelNull$model$residuals)
modelNull <- ardlDlm(formula = formula2, data = data.frame(tail(data,(length(modelFull$model$residuals) + strt))) , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
}
if ( ECM == TRUE){
mult <- as.vector(modelFull$model$coefficients[3:(NumVar+1)] / modelFull$model$coefficients[2])
ec <- data[,1] + as.vector(mult %*% t(data[,2:NumVar]))
data.ecm <- data.frame(cbind(data, ec))
formula3 <- as.formula(paste0("d" , vars[1] , " ~ ec + " , paste(c(paste0("d" , vars[2:NumVar] )) , collapse=" + ") , " + trend" ))
modelECM <- ardlDlm(formula = formula3, data = data.ecm , p = (max.p-1) , q = (p[[vars[1]]]-1) , remove = removeP )
ecm.beta <- modelECM$model$coefficients["ec.1"]
}
}
bg <- NULL
bg <- lmtest::bgtest(modelFull$model, type = "F")
cat("\n Breusch-Godfrey Test for the autocorrelation in residuals:\n")
print(bg)
if (!is.na(bg$p.value)){
if (bg$p.value < 0.05){
cat("The p-value of Breusch-Godfrey test for the autocorrelation in residuals: ", bg$p.value , "< 0.05!\n" )
}
}
cat("------------------------------------------------------", "\n")
res <- modelFull$model$residuals
cat("\n Ljung-Box Test for the autocorrelation in residuals:\n")
lb <- Box.test(res, type = c("Ljung-Box"))
print(lb)
if (!is.na(lb$p.value)){
if (lb$p.value < 0.05){
cat("The p-value of Ljung-Box test for the autocorrelation in residuals: ", lb$p.value , "< 0.05!\n" )
}
}
cat("------------------------------------------------------", "\n")
bp <- NULL
bp <- lmtest::bptest(modelFull$model)
cat("\n Breusch-Pagan Test for the homoskedasticity of residuals:\n")
print(bp)
if (!is.na(bp$p.value)){
if (bp$p.value < 0.05){
cat("The p-value of Breusch-Pagan test for the homoskedasticity of residuals: ", bp$p.value , "< 0.05!\n" )
}
}
cat("------------------------------------------------------", "\n")
sp <- NULL
sp <- shapiro.test(modelFull$model$residual)
cat("\n Shapiro-Wilk test of normality of residuals:\n")
print(sp)
if (!is.na(sp$p.value)){
if (sp$p.value < 0.05){
cat("The p-value of Shapiro-Wilk test normality of residuals: ", sp$p.value , "< 0.05!\n" )
}
}
cat("------------------------------------------------------", "\n")
tryCatch(
{if (HAC == TRUE) {
cat("Newey-West HAC covariance matrix estimators are used for testing.", "\n")
neweyCM <- NeweyWest(modelFull$model)
Fvalue <- lmtest::waldtest( modelNull$model , modelFull$model, vcov = neweyCM)$F[2]
} else {
Fvalue <- lmtest::waldtest( modelNull$model , modelFull$model)$F[2]
}
},
error = function(e) {
Fvalue <<- anova( modelNull$model , modelFull$model)$F[2]
}
)
k = (NumVar - 1)
pssbounds(obs = nrow(data) , fstat = Fvalue , case = case, k = k )
if ((stability == TRUE) & (ECM == TRUE)){
cat("------------------------------------------------------", "\n")
reset <- NULL
reset <- lmtest::resettest(modelECM$model)
cat("\n Ramsey's RESET Test for model specification:\n")
print(reset)
if (reset$p.value < 0.05){
cat("the p-value of RESET test: ", reset$p.value , "< 0.05!\n" )
}
cat("------------------------------------------------------", "\n")
cusum.test <- efp(modelECM$model, data = modelECM$model$model, type = "Rec-CUSUM")
mosum.test <- efp(modelECM$model, data = modelECM$model$model, type = "Rec-MOSUM")
graphics.off()
plot.new()
par(oma = c(4, 1, 1, 1))
two <- FALSE
if (sum(c(any(is.nan(cusum.test$process)), any(is.nan(mosum.test$process)))) >0){
par(mfrow = c(1,2))
two <- TRUE
} else {
par(mfrow = c(2,2))
}
if (!any(is.nan(cusum.test$process))){
plot(cusum.test)
}
cusumSq(modelECM$model)
if(!any(is.nan(mosum.test$process))){
plot(mosum.test)
}
if (two){
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
plot.new()
legend("bottom", c("Recursive residuals", "5% limits"), xpd = TRUE, horiz = TRUE, inset = 0, bty = "n",
col = c("black" , "red"), lwd=2, cex = 1)
} else {
plot.new()
legend("top", c("Recursive residuals", "5% limits"), col = c("black" , "red"),
xpd = TRUE, bty = "o", lwd = 2, cex = 1)
}
}
if (ECM){
cat("------------------------------------------------------", "\n")
cat("Error Correction Model Output: \n")
summary(modelECM)
cat("------------------------------------------------------", "\n")
cat("Long-run coefficients: \n")
if (case == 1){
print(modelFull$model$coefficients[1:NumVar])
} else {
print(modelFull$model$coefficients[2:(NumVar+1)])
}
cat(" ", "\n")
}
if (ECM){
return(list(model = list(modelNull = modelNull, modelFull = modelFull), F.stat = Fvalue , p = p , k = k, bg = bg, lb = lb , bp = bp, sp = sp,
ECM = list(EC.t = ec, EC.model = modelECM$model , EC.beta = ecm.beta, EC.data = data.ecm), ARDL.model = modelFull$model ) )
} else {
return(list(model = list(modelNull = modelNull, modelFull = modelFull), ARDL.model = modelFull$model, F.stat = Fvalue , p = p , k = k, bg = bg, lb = lb , bp = bp, sp = sp ) )
}
} |
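# po.tutorial(): returns the path of a bundled tutorial script ("101", "compare", "13030" or
# "scenario") from the installed portfolio.optimization package, or prints the available
# tutorial names when an unknown name is given.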
po.tutorial <- function(tutorial="") {
tutorials <- c("101", "compare", "13030", "scenario")
tutorial_files <- c("1-po101", "2-compare", "3-13030", "4-scenario")
if(!tutorial %in% tutorials) {
print(paste0("Available tutorials: ", paste(tutorials, collapse=", ")))
}
else {
tutorial_pos <- which(tutorials == tutorial)
return(paste0(path.package("portfolio.optimization"), "/", tutorial_files[tutorial_pos], ".R"))
}
} |
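# Tests for the 'augmentation' reject-inference method (scoringTools): a financed sample
# (xf, yf) is simulated from a logistic model, a not-financed sample (xnf) is drawn, and the
# returned 'reject_infered' S4 object is checked; a second run mocks is_speedglm_installed()
# to FALSE and expects a warning while exercising the glm fallback.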
context("test-augmentation")
test_that("augmentation method works with speedglm and glm", {
xf <- matrix(runif(100 * 2), nrow = 100, ncol = 2)
theta <- c(2, -2)
log_odd <- apply(xf, 1, function(row) theta %*% row)
yf <- rbinom(100, 1, 1 / (1 + exp(-log_odd)))
xnf <- matrix(runif(100 * 2), nrow = 100, ncol = 2)
modele_rejected <- augmentation(xf, xnf, yf)
expect_s4_class(modele_rejected, "reject_infered")
expect_equal(modele_rejected@method_name, "augmentation")
expect_s3_class(modele_rejected@financed_model, "glmORlogicalORspeedglm")
expect_true(is.na(modele_rejected@acceptance_model))
expect_s3_class(modele_rejected@infered_model, "glmORlogicalORspeedglm")
with_mock(
"scoringTools:::is_speedglm_installed" = function() FALSE,
{
expect_warning(modele_rejected <- augmentation(xf, xnf, yf))
expect_s4_class(modele_rejected, "reject_infered")
expect_equal(modele_rejected@method_name, "augmentation")
expect_s3_class(modele_rejected@financed_model, "glmORlogicalORspeedglm")
expect_true(is.na(modele_rejected@acceptance_model))
expect_s3_class(modele_rejected@infered_model, "glmORlogicalORspeedglm")
}
)
}) |
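# get_afltables_stats(): deprecated wrapper that forwards to fetch_player_stats_afltables()
# for the seasons spanned by start_date and end_date.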
get_afltables_stats <- function(start_date = "1897-01-01",
end_date = Sys.Date()) {
.Deprecated("fetch_player_stats_afltables")
seasons <- lubridate::year(start_date):lubridate::year(end_date)
fetch_player_stats_afltables(season = seasons)
} |
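# HMM_simulation(): simulates a hidden Markov chain of length 'size' with m states (initial
# distribution 'delta', transition matrix 'gamma') and draws observations from the chosen
# state-dependent distribution ("pois", "norm", "genpois" or "geom"), optionally truncated to
# obs_range, rounded, or forced non-negative; 'plotting' selects diagnostic plots and a list
# with the chain, the state means along the chain and the observations is returned.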
HMM_simulation <-
function(size, m, delta = rep(1 / m, times = m), gamma = 0.8 * diag(m) + rep(0.2 / m, times = m), distribution_class, distribution_theta, obs_range=c(NA,NA), obs_round=FALSE, obs_non_neg = FALSE, plotting = 0)
{
if (!exists('delta'))
{
delta <- rep(1 / m, times = m)
}
if (!exists('gamma'))
{
gamma <- 0.8 * diag(m) + rep(0.2 / m, times = m)
}
markov_chain <- sample(x = seq(1, m, by = 1), 1, prob = delta)
for (i in 2:size)
{
last_state <- markov_chain[i - 1]
markov_chain <- c(markov_chain, sample(x = seq(1, m, by=1), 1, prob = gamma[last_state, ]))
}
observations <- rep(NA, times = size)
if (distribution_class == "pois")
{
obs_dist_means <- distribution_theta$lambda
means_along_markov_chain <- NULL
for (i in 1:size)
{
means_along_markov_chain <- c(means_along_markov_chain, distribution_theta$lambda[markov_chain[i]])
}
for (i in 1:size)
{
observations[i] <- rpois(n = 1, lambda = distribution_theta$lambda[markov_chain[i]])
if (any(!is.na(obs_range)))
{
if (!is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] < obs_range[1] | observations[i] > obs_range[2])
{
observations[i] <- rpois(n = 1, lambda = distribution_theta$lambda[markov_chain[i]])
}
}
if (!is.na(obs_range[1]) & is.na(obs_range[2]))
{
while(observations[i] < obs_range[1])
{
observations[i] <- rpois(n = 1, lambda = distribution_theta$lambda[markov_chain[i]])
}
}
if (is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] > obs_range[2])
{
observations[i] <- rpois(n = 1, lambda = distribution_theta$lambda[markov_chain[i]])
}
}
}
}
}
if (distribution_class == "norm")
{
obs_dist_means <- distribution_theta$mean
means_along_markov_chain <- NULL
for(i in 1:size)
{
means_along_markov_chain <- c(means_along_markov_chain, distribution_theta$mean[markov_chain[i]])
}
for (i in 1:size)
{
observations[i] <- rnorm(n = 1, mean = distribution_theta$mean[markov_chain[i]], sd = distribution_theta$sd[markov_chain[i]])
if (any(!is.na(obs_range)))
{
if (!is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] < obs_range[1] | observations[i] > obs_range[2])
{
observations[i] <- rnorm(n = 1, mean = distribution_theta$mean[markov_chain[i]], sd = distribution_theta$sd[markov_chain[i]])
}
}
if (!is.na(obs_range[1]) & is.na(obs_range[2]))
{
while(observations[i] < obs_range[1])
{
observations[i] <- rnorm(n = 1, mean = distribution_theta$mean[markov_chain[i]], sd = distribution_theta$sd[markov_chain[i]])
}
}
if (is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] > obs_range[2])
{
observations[i] <- rnorm(n = 1, mean = distribution_theta$mean[markov_chain[i]], sd = distribution_theta$sd[markov_chain[i]])
}
}
}
if (obs_non_neg == TRUE)
{
if (observations[i] < 0)
{
observations[i] <- 0
}
}
}
}
if (distribution_class == "genpois")
{
obs_dist_means <- distribution_theta$lambda1 / (1 - distribution_theta$lambda2)
means_along_markov_chain <- NULL
for (i in 1:size)
{
means_along_markov_chain <- c(means_along_markov_chain, (distribution_theta$lambda1[markov_chain[i]]) / (1 - distribution_theta$lambda2[markov_chain[i]]))
}
for (i in 1:size)
{
observations[i] <- rgenpois(n = 1, lambda1 = distribution_theta$lambda1[markov_chain[i]], lambda2 = distribution_theta$lambda2[markov_chain[i]])
if (any(!is.na(obs_range)))
{
if (!is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] < obs_range[1] | observations[i] > obs_range[2])
{
observations[i] <- rgenpois(n = 1, lambda1 = distribution_theta$lambda1[markov_chain[i]], lambda2 = distribution_theta$lambda2[markov_chain[i]])
}
}
if (!is.na(obs_range[1]) & is.na(obs_range[2]))
{
while(observations[i] < obs_range[1])
{
observations[i] <- rgenpois(n = 1, lambda1 = distribution_theta$lambda1[markov_chain[i]], lambda2 = distribution_theta$lambda2[markov_chain[i]]) }
}
if (is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] > obs_range[2])
{
observations[i] <- rgenpois(n = 1, lambda1 = distribution_theta$lambda1[markov_chain[i]], lambda2 = distribution_theta$lambda2[markov_chain[i]])
}
}
}
}
}
if (distribution_class == "geom")
{
obs_dist_means <- distribution_theta$prob
means_along_markov_chain <- NULL
for (i in 1:size)
{
means_along_markov_chain <- c(means_along_markov_chain, distribution_theta$prob[markov_chain[i]])
}
for (i in 1:size)
{
observations[i] <- rgeom(n = 1, prob = distribution_theta$prob[markov_chain[i]])
if (any(!is.na(obs_range)))
{
if (!is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] < obs_range[1] | observations[i] > obs_range[2])
{
observations[i] <- rgeom(n = 1,prob = distribution_theta$prob[markov_chain[i]])
}
}
if (!is.na(obs_range[1]) & is.na(obs_range[2]))
{
while(observations[i] < obs_range[1])
{
observations[i] <- rgeom(n = 1,prob = distribution_theta$prob[markov_chain[i]])
}
}
if (is.na(obs_range[1]) & !is.na(obs_range[2]))
{
while(observations[i] > obs_range[2])
{
observations[i] <- rgeom(n = 1, prob = distribution_theta$prob[markov_chain[i]])
}
}
}
}
}
if (!is.na(plotting))
{
if (plotting == 0)
{
par(mfrow=c(2,2))
plot(markov_chain, xlab ='t', main ='simulated (hidden) Markov chain', col = "green", type = 'o', ylab = "states")
plot(means_along_markov_chain, xlab = 't', main = 'means along Markov chain', col = "green", type = 'o', ylab = 'observation')
plot(observations, xlab = 't', main = 'observations along Markov chain')
abline(h = obs_dist_means, col = "grey50", lty = "dashed")
lines(means_along_markov_chain, xlab = 'time', main = 'simulation data', type = "l", col = "green")
plot(observations, xlab = 't', main = 'simulated observations')
par(mfrow=c(1,1))
plot(markov_chain, xlab = 't', main = 'simulated (hidden) Markov chain', col = "green", type = 'o', ylab = "states")
plot(means_along_markov_chain, xlab = 't', main = 'means along Markov chain', col = "green", type = 'o', ylab = 'observation')
plot(observations, xlab = 't', main = 'observations along Markov chain')
abline(h = obs_dist_means, col = "grey50", lty = "dashed")
lines(means_along_markov_chain, xlab = 'time', main = 'simulation data', type = "l", col = "green")
plot(observations, xlab = 't', main = 'simulated observations')
par(mfrow=c(1,1))
}
if (plotting == 1)
{
par(mfrow=c(2,2))
plot(markov_chain, xlab = 't', main = 'simulated (hidden) Markov chain', col = "green", type = 'o', ylab = "states")
plot(means_along_markov_chain, xlab = 't', main = 'means along Markov chain', col = "green", type = 'o', ylab = 'observation')
plot(observations, xlab = 't', main = 'observations along Markov chain')
abline(h = obs_dist_means, col = "grey50", lty = "dashed")
lines(means_along_markov_chain, xlab = 'time', main = 'simulation data', type = "l", col = "green")
plot(observations, xlab = 't', main = 'simulated observations')
par(mfrow=c(1,1))
}
if (plotting == 2)
{
plot(markov_chain, xlab = 't', main = 'simulated (hidden) Markov chain', col = "green", type = 'o', ylab = "states")
}
if (plotting == 3)
{
plot(means_along_markov_chain, xlab = 't', main = 'means along Markov chain', col = "green", type = 'o', ylab = 'observation')
}
if (plotting == 4)
{
plot(observations, xlab = 't', main = 'observations along Markov chain')
abline(h = obs_dist_means, col = "grey50", lty = "dashed")
lines(means_along_markov_chain, xlab = 'time', main = 'simulation data', type = "l", col = "green")
}
if (plotting == 5)
{
plot(observations, xlab = 't', main = 'simulated observations')
}
}
if(obs_round == TRUE)
{
observations <- round(observations)
}
return(list(size = size,
m = m,
delta = delta,
gamma = gamma,
distribution_class = distribution_class,
distribution_theta = distribution_theta,
markov_chain = markov_chain,
means_along_markov_chain = means_along_markov_chain,
observations = observations))
} |
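# get_apsimx_json(): downloads a crop model .json resource (e.g. Wheat) from the APSIM Next
# Generation GitHub repository (APSIMInitiative/ApsimX), reads it with jsonlite and optionally
# deletes the downloaded copy; the parsed list is returned invisibly.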
get_apsimx_json <- function(model = "Wheat", wrt.dir = ".", cleanup = FALSE){
st0 <- "https://raw.githubusercontent.com/APSIMInitiative/ApsimX/master/Models/Resources"
str <- paste0(st0, "/", model, ".json")
dst <- file.path(wrt.dir, paste0(model, ".json"))
utils::download.file(url = str, destfile = dst)
ans <- jsonlite::read_json(dst)
if(cleanup) unlink(dst)
invisible(ans)
}
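# insert_replacement_node(): inserts a 'Replacements' node into an .apsimx (JSON) file at
# rep.node.position, relocating the core Simulation node(s) to new.core.position, and writes
# the edited file (with edit.tag appended to the file name unless overwrite = TRUE).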
insert_replacement_node <- function(file, src.dir, wrt.dir, rep.node,
rep.node.position = 1,
new.core.position = rep.node.position + 1,
edit.tag = "-edited",
overwrite = FALSE,
verbose = TRUE,
root){
.check_apsim_name(file)
if(missing(wrt.dir)) wrt.dir <- src.dir
file.names <- dir(path = src.dir, pattern=".apsimx$", ignore.case=TRUE)
if(length(file.names) == 0){
stop("There are no .apsimx files in the specified directory to edit.")
}
file <- match.arg(file, file.names)
if(apsimx_filetype(file = file, src.dir = src.dir) != "json")
stop("This function only edits JSON files")
apsimx_json <- jsonlite::read_json(file.path(src.dir, file))
wcore <- grep("Core.Simulation", apsimx_json$Children)
wdatastore <- grep("Models.Storage.DataStore", apsimx_json$Children)
if(verbose){
cat("Simulation(s) is/are in node(s)", wcore, "\n")
cat("Datastore(s) is/are in node(s)", wdatastore, "\n")
}
if(rep.node.position == wdatastore)
warning("Replacement node will overwrite DataStore")
if(new.core.position == wdatastore)
warning("Simulations node will overwrite DataStore")
rep.node$ExplorerWidth <- NULL
rep.node$Version <- NULL
rep.node$Name <- "Replacements"
rep.node$`$type` <- "Models.Core.Replacements, Models"
if(length(wcore) != length(new.core.position))
stop("length of new.core.position should be equal to the number of core simulations")
for(i in seq_along(new.core.position)){
apsimx_json$Children[[new.core.position[i]]] <- apsimx_json$Children[[wcore[i]]]
}
apsimx_json$Children[[rep.node.position]] <- rep.node
if(overwrite == FALSE){
wr.path <- paste0(wrt.dir, "/",
tools::file_path_sans_ext(file),
edit.tag, ".apsimx")
}else{
wr.path <- paste0(wrt.dir, "/", file)
}
jsonlite::write_json(apsimx_json, path = wr.path,
pretty = TRUE, digits = NA,
auto_unbox = TRUE, null = "null")
if(verbose){
cat("Created: ", wr.path,"\n")
}
} |
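# R Markdown setup chunk: sets echo = TRUE, loads the spatial and plotting packages used
# alongside rnpn, and embeds the pre-rendered figure figures/7-plot.png.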
knitr::opts_chunk$set(echo = TRUE)
library(RColorBrewer)
library(rnpn)
library(rgdal)
library(raster)
knitr::include_graphics("figures/7-plot.png") |
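# Shiny server fragments: each rAmCharts bullet chart (amBullet) is rendered with renderAmCharts
# and paired with a renderText output that displays the code used to produce it.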
output$amBullet0 <- rAmCharts::renderAmCharts({
amBullet(value = 65)
})
output$code_amBullet0 <- renderText({
"
amBullet(value = 65)
"
})
output$amBullet0 <- rAmCharts::renderAmCharts({
amBullet(value = 65)
})
output$code_amBullet0 <- renderText({
"
amBullet(value = 65)
"
})
output$amBullet1 <- rAmCharts::renderAmCharts({
amBullet(value = 65, val_color = "purple", limit_color = "
})
output$code_amBullet1 <- renderText({
"
amBullet(value = 65, val_color = 'purple', limit_color = '
"
})
output$amBullet2 <- rAmCharts::renderAmCharts({
amBullet(value = 65, main = 'Bullet chart 1', mainSize = 15)
})
output$code_amBullet2 <- renderText({
"
amBullet(value = 65, main = 'Bullet chart 1', mainSize = 15)
"
}) |
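# Unit tests for group sequential / inverse normal designs (rpact): getDesignInverseNormal()
# with default settings and with alpha-/beta-spending designs ('asHSD', 'bsHSD', 'asKD',
# 'bsKD'), plus the derived characteristics from getDesignCharacteristics().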
context("Testing the Group Sequential and Inverse Normal Design Functionality")
test_that("'getDesignInverseNormal' with default parameters: parameters and results are as expected", {
x0 <- getDesignInverseNormal()
expect_equal(x0$alphaSpent, c(0.00025917372, 0.0071600594, 0.02499999), tolerance = 1e-07)
expect_equal(x0$criticalValues, c(3.4710914, 2.4544323, 2.0040356), tolerance = 1e-07)
expect_equal(x0$stageLevels, c(0.00025917372, 0.0070553616, 0.022533125), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x0), NA)))
expect_output(print(x0)$show())
invisible(capture.output(expect_error(summary(x0), NA)))
expect_output(summary(x0)$show())
x0CodeBased <- eval(parse(text = getObjectRCode(x0, stringWrapParagraphWidth = NULL)))
expect_equal(x0CodeBased$alphaSpent, x0$alphaSpent, tolerance = 1e-05)
expect_equal(x0CodeBased$criticalValues, x0$criticalValues, tolerance = 1e-05)
expect_equal(x0CodeBased$stageLevels, x0$stageLevels, tolerance = 1e-05)
expect_type(names(x0), "character")
df <- as.data.frame(x0)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x0)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignInverseNormal' with type of design = 'asHSD', 'bsHSD', 'asKD', and 'bsKD'", {
.skipTestIfDisabled()
x1 <- getDesignInverseNormal(kMax = 3, informationRates = c(0.2, 0.4, 1),
alpha = 0.03, sided = 1, beta = 0.14, typeOfDesign = "asHSD", gammaA = 0)
expect_equal(x1$alphaSpent, c(0.006, 0.012, 0.02999999), tolerance = 1e-07)
expect_equal(x1$criticalValues, c(2.5121443, 2.4228747, 2.0280392), tolerance = 1e-07)
expect_equal(x1$stageLevels, c(0.006, 0.0076991189, 0.021278125), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x1), NA)))
expect_output(print(x1)$show())
invisible(capture.output(expect_error(summary(x1), NA)))
expect_output(summary(x1)$show())
x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL)))
expect_equal(x1CodeBased$alphaSpent, x1$alphaSpent, tolerance = 1e-05)
expect_equal(x1CodeBased$criticalValues, x1$criticalValues, tolerance = 1e-05)
expect_equal(x1CodeBased$stageLevels, x1$stageLevels, tolerance = 1e-05)
expect_type(names(x1), "character")
df <- as.data.frame(x1)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x1)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
y1 <- getDesignCharacteristics(x1)
expect_equal(y1$nFixed, 8.7681899, tolerance = 1e-07)
expect_equal(y1$shift, 9.4594102, tolerance = 1e-07)
expect_equal(y1$inflationFactor, 1.0788327, tolerance = 1e-07)
expect_equal(y1$information, c(1.891882, 3.7837641, 9.4594102), tolerance = 1e-07)
expect_equal(y1$power, c(0.12783451, 0.34055165, 0.86), tolerance = 1e-07)
expect_equal(y1$rejectionProbabilities, c(0.12783451, 0.21271713, 0.51944835), tolerance = 1e-07)
expect_equal(y1$futilityProbabilities, c(9.8658765e-10, 9.7584074e-10), tolerance = 1e-07)
expect_equal(y1$averageSampleNumber1, 0.83081135, tolerance = 1e-07)
expect_equal(y1$averageSampleNumber01, 1.0142116, tolerance = 1e-07)
expect_equal(y1$averageSampleNumber0, 1.0697705, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(y1), NA)))
expect_output(print(y1)$show())
invisible(capture.output(expect_error(summary(y1), NA)))
expect_output(summary(y1)$show())
y1CodeBased <- eval(parse(text = getObjectRCode(y1, stringWrapParagraphWidth = NULL)))
expect_equal(y1CodeBased$nFixed, y1$nFixed, tolerance = 1e-05)
expect_equal(y1CodeBased$shift, y1$shift, tolerance = 1e-05)
expect_equal(y1CodeBased$inflationFactor, y1$inflationFactor, tolerance = 1e-05)
expect_equal(y1CodeBased$information, y1$information, tolerance = 1e-05)
expect_equal(y1CodeBased$power, y1$power, tolerance = 1e-05)
expect_equal(y1CodeBased$rejectionProbabilities, y1$rejectionProbabilities, tolerance = 1e-05)
expect_equal(y1CodeBased$futilityProbabilities, y1$futilityProbabilities, tolerance = 1e-05)
expect_equal(y1CodeBased$averageSampleNumber1, y1$averageSampleNumber1, tolerance = 1e-05)
expect_equal(y1CodeBased$averageSampleNumber01, y1$averageSampleNumber01, tolerance = 1e-05)
expect_equal(y1CodeBased$averageSampleNumber0, y1$averageSampleNumber0, tolerance = 1e-05)
expect_type(names(y1), "character")
df <- as.data.frame(y1)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(y1)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
x2 <- getDesignInverseNormal(kMax = 3, informationRates = c(0.2, 0.4, 1),
alpha = 0.07, sided = 1, beta = 0.14, typeOfDesign = "asHSD", gammaA = -1,
typeBetaSpending = "bsHSD", gammaB = -2)
expect_equal(x2$power, c(0.12038954, 0.32895265, 0.86), tolerance = 1e-07)
expect_equal(x2$futilityBounds, c(-1.1063623, -0.35992438), tolerance = 1e-07)
expect_equal(x2$alphaSpent, c(0.0090195874, 0.020036136, 0.06999999), tolerance = 1e-07)
expect_equal(x2$betaSpent, c(0.010777094, 0.026854629, 0.14), tolerance = 1e-07)
expect_equal(x2$criticalValues, c(2.364813, 2.1928805, 1.5660474), tolerance = 1e-07)
expect_equal(x2$stageLevels, c(0.0090195874, 0.014157994, 0.058668761), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x2), NA)))
expect_output(print(x2)$show())
invisible(capture.output(expect_error(summary(x2), NA)))
expect_output(summary(x2)$show())
x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL)))
expect_equal(x2CodeBased$power, x2$power, tolerance = 1e-05)
expect_equal(x2CodeBased$futilityBounds, x2$futilityBounds, tolerance = 1e-05)
expect_equal(x2CodeBased$alphaSpent, x2$alphaSpent, tolerance = 1e-05)
expect_equal(x2CodeBased$betaSpent, x2$betaSpent, tolerance = 1e-05)
expect_equal(x2CodeBased$criticalValues, x2$criticalValues, tolerance = 1e-05)
expect_equal(x2CodeBased$stageLevels, x2$stageLevels, tolerance = 1e-05)
expect_type(names(x2), "character")
df <- as.data.frame(x2)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x2)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
y2 <- getDesignCharacteristics(x2)
expect_equal(y2$nFixed, 6.5337002, tolerance = 1e-07)
expect_equal(y2$shift, 7.1015943, tolerance = 1e-07)
expect_equal(y2$inflationFactor, 1.0869177, tolerance = 1e-07)
expect_equal(y2$information, c(1.4203189, 2.8406377, 7.1015943), tolerance = 1e-07)
expect_equal(y2$power, c(0.12038953, 0.32895265, 0.86), tolerance = 1e-07)
expect_equal(y2$rejectionProbabilities, c(0.12038953, 0.20856311, 0.53104735), tolerance = 1e-07)
expect_equal(y2$futilityProbabilities, c(0.010777094, 0.016077535), tolerance = 1e-07)
expect_equal(y2$averageSampleNumber1, 0.82636428, tolerance = 1e-07)
expect_equal(y2$averageSampleNumber01, 0.91614201, tolerance = 1e-07)
expect_equal(y2$averageSampleNumber0, 0.79471657, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(y2), NA)))
expect_output(print(y2)$show())
invisible(capture.output(expect_error(summary(y2), NA)))
expect_output(summary(y2)$show())
y2CodeBased <- eval(parse(text = getObjectRCode(y2, stringWrapParagraphWidth = NULL)))
expect_equal(y2CodeBased$nFixed, y2$nFixed, tolerance = 1e-05)
expect_equal(y2CodeBased$shift, y2$shift, tolerance = 1e-05)
expect_equal(y2CodeBased$inflationFactor, y2$inflationFactor, tolerance = 1e-05)
expect_equal(y2CodeBased$information, y2$information, tolerance = 1e-05)
expect_equal(y2CodeBased$power, y2$power, tolerance = 1e-05)
expect_equal(y2CodeBased$rejectionProbabilities, y2$rejectionProbabilities, tolerance = 1e-05)
expect_equal(y2CodeBased$futilityProbabilities, y2$futilityProbabilities, tolerance = 1e-05)
expect_equal(y2CodeBased$averageSampleNumber1, y2$averageSampleNumber1, tolerance = 1e-05)
expect_equal(y2CodeBased$averageSampleNumber01, y2$averageSampleNumber01, tolerance = 1e-05)
expect_equal(y2CodeBased$averageSampleNumber0, y2$averageSampleNumber0, tolerance = 1e-05)
expect_type(names(y2), "character")
df <- as.data.frame(y2)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(y2)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
x3 <- getDesignInverseNormal(kMax = 3, informationRates = c(0.3, 0.7, 1),
alpha = 0.03, sided = 1, beta = 0.34, typeOfDesign = "asKD", gammaA = 2.2,
typeBetaSpending = "bsKD", gammaB = 3.2)
expect_equal(x3$power, c(0.058336437, 0.39824601, 0.66), tolerance = 1e-07)
expect_equal(x3$futilityBounds, c(-1.1558435, 0.72836893), tolerance = 1e-07)
expect_equal(x3$alphaSpent, c(0.0021222083, 0.013687904, 0.02999999), tolerance = 1e-07)
expect_equal(x3$betaSpent, c(0.0072155083, 0.1085907, 0.34), tolerance = 1e-07)
expect_equal(x3$criticalValues, c(2.8594012, 2.2435708, 1.9735737), tolerance = 1e-07)
expect_equal(x3$stageLevels, c(0.0021222083, 0.012430015, 0.02421512), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x3), NA)))
expect_output(print(x3)$show())
invisible(capture.output(expect_error(summary(x3), NA)))
expect_output(summary(x3)$show())
x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL)))
expect_equal(x3CodeBased$power, x3$power, tolerance = 1e-05)
expect_equal(x3CodeBased$futilityBounds, x3$futilityBounds, tolerance = 1e-05)
expect_equal(x3CodeBased$alphaSpent, x3$alphaSpent, tolerance = 1e-05)
expect_equal(x3CodeBased$betaSpent, x3$betaSpent, tolerance = 1e-05)
expect_equal(x3CodeBased$criticalValues, x3$criticalValues, tolerance = 1e-05)
expect_equal(x3CodeBased$stageLevels, x3$stageLevels, tolerance = 1e-05)
expect_type(names(x3), "character")
df <- as.data.frame(x3)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x3)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
y3 <- getDesignCharacteristics(x3)
expect_equal(y3$nFixed, 5.2590265, tolerance = 1e-07)
expect_equal(y3$shift, 5.5513711, tolerance = 1e-07)
expect_equal(y3$inflationFactor, 1.0555891, tolerance = 1e-07)
expect_equal(y3$information, c(1.6654113, 3.8859597, 5.5513711), tolerance = 1e-07)
expect_equal(y3$power, c(0.058336437, 0.39824601, 0.66), tolerance = 1e-07)
expect_equal(y3$rejectionProbabilities, c(0.058336437, 0.33990957, 0.26175399), tolerance = 1e-07)
expect_equal(y3$futilityProbabilities, c(0.0072155083, 0.10137519), tolerance = 1e-07)
expect_equal(y3$averageSampleNumber1, 0.86740735, tolerance = 1e-07)
expect_equal(y3$averageSampleNumber01, 0.87361708, tolerance = 1e-07)
expect_equal(y3$averageSampleNumber0, 0.75480974, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(y3), NA)))
expect_output(print(y3)$show())
invisible(capture.output(expect_error(summary(y3), NA)))
expect_output(summary(y3)$show())
y3CodeBased <- eval(parse(text = getObjectRCode(y3, stringWrapParagraphWidth = NULL)))
expect_equal(y3CodeBased$nFixed, y3$nFixed, tolerance = 1e-05)
expect_equal(y3CodeBased$shift, y3$shift, tolerance = 1e-05)
expect_equal(y3CodeBased$inflationFactor, y3$inflationFactor, tolerance = 1e-05)
expect_equal(y3CodeBased$information, y3$information, tolerance = 1e-05)
expect_equal(y3CodeBased$power, y3$power, tolerance = 1e-05)
expect_equal(y3CodeBased$rejectionProbabilities, y3$rejectionProbabilities, tolerance = 1e-05)
expect_equal(y3CodeBased$futilityProbabilities, y3$futilityProbabilities, tolerance = 1e-05)
expect_equal(y3CodeBased$averageSampleNumber1, y3$averageSampleNumber1, tolerance = 1e-05)
expect_equal(y3CodeBased$averageSampleNumber01, y3$averageSampleNumber01, tolerance = 1e-05)
expect_equal(y3CodeBased$averageSampleNumber0, y3$averageSampleNumber0, tolerance = 1e-05)
expect_type(names(y3), "character")
df <- as.data.frame(y3)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(y3)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignInverseNormal' with binding futility bounds", {
x4 <- getDesignInverseNormal(kMax = 4, alpha = 0.035, futilityBounds = rep(0.5244, 3),
bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4)
expect_equal(x4$alphaSpent, c(0.0099446089, 0.020756912, 0.029001537, 0.03499999), tolerance = 1e-07)
expect_equal(x4$criticalValues, c(2.3284312, 2.1725031, 2.0861776, 2.0270171), tolerance = 1e-07)
expect_equal(x4$stageLevels, c(0.0099446089, 0.014908866, 0.018481267, 0.021330332), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x4), NA)))
expect_output(print(x4)$show())
invisible(capture.output(expect_error(summary(x4), NA)))
expect_output(summary(x4)$show())
x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL)))
expect_equal(x4CodeBased$alphaSpent, x4$alphaSpent, tolerance = 1e-05)
expect_equal(x4CodeBased$criticalValues, x4$criticalValues, tolerance = 1e-05)
expect_equal(x4CodeBased$stageLevels, x4$stageLevels, tolerance = 1e-05)
expect_type(names(x4), "character")
df <- as.data.frame(x4)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x4)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignGroupSequential' with type of design = 'asUser'", {
.skipTestIfDisabled()
x5 <- getDesignGroupSequential(typeOfDesign = "asUser",
userAlphaSpending = c(0.01, 0.02, 0.03, 0.05))
expect_equal(x5$alphaSpent, c(0.01, 0.02, 0.03, 0.04999999), tolerance = 1e-07)
expect_equal(x5$criticalValues, c(2.3263479, 2.2192994, 2.1201347, 1.8189562), tolerance = 1e-07)
expect_equal(x5$stageLevels, c(0.01, 0.01323318, 0.016997342, 0.034459058), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x5), NA)))
expect_output(print(x5)$show())
invisible(capture.output(expect_error(summary(x5), NA)))
expect_output(summary(x5)$show())
x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL)))
expect_equal(x5CodeBased$alphaSpent, x5$alphaSpent, tolerance = 1e-05)
expect_equal(x5CodeBased$criticalValues, x5$criticalValues, tolerance = 1e-05)
expect_equal(x5CodeBased$stageLevels, x5$stageLevels, tolerance = 1e-05)
expect_type(names(x5), "character")
df <- as.data.frame(x5)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x5)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsUser'", {
x6 <- getDesignGroupSequential(kMax = 3, alpha = 0.03,
typeOfDesign = "asOF", typeBetaSpending = "bsUser",
bindingFutility = FALSE,
userBetaSpending = c(0.01, 0.05, 0.3))
expect_equal(x6$power, c(0.014685829, 0.33275272, 0.7), tolerance = 1e-07)
expect_equal(x6$futilityBounds, c(-0.92327973, 0.29975473), tolerance = 1e-07)
expect_equal(x6$alphaSpent, c(0.00017079385, 0.0078650906, 0.03), tolerance = 1e-07)
expect_equal(x6$betaSpent, c(0.01, 0.05, 0.3), tolerance = 1e-07)
expect_equal(x6$criticalValues, c(3.5815302, 2.417863, 1.9175839), tolerance = 1e-07)
expect_equal(x6$stageLevels, c(0.00017079385, 0.0078059773, 0.027581894), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x6), NA)))
expect_output(print(x6)$show())
invisible(capture.output(expect_error(summary(x6), NA)))
expect_output(summary(x6)$show())
x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL)))
expect_equal(x6CodeBased$power, x6$power, tolerance = 1e-05)
expect_equal(x6CodeBased$futilityBounds, x6$futilityBounds, tolerance = 1e-05)
expect_equal(x6CodeBased$alphaSpent, x6$alphaSpent, tolerance = 1e-05)
expect_equal(x6CodeBased$betaSpent, x6$betaSpent, tolerance = 1e-05)
expect_equal(x6CodeBased$criticalValues, x6$criticalValues, tolerance = 1e-05)
expect_equal(x6CodeBased$stageLevels, x6$stageLevels, tolerance = 1e-05)
expect_type(names(x6), "character")
df <- as.data.frame(x6)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x6)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsKD' and binding futility bounds", {
.skipTestIfDisabled()
x7 <- getDesignGroupSequential(kMax = 3, alpha = 0.03,
typeOfDesign = "asOF", typeBetaSpending = "bsKD",
informationRates = c(0.4, 0.75, 1),
gammaB = 2.5, bindingFutility = TRUE)
expect_equal(x7$power, c(0.068966747, 0.55923121, 0.8), tolerance = 1e-07)
expect_equal(x7$futilityBounds, c(-0.29391761, 1.0736333), tolerance = 1e-07)
expect_equal(x7$alphaSpent, c(0.00060088601, 0.012217314, 0.03), tolerance = 1e-07)
expect_equal(x7$betaSpent, c(0.020238577, 0.097427858, 0.2), tolerance = 1e-07)
expect_equal(x7$criticalValues, c(3.2384592, 2.2562378, 1.905812), tolerance = 1e-07)
expect_equal(x7$stageLevels, c(0.00060088601, 0.012027871, 0.0283373), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x7), NA)))
expect_output(print(x7)$show())
invisible(capture.output(expect_error(summary(x7), NA)))
expect_output(summary(x7)$show())
x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL)))
expect_equal(x7CodeBased$power, x7$power, tolerance = 1e-05)
expect_equal(x7CodeBased$futilityBounds, x7$futilityBounds, tolerance = 1e-05)
expect_equal(x7CodeBased$alphaSpent, x7$alphaSpent, tolerance = 1e-05)
expect_equal(x7CodeBased$betaSpent, x7$betaSpent, tolerance = 1e-05)
expect_equal(x7CodeBased$criticalValues, x7$criticalValues, tolerance = 1e-05)
expect_equal(x7CodeBased$stageLevels, x7$stageLevels, tolerance = 1e-05)
expect_type(names(x7), "character")
df <- as.data.frame(x7)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x7)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignGroupSequential' with binding futility bounds ", {
.skipTestIfDisabled()
x8 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3),
bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4)
expect_equal(x8$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.02499999), tolerance = 1e-07)
expect_equal(x8$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07)
expect_equal(x8$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x8), NA)))
expect_output(print(x8)$show())
invisible(capture.output(expect_error(summary(x8), NA)))
expect_output(summary(x8)$show())
x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL)))
expect_equal(x8CodeBased$alphaSpent, x8$alphaSpent, tolerance = 1e-05)
expect_equal(x8CodeBased$criticalValues, x8$criticalValues, tolerance = 1e-05)
expect_equal(x8CodeBased$stageLevels, x8$stageLevels, tolerance = 1e-05)
expect_type(names(x8), "character")
df <- as.data.frame(x8)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x8)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignGroupSequential' with Haybittle Peto boundaries ", {
.skipTestIfDisabled()
x9 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "HP")
expect_equal(x9$alphaSpent, c(0.001349898, 0.0024617416, 0.0033695882, 0.025), tolerance = 1e-07)
expect_equal(x9$criticalValues, c(3, 3, 3, 1.9827514), tolerance = 1e-07)
expect_equal(x9$stageLevels, c(0.001349898, 0.001349898, 0.001349898, 0.023697604), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x9), NA)))
expect_output(print(x9)$show())
invisible(capture.output(expect_error(summary(x9), NA)))
expect_output(summary(x9)$show())
x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL)))
expect_equal(x9CodeBased$alphaSpent, x9$alphaSpent, tolerance = 1e-05)
expect_equal(x9CodeBased$criticalValues, x9$criticalValues, tolerance = 1e-05)
expect_equal(x9CodeBased$stageLevels, x9$stageLevels, tolerance = 1e-05)
expect_type(names(x9), "character")
df <- as.data.frame(x9)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x9)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignGroupSequential' with Pampallona Tsiatis boundaries ", {
.skipTestIfDisabled()
x10 <- getDesignGroupSequential(kMax = 3, alpha = 0.035, beta = 0.1,
informationRates = c(0.3, 0.8, 1), typeOfDesign = "PT", sided = 1,
bindingFutility = TRUE, deltaPT1 = 0.2, deltaPT0 = 0.3)
expect_equal(x10$power, c(0.19834666, 0.83001122, 0.9), tolerance = 1e-07)
expect_equal(x10$futilityBounds, c(-0.042079551, 1.4407359), tolerance = 1e-07)
expect_equal(x10$alphaSpent, c(0.0038332428, 0.024917169, 0.035), tolerance = 1e-07)
expect_equal(x10$betaSpent, c(0.031375367, 0.080734149, 0.099999999), tolerance = 1e-07)
expect_equal(x10$criticalValues, c(2.6664156, 1.9867225, 1.8580792), tolerance = 1e-07)
expect_equal(x10$stageLevels, c(0.0038332428, 0.023476576, 0.031578886), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x10), NA)))
expect_output(print(x10)$show())
invisible(capture.output(expect_error(summary(x10), NA)))
expect_output(summary(x10)$show())
x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL)))
expect_equal(x10CodeBased$power, x10$power, tolerance = 1e-05)
expect_equal(x10CodeBased$futilityBounds, x10$futilityBounds, tolerance = 1e-05)
expect_equal(x10CodeBased$alphaSpent, x10$alphaSpent, tolerance = 1e-05)
expect_equal(x10CodeBased$betaSpent, x10$betaSpent, tolerance = 1e-05)
expect_equal(x10CodeBased$criticalValues, x10$criticalValues, tolerance = 1e-05)
expect_equal(x10CodeBased$stageLevels, x10$stageLevels, tolerance = 1e-05)
expect_type(names(x10), "character")
df <- as.data.frame(x10)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x10)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
x11 <- getDesignGroupSequential(kMax = 3, alpha = 0.035, beta = 0.05,
informationRates = c(0.3, 0.8, 1), typeOfDesign = "PT", sided = 2,
bindingFutility = TRUE, deltaPT1 = 0.2, deltaPT0 = 0.3)
expect_equal(x11$power, c(0.16615376, 0.88013007, 0.94999991), tolerance = 1e-07)
expect_equal(x11$futilityBounds, c(NA_real_, 1.671433), tolerance = 1e-07)
expect_equal(x11$alphaSpent, c(0.0019236202, 0.022017713, 0.035), tolerance = 1e-07)
expect_equal(x11$betaSpent, c(0, 0.035025978, 0.05), tolerance = 1e-07)
expect_equal(x11$criticalValues, c(3.1017782, 2.3111074, 2.1614596), tolerance = 1e-07)
expect_equal(x11$stageLevels, c(0.00096181012, 0.010413463, 0.015329928), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x11), NA)))
expect_output(print(x11)$show())
invisible(capture.output(expect_error(summary(x11), NA)))
expect_output(summary(x11)$show())
x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL)))
expect_equal(x11CodeBased$power, x11$power, tolerance = 1e-05)
expect_equal(x11CodeBased$futilityBounds, x11$futilityBounds, tolerance = 1e-05)
expect_equal(x11CodeBased$alphaSpent, x11$alphaSpent, tolerance = 1e-05)
expect_equal(x11CodeBased$betaSpent, x11$betaSpent, tolerance = 1e-05)
expect_equal(x11CodeBased$criticalValues, x11$criticalValues, tolerance = 1e-05)
expect_equal(x11CodeBased$stageLevels, x11$stageLevels, tolerance = 1e-05)
expect_type(names(x11), "character")
df <- as.data.frame(x11)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x11)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
x12 <- getDesignGroupSequential(kMax = 3, alpha = 0.035, beta = 0.05,
informationRates = c(0.3, 0.8, 1), typeOfDesign = "PT", sided = 2,
bindingFutility = FALSE, deltaPT1 = 0.2, deltaPT0 = 0.3)
expect_equal(x12$power, c(0.15712278, 0.87874666, 0.94999995), tolerance = 1e-07)
expect_equal(x12$futilityBounds, c(NA_real_, 1.7090472), tolerance = 1e-07)
expect_equal(x12$alphaSpent, c(0.0015647742, 0.019435851, 0.035), tolerance = 1e-07)
expect_equal(x12$betaSpent, c(0, 0.034947415, 0.05), tolerance = 1e-07)
expect_equal(x12$criticalValues, c(3.1623945, 2.356272, 2.2036998), tolerance = 1e-07)
expect_equal(x12$stageLevels, c(0.00078238708, 0.009229697, 0.013772733), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x12), NA)))
expect_output(print(x12)$show())
invisible(capture.output(expect_error(summary(x12), NA)))
expect_output(summary(x12)$show())
x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL)))
expect_equal(x12CodeBased$power, x12$power, tolerance = 1e-05)
expect_equal(x12CodeBased$futilityBounds, x12$futilityBounds, tolerance = 1e-05)
expect_equal(x12CodeBased$alphaSpent, x12$alphaSpent, tolerance = 1e-05)
expect_equal(x12CodeBased$betaSpent, x12$betaSpent, tolerance = 1e-05)
expect_equal(x12CodeBased$criticalValues, x12$criticalValues, tolerance = 1e-05)
expect_equal(x12CodeBased$stageLevels, x12$stageLevels, tolerance = 1e-05)
expect_type(names(x12), "character")
df <- as.data.frame(x12)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x12)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})
test_that("'getDesignInverseNormal': illegal arguments throw exceptions as expected", {
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4),
paste0("Conflicting arguments: length of 'userAlphaSpending' (5) ",
"must be equal to 'kMax' (4)"), fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)),
paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ",
"satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021"), fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = "asUser",
userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02),
paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ",
"satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02"), fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_),
"Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM,
optimizationCriterion = "x"),
"Illegal argument: optimization criterion must be one of the following: 'ASNH1', 'ASNIFH1', 'ASNsum'", fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_),
"Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_),
"Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER),
"Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = "x"),
"Illegal argument: type of beta spending must be one of the following: 'none', 'bsP', 'bsOF', 'bsKD', 'bsHSD', 'bsUser'", fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER),
"Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER,
userBetaSpending = c(0.1, 0.2)),
paste0("Conflicting arguments: length of 'userBetaSpending' (2) must ",
"be equal to length of 'informationRates' (3)"), fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER,
userBetaSpending = c(0.2, 0.1, 0.05)),
paste0("'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ",
"following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05"), fixed = TRUE)
expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER,
userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2),
paste0("'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ",
"following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.2"), fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = Inf),
paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ",
C_KMAX_UPPER_BOUND, "]"), fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -Inf),
paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ",
C_KMAX_UPPER_BOUND, "]"), fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 21), "Argument out of bounds: 'kMax' (21) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 22), "Argument out of bounds: 'kMax' (22) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 23), "Argument out of bounds: 'kMax' (23) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 24), "Argument out of bounds: 'kMax' (24) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 25), "Argument out of bounds: 'kMax' (25) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 26), "Argument out of bounds: 'kMax' (26) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 27), "Argument out of bounds: 'kMax' (27) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 28), "Argument out of bounds: 'kMax' (28) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 29), "Argument out of bounds: 'kMax' (29) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 30), "Argument out of bounds: 'kMax' (30) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (9) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE)
expect_warning(expect_error(getDesignInverseNormal(kMax = 11, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (11) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 12, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (12) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 13, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (13) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 14, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (14) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 15, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (15) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 16, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (16) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 17, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (17) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 18, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (18) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 19, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (19) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 20, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (20) - 1", fixed = TRUE))
expect_error(getDesignInverseNormal(futilityBounds = c(-7, 5)),
"Illegal argument: 'futilityBounds' (-7, 5) too extreme for this situation", fixed = TRUE)
expect_error(getDesignInverseNormal(futilityBounds = c(1, 7)),
"Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-Inf; 6]", fixed = TRUE)
})
test_that("'getDesignGroupSequential': illegal arguments throw exceptions as expected", {
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4),
paste0("Conflicting arguments: length of 'userAlphaSpending' (5) ",
"must be equal to 'kMax' (4)"), fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)),
paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ",
"satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021"), fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = "asUser",
userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02),
paste0("'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ",
"satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02"), fixed = TRUE)
expect_equal(getDesignGroupSequential(typeOfDesign = "asUser",
userAlphaSpending = c(0.01, 0.02, 0.023))$alpha, 0.023)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_),
"Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM,
optimizationCriterion = "x"),
"Illegal argument: optimization criterion must be one of the following: 'ASNH1', 'ASNIFH1', 'ASNsum'", fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_),
"Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_),
"Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER),
"Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = "x"),
paste0("Illegal argument: type of beta spending must be one of the following: ",
"'none', 'bsP', 'bsOF', 'bsKD', 'bsHSD', 'bsUser'"), fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER),
"Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER,
userBetaSpending = c(0.1, 0.2)),
paste0("Conflicting arguments: length of 'userBetaSpending' (2) must ",
"be equal to length of 'informationRates' (3)"), fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER,
userBetaSpending = c(0.2, 0.1, 0.05)),
paste0("'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ",
"following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05"), fixed = TRUE)
expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER,
userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER,
userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2),
paste0("'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ",
"following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.2"), fixed = TRUE)
expect_error(getDesignGroupSequential(kMax = Inf),
paste0("Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ",
C_KMAX_UPPER_BOUND, "]"), fixed = TRUE)
expect_error(getDesignGroupSequential(kMax = -Inf),
paste0("Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ",
C_KMAX_UPPER_BOUND, "]"), fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 21), "Argument out of bounds: 'kMax' (21) is out of bounds [1; 20]", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (9) - 1", fixed = TRUE)
expect_error(getDesignInverseNormal(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE)
expect_warning(expect_error(getDesignInverseNormal(kMax = 11, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (11) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 12, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (12) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 13, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (13) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 14, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (14) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 15, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (15) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 16, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (16) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 17, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (17) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 18, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (18) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 19, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (19) - 1", fixed = TRUE))
expect_warning(expect_error(getDesignInverseNormal(kMax = 20, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (20) - 1", fixed = TRUE))
expect_error(getDesignGroupSequential(futilityBounds = c(-7, 5)),
"Illegal argument: 'futilityBounds' (-7, 5) too extreme for this situation", fixed = TRUE)
expect_error(getDesignGroupSequential(futilityBounds = c(1, 7)),
"Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-Inf; 6]", fixed = TRUE)
}) |
"Cats" |
step.lmRob <- function(object, scope, scale, direction = c("both", "backward", "forward"),
trace = TRUE, keep = NULL, steps = 1000, fast = FALSE, ...)
{
if(missing(direction))
direction <- "backward"
else direction <- match.arg(direction)
if(direction != "backward")
stop("Presently step.lmRob only supports backward model selection.")
re.arrange <- function(keep)
{
namr <- names(k1 <- keep[[1]])
namc <- names(keep)
nc <- length(keep)
nr <- length(k1)
array(unlist(keep, recursive = FALSE), c(nr, nc), list(namr, namc))
}
make.step <- function(models, fit, scale, object)
{
change <- sapply(models, "[[", "change")
rdf <- sapply(models, "[[", "df.resid")
ddf <- c(NA, diff(rdf))
RFPE <- sapply(models, "[[", "RFPE")
heading <- c("Stepwise Model Path \nAnalysis of Deviance Table",
"\nInitial Model:", deparse(as.vector(formula(object))),
"\nFinal Model:", deparse(as.vector(formula(fit))),
"\n")
aod <- data.frame(Step = change, Df = ddf, "Resid. Df" = rdf,
RFPE = RFPE, check.names = FALSE)
attr(aod, "heading") <- heading
oldClass(aod) <- c("anova", "data.frame")
fit$anova <- aod
fit
}
backward <- direction == "both" || direction == "backward"
forward <- direction == "both" || direction == "forward"
if(missing(scope)) {
fdrop <- numeric(0)
fadd <- NULL
}
else {
if(is.list(scope)) {
fdrop <- if(!is.null(fdrop <- scope$lower)) attr(terms(
update.formula(object, fdrop)),
"factor") else numeric(0)
fadd <- if(!is.null(fadd <- scope$upper)) attr(terms(
update.formula(object, fadd)), "factor")
}
else {
fadd <- if(!is.null(fadd <- scope))
attr(terms(update.formula(object, scope)), "factor")
fdrop <- numeric(0)
}
}
if(is.null(fadd)) {
backward <- TRUE
forward <- FALSE
}
m <- model.frame(object)
obconts <- object$contrasts
objectcall <- object$call
robust.control <- object$robust.control
if(forward) {
add.rhs <- paste(dimnames(fadd)[[2]], collapse = "+")
add.rhs <- eval(parse(text = paste("~ . +", add.rhs)))
new.form <- update.formula(object, add.rhs, evaluate = FALSE)
fc <- objectcall
Terms <- terms(new.form)
fc$formula <- Terms
fobject <- list(call = fc)
oldClass(fobject) <- oldClass(object)
m <- model.frame(fobject)
x <- model.matrix(Terms, m, contrasts = obconts)
}
else {
Terms <- object$terms
x <- model.matrix(Terms, m, contrasts = obconts)
}
Asgn <- attr(x, "assign")
term.labels <- attr(Terms, "term.labels")
a <- attributes(m)
y <- model.extract(m, "response")
w <- model.extract(m, "weights")
if(is.null(w))
w <- rep(1, nrow(m))
models <- vector("list", steps)
if(!is.null(keep)) {
keep.list <- vector("list", steps)
nv <- 1
}
n <- length(object$fitted)
scale <- object$scale
fit <- object
bRFPE <- lmRob.RFPE(fit)
nm <- 1
Terms <- fit$terms
if(trace)
cat("Start: RFPE=", format(round(bRFPE, 4)), "\n",
deparse(as.vector(formula(fit))), "\n\n")
models[[nm]] <- list(df.resid = fit$df.resid, change = "", RFPE = bRFPE)
if(!is.null(keep))
keep.list[[nm]] <- keep(fit, bRFPE)
RFPE <- bRFPE + 1
while(bRFPE < RFPE & steps > 0) {
steps <- steps - 1
RFPE <- bRFPE
bfit <- fit
ffac <- attr(Terms, "factor")
scope <- factor.scope(ffac, list(add = fadd, drop = fdrop))
aod <- NULL
change <- NULL
if(backward && (ndrop <- length(scope$drop))) {
aod <- drop1.lmRob(fit, scope$drop, scale)
if(trace)
print(aod)
change <- rep("-", ndrop + 1)
}
if(forward && (nadd <- length(scope$add))) {
aodf <- add1.lmRob(fit, scope$add, scale, x = x)
if(trace)
print(aodf)
change <- c(change, rep("+", nadd + 1))
if(is.null(aod))
aod <- aodf
else {
ncaod <- dim(aod)[1]
aod[seq(ncaod + 1, ncaod + nadd + 1), ] <- aodf
}
}
if(is.null(aod))
break
o <- order(aod[, "RFPE"])[1]
if(o[1] == 1) break
change <- paste(change[o], dimnames(aod)[[1]][o])
Terms <- terms(update(formula(fit), eval(parse(text = paste("~ .", change)))))
attr(Terms, "formula") <- new.formula <- formula(Terms)
newfit <- lmRob(new.formula, data = m, control = robust.control)
bRFPE <- aod[, "RFPE"][o]
if(trace)
cat("\nStep: RFPE =", format(round(bRFPE, 4)), "\n",
deparse(as.vector(formula(Terms))), "\n\n")
if(bRFPE >= RFPE)
break
nm <- nm + 1
models[[nm]] <- list(df.resid = newfit$df.resid, change = change, RFPE = bRFPE)
fit <- c(newfit, list(formula = new.formula))
oc <- objectcall
oc$formula <- as.vector(fit$formula)
fit$call <- oc
oldClass(fit) <- oldClass(object)
if(!is.null(keep))
keep.list[[nm]] <- keep(fit, bRFPE)
}
if(!is.null(keep))
fit$keep <- re.arrange(keep.list[seq(nm)])
make.step(models = models[seq(nm)], fit, scale, object)
}
add1.lmRob <- function(u, v, w, x)
stop("add1.lmRob is not implemented") |
context("ml feature tokenizer")
skip_databricks_connect()
test_that("ft_tokenizer() param setting", {
test_requires_latest_spark()
sc <- testthat_spark_connection()
test_args <- list(
input_col = "foo",
output_col = "bar"
)
test_param_setting(sc, ft_tokenizer, test_args)
})
test_that("ft_tokenizer.tbl_spark() works as expected", {
sc <- testthat_spark_connection()
test_requires("janeaustenr")
austen <- austen_books()
austen_tbl <- testthat_tbl("austen")
spark_tokens <- austen_tbl %>%
na.omit() %>%
dplyr::filter(length(text) > 0) %>%
head(10) %>%
ft_tokenizer("text", "tokens") %>%
sdf_read_column("tokens") %>%
lapply(unlist)
r_tokens <- austen %>%
dplyr::filter(nzchar(text)) %>%
head(10) %>%
`$`("text") %>%
tolower() %>%
strsplit("\\s")
expect_identical(spark_tokens, r_tokens)
}) |
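# Chapter setup script: detaches previously attached packages, creates the
# 'tables' and 'figures' output directories, fixes the random seed and Sweave
# options, loads HSAUR3, and defines a helper ch() for chapter cross-references.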
rm(list = ls())
s <- search()[-1]
s <- s[-match(c("package:base", "package:stats", "package:graphics", "package:grDevices",
"package:utils", "package:datasets", "package:methods", "Autoloads"), s)]
if (length(s) > 0) sapply(s, detach, character.only = TRUE)
if (!file.exists("tables")) dir.create("tables")
if (!file.exists("figures")) dir.create("figures")
set.seed(290875)
options(prompt = "R> ", continue = "+ ",
width = 63,
show.signif.stars = FALSE,
SweaveHooks = list(leftpar = function()
par(mai = par("mai") * c(1, 1.05, 1, 1)),
bigleftpar = function()
par(mai = par("mai") * c(1, 1.7, 1, 1))))
HSAURpkg <- require("HSAUR3")
if (!HSAURpkg) stop("cannot load package ", sQuote("HSAUR3"))
rm(HSAURpkg)
a <- Sys.setlocale("LC_ALL", "C")
book <- TRUE
refs <- cbind(c("AItR", "DAGD", "SI", "CI", "ANOVA", "MLR", "GLM",
"DE", "RP", "GAM", "SA", "ALDI", "ALDII", "SIMC", "MA", "PCA",
"MDS", "CA"), 1:18)
ch <- function(x) {
ch <- refs[which(refs[,1] == x),]
if (book) {
return(paste("Chapter~\\\\ref{", ch[1], "}", sep = ""))
} else {
return(paste("Chapter~", ch[2], sep = ""))
}
}
if (file.exists("deparse.R"))
source("deparse.R")
setHook(packageEvent("lattice", "attach"), function(...) {
lattice.options(default.theme =
function()
standard.theme("pdf", color = FALSE))
})
book <- FALSE |
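# .VerifyTieMethod(): S4 input validator for a tie-breaking option. The ANY
# method rejects non-character input; the character method lower-cases the
# value, requires it to be either "random" or "first", and returns the
# normalized value.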
setGeneric(name = ".VerifyTieMethod",
def = function(tieMethod, ...) {
standardGeneric(".VerifyTieMethod")
})
setMethod(f = ".VerifyTieMethod",
signature = c(tieMethod = "ANY"),
definition = function(tieMethod, ...) {
stop("tieMethod must be one of {'random', 'first'}",
call. = FALSE)
})
setMethod(f = ".VerifyTieMethod",
signature = c(tieMethod = "character"),
definition = function(tieMethod, ...) {
tieMethod <- tolower(x = tieMethod)
if (!{tieMethod %in% c("random", "first")}) {
stop("tieMethod must be one of {'random', 'first'}",
call. = FALSE)
}
return( tieMethod )
}) |
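# etaisrec(): squared Euclidean distance from 'point' to the axis-aligned
# rectangle 'rec', whose bounds are stored as c(low1, high1, low2, high2, ...).
# Returns 0 when the point lies inside the rectangle.
# Illustrative example: the distance from (3, 0) to the unit square is 4:
#   etaisrec(c(3, 0), c(0, 1, 0, 1))  # (3 - 1)^2 = 4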
etaisrec<-function(point,rec)
{
d<-length(rec)/2
res<-0
for (i in 1:d){
if (point[i]>rec[2*i]) res<-res+(point[i]-rec[2*i])^2
else if (point[i]<rec[2*i-1]) res<-res+(point[i]-rec[2*i-1])^2
}
return(res)
} |
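# impute.ltrcrfsrc(): random-forest imputation of missing values. Without
# 'mf.q', a single call to generic.impute.ltrcrfsrc() imputes the data (using an
# unsupervised forest when no formula is given); with 'mf.q' supplied, a
# missForest-style loop repeatedly re-imputes groups of variables until the
# change in imputation error falls below 'eps' or 'max.iter' iterations are
# reached. Rows and columns that are entirely missing are dropped first.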
impute.ltrcrfsrc <- function(formula, data,
ntree = 500, nodesize = 1, nsplit = 10,
nimpute = 2, fast = FALSE, blocks,
mf.q, max.iter = 10, eps = 0.01,
ytry = NULL, always.use = NULL, verbose = TRUE,
...)
{
if (missing(data)) {
stop("data is missing")
}
which.na <- is.na(data)
if (!any(which.na) || all(which.na)) {
return(invisible(data))
}
rfnames <- c("mtry",
"splitrule",
"bootstrap",
"sampsize",
"samptype")
dots <- list(...)
dots <- dots[names(dots) %in% rfnames]
p <- ncol(data)
n <- nrow(data)
all.r.na <- rowSums(which.na) == p
all.c.na <- colSums(which.na) == n
data <- data[!all.r.na, !all.c.na, drop = FALSE]
which.na <- which.na[!all.r.na, !all.c.na, drop = FALSE]
if (!any(which.na)) {
return(data)
}
p <- ncol(data)
n <- nrow(data)
all.var.names <- colnames(data)
if (missing(mf.q)) {
mforest <- FALSE
}
else {
mforest <- TRUE
}
if (!missing(blocks)) {
blocks <- cv.folds(nrow(data), max(1, blocks))
}
else {
blocks <- list(1:nrow(data))
}
if (!mforest) {
if (missing(formula)) {
if (is.null(ytry)) {
ytry <- min(p - 1, max(25, ceiling(sqrt(p))))
}
dots$formula <- as.formula(paste("Unsupervised(", ytry, ") ~ ."))
dots$splitrule <- NULL
}
else {
dots$formula <- formula
}
nullBlocks <- lapply(blocks, function(blk) {
dta <- data[blk,, drop = FALSE]
retO <- tryCatch({do.call("generic.impute.ltrcrfsrc",
c(list(data = dta,
ntree = ntree,
nodesize = nodesize,
nsplit = nsplit,
nimpute = nimpute,
fast = fast), dots))}, error = function(e) {NULL})
if (!is.null(retO)) {
if (!is.null(retO$missing$row)) {
blk <- blk[-retO$missing$row]
}
if (!is.null(retO$missing$col)) {
ynames <- all.var.names[-retO$missing$col]
}
else {
ynames <- all.var.names
}
data[blk, ynames] <<- retO$data[, ynames, drop = FALSE]
}
NULL
})
rm(nullBlocks)
}
if (mforest) {
x.na <- lapply(1:p, function(k) {
if (sum(which.na[, k]) > 0) {
as.numeric(which(which.na[, k]))
}
else {
NULL
}
})
which.x.na <- which(sapply(x.na, length) > 0)
names(x.na) <- all.var.names <- colnames(data)
var.names <- all.var.names[which.x.na]
if (!is.null(always.use)) {
always.use <- is.element(all.var.names, always.use)
if (sum(always.use) > 0) {
always.use <- which(always.use)
}
}
p0 <- length(which.x.na)
if (mf.q == 0) {
stop("mf.q must be greater than zero")
}
if (mf.q >= 1) {
mf.q <- min(p0 - 1, mf.q) / p0
}
K <- max(1 / mf.q, 2)
dots.rough <- dots
dots.rough$mtry <- dots.rough$splitrule <- NULL
data <- do.call("generic.impute.ltrcrfsrc",
c(list(data = data,
ntree = 250,
nodesize = nodesize,
nsplit = nsplit,
nimpute = 3,
fast = fast), dots.rough))$data
diff.err <- Inf
check <- TRUE
nullWhile <- lapply(1:max.iter, function(m) {
if (!check) {
return(NULL)
}
if (verbose && max.iter > 1) {
cat("\t iteration", m, "\n")
}
data.old <- data
nullBlocks <- lapply(blocks, function(blk) {
var.grp <- cv.folds(p0, K)
nullObj <- lapply(var.grp, function(grp) {
ynames <- unique(c(var.names[grp], all.var.names[always.use]))
dots$formula <- as.formula(paste("Multivar(", paste(ynames, collapse = ","), paste(") ~ ."), sep = ""))
dta <- data[blk,, drop = FALSE]
dta[, ynames] <- lapply(ynames, function(nn) {
xk <- data[, nn]
xk[unlist(x.na[nn])] <- NA
xk[blk]
})
retO <- tryCatch({do.call("generic.impute.ltrcrfsrc",
c(list(data = dta,
ntree = ntree,
nodesize = nodesize,
nsplit = nsplit,
nimpute = 1,
fast = fast), dots))}, error = function(e) {NULL})
if (!is.null(retO)) {
if (!is.null(retO$missing$row)) {
blk <- blk[-retO$missing$row]
}
if (!is.null(retO$missing$col)) {
ynames <- ynames[-retO$missing$col]
}
data[blk, ynames] <<- retO$data[, ynames, drop = FALSE]
rm(dta)
}
NULL
})
NULL
})
diff.new.err <- mean(sapply(var.names, function(nn) {
xo <- data.old[unlist(x.na[nn]), nn]
xn <- data[unlist(x.na[nn]), nn]
if (!is.numeric(xo)) {
sum(xn != xo, na.rm = TRUE) / (.001 + length(xn))
}
else {
var.xo <- var(xo, na.rm = TRUE)
if (is.na(var.xo)) {
var.xo <- 0
}
sqrt(mean((xn - xo)^2, na.rm = TRUE) / (.001 + var.xo))
}
}), na.rm = TRUE)
if (verbose) {
err <- paste("err = " , format(diff.new.err, digits = 3), sep = "")
drp <- paste("drop = ", format(diff.err - diff.new.err, digits = 3), sep = "")
cat(" >> ", err, ", ", drp, "\n")
}
check <<- ((diff.err - diff.new.err) >= eps)
diff.err <<- diff.new.err
rm(data.old)
NULL
})
}
invisible(data)
} |
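# Vignette code (causaloptim): builds the two-instrument DAG, derives symbolic
# bounds on the causal effect of X on Y with optimize_effect_2(), and checks
# them against previously saved bounds by evaluating both bound functions at a
# randomly simulated, constraint-consistent distribution.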
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
)
library(causaloptim)
knitr::include_graphics("TwoIVs.png")
library(causaloptim)
library(igraph)
b <- graph_from_literal(Z1 -+ X, Z2 -+ X, Z2 -+ Z1, Ul -+ Z1, Ul -+ Z2, X -+ Y, Ur -+ X, Ur -+ Y)
V(b)$leftside <- c(1, 0, 1, 1, 0, 0)
V(b)$latent <- c(0, 0, 0, 1, 0, 1)
V(b)$nvals <- c(2, 2, 2, 2, 2, 2)
E(b)$rlconnect <- c(0, 0, 0, 0, 0, 0, 0, 0)
E(b)$edge.monotone <- c(0, 0, 0, 0, 0, 0, 0, 0)
obj <- analyze_graph(b, constraints = NULL, effectt = "p{Y(X = 1) = 1} - p{Y(X = 0) = 1}")
oldbnds <- readRDS("MIV-bounds-result.RData")
newbnds <- optimize_effect_2(obj)
eval_newbnds <- interpret_bounds(newbnds$bounds, obj$parameters)
eval_oldbnds <- interpret_bounds(oldbnds$bounds, obj$parameters)
sim.qs <- rbeta(length(obj$variables), .05, 1)
sim.qs <- sim.qs / sum(sim.qs)
names(sim.qs) <- obj$variables
inenv <- new.env()
for(j in 1:length(sim.qs)) {
assign(names(sim.qs)[j], sim.qs[j], inenv)
}
res <- lapply(as.list(obj$constraints[-1]), function(x){
x1 <- strsplit(x, " = ")[[1]]
x0 <- paste(x1[1], " = ", x1[2])
eval(parse(text = x0), envir = inenv)
})
params <- lapply(obj$parameters, function(x) get(x, envir = inenv))
names(params) <- obj$parameters
do.call(eval_newbnds, params)
do.call(eval_oldbnds, params)
knitr::include_graphics("User_Input_DAG.png")
knitr::include_graphics("Causal_Graph.png")
knitr::include_graphics("Canonical_Partitions.png") |
tVaRFigure <- function(...){
if (nargs() < 4) {
stop("Too few arguments")
}
if (nargs() > 5) {
stop("Too many arguments")
}
args <- list(...)
if (nargs() == 5) {
mu <- args$mu
cl <- args$cl
sigma <- args$sigma
hp <- args$hp
df <- args$df
}
if (nargs() == 4) {
mu <- mean(args$returns)
cl <- args$cl
sigma <- sd(args$returns)
hp <- args$hp
df <- args$df
}
mu <- as.matrix(mu)
mu.row <- dim(mu)[1]
mu.col <- dim(mu)[2]
if (max(mu.row, mu.col) > 1) {
stop("Mean must be a scalar")
}
sigma <- as.matrix(sigma)
sigma.row <- dim(sigma)[1]
sigma.col <- dim(sigma)[2]
if (max(sigma.row, sigma.col) > 1) {
stop("Standard deviation must be a scalar")
}
cl <- as.matrix(cl)
cl.row <- dim(cl)[1]
cl.col <- dim(cl)[2]
if (min(cl.row, cl.col) > 1) {
stop("Confidence level must be a scalar or a vector")
}
hp <- as.matrix(hp)
hp.row <- dim(hp)[1]
hp.col <- dim(hp)[2]
if (min(hp.row, hp.col) > 1) {
stop("Holding period must be a scalar or a vector")
}
df <- as.matrix(df)
df.row <- dim(df)[1]
df.col <- dim(df)[2]
if (max(df.row, df.col) > 1) {
stop("Number of degrees of freedom must be a scalar")
}
if (cl.row > cl.col) {
cl <- t(cl)
}
if (hp.row > hp.col) {
hp <- t(hp)
}
if (sigma < 0) {
stop("Standard deviation must be non-negative")
}
if (df < 3) {
stop("Number of degrees of freedom must be at least 3 for first two moments
of distribution to be defined")
}
if (max(cl) >= 1){
stop("Confidence level(s) must be less than 1")
}
if (min(cl) <= 0){
stop("Confidence level(s) must be greater than 0")
}
if (min(hp) <= 0){
stop("Holding Period(s) must be greater than 0")
}
if (max(cl.row, cl.col) > 1 & max(hp.row, hp.col) > 1) {
    print('VaR results with confidence level varying across row and holding period down column')
}
cl.row <- dim(cl)[1]
cl.col <- dim(cl)[2]
VaR <- - sigma[1,1] * sqrt(hp) %*% qnorm(1 - cl, 0, 1) - mu[1,1] * hp %*% matrix(1,cl.row,cl.col)
x.min <- -mu - 5 * sigma
x.max <- -mu + 5 * sigma
delta <- (x.max-x.min) / 100
x <- seq(x.min, x.max, delta)
p <- dt((x-mu)/sigma, df)
plot(x, p, type = "l", xlim = c(x.min, x.max), ylim = c(0, max(p)*1.1),
xlab = "Loss (+) / Profit (-)", ylab = "Probability", main = "t- VaR")
u <- c(VaR, VaR)
v <- c(0, .6*max(p))
lines(0,0,2,.6,type="l")
lines(u, v, type = "l", col = "blue")
cl.for.label <- 100*cl
text(1,.95*max(p), pos = 1, 'Input parameters', cex=.75, font = 2)
text(1, .875*max(p),pos = 1, paste('Daily mean L/P = ', round(mu,2)), cex=.75)
text(1, .8*max(p),pos = 1, paste('St. dev. of daily L/P = ',round(sigma,2)), cex=.75)
text(1, .725*max(p),pos = 1, paste('Holding period = ', hp,' day(s)'), cex=.75)
text(VaR, .7*max(p),pos = 2, paste('VaR at ', cl.for.label,'% CL'), cex=.75)
text(VaR, .64 * max(p),pos = 2, paste('= ',VaR), cex=.75)
} |
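# eMixMargDen(): MCMC estimate of the marginal densities of a normal mixture.
# For each draw i it evaluates mixDen() on 'grid' using the drawn mixture
# probabilities (probdraw[i, ]) and component parameters (compdraw[[i]]), and
# returns the average over all draws.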
eMixMargDen=
function(grid,probdraw,compdraw)
{
den=matrix(0,nrow(grid),ncol(grid))
for(i in 1:length(compdraw)) den=den+mixDen(grid,probdraw[i,],compdraw[[i]])
return(den/length(compdraw))
} |
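# plotDIFLogistic(): ggplot2 item characteristic curves for an item analysed by
# logistic-regression DIF. Fitted curves for the reference and focal groups are
# drawn from the stored logistic coefficients; when draw.empirical = TRUE,
# empirical proportions of correct answers (point size = number of respondents
# at each matching-score value) are overlaid.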
plotDIFLogistic <- function(x, item = 1, item.name, group.names = c("Reference", "Focal"),
Data, group, match, draw.empirical = TRUE) {
res <- x
i <- ifelse(is.character(item) | is.factor(item),
(1:length(res$names))[res$names == item],
item
)
if (missing(item.name)) {
if (is.character(item) | is.factor(item)) {
item.name <- paste(item)
} else {
item.name <- paste("Item", item)
}
}
if (any(is.na(res$logitPar[i, ]))) {
stop("Selected item is an anchor item!",
call. = FALSE
)
}
coef <- res$logitPar[i, ]
if (missing(Data) & draw.empirical) {
stop("'Data' needs to be specified! ", .call = FALSE)
}
if (missing(group) & draw.empirical) {
stop("'group' needs to be specified! ", .call = FALSE)
}
if (missing(match)) {
match <- res$match
}
if (res$purification & res$DIFitems[1] != "No DIF item detected") {
ANCHOR <- c(1:nrow(res$logitPar))[-res$DIFitems]
} else {
ANCHOR <- c(1:nrow(res$logitPar))
}
if (match[1] == "score") {
xlab <- "Total score"
if (draw.empirical) {
MATCHCRIT <- rowSums(Data[, ANCHOR])
} else {
MATCHCRIT <- c(0, nrow(res$logitPar))
}
} else if (match[1] == "zscore") {
xlab <- "Standardized total score"
if (draw.empirical) {
MATCHCRIT <- scale(apply(as.data.frame(Data[, ANCHOR]), 1, sum))
} else {
MATCHCRIT <- c(0, nrow(res$logitPar))
}
} else if (length(match) != nrow(Data)) {
stop("'match' needs to be either 'score', 'zscore' or numeric vector of the same length as number of observations in 'Data'. ", .call = FALSE)
} else {
MATCHCRIT <- match
xlab <- "Observed score"
}
LR_plot <- function(x, group, b0, b1, b2, b3) {
return(1 / (1 + exp(-(b0 + b1 * x + b2 * group + b3 * x * group))))
}
if (draw.empirical) {
score_R <- MATCHCRIT[group == 0]
score_F <- MATCHCRIT[group == 1]
empirical_R <- data.frame(
score = as.numeric(levels(as.factor(score_R))),
probability = tapply(Data[group == 0, i], as.factor(score_R), mean)
)
empirical_F <- data.frame(
score = as.numeric(levels(as.factor(score_F))),
probability = tapply(Data[group == 1, i], as.factor(score_F), mean)
)
empirical <- data.frame(rbind(
cbind(empirical_R, Group = "gr1"),
cbind(empirical_F, Group = "gr2")
))
empirical$size <- c(table(score_R), table(score_F))
colnames(empirical) <- c("Score", "Probability", "Group", "Count")
}
max_score <- max(MATCHCRIT, na.rm = TRUE) + 0.1
min_score <- min(MATCHCRIT, na.rm = TRUE) - 0.1
col <- c("dodgerblue2", "goldenrod2")
alpha <- .5
shape <- 21
size <- .8
linetype <- c("solid", "dashed")
g <- ggplot() +
xlim(min_score, max_score) +
stat_function(aes(colour = "gr1", linetype = "gr1"),
fun = LR_plot,
args = list(
group = 0,
b0 = coef[1],
b1 = coef[2],
b2 = coef[3],
b3 = coef[4]
),
size = size, geom = "line"
) +
stat_function(aes(colour = "gr2", linetype = "gr2"),
fun = LR_plot,
args = list(
group = 1,
b0 = coef[1],
b1 = coef[2],
b2 = coef[3],
b3 = coef[4]
),
size = size, geom = "line"
) +
scale_colour_manual(
values = col,
breaks = c("gr1", "gr2"),
labels = group.names
) +
scale_linetype_manual(
values = linetype,
breaks = c("gr1", "gr2"),
labels = group.names
) +
guides(colour = guide_legend(title = "Group", order = 2)) +
guides(linetype = guide_legend(title = "Group", order = 2)) +
xlab(xlab) +
ylab("Probability of correct answer") +
scale_y_continuous(limits = c(0, 1)) +
theme_app() +
theme(
legend.box.just = "top",
legend.position = c(0.01, 0.98),
legend.justification = c(0, 1),
legend.key.width = unit(1, "cm"),
legend.box = "horizontal"
) +
ggtitle(item.name)
if (draw.empirical) {
g <- g +
geom_point(
data = empirical,
aes_string(x = "Score", y = "Probability", colour = "Group", fill = "Group", size = "Count"),
alpha = alpha, shape = shape
) +
guides(size = guide_legend(title = "Count", order = 1)) +
scale_fill_manual(
values = col,
breaks = c("gr1", "gr2"),
labels = group.names
) +
guides(fill = guide_legend(title = "Group", order = 2))
}
return(g)
} |
setMethod("summary", signature(object="kRp.corpus"), function(
object, missing=NA, ...
){
available_rdb <- nullToList(unlist(corpusMeta(object, "readability", fail=FALSE)[["index"]]), entry="index")
available_lex_div <- nullToList(unlist(corpusMeta(object, "lex_div", fail=FALSE)[["index"]]), entry="index")
summary.info <- meta(corpusTm(object))[, c("doc_id", names(corpusHierarchy(object))), drop=FALSE]
if(hasFeature(object, "stopwords")){
summary.info[["stopwords"]] <- corpusStopwords(object)[["sum"]]
} else {}
summary.rdb <- summary.lexdiv <- NULL
if(!is.null(available_rdb)){
if(length(available_rdb[["index"]]) > 0){
summary.rdb <- t(as.data.frame(sapply(names(corpusReadability(object)), function(thisText){
thisSummary <- summary(corpusReadability(object)[[thisText]], flat=TRUE)
return(fixMissingIndices(have=thisSummary, want=available_rdb[["index"]], missing=missing))
}, simplify=FALSE)))
summary.info <- cbind(
summary.info,
getRdbDesc(object),
summary.rdb
)
} else {}
} else {}
if(!is.null(available_lex_div)){
if(length(available_lex_div[["index"]]) > 0){
summary.lexdiv <- t(as.data.frame(sapply(names(corpusLexDiv(object)), function(thisText){
thisSummary <- summary(corpusLexDiv(object)[[thisText]], flat=TRUE)
return(fixMissingIndices(have=thisSummary, want=available_lex_div[["index"]], missing=missing))
}, simplify=FALSE)))
if("TTR" %in% colnames(summary.info) & "TTR" %in% colnames(summary.lexdiv)){
      summary.lexdiv <- summary.lexdiv[, !(colnames(summary.lexdiv) %in% "TTR")]
} else {}
summary.info <- cbind(
summary.info,
summary.lexdiv
)
} else {}
}
if(!is.null(summary.info)){
rownames(summary.info) <- as.character(summary.info[["doc_id"]])
} else {
summary.info <- data.frame()
}
corpusSummary(object) <- summary.info
return(object)
}
)
setGeneric("corpusSummary", function(obj) standardGeneric("corpusSummary"))
setMethod("corpusSummary",
signature=signature(obj="kRp.corpus"),
function (obj){
return(feature(obj, "summary"))
}
)
setGeneric("corpusSummary<-", function(obj, value) standardGeneric("corpusSummary<-"))
setMethod("corpusSummary<-",
signature=signature(obj="kRp.corpus"),
function (obj, value){
feature(obj, "summary") <- value
return(obj)
}
) |
library("LINselect")
set.seed(15)
ex <- simulData(p=25,n=25,r=0.5,rSN=10)
resVARselect <- VARselect(ex$Y,ex$X,exhaustive.dmax=4)
resVARselect$summary
resVARselect <-
VARselect(ex$Y,ex$X,normalize=FALSE,dmax=15,exhaustive.dmax=4,verbose=FALSE)
resVARselect$summary |
optim.relatedness <- function(obs, theta0 = 0, theta1 = 0.03, theta.tol = 10^(-7), theta.step = NULL,
max.bisect = 15, probs, var.list = NULL, init.alpha = 10^c(-4, -6, -8, -10), init.keep = FALSE,
objFunction = c("T2", "T1", "C3", "C2", "C1"), collapse = FALSE, trace = FALSE, solnp.ctrl = list(tol = 10^(-9),
rho = 10, delta = min(init.alpha) * 0.01, trace = FALSE)) {
if (!any(objFunction == c("T2", "T1", "C3", "C2", "C1")))
stop("Wrong objFunction is supplied. Use any of: \"T2\", \"T1\", \"C3\", \"C2\" or \"C1\")")
if (sum(init.alpha) > 1 | any(init.alpha < 0) | any(init.alpha > 1))
stop("init.alpha is a probability vector, i.e. 0<=alpha[i]<=1 and sum(alpha)<=1")
objFunction <- objFunction[1]
n <- (1 + sqrt(1 + 8 * sum(obs, na.rm = TRUE)))/2
if (is.matrix(obs))
obs <- t(obs)[up.tri(obs)]
if (collapse) {
warning("attempt to 'collapse' observations into mathcing alleles ignored. The options is not yet implemented.")
}
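    # Objective functions minimised over the mixture weights x: each builds expected counts
    # EE = sum_r x[r] * expected[[r]] and compares them with the observed counts --
    # C1: Pearson-type chi-square, C2: Euclidean distance, C3: scaled absolute deviation,
    # T1/T2: variance-weighted quadratic forms (T2 uses a generalised inverse of the covariance matrix)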
C1 <- function(x, expected, observed, variance = NULL) {
EE <- x[1] * expected[[1]]
for (r in 2:length(expected)) EE <- EE + x[r] * expected[[r]]
sum(((observed - EE)^2/EE), na.rm = TRUE)
}
C2 <- function(x, expected, observed, variance = NULL) {
EE <- x[1] * expected[[1]]
for (r in 2:length(expected)) EE <- EE + x[r] * expected[[r]]
sqrt(sum((observed - EE)^2, na.rm = TRUE))
}
C3 <- function(x, expected, observed, variance = NULL) {
EE <- x[1] * expected[[1]]
for (r in 2:length(expected)) EE <- EE + x[r] * expected[[r]]
sum(abs(observed - EE)/EE, na.rm = TRUE)
}
T1 <- function(x, expected, observed, variance = NULL) {
EE <- x[1] * expected[[1]]
for (r in 2:length(expected)) EE <- EE + x[r] * expected[[r]]
sum((EE - observed)^2/diag(variance))
}
T2 <- function(x, expected, observed, variance = NULL) {
EE <- x[1] * expected[[1]]
for (r in 2:length(expected)) EE <- EE + x[r] * expected[[r]]
gginv <- eigen(variance, T, F, T)
ivariance <- gginv$vec %*% diag(c(1/gginv$val[-length(gginv$val)], 0)) %*% t(gginv$vec)
as.numeric(t(EE - observed) %*% ivariance %*% (EE - observed))
}
objFun <- get(objFunction)
tgrid <- seq(from = theta0, to = theta1, len = 3)
val.df <- data.frame(theta = NA, value = NA)[0, ]
min.t <- rep(tgrid[2], 2)
if (!is.null(theta.step)) {
tgrid <- seq(from = theta0, to = theta1, by = theta.step)
grid.search <- TRUE
} else grid.search <- FALSE
bb <- 0
min.val <- Inf
min.id <- 1
if (sum(init.alpha) != 1)
init.alpha <- c(1 - sum(init.alpha), init.alpha)
min.res <- init.alpha
if (trace) {
if (grid.search)
cat("Grid search... ") else cat("Bisectional search...\n")
}
while (bb < max.bisect) {
bb <- bb + 1
if (trace & !grid.search)
cat(paste("Iteration: ", format(bb, width = 2), "\n", sep = ""))
if (diff(range(tgrid)) < theta.tol & !grid.search) {
if (trace) {
if (solnp.ctrl$trace)
cat("\n")
cat("Interval converged\n")
}
break
}
if (length(min.t) > 10) {
if (abs(diff(min.t[length(min.t) - c(0, 5)])) < (theta.tol)^2 & !grid.search) {
if (trace)
cat("No change in theta for several iterations\n")
break
}
}
if (grepl("T", objFunction)) {
if (is.null(var.list)) {
if (trace)
cat("Variances are being computed... Please wait")
var.list <- dbVariance(probs = probs, theta = tgrid, n = 1)
names(var.list) <- paste(tgrid)
if (trace)
cat(" Done..!\n")
} else {
if (all(is.element(paste(tgrid), names(var.list))) & trace)
cat("All needed variances are provided in the input...\n") else {
if (trace)
cat(paste("Missing variances (", sum(!is.element(paste(tgrid), names(var.list))),
") are being computed... Please wait", sep = ""))
ttgrid <- tgrid[!is.element(paste(tgrid), names(var.list))]
vvar.list <- dbVariance(probs, theta = ttgrid, n = 1)
if (length(ttgrid) == 1)
vvar.list <- list(vvar.list)
names(vvar.list) <- ttgrid
var.list <- c(var.list, vvar.list)
var.list <- var.list[sort.list(names(var.list))]
if (trace)
cat(" Done..!\n")
}
}
variances <- lapply(var.list, function(x, n) choose(n, 2) * x$V1 + 6 * choose(n,
3) * x$V2 + 6 * choose(n, 4) * x$V3, n = n)
variances <- variances[paste(tgrid)]
} else {
var.list <- NULL
variances <- replicate(length(tgrid), NULL, simplify = FALSE)
}
expects <- lapply(tgrid, function(t, n) list(UN = dbExpect(probs = probs, theta = t,
n = n, vector = TRUE, k = c(0, 0, 1)), FC = dbExpect(probs = probs, theta = t, n = n,
vector = TRUE, k = c(0, 1, 3)/4), AV = dbExpect(probs = probs, theta = t, n = n,
vector = TRUE, k = c(0, 1, 1)/2), PC = dbExpect(probs = probs, theta = t, n = n,
vector = TRUE, k = c(0, 1, 0)), FS = dbExpect(probs = probs, theta = t, n = n, vector = TRUE,
k = c(1, 2, 1)/4)), n = n)
for (i in 1:length(tgrid)) {
t <- tgrid[i]
solnpObjFun <- function(x) objFun(x, expected = expects[[i]], observed = obs, variance = variances[[i]])
alpha <- min.res
if (init.keep)
alpha <- init.alpha
est <- try(Rsolnp::solnp(pars = alpha, fun = solnpObjFun, eqfun = sum, eqB = 1, LB = rep(0,
5), UB = rep(1, 5), control = solnp.ctrl), silent = TRUE)
if (length(est) == 1) {
est <- list(pars = init.alpha, values = NA)
val.df <- rbind(val.df, data.frame(theta = t, value = NA))
warn.message <- paste("NAs were returned for theta =", format(t, digits = 5),
"by the solnp procedure. This could indicate numerical problems with the identified solution.",
sep = " ")
if (!grid.search)
                  warn.message <- paste(warn.message, "\n One way to work around the problem is to use a grid search instead, setting the step size via 'theta.step'.")
warning(warn.message)
} else {
step.val <- est$values[length(est$values)]
if (step.val <= min.val) {
min.val <- step.val
min.res <- est$pars
min.id <- i
min.t <- c(min.t, t)
}
val.df <- rbind(val.df, data.frame(theta = t, value = step.val))
}
}
if (grid.search) {
if (trace)
cat("\n")
break
}
if (min.id == 1)
tgrid <- seq(from = tgrid[1], to = tgrid[2], len = 3) else if (min.id == length(tgrid))
tgrid <- seq(from = tgrid[min.id - 1], to = tgrid[min.id], len = 3) else {
tgrid.length <- c(tgrid[min.id] - min(tgrid), max(tgrid) - tgrid[min.id])
tgrid <- sort(c(tgrid[1], tgrid[min.id], tgrid[length(tgrid)], tgrid[min.id] + 0.25^bb *
tgrid.length[1], tgrid[min.id] - 0.25^bb * tgrid.length[2]))
}
}
names(min.res) <- c("Unrelated", "First-Cousins", "Avuncular", "Parent-child", "Full-siblings")
val.df <- val.df[!duplicated(val.df$theta), ]
res <- list(value = val.df[order(val.df$theta), ], solution = c(theta = tgrid[min.id], min.res),
var.list = var.list)
attributes(res)$objFun <- objFunction
attributes(res)$class <- "dbOptim"
res
}
plot.dbOptim <- function(x, type = "l", ...) {
objFun <- attributes(x)$objFun
ylabel <- switch(objFun, C1 = expression(C[1](theta)), C2 = expression(C[2](theta)), C3 = expression(C[3](theta)),
T1 = expression(T[1](theta)), T2 = expression(T[2](theta)))
graphics::plot(value ~ theta, x$value, xlab = expression(theta), ylab = ylabel, type = type, ...)
}
points.dbOptim <- function(x, type = "p", ...) {
graphics::points(value ~ theta, x$value, type = type, ...)
}
lines.dbOptim <- function(x, type = "l", ...) {
graphics::points(value ~ theta, x$value, type = type, ...)
}
print.dbOptim <- function(x, var.list = FALSE, ...) {
if (var.list)
print(x[c("value", "solution", "var.list")]) else print(x[c("value", "solution")])
} |
test_that("broc", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_broc(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_broc(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_broc(), "ggproto")
expect_s3_class(scale_edge_colour_broc(), "ScaleContinuous")
expect_s3_class(scale_edge_color_broc(), "ggproto")
expect_s3_class(scale_edge_color_broc(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_broc(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_broc(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_broc(), "ggproto")
expect_s3_class(scale_edge_fill_broc(), "ScaleContinuous")
})
test_that("cork", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_cork(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_cork(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_cork(), "ggproto")
expect_s3_class(scale_edge_colour_cork(), "ScaleContinuous")
expect_s3_class(scale_edge_color_cork(), "ggproto")
expect_s3_class(scale_edge_color_cork(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_cork(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_cork(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_cork(), "ggproto")
expect_s3_class(scale_edge_fill_cork(), "ScaleContinuous")
})
test_that("vik", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_vik(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_vik(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_vik(), "ggproto")
expect_s3_class(scale_edge_colour_vik(), "ScaleContinuous")
expect_s3_class(scale_edge_color_vik(), "ggproto")
expect_s3_class(scale_edge_color_vik(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_vik(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_vik(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_vik(), "ggproto")
expect_s3_class(scale_edge_fill_vik(), "ScaleContinuous")
})
test_that("lisbon", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_lisbon(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_lisbon(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_lisbon(), "ggproto")
expect_s3_class(scale_edge_colour_lisbon(), "ScaleContinuous")
expect_s3_class(scale_edge_color_lisbon(), "ggproto")
expect_s3_class(scale_edge_color_lisbon(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_lisbon(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_lisbon(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_lisbon(), "ggproto")
expect_s3_class(scale_edge_fill_lisbon(), "ScaleContinuous")
})
test_that("tofino", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_tofino(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_tofino(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_tofino(), "ggproto")
expect_s3_class(scale_edge_colour_tofino(), "ScaleContinuous")
expect_s3_class(scale_edge_color_tofino(), "ggproto")
expect_s3_class(scale_edge_color_tofino(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_tofino(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_tofino(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_tofino(), "ggproto")
expect_s3_class(scale_edge_fill_tofino(), "ScaleContinuous")
})
test_that("berlin", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_berlin(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_berlin(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_berlin(), "ggproto")
expect_s3_class(scale_edge_colour_berlin(), "ScaleContinuous")
expect_s3_class(scale_edge_color_berlin(), "ggproto")
expect_s3_class(scale_edge_color_berlin(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_berlin(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_berlin(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_berlin(), "ggproto")
expect_s3_class(scale_edge_fill_berlin(), "ScaleContinuous")
})
test_that("roma", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_roma(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_roma(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_roma(), "ggproto")
expect_s3_class(scale_edge_colour_roma(), "ScaleContinuous")
expect_s3_class(scale_edge_color_roma(), "ggproto")
expect_s3_class(scale_edge_color_roma(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_roma(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_roma(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_roma(), "ggproto")
expect_s3_class(scale_edge_fill_roma(), "ScaleContinuous")
})
test_that("bam", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_bam(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_bam(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_bam(), "ggproto")
expect_s3_class(scale_edge_colour_bam(), "ScaleContinuous")
expect_s3_class(scale_edge_color_bam(), "ggproto")
expect_s3_class(scale_edge_color_bam(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_bam(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_bam(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_bam(), "ggproto")
expect_s3_class(scale_edge_fill_bam(), "ScaleContinuous")
})
test_that("vanimo", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_vanimo(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_colour_vanimo(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_colour_vanimo(), "ggproto")
expect_s3_class(scale_edge_colour_vanimo(), "ScaleContinuous")
expect_s3_class(scale_edge_color_vanimo(), "ggproto")
expect_s3_class(scale_edge_color_vanimo(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_vanimo(midpoint = 10), "ggproto")
expect_s3_class(scale_edge_fill_vanimo(midpoint = 10), "ScaleContinuous")
expect_s3_class(scale_edge_fill_vanimo(), "ggproto")
expect_s3_class(scale_edge_fill_vanimo(), "ScaleContinuous")
})
test_that("batlow", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_batlow(), "ggproto")
expect_s3_class(scale_edge_colour_batlow(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_batlow(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_batlow(), "ggproto")
expect_s3_class(scale_edge_color_batlow(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_batlow(), "ggproto")
expect_s3_class(scale_edge_fill_batlow(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_batlow(discrete = TRUE), "ScaleDiscrete")
})
test_that("batlowW", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_batlowW(), "ggproto")
expect_s3_class(scale_edge_colour_batlowW(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_batlowW(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_batlowW(), "ggproto")
expect_s3_class(scale_edge_color_batlowW(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_batlowW(), "ggproto")
expect_s3_class(scale_edge_fill_batlowW(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_batlowW(discrete = TRUE), "ScaleDiscrete")
})
test_that("batlowK", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_batlowK(), "ggproto")
expect_s3_class(scale_edge_colour_batlowK(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_batlowK(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_batlowK(), "ggproto")
expect_s3_class(scale_edge_color_batlowK(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_batlowK(), "ggproto")
expect_s3_class(scale_edge_fill_batlowK(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_batlowK(discrete = TRUE), "ScaleDiscrete")
})
test_that("devon", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_devon(), "ggproto")
expect_s3_class(scale_edge_colour_devon(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_devon(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_devon(), "ggproto")
expect_s3_class(scale_edge_color_devon(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_devon(), "ggproto")
expect_s3_class(scale_edge_fill_devon(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_devon(discrete = TRUE), "ScaleDiscrete")
})
test_that("lajolla", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_lajolla(), "ggproto")
expect_s3_class(scale_edge_colour_lajolla(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_lajolla(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_lajolla(), "ggproto")
expect_s3_class(scale_edge_color_lajolla(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_lajolla(), "ggproto")
expect_s3_class(scale_edge_fill_lajolla(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_lajolla(discrete = TRUE), "ScaleDiscrete")
})
test_that("bamako", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_bamako(), "ggproto")
expect_s3_class(scale_edge_colour_bamako(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_bamako(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_bamako(), "ggproto")
expect_s3_class(scale_edge_color_bamako(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_bamako(), "ggproto")
expect_s3_class(scale_edge_fill_bamako(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_bamako(discrete = TRUE), "ScaleDiscrete")
})
test_that("davos", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_davos(), "ggproto")
expect_s3_class(scale_edge_colour_davos(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_davos(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_davos(), "ggproto")
expect_s3_class(scale_edge_color_davos(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_davos(), "ggproto")
expect_s3_class(scale_edge_fill_davos(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_davos(discrete = TRUE), "ScaleDiscrete")
})
test_that("bilbao", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_bilbao(), "ggproto")
expect_s3_class(scale_edge_colour_bilbao(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_bilbao(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_bilbao(), "ggproto")
expect_s3_class(scale_edge_color_bilbao(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_bilbao(), "ggproto")
expect_s3_class(scale_edge_fill_bilbao(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_bilbao(discrete = TRUE), "ScaleDiscrete")
})
test_that("nuuk", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_nuuk(), "ggproto")
expect_s3_class(scale_edge_colour_nuuk(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_nuuk(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_nuuk(), "ggproto")
expect_s3_class(scale_edge_color_nuuk(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_nuuk(), "ggproto")
expect_s3_class(scale_edge_fill_nuuk(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_nuuk(discrete = TRUE), "ScaleDiscrete")
})
test_that("oslo", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_oslo(), "ggproto")
expect_s3_class(scale_edge_colour_oslo(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_oslo(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_oslo(), "ggproto")
expect_s3_class(scale_edge_color_oslo(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_oslo(), "ggproto")
expect_s3_class(scale_edge_fill_oslo(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_oslo(discrete = TRUE), "ScaleDiscrete")
})
test_that("grayC", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_grayC(), "ggproto")
expect_s3_class(scale_edge_colour_grayC(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_grayC(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_grayC(), "ggproto")
expect_s3_class(scale_edge_color_grayC(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_grayC(), "ggproto")
expect_s3_class(scale_edge_fill_grayC(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_grayC(discrete = TRUE), "ScaleDiscrete")
})
test_that("hawaii", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_hawaii(), "ggproto")
expect_s3_class(scale_edge_colour_hawaii(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_hawaii(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_hawaii(), "ggproto")
expect_s3_class(scale_edge_color_hawaii(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_hawaii(), "ggproto")
expect_s3_class(scale_edge_fill_hawaii(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_hawaii(discrete = TRUE), "ScaleDiscrete")
})
test_that("lapaz", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_lapaz(), "ggproto")
expect_s3_class(scale_edge_colour_lapaz(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_lapaz(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_lapaz(), "ggproto")
expect_s3_class(scale_edge_color_lapaz(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_lapaz(), "ggproto")
expect_s3_class(scale_edge_fill_lapaz(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_lapaz(discrete = TRUE), "ScaleDiscrete")
})
test_that("tokyo", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_tokyo(), "ggproto")
expect_s3_class(scale_edge_colour_tokyo(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_tokyo(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_tokyo(), "ggproto")
expect_s3_class(scale_edge_color_tokyo(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_tokyo(), "ggproto")
expect_s3_class(scale_edge_fill_tokyo(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_tokyo(discrete = TRUE), "ScaleDiscrete")
})
test_that("buda", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_buda(), "ggproto")
expect_s3_class(scale_edge_colour_buda(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_buda(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_buda(), "ggproto")
expect_s3_class(scale_edge_color_buda(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_buda(), "ggproto")
expect_s3_class(scale_edge_fill_buda(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_buda(discrete = TRUE), "ScaleDiscrete")
})
test_that("acton", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_acton(), "ggproto")
expect_s3_class(scale_edge_colour_acton(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_acton(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_acton(), "ggproto")
expect_s3_class(scale_edge_color_acton(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_acton(), "ggproto")
expect_s3_class(scale_edge_fill_acton(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_acton(discrete = TRUE), "ScaleDiscrete")
})
test_that("turku", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_turku(), "ggproto")
expect_s3_class(scale_edge_colour_turku(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_turku(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_turku(), "ggproto")
expect_s3_class(scale_edge_color_turku(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_turku(), "ggproto")
expect_s3_class(scale_edge_fill_turku(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_turku(discrete = TRUE), "ScaleDiscrete")
})
test_that("imola", {
skip_if_not_installed("ggplot2")
expect_s3_class(scale_edge_colour_imola(), "ggproto")
expect_s3_class(scale_edge_colour_imola(), "ScaleContinuous")
expect_s3_class(scale_edge_colour_imola(discrete = TRUE), "ScaleDiscrete")
expect_s3_class(scale_edge_color_imola(), "ggproto")
expect_s3_class(scale_edge_color_imola(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_imola(), "ggproto")
expect_s3_class(scale_edge_fill_imola(), "ScaleContinuous")
expect_s3_class(scale_edge_fill_imola(discrete = TRUE), "ScaleDiscrete")
}) |
CombineSplits <- function(cml.result, metric = "enhancement",
m = NA, thresh = 0.5) {
out <- Performance(cml.result, metric, m, thresh)
names(out)[4] <- "Model.Acc"
methods <- as.character(unique(out$Method))
descriptors <- as.character(unique(out$Descriptor))
num.enhs <- dim(out)[1]
out$Trmt <- vector(mode = "logical", length = num.enhs)
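    # encode each Descriptor/Method combination as one treatment code:
    # method index plus 100 * descriptor index, used as a factor in the split-wise ANOVA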
for (i in 1:num.enhs) {
out$Trmt[i] <- which(methods == out$Method[i])
}
for (i in 1:num.enhs) {
out$Trmt[i] <- out$Trmt[i] + 100 * (which(descriptors == out$Descriptor[i]))
}
out$Trmt <- factor(out$Trmt)
for (i in 1:nrow(out)) {
if (is.na(out$Model.Acc[i])) {
index <- out$Trmt == out$Trmt[i]
out$Model.Acc[i] <- mean(out$Model.Acc[index], na.rm = T)
}
if (is.na(out$Model.Acc[i])) {
out$Model.Acc[i] <- 0
}
}
SplitAnova(out, metric)
}
Performance <- function(cml.result, metrics = "enhancement",
m = NA, thresh = 0.5) {
y <- cml.result$responses
if (is.na(m) && "enhancement" %in% metrics) {
at <- min(300, ceiling(length(y)/4))
} else if (is.na(m)) {
at <- length(y)
} else if (m > length(y)) {
stop("'at' needs to be smaller than the number of responses")
} else {
at <- m
}
abbrev.names <- c()
num.desc <- length(cml.result$des.names)
des.names <- cml.result$des.names
if (grepl("Descriptor Set", des.names[1])) {
for (i in 1:num.desc) {
abbrev.names <- c(abbrev.names, paste0("Des", i))
}
} else {
for (i in 1:num.desc) {
abbrev.names <- c(abbrev.names, substr(des.names[i], 1, 4))
}
}
for (k in seq_along(metrics)) {
metric <- metrics[k]
if (!cml.result$classify) {
if (exists("sp", inherits = FALSE))
rm(sp)
if (exists("ds", inherits = FALSE))
rm(ds)
if (exists("me", inherits = FALSE))
rm(me)
if (exists("ma", inherits = FALSE))
rm(ma)
for (split in 1:length(cml.result$all.preds)) {
pred <- cml.result$all.preds[[split]]
for (i in 1:length(pred)) {
desc <- abbrev.names[i]
for (j in 2:ncol(pred[[i]])) {
if (metric == "enhancement") {
model.acc <- EnhancementCont(pred[[i]][, j], y, at)
} else if (metric == "R2") {
pred.order <- order(pred[[i]][, j], decreasing = TRUE)
model.acc <- (cor(y, pred[[i]][, j]))^2
} else if (metric == "RMSE") {
model.acc <- sqrt(mean((y - pred[[i]][, j])^2))
} else if (metric == "rho") {
model.acc <- cor(y, pred[[i]][, j], method = "spearman")
} else {
stop("y is continuous. 'metrics' should be model accuracy measures
implemented for continuous response in ChemModLab")
}
meth <- names(pred[[i]])[j]
if (!exists("sp", inherits = FALSE))
sp <- split
else sp <- c(sp, split)
if (!exists("ds", inherits = FALSE))
ds <- desc
else ds <- c(ds, desc)
if (!exists("me", inherits = FALSE))
me <- meth
else me <- c(me, meth)
if (!exists("ma", inherits = FALSE))
ma <- model.acc
else ma <- c(ma, model.acc)
}
}
}
} else {
if (exists("sp", inherits = FALSE))
rm(sp)
if (exists("ds", inherits = FALSE))
rm(ds)
if (exists("me", inherits = FALSE))
rm(me)
if (exists("ma", inherits = FALSE))
rm(ma)
for (split in 1:length(cml.result$all.preds)) {
prob <- cml.result$all.probs[[split]]
pred <- cml.result$all.preds[[split]]
for (i in 1:length(prob)) {
desc <- abbrev.names[i]
if (ncol(prob[[i]]) > 1) {
for (j in 2:ncol(prob[[i]])) {
yhat <- prob[[i]][, j] > thresh
if (metric == "enhancement") {
model.acc <- Enhancement(prob[[i]][, j], y, at)
} else if (metric == "auc") {
model.acc <- BackAUC(prob[[i]][, j], yhat, y, at)
} else if (metric == "error rate") {
model.acc <- BackErrorRate(prob[[i]][, j], yhat, y, at)
} else if (metric == "specificity") {
model.acc <- BackSpecificity(prob[[i]][, j], yhat, y, at)
} else if (metric == "sensitivity") {
model.acc <- BackSensitivity(prob[[i]][, j], yhat, y, at)
} else if (metric == "ppv") {
model.acc <- BackPPV(prob[[i]][, j], yhat, y, at)
} else if (metric == "fmeasure") {
model.acc <- BackFMeasure(prob[[i]][, j], yhat, y, at)
} else {
stop("y is binary. 'metrics' should be model accuracy measures
implemented for binary response in ChemModLab")
}
meth <- names(prob[[i]])[j]
if (!exists("sp", inherits = FALSE))
sp <- split
else sp <- c(sp, split)
if (!exists("ds", inherits = FALSE))
ds <- desc
else ds <- c(ds, desc)
if (!exists("me", inherits = FALSE))
me <- meth
else me <- c(me, meth)
if (!exists("ma", inherits = FALSE))
ma <- model.acc
else ma <- c(ma, model.acc)
}
}
}
for (i in 1:length(pred)) {
desc <- abbrev.names[i]
if (ncol(pred[[i]]) > 1) {
for (j in 2:ncol(pred[[i]])) {
yhat <- pred[[i]][, j] > thresh
if (metric == "enhancement") {
model.acc <- Enhancement(pred[[i]][, j], y, at)
} else if (metric == "auc") {
model.acc <- as.numeric(pROC::auc(y, pred[[i]][, j]))
} else if (metric == "error rate") {
model.acc <- BackErrorRate(pred[[i]][, j], yhat, y, at)
} else if (metric == "specificity") {
model.acc <- BackSpecificity(pred[[i]][, j], yhat, y, at)
} else if (metric == "sensitivity") {
model.acc <- BackSensitivity(pred[[i]][, j], yhat, y, at)
} else if (metric == "ppv") {
model.acc <- BackPPV(pred[[i]][, j], yhat, y, at)
} else if (metric == "fmeasure") {
model.acc <- BackFMeasure(pred[[i]][, j], yhat, y, at)
} else {
stop("y is binary. 'metric' should be a model accuracy measure
implemented for binary response in ChemModLab")
}
meth <- names(pred[[i]])[j]
if (!exists("sp", inherits = FALSE))
sp <- split
else sp <- c(sp, split)
if (!exists("ds", inherits = FALSE))
ds <- desc
else ds <- c(ds, desc)
if (!exists("me", inherits = FALSE))
me <- meth
else me <- c(me, meth)
if (!exists("ma", inherits = FALSE))
ma <- model.acc
else ma <- c(ma, model.acc)
}
}
}
}
}
if (k == 1) {
out <- data.frame(sp, ds, me, ma)
} else {
out <- data.frame(out, ma)
}
}
names(out) <- c("Split", "Descriptor", "Method", metrics)
out <- out[!duplicated(out[, 1:3]), ]
out$Split <- factor(out$Split)
out
}
SplitAnova <- function(splitdata, metric) {
single.desc <- (length(unique(splitdata$Descriptor)) == 1)
out <- glm(Model.Acc ~ Split + Trmt, data = splitdata)
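  # for a Gaussian glm the deviance decomposition reproduces the ANOVA sums of squares:
  # null deviance = total SS, residual deviance = error SS, difference = model SS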
mod.df <- out$df.null - out$df.residual
mod.dev <- out$null.deviance - out$deviance
mod.ms <- mod.dev/mod.df
err.df <- out$df.residual
err.dev <- out$deviance
err.ms <- err.dev/err.df
tot.df <- out$df.null
tot.dev <- out$null.deviance
f.stat <- mod.ms/err.ms
  p.val <- pf(f.stat, mod.df, err.df, lower.tail = FALSE)  # upper-tail F probability, not the F density
if (p.val < 1e-04)
p.val <- "<.0001"
form.df <- format(c("DF", mod.df, err.df, tot.df), width = 3, justify = "right")
form.dec.num <- format(c(mod.dev, err.dev, tot.dev, mod.ms, err.ms, f.stat),
digits = 4, nsmall = 3)
form.dec <- format(c("SS", "MS", "F", form.dec.num), digits = 4, nsmall = 3,
justify = "right")
form.p <- format(c("p-value",
if (is.numeric(p.val)) round(p.val, digits = 4) else p.val),
digits = 1, nsmall = 4, justify = "right")
cat(paste0(" Analysis of Variance on: '",metric,"'\n"))
if (single.desc)
cat(paste(" Using factors: Split and Method\n"))
else cat(paste(" Using factors: Split and Descriptor/Method combination\n"))
cat(paste("Source", form.df[1], form.dec[1], form.dec[2], form.dec[3], form.p[1],
"\n", sep = " "))
cat(paste("Model ", form.df[2], form.dec[4], form.dec[7], form.dec[9], form.p[2],
"\n", sep = " "))
cat(paste("Error ", form.df[3], form.dec[5], form.dec[8], "\n", sep = " "))
cat(paste("Total ", form.df[4], form.dec[6], "\n", sep = " "))
root.mse <- sqrt(err.ms)
all.mean <- mean(splitdata$Model.Acc)
coef.var <- root.mse/all.mean * 100
r.sq <- mod.dev/tot.dev
form.stat.num <- format(c(r.sq, coef.var, root.mse, all.mean), digits = 3, nsmall = 4)
form.stat <- format(c("R-Square", "Coef Var", "Root MSE", "Mean", form.stat.num),
digits = 3, nsmall = 4, justify = "right")
cat(paste(" ", form.stat[1], form.stat[2], form.stat[3], form.stat[4], "\n",
sep = " "))
cat(paste(" ", form.stat[5], form.stat[6], form.stat[7], form.stat[8], "\n",
sep = " "))
aout <- anova(out)
f.df <- aout$Df[2]
f.dev <- aout$Deviance[2]
f.ms <- f.dev/f.df
f.f.stat <- f.ms/err.ms
t.df <- aout$Df[3]
t.dev <- aout$Deviance[3]
t.ms <- t.dev/t.df
t.f.stat <- t.ms/err.ms
  f.p.val <- pf(f.f.stat, f.df, err.df, lower.tail = FALSE)  # F test of the Split factor against the error MS
if (f.p.val < 1e-04)
f.p.val <- "<.0001"
  t.p.val <- pf(t.f.stat, t.df, err.df, lower.tail = FALSE)
if (t.p.val < 1e-04)
t.p.val <- "<.0001"
form.df <- format(c("DF", f.df, t.df), width = 3, justify = "right")
form.dec.num <- format(c(f.dev, f.ms, f.f.stat, t.dev, t.ms, t.f.stat), digits = 3,
nsmall = 3)
form.dec <- format(c("SS", "MS", "F", form.dec.num), digits = 3, nsmall = 3,
justify = "right")
form.p <- format(c("p-value", f.p.val, t.p.val), digits = 1, nsmall = 4, justify = "right")
form.p <- format(c("p-value", if (is.numeric(f.p.val)) round(f.p.val, digits = 4) else f.p.val,
if (is.numeric(t.p.val)) round(t.p.val, digits = 4)
else t.p.val), digits = 1, nsmall = 4, justify = "right")
cat(paste("Source ", form.df[1], form.dec[1], form.dec[2], form.dec[3], form.p[1],
"\n", sep = " "))
cat(paste("Split ", form.df[2], form.dec[4], form.dec[5], form.dec[6], form.p[2],
"\n", sep = " "))
if (single.desc)
cat(paste("Method ", form.df[3], form.dec[7], form.dec[8], form.dec[9],
form.p[3], "\n", sep = " "))
else cat(paste("Desc/Meth", form.df[3], form.dec[7], form.dec[8], form.dec[9],
form.p[3], "\n", sep = " "))
lsmeans <- c()
fit <- fitted.values(out)
for (i in levels(splitdata$Trmt)) lsmeans <- c(lsmeans, mean(fit[splitdata$Trmt ==
i]))
aov.out <- aov(Model.Acc ~ Split + Trmt, data = splitdata)
tout <- TukeyHSD(aov.out, "Trmt")
n <- length(levels(splitdata$Trmt))
pval <- matrix(NA, nrow = n, ncol = n)
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
pval[i, j] <- tout$Trmt[n * (i - 1) - i * (i - 1)/2 + j - i, 4]
pval[j, i] <- pval[i, j]
}
}
McsPlot(lsmeans, pval, as.numeric(levels(splitdata$Trmt)),
as.character(unique(splitdata$Descriptor)),
as.character(unique(splitdata$Method)), single.desc, metric)
} |
findCodeType <- function(code, date = NULL, regex = TRUE, full = FALSE)
{
types <- getCodeTypeList(date)
code <- tolower(code)
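  # with regex = FALSE the search term is anchored ("^term$") so only exact matches are kept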
if (!regex) code <- paste0("^", code, "$")
select <- c(unlist(lapply(code, grep, tolower(types$LongDescription))),
unlist(lapply(code, grep, tolower(types$Description))),
unlist(lapply(code, grep, tolower(types$Key))))
select <- sort(unique(select))
types <- types[select,]
if (full) {
types
} else {
types$Key
}
} |
create_null_distribution<-function(sample_data, extreme, rand_matrix,
permutation_null_function,test_stat,
variable,iterations){
perm_samples<-permutation_null_function(rand_matrix,variable,iterations)
null_hist<-hist(perm_samples,breaks=100,col = "gold",
main=paste("Null Distribution"), xlab="Test Statistics")
abline(v=test_stat,col="black",lty=2, lwd=5)
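  # one-sided permutation p-value: extreme == 0 uses the lower tail, anything else the upper tail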
if (extreme==0){
pvalue<-sum(perm_samples<=(test_stat))/iterations
} else {
pvalue<-sum(perm_samples>=(test_stat))/iterations
}
print(test_stat)
print(pvalue)
return(invisible(perm_samples))
} |
simSP <- function(ref, qw, gtype_distMat = sim2dist(grainSimilarity_evaluate(triag = FALSE)),
verbose = FALSE, returnDF = FALSE) {
refNL <- nrow(ref$layers)
qwNL <- nrow(qw$layers)
id <- suppressWarnings(which(ref$layers$height == qw$layers$height))
n_id <- length(id)
if (n_id == 0) {
id <- suppressWarnings(which(unique(rev(ref$layers$height)) == unique(rev(qw$layers$height))))
cref <- length(which(ref$layers$height == ref$layers$height[refNL])) - 1
id <- rev(refNL + 1 - cref - id)
seq_qw <- suppressWarnings(which(unique(rev(qw$layers$height)) == unique(rev(ref$layers$height))))
cqw <- length(which(qw$layers$height == qw$layers$height[qwNL])) - 1
seq_qw <- rev(qwNL + 1 - cqw - seq_qw)
if (seq_qw[1] > 1) {
ref$layers[seq_qw, ] <- ref$layers[id, ]
ref$layers[seq(seq_qw[1] - 1), ] <- NA
ref$layers[seq(seq_qw[1] - 1), "height"] <- seq(from = 0, to = qw$layers$height[seq_qw[1]], length.out = seq_qw[1] + 1)[-c(1, seq_qw[1] + 1)]
ref$layers[seq(seq_qw[1] - 1), "hardness"] <- 0
id <- seq_qw
} else if (id[1] > 1) {
qw$layers[id, ] <- qw$layers[seq_qw, ]
qw$layers[seq(id[1] - 1), ] <- NA
qw$layers[seq(id[1] - 1), "height"] <- seq(from = 0, to = ref$layers$height[id[1]], length.out = id[1] + 1)[-c(1, id[1] + 1)]
qw$layers[seq(id[1] - 1), "hardness"] <- 0
}
n_id <- length(id)
}
if (n_id > 0) {
if (any(diff(id) == 0)) {
warning(paste0("Profiles don't seem to be on the same depth grid!
queryWarped: ", paste0(qw$layers$height, collapse = " "), "
ref: ", paste0(ref$layers$height, collapse = " ")))
verbose <- TRUE
}
} else {
warning(paste0("Profiles have no single layer interface at the same height!
queryWarped: ", paste0(qw$layers$height, collapse = " "), "
ref: ", paste0(ref$layers$height, collapse = " "), "
returning 'NA'.."))
return(NA)
}
RES <- resampleSPpairs(qw$layers[id,], ref$layers[id,], mergeBeforeResampling = TRUE, dims = c("gtype", "hardness"))
rl <- RES$ref
qwl <- RES$query
refGrains <- as.character(rl$gtype)
qwGrains <- as.character(qwl$gtype)
matchedGrid <- rep(rl$height, times = 2)
nGrains <- length(refGrains)
nonMatchedIn <- which(c(refNL, qwNL) > n_id)
nMI <- length(nonMatchedIn)
if (nMI == 1) {
suppressWarnings(missingLayers <- mergeIdentLayers(list(ref, qw)[[nonMatchedIn]]$layers[-(id), ]))
toDel <- which(missingLayers$height %in% rl$height)
if (length(toDel) > 0) missingLayers <- missingLayers[-(toDel), ]
if (nrow(missingLayers) == 0) missingLayers <- NA
} else if (nMI == 0) {
missingLayers <- NA
} else if (nMI == 2) {
if (all(ref$layers[-(id), 'height'] %in% rl$height)) nonMatchedIn <- nonMatchedIn[!nonMatchedIn == 1]
if (all(qw$layers[-(id), 'height'] %in% qwl$height)) nonMatchedIn <- nonMatchedIn[!nonMatchedIn == 2]
missingLayers <- data.frame()
for (nM in nonMatchedIn) {
suppressWarnings(missingLayers <- rbind(missingLayers,
mergeIdentLayers(list(ref, qw)[[nM]]$layers[-(id), ])))
}
if (length(nonMatchedIn) > 1) {
print("simSP: non-matched layers in both profiles:")
print(missingLayers)
}
}
dGT <- extractFromScoringMatrix(ScoringFrame = gtype_distMat,
grainType1 = qwGrains,
grainType2 = refGrains)
simGT <- sim2dist(dGT)
dHHI <- hardnessDistance(qwl$hardness, rl$hardness, normalize = TRUE, absDist = TRUE)
simHHI <- sim2dist(dHHI)
pen_offset <- 0
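  # per-layer similarity is the product of grain-type and hardness similarity (pen_offset is 0 here)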
sim <- (simGT * simHHI)[, 1] - pen_offset
if (is.data.frame(missingLayers)) {
missingDF <- data.frame(grains = as.factor(as.character(missingLayers[, "gtype"])),
sim = 0.5)
} else {
missingDF <- data.frame()
}
matchedDF <- data.frame(grains = as.factor(c(as.character(refGrains), as.character(qwGrains))),
sim = sim)
if (any(is.na(matchedDF$sim))) warning("simSP: NAs produced in similarity assessment of matched layers. Investigate why!")
cat_wls <- c("SH", "DH")
cat_cr <- c("MFcr", "IF")
cat_pps <- c("PP", "DF")
cat_special <- c(cat_wls, cat_cr, cat_pps)
iNoHHI <- which(matchedDF$grains %in% c(cat_wls, cat_cr))
iExc <- which(iNoHHI > nGrains)
iNoHHI_trans <- iNoHHI
iNoHHI_trans[iExc] <- iNoHHI[iExc] - nGrains
matchedDF[iNoHHI, "sim"] <- simGT[iNoHHI_trans, 1] - pen_offset
nWLs <- max(c(length(which(matchedDF$grains[1:nGrains] %in% cat_wls)),
length(which(matchedDF$grains[(nGrains+1):(2*nGrains)] %in% cat_wls))))
if (nWLs > 1) {
maxMatchedGrid <- max(matchedGrid)
gridBoundsStep <- maxMatchedGrid/(nWLs+1)
gridBounds <- seq(gridBoundsStep, maxMatchedGrid+1, length.out = nWLs)
gridBounds <- matrix(c(0, gridBounds[1:(nWLs-1)], gridBounds), ncol = 2)
iMatchedWLs <- c(which(matchedDF$grains %in% cat_wls))
matchedWLs <- matrix(c(matchedGrid[iMatchedWLs], matchedDF[iMatchedWLs, "sim"]), ncol = 2)
simWLs <- sapply(seq(nWLs), function(i) {
mean(matchedWLs[which(matchedWLs[, 1] >= gridBounds[i, 1] & matchedWLs[, 1] < gridBounds[i, 2]), 2], na.rm = T)
})
} else {
simWLs <- mean(matchedDF[matchedDF$grains %in% cat_wls, "sim"])
}
nCRs <- max(c(length(which(matchedDF$grains[1:nGrains] %in% cat_cr)),
length(which(matchedDF$grains[(nGrains+1):(2*nGrains)] %in% cat_cr))))
if (nCRs > 1) {
maxMatchedGrid <- max(matchedGrid)
gridBoundsStep <- maxMatchedGrid/(nCRs+1)
gridBounds <- seq(gridBoundsStep, maxMatchedGrid+1, length.out = nCRs)
gridBounds <- matrix(c(0, gridBounds[1:(nCRs-1)], gridBounds), ncol = 2)
iMatchedCRs <- c(which(matchedDF$grains %in% cat_cr))
matchedCRs <- matrix(c(matchedGrid[iMatchedCRs], matchedDF[iMatchedCRs, "sim"]), ncol = 2)
simCRs <- sapply(seq(nCRs), function(i) {
mean(matchedCRs[which(matchedCRs[, 1] >= gridBounds[i, 1] & matchedCRs[, 1] < gridBounds[i, 2]), 2], na.rm = T)
})
} else {
simCRs <- mean(matchedDF[matchedDF$grains %in% cat_cr, "sim"])
}
simDF <- data.frame(
wl = mean(c(simWLs, suppressWarnings(mean(missingDF[missingDF$grains %in% cat_wls, "sim"]))),
na.rm = TRUE),
cr = mean(c(simCRs, suppressWarnings(mean(missingDF[missingDF$grains %in% cat_cr, "sim"]))),
na.rm = TRUE),
pp = mean(c(matchedDF[matchedDF$grains %in% cat_pps, "sim"],
rep(missingDF[missingDF$grains %in% cat_pps, "sim"], times = 1))),
bulk = mean(c(matchedDF[!matchedDF$grains %in% cat_special, "sim"],
rep(missingDF[!missingDF$grains %in% cat_special, "sim"], times = 1)))
)
simDF <- simDF + pen_offset
rownames(simDF) <- "sim [0, 1]: "
simSP <- mean(as.double(simDF[1, ]), na.rm = TRUE)
if (length(simSP) == 0) {
simSP <- NA
warning("simSP: problem in calculating similarity score, returning NA")
}
simDF <- round((simDF)*100)/100
if (verbose) {
print(simDF)
cat(paste("simple similarity =", round(simSP*1000)/1000, "\n"))
}
ifelse(returnDF, return(list(sim = simSP, simDF = simDF)), return(simSP))
} |
knitr::opts_chunk$set(fig.width = 7, fig.height = 5)
library(MazamaSpatialUtils)
longitude <- c(-122.3, -73.5, 21.1, 2.5)
latitude <- c(47.5, 40.75, 52.1, 48.5)
getCountry(longitude, latitude)
getCountryCode(longitude, latitude)
getCountry(longitude, latitude, allData = TRUE)
getTimezone(longitude, latitude)
countryCodes <- getCountryCode(longitude, latitude)
getTimezone(longitude, latitude, countryCodes = countryCodes)
getTimezone(longitude, latitude, allData = TRUE, countryCodes = countryCodes)
library(sp)
colorIndices <- .bincode(SimpleTimezones@data$UTC_offset, breaks = seq(-12.5,12.5,1))
plot(SimpleTimezones, col = rainbow(25)[colorIndices])
title(line = 0, 'Timezone Offsets from UTC')
library(sp)
prod <- read.csv(url('http://mazamascience.com/OilExport/BP_2016_oil_production_bbl.csv'),
skip = 6, stringsAsFactors = FALSE, na.strings = 'na')
cons <- read.csv(url('http://mazamascience.com/OilExport/BP_2016_oil_consumption_bbl.csv'),
skip = 6, stringsAsFactors = FALSE, na.strings = 'na')
prodCountryCodes <- names(prod)[ stringr::str_length(names(prod)) == 2 ]
consCountryCodes <- names(cons)[ stringr::str_length(names(cons)) == 2 ]
lastRow <- nrow(prod)
year <- prod$YEAR[lastRow]
sharedCountryCodes <- intersect(prodCountryCodes,consCountryCodes)
net <- prod[lastRow, sharedCountryCodes] - cons[lastRow, sharedCountryCodes]
netExportCodes <- sharedCountryCodes[net > 0]
netImportCodes <- sharedCountryCodes[net <= 0]
exportOnlyCodes <- setdiff(prodCountryCodes,consCountryCodes)
importOnlyCodes <- setdiff(consCountryCodes,prodCountryCodes)
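# classify countries: net exporters/importers among countries reporting both production and
# consumption, plus countries that appear in only one of the two data sets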
netExportMask <- SimpleCountries@data$countryCode %in% netExportCodes
netImportMask <- SimpleCountries@data$countryCode %in% netImportCodes
onlyExportMask <- SimpleCountries@data$countryCode %in% exportOnlyCodes
onlyImportMask <- SimpleCountries@data$countryCode %in% importOnlyCodes
# NOTE: the original hex colour strings were lost in extraction (text after '#' was stripped);
# the colours below are stand-in values, not the vignette's original choices
color_export = 'dodgerblue3'
color_import = 'firebrick3'
color_missing = 'gray90'
notAQ <- SimpleCountries@data$countryCode != 'AQ'
plot(SimpleCountries[notAQ,], col = color_missing)
plot(SimpleCountries[netExportMask,], col = color_export, add = TRUE)
plot(SimpleCountries[onlyExportMask,], col = color_export, add = TRUE)
plot(SimpleCountries[netImportMask,], col = color_import, add = TRUE)
plot(SimpleCountries[onlyImportMask,], col = color_import, add = TRUE)
legend(
'bottomleft',
legend = c('Net Exporters','Net Importers'),
fill = c(color_export,color_import)
)
title(line = 0, paste('World Crude Oil in', year)) |
R.home <- function(component="home")
{
rh <- .Internal(R.home())
switch(component,
"home" = rh,
"bin" = if(.Platform$OS.type == "windows" &&
nzchar(p <- .Platform$r_arch)) file.path(rh, component, p)
else file.path(rh, component),
"share" = if(nzchar(p <- Sys.getenv("R_SHARE_DIR"))) p
else file.path(rh, component),
"doc" = if(nzchar(p <- Sys.getenv("R_DOC_DIR"))) p
else file.path(rh, component),
"include" = if(nzchar(p <- Sys.getenv("R_INCLUDE_DIR"))) p
else file.path(rh, component),
"modules" = if(nzchar(p <- .Platform$r_arch)) file.path(rh, component, p)
else file.path(rh, component),
file.path(rh, component))
}
file.show <-
function (..., header = rep("", nfiles), title = "R Information",
delete.file = FALSE, pager = getOption("pager"), encoding = "")
{
files <- path.expand(c(...))
nfiles <- length(files)
if(nfiles == 0L)
return(invisible(NULL))
if(l10n_info()[["UTF-8"]] && encoding == "UTF-8") encoding <- ""
if(l10n_info()[["Latin-1"]] && encoding == "latin1") encoding <- ""
if(!is.na(encoding) && nzchar(encoding)) {
for(i in seq_along(files)) {
f <- files[i]
tf <- tempfile()
tmp <- readLines(f, warn = FALSE)
tmp2 <- try(iconv(tmp, encoding, "", "byte"))
if(inherits(tmp2, "try-error")) file.copy(f, tf)
else writeLines(tmp2, tf)
files[i] <- tf
if(delete.file) unlink(f)
}
delete.file <- TRUE
}
if(is.function(pager))
pager(files, header = header, title = title, delete.file = delete.file)
else
.Internal(file.show(files, header, title, delete.file, pager))
}
file.append <- function(file1, file2)
.Internal(file.append(file1, file2))
file.remove <- function(...)
.Internal(file.remove(c(...)))
file.rename <- function(from, to)
.Internal(file.rename(from, to))
list.files <-
function(path = ".", pattern = NULL, all.files = FALSE,
full.names = FALSE, recursive = FALSE,
ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
.Internal(list.files(path, pattern, all.files, full.names,
recursive, ignore.case, include.dirs, no..))
dir <- list.files
list.dirs <- function(path = ".", full.names = TRUE, recursive = TRUE)
.Internal(list.dirs(path, full.names, recursive))
file.path <-
function(..., fsep=.Platform$file.sep)
.Internal(file.path(list(...), fsep))
file.exists <- function(...) .Internal(file.exists(c(...)))
file.create <- function(..., showWarnings = TRUE)
.Internal(file.create(c(...), showWarnings))
file.choose <- function(new=FALSE) .Internal(file.choose(new))
file.copy <- function(from, to,
overwrite = recursive, recursive = FALSE,
copy.mode = TRUE, copy.date = FALSE)
{
if (!(nf <- length(from))) return(logical())
if (!(nt <- length(to))) stop("no files to copy to")
if (nt == 1 && dir.exists(to)) {
if (recursive && to %in% from)
stop("attempt to copy a directory to itself")
if(.Platform$OS.type == "windows") {
from <- gsub("/", "\\", from, fixed = TRUE)
to <- gsub("/", "\\", to, fixed = TRUE)
}
return(.Internal(file.copy(from, to, overwrite, recursive,
copy.mode, copy.date)))
} else if (nf > nt) stop("more 'from' files than 'to' files")
else if (recursive)
warning("'recursive' will be ignored as 'to' is not a single existing directory")
if(nt > nf) from <- rep_len(from, length.out = nt)
okay <- file.exists(from)
if (!overwrite) okay[file.exists(to)] <- FALSE
if (any(from[okay] %in% to[okay]))
stop("file can not be copied both 'from' and 'to'")
if (any(okay)) {
okay[okay] <- file.create(to[okay])
if(any(okay)) {
okay[okay] <- file.append(to[okay], from[okay])
if(copy.mode || copy.date) {
fi <- file.info(from[okay], extra_cols = FALSE)
if(copy.mode) Sys.chmod(to[okay], fi$mode, TRUE)
if(copy.date) Sys.setFileTime(to[okay], fi$mtime)
}
}
}
okay
}
file.symlink <- function(from, to) {
if (!(length(from))) stop("no files to link from")
if (!(nt <- length(to))) stop("no files/directory to link to")
if (nt == 1 && file.exists(to) && file.info(to, extra_cols = FALSE)$isdir)
to <- file.path(to, basename(from))
.Internal(file.symlink(from, to))
}
file.link <- function(from, to) {
if (!(length(from))) stop("no files to link from")
if (!length(to)) stop("no files to link to")
.Internal(file.link(from, to))
}
file.info <- function(..., extra_cols = TRUE)
{
res <- .Internal(file.info(fn <- c(...), extra_cols))
res$mtime <- .POSIXct(res$mtime)
res$ctime <- .POSIXct(res$ctime)
res$atime <- .POSIXct(res$atime)
class(res) <- "data.frame"
attr(res, "row.names") <- fn
res
}
file.mode <- function(...) file.info(..., extra_cols = FALSE)$mode
file.mtime <- function(...) file.info(..., extra_cols = FALSE)$mtime
file.size <- function(...) file.info(..., extra_cols = FALSE)$size
file.access <- function(names, mode = 0)
{
res <- .Internal(file.access(names, mode))
names(res) <- names
res
}
dir.exists <- function(paths) .Internal(dir.exists(paths))
dir.create <- function(path, showWarnings = TRUE, recursive = FALSE,
mode = "0777")
.Internal(dir.create(path, showWarnings, recursive, as.octmode(mode)))
system.file <- function(..., package = "base", lib.loc = NULL, mustWork = FALSE)
{
if(nargs() == 0L)
return(file.path(.Library, "base"))
if(length(package) != 1L)
stop("'package' must be of length 1")
packagePath <- find.package(package, lib.loc, quiet = TRUE)
ans <- if(length(packagePath)) {
FILES <- file.path(packagePath, ...)
present <- file.exists(FILES)
if(any(present)) FILES[present] else ""
} else ""
if (mustWork && identical(ans, "")) stop("no file found")
ans
}
getwd <- function()
.Internal(getwd())
setwd <- function(dir)
.Internal(setwd(dir))
basename <- function(path)
.Internal(basename(path))
dirname <- function(path)
.Internal(dirname(path))
Sys.info <- function()
.Internal(Sys.info())
Sys.sleep <- function(time)
.Internal(Sys.sleep(time))
path.expand <- function(path)
.Internal(path.expand(path))
Sys.glob <- function(paths, dirmark = FALSE)
.Internal(Sys.glob(path.expand(paths), dirmark))
unlink <- function(x, recursive = FALSE, force = FALSE)
.Internal(unlink(as.character(x), recursive, force))
Sys.chmod <- function(paths, mode = "0777", use_umask = TRUE)
.Internal(Sys.chmod(paths, as.octmode(mode), use_umask))
Sys.umask <- function(mode = NA)
.Internal(Sys.umask(if(is.na(mode)) NA_integer_ else as.octmode(mode)))
Sys.readlink <- function(paths)
.Internal(Sys.readlink(paths))
readRenviron <- function(path)
.Internal(readRenviron(path))
normalizePath <- function(path, winslash = "\\", mustWork = NA)
.Internal(normalizePath(path.expand(path), winslash, mustWork))
Sys.setFileTime <- function(path, time)
{
if (!is.character(path) || length(path) != 1L)
stop("invalid 'path' argument")
time <- as.POSIXct(time)
if (is.na(time)) stop("invalid 'time' argument")
.Internal(setFileTime(path, time))
} |
calculSeSp <-
function (matSeSp, seuilSeSp){
if(length(colnames(matSeSp)[colnames(matSeSp)=="score"])==0){
matSeSp <- cbind(matSeSp, score=round(apply(matSeSp[,3:ncol(matSeSp)], MARGIN=1, FUN=sum), 7))
}
matSeSp <- cbind(matSeSp, risque=0)
idx <- which(matSeSp[, "score"] >= seuilSeSp)
matSeSp[, "risque"][idx] <- 1
vp <- length(which(matSeSp[,"ETAT"]==1 & matSeSp[,"risque"]==1))
fp <- length(which(matSeSp[,"ETAT"]==0 & matSeSp[,"risque"]==1))
vn <- length(which(matSeSp[,"ETAT"]==0 & matSeSp[,"risque"]==0))
fn <- length(which(matSeSp[,"ETAT"]==1 & matSeSp[,"risque"]==0))
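    # if a class is empty, force the corresponding false count to 1 so Se/Sp below are 0 rather than NaN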
if (vp == 0){fn <- 1}
if (vn == 0){fp <- 1}
Se <- vp / (vp + fn) * 100
Sp <- vn / (vn + fp) * 100
return(c(Sens=Se, Spec=Sp, VP=vp, FP=fp, VN=vn, FN=fn))
} |
IRT.factor.scores.xxirt <- function( object, type="EAP", ... )
{
if ( ! ( type %in% c("EAP") ) ){
stop("Requested type is not supported!\n")
}
if ( type=="EAP"){
ll <- object$EAP
}
attr(ll,"type") <- type
return(ll)
} |
issue_close = function(repo, number) {
arg_is_chr(repo)
res = purrr::map2(
repo, number,
function(repo, number) {
res = purrr::safely(github_api_issue_edit)(
repo, number, state = "closed"
)
num_text = paste0("
status_msg(
res,
"Closed issue {.val {num_text}} for repo {.val {repo}}.",
"Failed to close issue {.val {num_text}} for repo {.val {repo}}."
)
res
}
)
invisible(res)
} |
expected <- eval(parse(text="c(NaN, 9.51350769866873, 4.5908437119988, 2.99156898768759, 2.21815954375769, 1.77245385090552, 1.48919224881282, 1.29805533264756, 1.1642297137253, 1.06862870211932, 1)"));
test(id=0, code={
argv <- eval(parse(text="list(c(NaN, 9.51350769866873, 4.5908437119988, 2.99156898768759, 2.21815954375769, 1.77245385090552, 1.48919224881282, 1.29805533264756, 1.1642297137253, 1.06862870211932, 1))"));
do.call(`as.double`, argv);
}, o=expected); |
rowSplit <- function(x,f,tran=F)
{
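    # split the rows of x (or, when tran = TRUE, the columns) into a list of matrices, one per level of f;
    # e.g. rowSplit(matrix(1:6, 3), f = c('a', 'a', 'b')) gives a list with rows 1-2 and row 3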
if(tran) {
stopifnot(ncol(x)==length(f))
}
else{
stopifnot(nrow(x)==length(f))
}
if(tran) x <- t(x)
idx <- split(1:nrow(x),f)
if(tran){
r <- lapply(idx,function(elmt) t(x[elmt,,drop=FALSE]))
}
else{
r <- lapply(idx,function(elmt) x[elmt,,drop=FALSE])
}
r
} |
tCorpus$set('public', 'annotate_rsyntax', function(column, ..., block=NULL, fill=TRUE, overwrite=NA, block_fill=FALSE, verbose=FALSE) {
if (column %in% self$names && is.na(overwrite)) stop(sprintf('The specified column (%s) already exists. Set overwrite argument to TRUE to overwrite the column or FALSE to consider existing annotations as a chain.', column))
cnames = paste0(column, c('','_id','_fill'))
ti = rsyntax::annotate_tqueries(self$tokens, column = column, ..., block = block, fill = fill, overwrite = overwrite, block_fill = block_fill, copy=T, verbose=verbose)
ti = subset(ti, select = c('doc_id','token_id',cnames))
for (cn in cnames) if (cn %in% self$names) self$set(cn, NULL)
self$tokens = merge(self$tokens, ti, by=c('doc_id','token_id'))
self$validate_tokens()
invisible(self$tokens[])
})
tCorpus$set('public', 'fold_rsyntax', function(annotation, by_label, ..., txt=F, rm_by=T, copy=F) {
if (copy) {
selfcopy = self$copy()$fold_rsyntax(annotation=annotation, by_label=by_label, ..., txt=txt, rm_by=rm_by, copy=F)
return(selfcopy)
}
.annotation = annotation
.annotation_id = paste0(annotation, '_id')
if (!all(c(.annotation,.annotation_id) %in% self$names)) stop('annotation does not refer to a valid rsyntax annotation column (see annotate_rsyntax)')
.by_label = by_label
.is_label = self$tokens[list(.by_label),,on=.annotation, which=T]
agg_cols = self$tokens[.is_label, eval(substitute(list(...))), by = c('doc_id', 'sentence', .annotation_id)]
if (txt) {
txt_col = paste0(paste(.by_label, collapse='_'), '_txt')
if (nrow(agg_cols) == 0) {
      agg_cols = self$tokens[.is_label, list(.txt = paste(token, collapse=' ')), by = c('doc_id', 'sentence', .annotation_id)]
data.table::setnames(agg_cols, '.txt', txt_col)
}
    else agg_cols[[txt_col]] = self$tokens[.is_label, list(.txt = paste(token, collapse=' ')), by = c('doc_id', 'sentence', .annotation_id)]$.txt
}
if (rm_by) self$tokens = self$tokens[!1:nrow(self$tokens) %in% .is_label,]
self$tokens = merge(self$tokens, agg_cols, by=c('doc_id', 'sentence', .annotation_id), all=T, sort = F)
self$validate_tokens()
invisible(self)
})
fold_rsyntax <- function(tc, annotation, by_label, ..., txt=F, rm_by=T) {
tc$fold_rsyntax(annotation=annotation,by_label=by_label, ..., txt=txt, rm_by=rm_by, copy=T)
}
agg_label <- function(label, ...) {
list(label=label, agg_list = substitute(list(...)))
}
aggregate_rsyntax <- function(tc, annotation, ..., by_col=NULL, txt=F, labels=NULL, rm_na=T) {
token = NULL
tokens = if (inherits(tc, 'tCorpus')) tc$tokens else tc
.annotation = annotation
.annotation_id = paste0(.annotation, '_id')
if (!all(c(.annotation,.annotation_id) %in% colnames(tokens))) stop('annotation does not refer to a valid rsyntax annotation column (see annotate_rsyntax)')
l = list(...)
lname = sapply(l, function(x) x$label)
if (is.null(labels)) labels = unique(tokens[[.annotation]])
if (is.logical(txt)) {
txt = if (txt) labels else c()
}
out = unique(tokens[,c('doc_id','sentence', .annotation_id,by_col),with=F])
if (!is.null(by_col))
.drop = Matrix::rowSums(is.na(out[,c(.annotation_id,by_col),with=F])) == (length(by_col) + 1)
else
.drop = is.na(out[[.annotation_id]])
out = out[!.drop,]
for (.label in labels) {
if (is.na(.label)) next
.is_label = tokens[list(.label),,on=.annotation, which=T]
if (length(.is_label) == 0) next
if (.label %in% lname) {
.by = l[[which(lname == .label)]]
agg_cols = tokens[.is_label, eval(.by$agg_list), by = c('doc_id', 'sentence', .annotation_id, by_col)]
} else agg_cols = NULL
if (.label %in% txt) {
txt_col = paste0(paste(.label, collapse='_'), '_txt')
if (is.null(agg_cols)) {
agg_cols = tokens[.is_label, list(.txt = paste(token, collapse=' ')), by = c('doc_id', 'sentence', .annotation_id, by_col)]
data.table::setnames(agg_cols, '.txt', txt_col)
}
else agg_cols[[txt_col]] = tokens[.is_label, list(.txt = paste(token, collapse=' ')), by = c('doc_id', 'sentence', .annotation_id, by_col)]$.txt
}
if (!is.null(agg_cols))
out = merge(out, agg_cols, by=c('doc_id', 'sentence',.annotation_id, by_col), all=T, allow.cartesian = T, nomatch=0)
}
out
}
transform_rsyntax <- function(tc, f, ...) {
tokens = as_tokenindex(tc$tokens)
tokens = f(tokens, ...)
tokens_to_tcorpus(tokens, meta=tc$meta, model=paste(tc$model))
} |
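## Tests for stressed risk measures: ES_stressed() output is checked against an
## ES computed by hand from the scenario weights w,
##   ES_alpha = VaR_alpha + E[ w * (X - VaR_alpha)+ ] / (1 - alpha).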
context("VaR stressed")
library("SWIM")
set.seed(0)
x <- as.data.frame(cbind(
"normal" = rnorm(1000),
"gamma" = rgamma(1000, shape = 2)))
alpha <- c(0.8, 0.9)
q_ratio <- 1.05
s_ratio <- 1.1
k <- 1
res <- stress(type = "VaR ES", x = x, alpha = alpha, q_ratio = q_ratio,
s_ratio = s_ratio, k = k)
levels <- seq(0.1, 0.9, by = 0.1)
.ES.stressed1 <- ES_stressed(res, alpha = levels, xCol = k, wCol = 1, base = TRUE)
.ES.stressed2 <- ES_stressed(res, alpha = levels, xCol = k, wCol = 2, base = TRUE)
test_that("names", {
expect_equal(colnames(.ES.stressed1), colnames(.ES.stressed2))
expect_equal(colnames(.ES.stressed1), c(colnames(x)[k], paste("base", colnames(x)[k])))
expect_equal(rownames(.ES.stressed1), paste(100 * levels, "%", sep = ""))
})
x1 <- x[, 1]
.VaR.stressed1 <- VaR_stressed(res, alpha = levels, xCol = k, wCol = 1, base = TRUE)
.VaR.stressed2 <- VaR_stressed(res, alpha = levels, xCol = k, wCol = 2, base = TRUE)
w <- get_weights(res)
ES1 <- rep( 0, length(levels))
ES2 <- rep( 0, length(levels))
for(i in 1:length(levels)){
ES1[i] <- mean(w[, 1] * (x1 - .VaR.stressed1[i, 1]) * (x1 > .VaR.stressed1[i, 1])) /
(1 - levels[i]) + .VaR.stressed1[i, 1]
ES2[i] <- mean(w[, 2] * (x1 - .VaR.stressed2[i, 1]) * (x1 > .VaR.stressed2[i, 1])) /
(1 - levels[i]) + .VaR.stressed2[i, 1]
}
test_that("stressed", {
expect_equal(as.numeric(.ES.stressed1[, 1]), ES1)
expect_equal(as.numeric(.ES.stressed2[, 1]), ES2)
expect_true(all(as.numeric(.ES.stressed1) > as.numeric(.VaR.stressed1)))
expect_true(all(as.numeric(.ES.stressed2) > as.numeric(.VaR.stressed2)))
})
x2 <- x[, 2]
.VaR.stressed1 <- VaR_stressed(res, alpha = levels, xCol = 2, wCol = 1, base = FALSE)
.VaR.stressed2 <- VaR_stressed(res, alpha = levels, xCol = 2, wCol = 2, base = FALSE)
w <- get_weights(res)
ES1 <- rep( 0, length(levels))
ES2 <- rep( 0, length(levels))
for(i in 1:length(levels)){
ES1[i] <- mean(w[, 1] * (x2 - .VaR.stressed1[i]) * (x2 > .VaR.stressed1[i])) /
(1 - levels[i]) + .VaR.stressed1[i]
ES2[i] <- mean(w[, 2] * (x2 - .VaR.stressed2[i]) * (x2 > .VaR.stressed2[i])) /
(1 - levels[i]) + .VaR.stressed2[i]
}
.ES.stressed1 <- ES_stressed(res, alpha = levels, xCol = 2, wCol = 1, base = FALSE)
.ES.stressed2 <- ES_stressed(res, alpha = levels, xCol = 2, wCol = 2, base = TRUE)
test_that("stressed", {
expect_equal(as.numeric(.ES.stressed1[, 1]), ES1)
expect_equal(as.numeric(.ES.stressed2[, 1]), ES2)
expect_true(all(as.numeric(.ES.stressed1) > as.numeric(.VaR.stressed1)))
expect_true(all(as.numeric(.ES.stressed2) > as.numeric(.VaR.stressed2)))
})
x <- x
.VaR.base <- VaR_stressed(res, alpha = levels, xCol = "all", wCol = 1, base = TRUE)[, 3:4]
ES1 <- rep( 0, length(levels))
ES2 <- rep( 0, length(levels))
for(i in 1:length(levels)){
ES1[i] <- mean((x[, 1] - .VaR.base[i, 1]) * (x[, 1] > .VaR.base[i, 1])) /
(1 - levels[i]) + .VaR.base[i, 1]
ES2[i] <- mean((x[, 2] - .VaR.base[i, 2]) * (x[, 2] > .VaR.base[i, 2])) /
(1 - levels[i]) + .VaR.base[i, 2]
}
.ES.stressed1 <- ES_stressed(res, alpha = levels, xCol = "all", wCol = 1, base = TRUE)[, 3:4]
.ES.stressed2 <- ES_stressed(res, alpha = levels, xCol = "all", wCol = 2, base = TRUE)[, 3:4]
test_that("stressed", {
expect_equal(as.numeric(.ES.stressed1[, 1]), ES1)
expect_equal(as.numeric(.ES.stressed2[, 1]), ES1)
expect_equal(as.numeric(.ES.stressed1[, 2]), ES2)
expect_equal(as.numeric(.ES.stressed2[, 2]), ES2)
expect_true(all(as.numeric(.ES.stressed1) > as.numeric(.VaR.base)))
expect_true(all(as.numeric(.ES.stressed2) > as.numeric(.VaR.base)))
}) |
listFinCenter <-
function(pattern = ".*")
{
FinCenterList = c(
"Europe/Andorra",
"Asia/Dubai",
"Asia/Kabul",
"America/Antigua",
"America/Anguilla",
"Europe/Tirane",
"Asia/Yerevan",
"America/Curacao",
"Africa/Luanda",
"Antarctica/McMurdo",
"Antarctica/South_Pole",
"Antarctica/Rothera",
"Antarctica/Palmer",
"Antarctica/Mawson",
"Antarctica/Davis",
"Antarctica/Casey",
"Antarctica/Vostok",
"Antarctica/DumontDUrville",
"Antarctica/Syowa",
"America/Argentina/Buenos_Aires",
"America/Argentina/Cordoba",
"America/Argentina/Jujuy",
"America/Argentina/Tucuman",
"America/Argentina/Catamarca",
"America/Argentina/La_Rioja",
"America/Argentina/San_Juan",
"America/Argentina/Mendoza",
"America/Argentina/Rio_Gallegos",
"America/Argentina/Ushuaia",
"Pacific/Pago_Pago",
"Europe/Vienna",
"Australia/Lord_Howe",
"Australia/Hobart",
"Australia/Currie",
"Australia/Melbourne",
"Australia/Sydney",
"Australia/Broken_Hill",
"Australia/Brisbane",
"Australia/Lindeman",
"Australia/Adelaide",
"Australia/Darwin",
"Australia/Perth",
"Australia/Eucla",
"America/Aruba",
"Europe/Mariehamn",
"Asia/Baku",
"Europe/Sarajevo",
"America/Barbados",
"Asia/Dhaka",
"Europe/Brussels",
"Africa/Ouagadougou",
"Europe/Sofia",
"Asia/Bahrain",
"Africa/Bujumbura",
"Africa/Porto-Novo",
"America/St_Barthelemy",
"Atlantic/Bermuda",
"Asia/Brunei",
"America/La_Paz",
"America/Noronha",
"America/Belem",
"America/Fortaleza",
"America/Recife",
"America/Araguaina",
"America/Maceio",
"America/Bahia",
"America/Sao_Paulo",
"America/Campo_Grande",
"America/Cuiaba",
"America/Porto_Velho",
"America/Boa_Vista",
"America/Manaus",
"America/Eirunepe",
"America/Rio_Branco",
"America/Nassau",
"Asia/Thimphu",
"Africa/Gaborone",
"Europe/Minsk",
"America/Belize",
"America/St_Johns",
"America/Halifax",
"America/Glace_Bay",
"America/Moncton",
"America/Goose_Bay",
"America/Blanc-Sablon",
"America/Montreal",
"America/Toronto",
"America/Nipigon",
"America/Thunder_Bay",
"America/Iqaluit",
"America/Pangnirtung",
"America/Resolute",
"America/Atikokan",
"America/Rankin_Inlet",
"America/Winnipeg",
"America/Rainy_River",
"America/Regina",
"America/Swift_Current",
"America/Edmonton",
"America/Cambridge_Bay",
"America/Yellowknife",
"America/Inuvik",
"America/Dawson_Creek",
"America/Vancouver",
"America/Whitehorse",
"America/Dawson",
"Indian/Cocos",
"Africa/Kinshasa",
"Africa/Lubumbashi",
"Africa/Bangui",
"Africa/Brazzaville",
"Europe/Zurich",
"Africa/Abidjan",
"Pacific/Rarotonga",
"America/Santiago",
"Africa/Douala",
"Asia/Shanghai",
"Asia/Harbin",
"Asia/Chongqing",
"Asia/Urumqi",
"Asia/Kashgar",
"America/Bogota",
"America/Costa_Rica",
"America/Havana",
"Atlantic/Cape_Verde",
"Indian/Christmas",
"Asia/Nicosia",
"Europe/Prague",
"Europe/Berlin",
"Africa/Djibouti",
"Europe/Copenhagen",
"America/Dominica",
"America/Santo_Domingo",
"Africa/Algiers",
"America/Guayaquil",
"Pacific/Galapagos",
"Europe/Tallinn",
"Africa/Cairo",
"Africa/El_Aaiun",
"Africa/Asmara",
"Europe/Madrid",
"Africa/Ceuta",
"Atlantic/Canary",
"Africa/Addis_Ababa",
"Europe/Helsinki",
"Pacific/Fiji",
"Atlantic/Stanley",
"Pacific/Truk",
"Pacific/Ponape",
"Pacific/Kosrae",
"Atlantic/Faroe",
"Europe/Paris",
"Africa/Libreville",
"Europe/London",
"America/Grenada",
"Asia/Tbilisi",
"America/Cayenne",
"Europe/Guernsey",
"Africa/Accra",
"Europe/Gibraltar",
"America/Godthab",
"America/Danmarkshavn",
"America/Scoresbysund",
"America/Thule",
"Africa/Banjul",
"Africa/Conakry",
"America/Guadeloupe",
"Africa/Malabo",
"Europe/Athens",
"Atlantic/South_Georgia",
"America/Guatemala",
"Pacific/Guam",
"Africa/Bissau",
"America/Guyana",
"Asia/Hong_Kong",
"America/Tegucigalpa",
"Europe/Zagreb",
"America/Port-au-Prince",
"Europe/Budapest",
"Asia/Jakarta",
"Asia/Pontianak",
"Asia/Makassar",
"Asia/Jayapura",
"Europe/Dublin",
"Asia/Jerusalem",
"Europe/Isle_of_Man",
"Asia/Calcutta",
"Indian/Chagos",
"Asia/Baghdad",
"Asia/Tehran",
"Atlantic/Reykjavik",
"Europe/Rome",
"Europe/Jersey",
"America/Jamaica",
"Asia/Amman",
"Asia/Tokyo",
"Africa/Nairobi",
"Asia/Bishkek",
"Asia/Phnom_Penh",
"Pacific/Tarawa",
"Pacific/Enderbury",
"Pacific/Kiritimati",
"Indian/Comoro",
"America/St_Kitts",
"Asia/Pyongyang",
"Asia/Seoul",
"Asia/Kuwait",
"America/Cayman",
"Asia/Almaty",
"Asia/Qyzylorda",
"Asia/Aqtobe",
"Asia/Aqtau",
"Asia/Oral",
"Asia/Vientiane",
"Asia/Beirut",
"America/St_Lucia",
"Europe/Vaduz",
"Asia/Colombo",
"Africa/Monrovia",
"Africa/Maseru",
"Europe/Vilnius",
"Europe/Luxembourg",
"Europe/Riga",
"Africa/Tripoli",
"Africa/Casablanca",
"Europe/Monaco",
"Europe/Chisinau",
"Europe/Podgorica",
"America/Marigot",
"Indian/Antananarivo",
"Pacific/Majuro",
"Pacific/Kwajalein",
"Europe/Skopje",
"Africa/Bamako",
"Asia/Rangoon",
"Asia/Ulaanbaatar",
"Asia/Hovd",
"Asia/Choibalsan",
"Asia/Macau",
"Pacific/Saipan",
"America/Martinique",
"Africa/Nouakchott",
"America/Montserrat",
"Europe/Malta",
"Indian/Mauritius",
"Indian/Maldives",
"Africa/Blantyre",
"America/Mexico_City",
"America/Cancun",
"America/Merida",
"America/Monterrey",
"America/Mazatlan",
"America/Chihuahua",
"America/Hermosillo",
"America/Tijuana",
"Asia/Kuala_Lumpur",
"Asia/Kuching",
"Africa/Maputo",
"Africa/Windhoek",
"Pacific/Noumea",
"Africa/Niamey",
"Pacific/Norfolk",
"Africa/Lagos",
"America/Managua",
"Europe/Amsterdam",
"Europe/Oslo",
"Asia/Katmandu",
"Pacific/Nauru",
"Pacific/Niue",
"Pacific/Auckland",
"Pacific/Chatham",
"Asia/Muscat",
"America/Panama",
"America/Lima",
"Pacific/Tahiti",
"Pacific/Marquesas",
"Pacific/Gambier",
"Pacific/Port_Moresby",
"Asia/Manila",
"Asia/Karachi",
"Europe/Warsaw",
"America/Miquelon",
"Pacific/Pitcairn",
"America/Puerto_Rico",
"Asia/Gaza",
"Europe/Lisbon",
"Atlantic/Madeira",
"Atlantic/Azores",
"Pacific/Palau",
"America/Asuncion",
"Asia/Qatar",
"Indian/Reunion",
"Europe/Bucharest",
"Europe/Belgrade",
"Europe/Kaliningrad",
"Europe/Moscow",
"Europe/Volgograd",
"Europe/Samara",
"Asia/Yekaterinburg",
"Asia/Omsk",
"Asia/Novosibirsk",
"Asia/Krasnoyarsk",
"Asia/Irkutsk",
"Asia/Yakutsk",
"Asia/Vladivostok",
"Asia/Sakhalin",
"Asia/Magadan",
"Asia/Kamchatka",
"Asia/Anadyr",
"Africa/Kigali",
"Asia/Riyadh",
"Pacific/Guadalcanal",
"Indian/Mahe",
"Africa/Khartoum",
"Europe/Stockholm",
"Asia/Singapore",
"Atlantic/St_Helena",
"Europe/Ljubljana",
"Arctic/Longyearbyen",
"Europe/Bratislava",
"Africa/Freetown",
"Europe/San_Marino",
"Africa/Dakar",
"Africa/Mogadishu",
"America/Paramaribo",
"Africa/Sao_Tome",
"America/El_Salvador",
"Asia/Damascus",
"Africa/Mbabane",
"America/Grand_Turk",
"Africa/Ndjamena",
"Indian/Kerguelen",
"Africa/Lome",
"Asia/Bangkok",
"Asia/Dushanbe",
"Pacific/Fakaofo",
"Asia/Dili",
"Asia/Ashgabat",
"Africa/Tunis",
"Pacific/Tongatapu",
"Europe/Istanbul",
"America/Port_of_Spain",
"Pacific/Funafuti",
"Asia/Taipei",
"Africa/Dar_es_Salaam",
"Europe/Kiev",
"Europe/Uzhgorod",
"Europe/Zaporozhye",
"Europe/Simferopol",
"Africa/Kampala",
"Pacific/Johnston",
"Pacific/Midway",
"Pacific/Wake",
"America/New_York",
"America/Detroit",
"America/Kentucky/Louisville",
"America/Kentucky/Monticello",
"America/Indiana/Indianapolis",
"America/Indiana/Vincennes",
"America/Indiana/Knox",
"America/Indiana/Winamac",
"America/Indiana/Marengo",
"America/Indiana/Vevay",
"America/Chicago",
"America/Indiana/Tell_City",
"America/Indiana/Petersburg",
"America/Menominee",
"America/North_Dakota/Center",
"America/North_Dakota/New_Salem",
"America/Denver",
"America/Boise",
"America/Shiprock",
"America/Phoenix",
"America/Los_Angeles",
"America/Anchorage",
"America/Juneau",
"America/Yakutat",
"America/Nome",
"America/Adak",
"Pacific/Honolulu",
"America/Montevideo",
"Asia/Samarkand",
"Asia/Tashkent",
"Europe/Vatican",
"America/St_Vincent",
"America/Caracas",
"America/Tortola",
"America/St_Thomas",
"Asia/Saigon",
"Pacific/Efate",
"Pacific/Wallis",
"Pacific/Apia",
"Asia/Aden",
"Indian/Mayotte",
"Africa/Johannesburg",
"Africa/Lusaka",
"Africa/Harare")
if (pattern == "*") pattern = "\\\\*"
sort(as.character(FinCenterList[grep(pattern = pattern,
x = FinCenterList)]))
}
.FinCenterList <- listFinCenter() |
"print.varsum" <-
function(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), ...){
dim <- length(x$names)
text1 <- "\nVAR Estimation Results:\n"
cat(text1)
row <- paste(rep("=", nchar(text1)), collapse = "")
cat(row, "\n")
cat(paste("Endogenous variables:", paste(colnames(x$covres), collapse = ", "), "\n", collapse = " "))
cat(paste("Deterministic variables:", paste(x$type, collapse = ", "), "\n", collapse = " "))
cat(paste("Sample size:", x$obs, "\n"))
cat(paste("Log Likelihood:", round(x$logLik, 3), "\n"))
cat("Roots of the characteristic polynomial:\n")
cat(formatC(x$roots, digits = digits))
cat("\nCall:\n")
print(x$call)
cat("\n\n")
for (i in 1:dim) {
result <- x$varresult[[x$names[i]]]
text1 <- paste("Estimation results for equation ", x$names[i], ":", sep = "")
cat(text1, "\n")
row <- paste(rep("=", nchar(text1)), collapse = "")
cat(row, "\n")
text2 <- paste(x$names[i], " = ", paste(rownames(result$coef), collapse = " + "), sep = "")
cat(text2, "\n\n")
printCoefmat(result$coef, digits = digits, signif.stars = signif.stars, na.print = "NA", ...)
cat("\n")
cat("\nResidual standard error:", format(signif(result$sigma, digits)), "on", result$df[2L], "degrees of freedom\n")
if (!is.null(result$fstatistic)) {
cat("Multiple R-Squared:", formatC(result$r.squared, digits = digits))
cat(",\tAdjusted R-squared:", formatC(result$adj.r.squared, digits = digits), "\nF-statistic:", formatC(result$fstatistic[1], digits = digits), "on", result$fstatistic[2], "and", result$fstatistic[3], "DF, p-value:", format.pval(pf(result$fstatistic[1L], result$fstatistic[2L], result$fstatistic[3L], lower.tail = FALSE), digits = digits), "\n")
}
cat("\n\n")
}
cat("\nCovariance matrix of residuals:\n")
print(x$covres, digits = digits, ...)
cat("\nCorrelation matrix of residuals:\n")
print(x$corres, digits = digits, ...)
cat("\n\n")
invisible(x)
} |
summary.CV_Result <- function(object,...){
cat("\nContaining the cross validation result. \n");
if (!is.null(object$r_optimal))
cat(paste0("Selected r parameter is:",object$r_optimal, "\n"));
if (!is.null(object$s_optimal))
cat(paste0("Selected s parameter is:",object$s_optimal, "\n"));
} |
set.seed(23479)
knitr::opts_chunk$set(
collapse = TRUE,
  comment = "#>" # assumed standard knitr comment prefix; the original value was truncated here
)
library(santoku)
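# chop() cuts a numeric vector at the supplied breaks and returns a factor of
# intervals; the tab_*() variants tabulate the result directly.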
x <- runif(10, 0, 10)
(chopped <- chop(x, breaks = 0:10))
data.frame(x, chopped)
chopped <- chop(x, breaks = 3:7)
data.frame(x, chopped)
x_fives <- x
x_fives[1:5] <- 5
chopped <- chop(x_fives, c(2, 5, 5, 8))
data.frame(x_fives, chopped)
tab(1:10, c(2, 5, 8))
chopped <- chop_width(x, 2)
data.frame(x, chopped)
chopped <- chop_evenly(x, intervals = 3)
data.frame(x, chopped)
chopped <- chop_n(x, 4)
table(chopped)
chopped <- chop_equally(x, groups = 5)
table(chopped)
chopped <- chop_quantiles(x, c(0.25, 0.5, 0.75))
data.frame(x, chopped)
chopped <- chop_mean_sd(x)
data.frame(x, chopped)
tab_n(x, 4)
tab_width(x, 2)
tab_evenly(x, 5)
tab_mean_sd(x)
library(lubridate)
y2k <- as.Date("2000-01-01") + 0:365
months <- chop_width(y2k, months(1))
table(months)
chopped <- chop(x, c(2, 5, 8), labels = c("Lowest", "Low", "Higher", "Highest"))
data.frame(x, chopped)
chopped <- chop(x, c(2, 5, 8), lbl_dash())
data.frame(x, chopped)
chopped <- chop(x, c(2, 5, 8), lbl_seq())
data.frame(x, chopped)
chop(x, c(2, 5, 8), lbl_seq("(1)"))
chop(x, c(2, 5, 8), lbl_seq("i."))
chopped <- chop(x, c(2, 5, 8), lbl_format("%s to %s"))
data.frame(x, chopped)
library(scales)
r <- runif(10)
chopped <- chop(r, c(.3, .5, .7), lbl_intervals(fmt = label_percent(0.1)))
data.frame(r, chopped)
chopped <- chop(x, c(3, 5, 7), extend = FALSE)
data.frame(x, chopped)
y <- 1:5
data.frame(
y = y,
left_closed = chop(y, 1:5),
right_closed = chop(y, 1:5, left = FALSE)
)
data.frame(
y = y,
rightmost_open = chop(y, 1:5),
rightmost_closed = chop(y, 1:5, close_end = TRUE)
) |
match.data.frame = function(x1, x2, ...){
match(do.call("paste", c(x1, sep = "\r")),
do.call("paste", c(x2, sep = "\r")), ...)
}
ballgownrsem = function(dir="", samples, gtf, UCSC=TRUE, tfield='transcript_id',
attrsep='; ', bamout='transcript', pData=NULL, verbose=TRUE, meas='all',
zipped=FALSE){
bamout = match.arg(bamout, c('transcript', 'genome', 'none'))
meas = match.arg(meas, c('all', 'TPM', 'FPKM'))
if('all' %in% meas & length(meas) > 1){
stop(.makepretty('when meas is "all", all types of measurements are
included by default.'))
}
if(verbose){
message(date())
}
f = paste0(dir, '/', samples[1], '.isoforms.results')
if(zipped){
f = paste0(f, '.gz')
}
t2g = read.table(f, header=TRUE,
colClasses=c('character', 'character', rep("NULL", 6)))
names(t2g) = c('t_id', 'g_id')
if(verbose){
message(paste0(date(), ': reading annotation'))
}
tgtf = gffRead(gtf)
tgtf$t_name = getAttributeField(tgtf$attributes, tfield, attrsep)
if(UCSC){
tgtf$t_name = substr(tgtf$t_name, 2, nchar(tgtf$t_name)-1)
}
tgtf = tgtf[tgtf$feature == "exon",]
if(verbose){
message(paste0(date(), ': handling exons'))
}
estrand = as.character(tgtf$strand)
estrand[estrand=="."] = "*"
e_id = match.data.frame(tgtf[,c(1,4,5,7)], unique(tgtf[,c(1,4,5,7)]))
t_id = tgtf$t_name
e2t = data.frame(e_id, t_id)
tgtf$e_id = e_id
exons_unique = subset(tgtf, !duplicated(tgtf[,c(1,4,5,7)]))
exon = data.frame(e_id=exons_unique$e_id, chr=exons_unique$seqname,
strand=exons_unique$strand, start=exons_unique$start,
end=exons_unique$end)
exon = exon[order(exon$e_id),]
tnamesex = split(as.character(e2t$t_id), e2t$e_id)
tnamesex_ord = as.character(tnamesex)[match(exon$e_id, names(tnamesex))]
exongr = GRanges(seqnames=Rle(as.character(exon$chr)),
ranges=IRanges(start=exon$start, end=exon$end),
strand=Rle(exon$strand),
id=exon$e_id, transcripts = tnamesex_ord)
if(verbose){
message(paste0(date(), ': handling introns'))
}
mm = match(e2t$e_id, mcols(exongr)$id)
if(any(is.na(mm))){
warning(paste('the following exon(s) did not appear in the data:',
paste(e2t$e_id[which(is.na(mm))], collapse=", ")))
}
tgrl = split(exongr[mm[!is.na(mm)]], as.character(e2t$t_id)[!is.na(mm)])
unltrans = unlist(tgrl)
transcriptIDs = rep(names(tgrl), times=elementNROWS(tgrl))
notLast = rev(duplicated(rev(transcriptIDs)))
introngr = GRanges(seqnames=seqnames(unltrans)[notLast],
ranges=IRanges(start=end(unltrans)[notLast]+1,
end=start(unltrans)[which(notLast)+1]-1),
strand=strand(unltrans)[notLast])
idf = as.data.frame(introngr)
i_id = match.data.frame(idf, unique(idf))
mcols(introngr)$id = i_id
i2t = data.frame(i_id, t_id=transcriptIDs[notLast])
tnamesin = split(as.character(i2t$t_id), i2t$i_id)
mcols(introngr)$transcripts = as.character(tnamesin)[match(i_id,
names(tnamesin))]
introngr = unique(introngr)
dftmp = IRanges::as.data.frame(introngr)
stopifnot(all(names(dftmp) ==
c('seqnames', 'start', 'end', 'width', 'strand', 'id', 'transcripts')))
intron = data.frame(i_id=dftmp$id, chr=dftmp$seqnames, strand=dftmp$strand,
start=dftmp$start, end=dftmp$end)
intron = intron[order(intron$i_id),]
if(verbose){
message(paste0(date(), ': handling transcripts'))
}
isofiles = paste0(dir, '/', samples, '.isoforms.results')
if(zipped){
isofiles = paste0(isofiles, '.gz')
}
isodata = lapply(isofiles, function(x){
read.table(x, header=TRUE)
})
gene = isodata[[1]]$gene_id[match(names(tgrl), isodata[[1]]$transcript_id)]
trans = data.frame(t_id=names(tgrl),
chr=unlist(runValue(seqnames(tgrl))),
strand=unlist(runValue(strand(tgrl))),
start=sapply(start(tgrl), min), end=sapply(end(tgrl), max),
t_name=names(tgrl), num_exons=elementNROWS(tgrl),
length=sapply(width(tgrl), sum), gene_id=gene, gene_name=gene)
for(i in seq_along(isodata)){
data_order = match(names(tgrl), isodata[[i]]$transcript_id)
if(meas == 'all' | meas == 'TPM'){
trans[,paste('TPM', samples[i], sep='.')] =
isodata[[i]]$TPM[data_order]
}
if(meas == 'all' | meas == 'FPKM'){
trans[,paste('FPKM', samples[i], sep='.')] =
isodata[[i]]$FPKM[data_order]
}
}
stopifnot(is.null(pData) | class(pData) == 'data.frame')
if(verbose){
message(paste0(date(), ': handling pData'))
}
if(!is.null(pData)){
if(!all(pData[,1] == samples)){
msg = 'Rows of pData did not seem to be in the same order as the
columns of the expression data. Attempting to rearrange
pData...'
warning(.makepretty(msg))
tmp = try(pData <- pData[,match(samples, pData[,1])],
silent=TRUE)
if(class(tmp) == "try-error"){
msg = 'first column of pData does not match the names of the
folders containing the ballgown data.'
stop(.makepretty(msg))
}else{
message('successfully rearranged!')
}
}
}
if(bamout == 'none'){
bamfiles=NULL
}else{
bamfiles = paste0(dir, '/', samples, '.', bamout, '.bam')
}
genefiles = paste0(dir, '/', samples, '.genes.results')
if(zipped){
genefiles = paste0(genefiles, '.gz')
}
genedata = lapply(genefiles, function(x){
read.table(x, header=TRUE)
})
g = data.frame(gene_id=sort(genedata[[1]]$gene_id))
for(i in seq_along(genedata)){
data_order = match(g$gene_id, genedata[[i]]$gene_id)
if(meas == 'all' | meas == 'TPM'){
g[,paste('TPM', samples[i], sep='.')] =
genedata[[i]]$TPM[data_order]
}
if(meas == 'all' | meas == 'FPKM'){
g[,paste('FPKM', samples[i], sep='.')] =
genedata[[i]]$FPKM[data_order]
}
}
gm = as.matrix(g[,-1])
rownames(gm) = as.character(g$gene_id)
colnames(gm) = names(g)[-1]
rm(g)
result = new("ballgown",
expr=list(intron=intron, exon=exon, trans=trans, gm=gm),
indexes=list(e2t=e2t, i2t=i2t, t2g=t2g, bamfiles=bamfiles, pData=pData),
structure=list(intron=introngr, exon=exongr, trans=tgrl),
dirs=paste0(normalizePath(dir), '/', samples),
mergedDate=date(), meas=meas, RSEM=TRUE)
if(verbose) message(paste0(date(), ': done!'))
return(result)
} |
1 + 2
(1 + 2) / 3
7^2
1 - Inf
class(7^2)
log(1) > 0
log(1) >= 0
log(1) == 0
log(1) != 1
1 + log10(1e7) == 8 * exp(1)^0
class(1 > 1)
"Hello R World!"
paste("Hello", "R", "World", "!")
cat("Hello", "R", "World", "!\n")
class("some text")
1:3
sum(1:3)
prod(1:3)
sum(1:3) == prod(1:3)
# print(Hello World!)   # deliberate error: unquoted text is not valid R; strings must be quoted, as below
print("Hello World")
install.packages("ggplot2")
library(ggplot2)
library("ggplot2")
require(ggplot2)
require("ggplot2")
require('ggplot2')
paste0("Hello World! Today is ", date(), ".") |
NOT_CRAN <- identical(tolower(Sys.getenv("NOT_CRAN")),"true")
knitr::opts_chunk$set(purl = NOT_CRAN)
library(insee)
library(tidyverse)
embed_png <- function(path, dpi = NULL) {
meta <- attr(png::readPNG(path, native = TRUE, info = TRUE), "info")
if (!is.null(dpi)) meta$dpi <- rep(dpi, 2)
knitr::asis_output(paste0(
"<img src='", path, "'",
" width=", round(meta$dim[1] / (meta$dpi[1] / 96)),
" height=", round(meta$dim[2] / (meta$dpi[2] / 96)),
" />"
))}
embed_png("inflation.png")
library(kableExtra)
library(magrittr)
library(htmltools)
library(prettydoc) |
LoadVisJS <- function(){
if(!any(list.files() =='vizjs.js')){
try({
Hash<-downloader::sha_url('http://vizjs.org/viz.v1.1.0.min.js')
if(Hash == "295e915d475fdf1c0d7db13668b6b0b526a8e910"){
downloader::download('http://vizjs.org/viz.v1.1.0.min.js', 'vizjs.js' )
}else{
warning("Hash of http://vizjs.org/viz.v1.1.0.min.js doesn't match stored hash, so not downloading in case it has been modified.
Please save an up-to-date version of the viz.js library as vizjs.js library in the working directory" )
}
} )
}
} |
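# Small LP example: maximise x + y subject to
#   50x + 24y <= 2400, 30x + 33y <= 2100, x >= 45, y >= 5, x + y >= 50,
# solved first with lpSolve::lp() and then rebuilt step by step with lpSolveAPI.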
library(lpSolve)
f.obj <- c(1, 1)
f.con <- matrix (c(50, 24, 30, 33, 1, 0, 0, 1, 1, 1), ncol=2, byrow=TRUE)
f.con
f.dir <- c("<=", "<=", ">=", ">=", ">=")
f.rhs <- c(2400, 2100, 45, 5,50)
cbind(f.con, f.dir, f.rhs)
lp ("max", f.obj, f.con, f.dir, f.rhs)
lp ("max", f.obj, f.con, f.dir, f.rhs)$solution
library(lpSolveAPI)
lprec <- make.lp(0, 2)
lprec
set.objfn(lprec, c(1, 1))
lprec
lp.control(lprec, sense="max")
lprec
set.type(lprec, c(1,2), type = c("integer"))
lprec
add.constraint(lprec, c(50,24), "<=", 2400)
lprec
add.constraint(lprec, c(30,33), "<=", 2100)
lprec
add.constraint(lprec, c(1,1), ">=", 50)
lprec
set.bounds(lprec, lower = c(45, 5), columns = c(1, 2))
lprec
RowNames <- c("MachineA", "MachineB","TotalInitial")
ColNames <- c("ProductX", "ProductY")
dimnames(lprec) <- list(RowNames, ColNames)
lprec
solve(lprec)
get.objective(lprec)
get.variables(lprec)
get.constraints(lprec)
plot(lprec)
print(lprec) |
`covStruct.create` <-
function(covtype, d, known.covparam, var.names, coef.cov=NULL, coef.var=NULL, nugget=NULL, nugget.estim=FALSE, nugget.flag=FALSE, iso=FALSE, scaling=FALSE, knots=NULL, kernel=NULL) {
if (covtype=="matern5_2add0") {
weight <- coef.cov[(d+1):(2*d)]
covStruct <- new("covAdditive0",
d = as.integer(d),
name = "matern5_2add0",
var.names = as.character(var.names),
sd2 = as.numeric(sum(weight)),
known.covparam = as.character(known.covparam),
range.val = as.numeric(coef.cov[1:d]),
range.names = paste("range", var.names, sep="."),
weight = as.numeric(weight),
weight.names = paste("weight", var.names, sep="."),
nugget = as.numeric(nugget),
nugget.flag = TRUE,
nugget.estim = nugget.estim,
param.n = as.integer(2*d+1)
)
return(covStruct)
}
if( covtype=="covUser" ){
covStruct <- new("covUser", kernel=kernel, nugget.flag=length(nugget)>0, nugget=as.double(nugget))
return(covStruct)
}
if (scaling & iso) {
iso <- FALSE
warning("At this stage no isotropic version is available, regular scaling is applied.")
}
covsetI <- c("gauss", "exp", "matern3_2", "matern5_2")
covsetII <- c("powexp")
classType <- "covTensorProduct"
if (iso) classType <- "covIso"
if (scaling) classType <- "covScaling"
covStruct <- new(classType, d=as.integer(d), name=as.character(covtype),
sd2 = as.numeric(coef.var), var.names=as.character(var.names),
nugget = as.double(nugget), nugget.flag=nugget.flag, nugget.estim=nugget.estim, known.covparam=known.covparam)
if (!scaling) {
    ## slot names on the following lines were garbled in the source (replaced by an
    ## e-mail-like placeholder); they are reconstructed here from the standard
    ## DiceKriging covariance slots and should be read as a best guess
    covStruct@range.names = "theta"
    if (is.element(covtype, covsetI)) {
      covStruct@paramset.n <- as.integer(1)
      if (iso) {
        covStruct@param.n <- as.integer(1)
      } else {
        covStruct@param.n <- as.integer(d)
        covStruct@range.n <- as.integer(d)
      }
    } else {
      covStruct@paramset.n <- as.integer(2)
      covStruct@param.n <- as.integer(2*d)
      covStruct@range.n <- as.integer(d)
      covStruct@shape.n <- as.integer(d)
      covStruct@shape.names <- "p"
    }
}
if (length(coef.cov)>0) covStruct <- vect2covparam(covStruct, coef.cov)
} else {
eta.flag <- (length(coef.cov)>0)
if (eta.flag) eta <- coef.cov
for (i in 1:length(knots)) {
if (is.unsorted(knots[[i]])) {
ordKnots <- sort(knots[[i]], index.return = TRUE)
knots[[i]] <- ordKnots$x
if (eta.flag) {
if (length(eta[[i]]) != length(knots[[i]]))
stop("mismatch between number of knots and number of values at knots")
eta[[i]] <- eta[[i]][ordKnots$ix]
}
}
}
names(knots) <- var.names
covStruct@knots <- knots
    covStruct@param.n <- sum(sapply(knots, length))   # slot names reconstructed (garbled in source)
    covStruct@paramset.n <- as.integer(1)
if (eta.flag) covStruct@eta <- eta
}
validObject(covStruct)
return(covStruct)
} |
assessfit <- function(params, DEdata, fit=gamtable1(), simple=TRUE) {
if (length(params) != 2 | !is.numeric(params)) {
stop("params must be a numeric vector of length 2")
}
if (!is.data.frame(DEdata)) stop("DEdata must be a data frame.")
if (any(is.na(match(c("dose", "ntot", "pfx", "fxcateg"), names(DEdata))))) {
stop("DEdata must include at least four variables:",
"dose, ntot, pfx, fxcateg.")
}
if (length(simple) != 1 | !is.logical(simple)) {
stop("simple must be a logical scalar")
}
expected <- invprobit(params[1] + params[2]*log10(DEdata$dose))
sel <- (!is.na(expected) & expected >= 0.00005 & expected <= 0.99995) |
(!is.na(DEdata$fxcateg) & DEdata$fxcateg==50)
n <- sum(sel)
cor.obs <- rep(NA, length(sel))
cor.obs[sel & DEdata$fxcateg==0] <-
correctval(pmin(expected[sel & DEdata$fxcateg==0], 0.495), fit)
cor.obs[sel & DEdata$fxcateg==50] <- DEdata$pfx[sel & DEdata$fxcateg==50]
cor.obs[sel & DEdata$fxcateg==100] <-
correctval(pmax(expected[sel & DEdata$fxcateg==100], 0.505), fit)
if (n < 0.5) {
chilist <- list(chi=c(chistat=NA, df=NA, pval=NA), contrib=NA)
} else {
chilist <- LWchi2((cor.obs*DEdata$ntot)[sel], (expected*DEdata$ntot)[sel],
DEdata$ntot[sel])
}
stepB <- matrix(NA, nrow=length(expected), ncol=3,
dimnames=list(NULL, c("exp", "obscorr", "contrib")))
stepB[, "exp"] <- expected
stepB[, "obscorr"] <- cor.obs
stepB[sel, "contrib"] <- chilist$contrib
if (simple) {
y <- chilist$chi["chistat"]
} else {
y <- list(chi=chilist$chi, contrib=stepB)
}
y
} |
mixture.design <- function(nfactors=NULL, nlevels, randomize=TRUE, seed=NULL, replications=1,
repeat.only=FALSE, factor.names=if (!is.null(nfactors)) paste("X",1:nfactors,sep="") else NULL, ...){
cat("This does not work yet.\n")
} |
isbfReg <-
function(X,Y,epsilon=0.05,K=1,impmin=1/100,favgroups=0,centX=TRUE,centY=TRUE,s=NULL,v=NULL)
{
D = dim(X)
n = D[1]
p = D[2]
dimension = K*p - K*(K-1)/2
if (n!=length(Y))
{
print("ERROR: The dimension of X should be (n,p) and the dimension of Y (n,1)")
return(NULL)
}
if (is.null(v)) v = var(Y)/2
if (K>p)
{
print("Error: K>p")
return(NULL)
}
if (is.null(s)) s = -sqrt(v)*qnorm(epsilon/(2*dimension))
if (centX==TRUE) for (cptcent in 1:p) X[,cptcent] = X[,cptcent] - mean(X[,cptcent])
if (centY==TRUE) Y = Y-mean(Y)
beta = rep(0,p)
COV = rep(0,dimension)
RESULT=.C("isbfRegC",X=as.double(X),Y=as.double(Y),COV=as.double(COV),beta=as.double(beta),s=as.double(s),impmin=as.double(impmin),fgroups=as.double(favgroups),p=as.integer(p),n=as.integer(n),K=as.integer(K),PACKAGE="ISBF")
return(list(beta=RESULT$beta,s=RESULT$s,impmin=RESULT$impmin,K=RESULT$K))
} |
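# nlrq.control: iteration and step-size settings for the nonlinear quantile
# regression fitter below, which combines a Meketon-style interior point step
# (meketon) with a line search over the step length via optim().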
"nlrq.control" <- function (maxiter=100, k=2, InitialStepSize = 1,
big=1e+20, eps=1.0e-07, beta=0.97)
{
list(maxiter=maxiter, k=k, InitialStepSize = InitialStepSize,
big=big, eps=eps, beta=beta)
}
"nlrqModel" <- function (form, tau, data, start)
{
thisEnv <- environment()
env <- new.env(parent = environment(form))
for (i in names(data)) {
assign(i, data[[i]], envir = env)
}
ind <- as.list(start)
parLength <- 0
for (i in names(ind)) {
temp <- start[[i]]
storage.mode(temp) <- "double"
assign(i, temp, envir = env)
ind[[i]] <- parLength + seq(along = start[[i]])
parLength <- parLength + length(start[[i]])
}
useParams <- rep(TRUE, parLength)
lhs <- eval(form[[2]], envir = env)
rhs <- eval(form[[3]], envir = env)
resid <- lhs - rhs
tau <- tau
dev <- sum(tau * pmax(resid, 0) + (tau - 1) * pmin(resid, 0))
if (is.null(attr(rhs, "gradient"))) {
getRHS.noVarying <- function() numericDeriv(form[[3]],
names(ind), env)
getRHS <- getRHS.noVarying
rhs <- getRHS()
}
else {
getRHS.noVarying <- function() eval(form[[3]], envir = env)
getRHS <- getRHS.noVarying
}
dimGrad <- dim(attr(rhs, "gradient"))
marg <- length(dimGrad)
if (marg > 0) {
gradSetArgs <- vector("list", marg + 1)
for (i in 2:marg) gradSetArgs[[i]] <- rep(TRUE, dimGrad[i -
1])
useParams <- rep(TRUE, dimGrad[marg])
}
else {
gradSetArgs <- vector("list", 2)
useParams <- rep(TRUE, length(attr(rhs, "gradient")))
}
npar <- length(useParams)
gradSetArgs[[1]] <- (~attr(ans, "gradient"))[[2]]
gradCall <- switch(length(gradSetArgs) - 1, call("[", gradSetArgs[[1]],
gradSetArgs[[2]]), call("[", gradSetArgs[[1]], gradSetArgs[[2]],
gradSetArgs[[2]]), call("[", gradSetArgs[[1]], gradSetArgs[[2]],
gradSetArgs[[2]], gradSetArgs[[3]]), call("[", gradSetArgs[[1]],
gradSetArgs[[2]], gradSetArgs[[2]], gradSetArgs[[3]],
gradSetArgs[[4]]))
getRHS.varying <- function() {
ans <- getRHS.noVarying()
attr(ans, "gradient") <- eval(gradCall)
ans
}
QR <- qr(attr(rhs, "gradient"))
qrDim <- min(dim(QR$qr))
if (QR$rank < qrDim)
stop("singular gradient matrix at initial parameter estimates")
getPars.noVarying <- function() unlist(setNames(lapply(names(ind),
get, envir = env), names(ind)))
getPars.varying <- function() unlist(setNames(lapply(names(ind),
get, envir = env), names(ind)))[useParams]
getPars <- getPars.noVarying
internalPars <- getPars()
setPars.noVarying <- function(newPars) {
assign("internalPars", newPars, envir = thisEnv)
for (i in names(ind)) {
assign(i, unname(newPars[ind[[i]]]), envir = env)
}
}
setPars.varying <- function(newPars) {
internalPars[useParams] <- newPars
for (i in names(ind)) {
assign(i, unname(internalPars[ind[[i]]]), envir = env)
}
}
setPars <- setPars.noVarying
on.exit(remove(i, data, parLength, start, temp))
m <- list(resid = function() resid, fitted = function() rhs,
formula = function() form, tau = function() tau, deviance = function() dev,
gradient = function() attr(rhs, "gradient"), incr = function() qr.coef(QR, resid), setVarying = function(vary = rep(TRUE,
length(useParams))) {
assign("useParams", if (is.character(vary)) {
temp <- logical(length(useParams))
temp[unlist(ind[vary])] <- TRUE
temp
} else if (is.logical(vary) && length(vary) != length(useParams)) stop("setVarying : vary length must match length of parameters") else {
vary
}, envir = thisEnv)
gradCall[[length(gradCall)]] <<- useParams
if (all(useParams)) {
assign("setPars", setPars.noVarying, envir = thisEnv)
assign("getPars", getPars.noVarying, envir = thisEnv)
assign("getRHS", getRHS.noVarying, envir = thisEnv)
assign("npar", length(useParams), envir = thisEnv)
} else {
assign("setPars", setPars.varying, envir = thisEnv)
assign("getPars", getPars.varying, envir = thisEnv)
assign("getRHS", getRHS.varying, envir = thisEnv)
assign("npar", length((1:length(useParams))[useParams]),
envir = thisEnv)
}
}, changeTau = function(newTau) {
assign("tau", newTau, envir = thisEnv)
assign("dev", sum(tau * pmax(resid, 0) + (tau - 1) * pmin(resid, 0)), envir = thisEnv)
return(dev)
}, setPars = function(newPars) {
setPars(newPars)
assign("resid", lhs - assign("rhs", getRHS(), envir = thisEnv),
envir = thisEnv)
assign("dev", sum(tau * pmax(resid, 0) + (tau - 1) * pmin(resid, 0)), envir = thisEnv)
assign("QR", qr(attr(rhs, "gradient")), envir = thisEnv)
return(QR$rank < min(dim(QR$qr)))
}, getPars = function() getPars(), getAllPars = function() getPars(),
getEnv = function() env, trace = function() cat(format(dev),
": ", format(getPars()), "\n"), Rmat = function() qr.R(QR),
predict = function(newdata = list(), qr = FALSE) {
Env <- new.env()
for (i in objects(envir = env)) {
assign(i, get(i, envir = env), envir = Env)
}
newdata <- as.list(newdata)
for (i in names(newdata)) {
assign(i, newdata[[i]], envir = Env)
}
eval(form[[3]], envir = Env)
})
class(m) <- "nlrqModel"
m
}
nlrq_m <- function (formula, data=parent.frame(), start, tau=0.5,
control, trace=FALSE, method = "L-BFGS-B")
{
mf <- match.call()
formula <- as.formula(formula)
varNames <- all.vars(formula)
if (length(formula) == 2) {
formula[[3]] <- formula[[2]]
formula[[2]] <- 0
}
if (missing(start)) {
if (!is.null(attr(data, "parameters"))) {
pnames <- names(attr(data, "parameters"))
}
else {
cll <- formula[[length(formula)]]
func <- get(as.character(cll[[1]]))
pnames <- as.character(as.list(match.call(func, call = cll))[-1][attr(func, "pnames")])
}
}
else {
pnames <- names(start)
}
varNames <- varNames[is.na(match(varNames, pnames, nomatch = NA))]
varIndex <- sapply(varNames, function(varName, data, respLength) {
length(eval(as.name(varName), data))%%respLength == 0
}, data, length(eval(formula[[2]], data)))
mf$formula <- parse(text = paste("~", paste(varNames[varIndex], collapse = "+")))[[1]]
mf$start <- mf$tau <- mf$control <- mf$algorithm <- mf$trace <- mf$method <- NULL
mf[[1]] <- as.name("model.frame")
mf <- as.list(eval(mf, parent.frame()))
if (missing(start)) {
start <- getInitial(formula, mf)
}
for (var in varNames[!varIndex]) mf[[var]] <- eval(as.name(var), data)
ctrl <- nlrq.control()
if (!missing(control)) {
control <- as.list(control)
ctrl[names(control)] <- control
}
m <- nlrqModel(formula, tau, mf, start)
nlrq.calc <- function (model, ctrl, trace) {
meketon <- function(x, y, w, tau, ctrl) {
yw <- ctrl$big
k <- 1
while(k <= ctrl$k & yw - crossprod(y, w) > ctrl$eps) {
d <- pmin(tau - w, 1 - tau + w)
z <- lsfit(x, y, d^2, intercept=FALSE)
yw <- sum(tau * pmax(z$resid, 0) + (tau - 1) * pmin(z$resid, 0))
k <- k + 1
s <- z$resid * d^2
alpha <- max(ctrl$eps, pmax(s/(tau - w), -s/(1 - tau + w)))
w <- w + (ctrl$beta/alpha) * s
}
coef <- z$coef
return(list(coef=coef, w=w, d = d))
}
model.step <- function(lambda, Step, model, pars) {
model$setPars(pars + lambda * Step)
model$deviance()
}
w <- rep(0, length(model$resid()))
snew <- model$deviance()
sold <- ctrl$big
nit <- 0
if (trace) {
model$trace()
optim.ctrl <- list(trace=1)
} else {
optim.ctrl <- list(trace=0)
}
lam0 <- ctrl$InitialStepSize
D <- list()
while(sold - snew > ctrl$eps & nit < ctrl$maxiter) {
z <- meketon(model$gradient(),as.vector(model$resid()), w, tau=tau, ctrl=ctrl)
Step <- z$coef
Pars <- model$getPars()
lam <- try(optim(par=lam0, fn=model.step, method=method, lower=0, upper=1,
Step=Step, model=model, pars=Pars, control=optim.ctrl)$par)
    if(inherits(lam, "try-error") || !is.finite(lam))
stop("optim unable to find valid step size")
if (trace) {cat("lambda =", lam, "\n")}
model$setPars(Pars + lam * Step)
sold <- snew
snew <- model$deviance()
w <- qr.resid(qr(model$gradient()), z$w)
w1 <- max(pmax(w, 0))
if(w1 > tau) {w <- (w * tau)/(w1 + ctrl$eps)}
w0 <- max(pmax( - w, 0))
if(w0 > 1 - tau) {w <- (w * (1 - tau))/(w0 + ctrl$eps)}
if (trace) {model$trace()}
if (R.Version()$os == "Win32") {flush.console()}
nit <- nit + 1
D[[nit]] <- z$d
}
Rho <- function(u,tau) u * (tau - (u < 0))
model$rho <- sum(Rho(model$resid(),tau))
model$D <- D
model
}
nlrq.out <- list(m=nlrq.calc(m, ctrl, trace), data=substitute(data),
call=match.call(), PACKAGE = "quantreg")
nlrq.out$call$control <- ctrl
nlrq.out$call$trace <- trace
class(nlrq.out) <- "nlrq"
nlrq.out
} |
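# imageMBF: stack the non-empty region of every z-slice of a 3-D image into one
# tall matrix (slices separated by rows of NA) and display it with
# fields::image.plot, optionally reversing the tim.colors palette.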
imageMBF<-function(img,zlim=NULL,reverse=TRUE)
{
Z=dim(img)[3]
if (is.null(zlim))
zlim=c(0,max(img,na.rm=TRUE))
img[img>zlim[2]]=zlim[2]
img[img<zlim[1]]=zlim[1]
sum.na<-function(x)return(sum(x,na.rm=TRUE))
yrange=range(which(apply(img[,,],2,sum,na.rm=TRUE)!=0))
xrange=range(which(apply(img[,,1],1,sum,na.rm=TRUE)!=0))
drange=max(diff(xrange)-diff(yrange),0)/2
yrange=yrange+floor(c(-1,1)*drange)
fullimg=img[xrange[1]:xrange[2],yrange[1]:yrange[2],1]
for (i in 2:Z)
{
fullimg=rbind(fullimg,rep(NA,diff(yrange)+1))
fullimg=rbind(fullimg,rep(NA,diff(yrange)+1))
xrange=range(which(apply(img[,,i],1,sum.na)!=0))
fullimg=rbind(fullimg,img[xrange[1]:xrange[2],yrange[1]:yrange[2],i])
}
farbe=fields::tim.colors(64)
if(reverse)farbe=rev(farbe)
fields::image.plot(fullimg,zlim=zlim,legend.width=1.8, axes=FALSE)
} |
NULL
print.hreal <- function(x, n=20, ...){
options(digits=4)
cat("------------------------------------------\n")
cat("Simulation result of marked Hawkes model.\n")
print(x$hspec)
cat("Realized path (with right continuous representation):\n")
mtrx <- as.matrix(x)
dimens <- x$hspec@dimens
name_N <- paste0("N", 1:dimens)
name_lambda <- paste0("lambda", 1:dimens)
name_lambda_component <- colnames(x$lambda_component)
len <- min(n, length(mtrx[,"arrival"]))
print(mtrx[1:len, c("arrival", name_N, name_lambda, name_lambda_component)])
if ( length(mtrx[,"arrival"]) > len){
    remaining <- length(mtrx[,"arrival"]) - len
    cat("... with ")
    cat(remaining)
cat(" more rows \n")
}
cat("------------------------------------------\n")
options(digits=7)
}
summary.hreal <- function(object, n=20, ...){
options(digits=5)
cat("------------------------------------`------\n")
cat("Simulation result of marked Hawkes model.\n")
cat("Realized path (with right continuous representation):\n")
mtrx <- as.matrix(object)
dimens <- object$hspec@dimens
name_N <- paste0("N", 1:dimens)
name_lambda <- paste0("lambda", 1:dimens)
len <- min(n, length(mtrx[,"arrival"]))
print(mtrx[1:len, c("arrival", name_N, name_lambda)])
if ( length(mtrx[,"arrival"]) > len){
    remaining <- length(mtrx[,"arrival"]) - len
    cat("... with ")
    cat(remaining)
cat(" more rows \n")
}
cat("------------------------------------------\n")
options(digits=7)
}
as.matrix.hreal <- function(x, ...){
mtrx <- numeric()
for (i in 2:length(x)){
mtrx <- cbind(mtrx, x[[i]])
if(is.vector(x[[i]])){
colnames(mtrx)[i-1] <- names(x)[i]
}
}
mtrx
} |
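# Simulate N iid Brownian motion paths on the grid v by cumulating independent
# Gaussian increments (one per grid step); the bridge version pins each path to
# zero at the right endpoint by subtracting v * B(1).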
simulate_iid_brownian_motion <- function(N, v = seq(from = 0, to = 1, length.out = 100), sig = 1){
dv <- length(v)
b_motion <- matrix(0, ncol = dv, nrow = N)
b_motion[,2:dv] <- t(apply(matrix(stats::rnorm( (dv-1) *N, sd = sig), ncol = dv-1, nrow = N), 1, cumsum))
return(b_motion)
}
simulate_iid_brownian_bridge <- function(N, v = seq(from = 0, to = 1, length.out = 100), sig = 1){
dv <- length(v)
b_bridge <- matrix(0, ncol = dv, nrow = N)
  b_bridge[,2:dv] <- simulate_iid_brownian_motion(N, v = v[1:(dv - 1)], sig)
b_bridge <- b_bridge - b_bridge[,dv]*matrix(rep(v, times = N),ncol = dv, nrow = N, byrow = T)
return(b_bridge)
} |
bridge_sampler.varstan <- function(samples, ...) {
if(!is.varstan(samples))
stop("The current object is not a varstan class")
out = try(bridge_sampler(samples$stanfit, ...))
return(out)
}
bayes_factor.varstan <- function(x1, x2, log = FALSE, ...) {
bridge1 = bridge_sampler(x1, ...)
bridge2 = bridge_sampler(x2, ...)
out = bayes_factor(bridge1, bridge2, log = log)
attr(out, "model_names") = c("model1", "model2")
return(out)
} |
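# logPretty1/logPretty3: generate pretty tick positions for log10 axes --
# logPretty1 returns full powers of ten covering [xMin, xMax], logPretty3 adds
# the 2 and 5 subdivisions and trims ticks that fall outside the data range.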
logPretty1 <- function(xMin, xMax) {
xFirst <- floor(log(xMin, 10))
xLast <- ceiling(log(xMax, 10))
xTicks <- seq(xFirst, xLast)
xTicks <- 10 ^ xTicks
return(xTicks)
}
logPretty3<-function(xMin,xMax) {
xMin <- xMin * 1.00001
xFirst <- floor(log(xMin, 10))
xLast <- ceiling(log(xMax, 10))
cycles <- xLast - xFirst + 1
trio <- c(0, log(2, 10), log(5, 10))
xTicks <- xFirst + trio
top <- cycles - 2
for(icycle in 1:top) {
newTrio<-xFirst+icycle+trio
xTicks<-c(xTicks,newTrio)
}
xTicks<-c(xTicks,xLast)
numTicks<-length(xTicks)
shortTicks<-numTicks-4
xTicks<-if(cycles<=2) xTicks[1:shortTicks] else xTicks
keepLow <- ifelse(log(xMin, 10) < xTicks, 1, 0)
keepHigh <- ifelse(log(xMax, 10) > xTicks, 1, 0)
top <- length(keepLow) - 1
kLow <- keepLow
kHigh <- keepHigh
for(i in 1:top) {
kLow[i] <- keepLow[i + 1]
}
for(i in 1:top) {
kHigh[i + 1] <- keepHigh[i]
}
keep <- kLow * kHigh
trim <- data.frame(xTicks, keep)
trim <- subset(trim, keep > 0)
xTicks <- 10 ^ trim$xTicks
return(xTicks)
} |
NULL
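# eval_cost_summary: sum planning-unit costs over the units selected in a
# solution; returns a single "overall" row, plus one row per zone when the
# problem has more than one zone.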
eval_cost_summary <- function(x, solution) UseMethod("eval_cost_summary")
eval_cost_summary.default <- function(x, solution) {
stop("argument to x must be a ConservationProblem object")
}
eval_cost_summary.ConservationProblem <- function(x, solution) {
assertthat::assert_that(inherits(x, "ConservationProblem"))
solution <- planning_unit_solution_status(x, solution)
cost_data <- x$planning_unit_costs()
costs <- unname(c(colSums(cost_data * solution, na.rm = TRUE)))
total_cost <- sum(costs)
if (x$number_of_zones() > 1) {
out <- tibble::tibble(
summary = c("overall", zone_names(x)),
cost = c(total_cost, costs))
} else {
out <- tibble::tibble(summary = "overall", cost = total_cost)
}
out
} |
maxcovar <- function(x, k=1)
{
} |
HELPrct %>%
summarise(x.bar = mean(age), s = sd(age)) |
grid.brackets <-
function(x1, y1, x2, y2, h=NULL, ticks=0.5, curvature=0.5, type=1, col=1, lwd=1, lty='solid')
{
if(is.null(h)) h <- 0.05
if(!is.unit(x1) | !is.unit(y1) | !is.unit(x2) | !is.unit(y2) | !is.unit(h)){
x1 <- unit(x1, 'native')
x2 <- unit(x2, 'native')
y1 <- unit(y1, 'native')
y2 <- unit(y2, 'native')
h <- unit(h, 'npc')
}
if(!is.numeric(curvature)) stop('curvature must be numeric')
  if(!is.numeric(type)) stop('type must be an integer, 1 to 5')
if(length(ticks)==1) if(is.na(ticks)) ticks<- NULL
if(!is.numeric(ticks) & !is.null(ticks)) stop('ticks must be numeric or NULL')
if(length(ticks)>1){
if(any(duplicated(abs(ticks)))) stop('duplicated ticks')
}
if(curvature<0) curvature<- 0
if(curvature>1) curvature<- 1
myangle <- 180*atan2((convertY(y2, unitTo='inches', valueOnly = TRUE)-convertY(y1, unitTo='inches', valueOnly = TRUE)),(convertX(x2, unitTo='inches', valueOnly = TRUE)-convertX(x1, unitTo='inches', valueOnly = TRUE)))/pi
mywidth <- sqrt((convertX(x2, unitTo='inches', valueOnly = TRUE)-convertX(x1, unitTo='inches', valueOnly = TRUE))^2+(convertY(y2, unitTo='inches', valueOnly = TRUE)-convertY(y1, unitTo='inches', valueOnly = TRUE))^2)
mywidth <- convertWidth(unit(mywidth, units='inches'), unitTo='npc', valueOnly = TRUE)
myheight <- convertHeight(h, unitTo='npc', valueOnly = TRUE)
x1<-convertX(x1, unitTo='npc', valueOnly = TRUE)
x2<-convertX(x2, unitTo='npc', valueOnly = TRUE)
y1<-convertY(y1, unitTo='npc', valueOnly = TRUE)
y2<-convertY(y2, unitTo='npc', valueOnly = TRUE)
xd <- (x2-x1)
yd <- (y2-y1)
v1 <- viewport(x=x1, y=y1, width=mywidth, height=myheight, angle=myangle, just=c("left", "bottom"))
pushViewport(v1)
brackets<- a_cb_brackets(phi=curvature, ticks=ticks, type=type)
grid.lines(brackets[1,], brackets[2,], gp=gpar(col=col, lwd=lwd, lty=lty))
popViewport()
} |
lapply(c( "shiny",
"leaflet",
"RPostgreSQL"
),function(pkg){
if (!(pkg %in% installed.packages()[,1])){
install.packages(pkg)
}
library(pkg,character.only = TRUE,quietly = TRUE)
}
)
lapply(c( "PEcAn.DB",
"PEcAn.visualization"
),function(pkg){
library(pkg,character.only = TRUE,quietly = TRUE)
}
)
server <- shinyServer(function(input, output, session) {
output$typeSelector <- renderUI({
data.atmosphere.registration <- system.file("registration", package="PEcAn.data.atmosphere")
datatype <- list.files(data.atmosphere.registration)
datatype <- gsub("register.", "", datatype)
datatype <- gsub(".xml", "", datatype)
selectInput("type", "Type", datatype)
})
output$modelSelector <- renderUI({
bety <- betyConnect("../../web/config.php")
on.exit(db.close(bety), add = TRUE)
models <- db.query("SELECT name FROM modeltypes;", bety)
selectInput("model", "Model", models)
})
output$agreementUI <- renderUI({
if (is.null(input$type)) {
return()
}
switch(
input$type,
"Ameriflux" = checkboxInput(
"agreement",
HTML(
"I agree to <a href='http://ameriflux.lbl.gov/data/data-policy/'>AmeriFlux license</a>."
),
value = FALSE,
width = NULL
),
"NARR" = checkboxInput(
"agreement",
HTML(
"I agree to <a href='http://www.esrl.noaa.gov/psd/data/gridded/data.narr.html'>NARR license</a>."
),
value = FALSE,
width = NULL
),
"Fluxnet2015" = checkboxInput(
"agreement",
HTML("I agree to FLUXNET license."),
value = FALSE,
width = NULL
)
)
})
observeEvent(input$type, {
bety <- betyConnect("../../web/config.php")
on.exit(db.close(bety), add = TRUE)
sites <-
db.query(
paste0(
"SELECT sitename, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry))
AS lat FROM sites, sitegroups_sites where sites.geometry IS NOT NULL AND sites.id = sitegroups_sites.site_id
and sitegroups_sites.sitegroup_id in ( select id from sitegroups where
sitegroups.name like '",
input$type,
"');"
),
bety
)
if(length(sites) > 0){
ids <- sites$sitename
latitude <- sites$lat
longitude <- sites$lon
map = createLeafletMap(session, 'map')
session$onFlushed(once = TRUE, function() {
map$clearMarkers()
map$addMarker(lat = latitude,
lng = longitude,
layerId = ids)
})
} else{
map = createLeafletMap(session, 'map')
session$onFlushed(once = TRUE, function() {
map$clearMarkers()
})
}
observe({
click <- input$map_marker_click
if (is.null(click)) {
click <- list(id = "US-Dk3",
lat = 35.9782,
lng = -79.0942)
} else{
text <- paste(click$id)
map$clearPopups()
map$showPopup(click$lat, click$lng, text)
}
selectedsite <- reactive({
if ( !(input$type %in% c("Ameriflux", "NARR", "Fluxnet2015")) || input$agreement) {
paste(
c(
"<input>",
paste0(" <type>", input$type, "</type>"),
paste0(" <site>", click$id, "</site>"),
paste0(" <lat>", click$lat, "</lat>"),
paste0(" <lon>", click$lng, "</lon>"),
paste0(
" <start_date>",
if (input$start_date != "")
input$start_date
else
"2001",
"-01-01 00:00:00</start_date>"
),
paste0(
" <end_date>",
if (input$end_date != "")
input$end_date
else
"2001",
"-12-31 23:59:59</end_date>"
),
"</input>"
),
collapse = "\n"
)
}
else{
"Please check the agreement. "
}
})
output$xmltext <- renderText({
selectedsite()
})
output$downloadXML <- downloadHandler(
filename = function() {
"example.xml"
},
content = function(file) {
if (!(input$type %in% c("Ameriflux", "NARR", "Fluxnet2015")) || input$agreement) {
writeLines(
c(
"<input>",
paste0(" <type>", input$type, "</type>"),
paste0(" <site>", click$id, "</site>"),
paste0(" <lat>", click$lat, "</lat>"),
paste0(" <lon>", click$lng, "</lon>"),
paste0(
" <start_date>",
if (input$start_date != "")
input$start_date
else
"2001",
"-01-01 00:00:00</start_date>"
),
paste0(
" <end_date>",
if (input$end_date != "")
input$end_date
else
"2001",
"-12-31 23:59:59</end_date>"
),
"</input>"
),
file
)
}
}
)
output$downloadData <- downloadHandler(
filename = function() {
paste0("example.met.", input$model)
},
content = function(file) {
xml_filename <- tempfile("pecan", fileext = ".xml")
fileConn <- file(xml_filename)
writeLines(selectedsite(), fileConn)
bds <- "https://bd-api-dev.ncsa.illinois.edu"
output_path <- "/tmp/"
url <-
BrownDog::convert_file(
bds,
xml_filename,
paste0("met.", input$model),
output_path,
input$token,
download = FALSE
)
BrownDog::download(url, file, input$token)
}
)
})
})
}) |
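# conv_o2: convert a dissolved-oxygen value between units by first expressing it
# as percent air saturation (using marelac gas solubility and seacarb density at
# the given temperature, salinity and pressure) and then converting from percent
# a.s. to the requested unit(s), e.g. conv_o2(7, from = 'mg_per_l', to = 'umol_per_l').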
conv_o2 = function(o2 = 100, from = "percent_a.s.", to = "all", temp = 25, sal = 35, atm_pres = 1013.25){
air_pres = measurements::conv_unit(atm_pres, 'mbar', 'bar')
all_units = c('percent_a.s.', 'percent_o2', 'hPa', 'kPa', 'torr', 'mmHg', 'inHg', 'mg_per_l', 'ug_per_l', 'umol_per_l', 'mmol_per_l', 'ml_per_l', 'mg_per_kg', 'ug_per_kg', 'umol_per_kg', 'mmol_per_kg', 'ml_per_kg', 'volumes_percent')
if(!(from %in% all_units)) stop('the \'from\' argument is not an acceptable unit.')
if(!(to %in% c(all_units, 'all'))) stop('the \'to\' argument is not an acceptable unit.')
if(from == 'percent_a.s.') perc_a.s. = o2
if(from == 'percent_o2') perc_a.s. = o2 / marelac::atmComp('O2')
if(from == 'hPa') perc_a.s. = measurements::conv_unit(o2, 'hPa', 'atm') * 100 / (air_pres - marelac::vapor(S = sal, t = temp)) / marelac::atmComp('O2')
if(from == 'kPa') perc_a.s. = measurements::conv_unit(o2, 'kPa', 'atm') * 100 / (air_pres - marelac::vapor(S = sal, t = temp)) / marelac::atmComp('O2')
if(from == 'torr') perc_a.s. = measurements::conv_unit(o2, 'torr', 'atm') * 100 / (air_pres - marelac::vapor(S = sal, t = temp)) / marelac::atmComp('O2')
if(from == 'mmHg') perc_a.s. = measurements::conv_unit(o2, 'mmHg', 'atm') * 100 / (air_pres - marelac::vapor(S = sal, t = temp)) / marelac::atmComp('O2')
if(from == 'inHg') perc_a.s. = measurements::conv_unit(o2, 'inHg', 'atm') * 100 / (air_pres - marelac::vapor(S = sal, t = temp)) / marelac::atmComp('O2')
if(from == 'mg_per_l') perc_a.s. = o2 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') / 1e-6 / marelac::molweight('O2') / 1e3
if(from == 'ug_per_l') perc_a.s. = o2 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') / 1e-6 / marelac::molweight('O2') / 1e6
if(from == 'umol_per_l') perc_a.s. = o2 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2')
if(from == 'mmol_per_l') perc_a.s. = o2 * 1000 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2')
if(from == 'ml_per_l') perc_a.s. = measurements::conv_unit(o2, 'ml', 'l') / marelac::molvol(t = temp, P = air_pres, species = 'O2', quantity = 1 / measurements::conv_unit(100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2'), 'mol', 'umol'))
if(from == 'mg_per_kg') perc_a.s. = o2 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') / 1e-6 / marelac::molweight('O2') / 1e3 * (as.numeric(seacarb::rho(S = sal, T = temp)) / 1000)
if(from == 'ug_per_kg') perc_a.s. = o2 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') / 1e-6 / marelac::molweight('O2') / 1e3 * (as.numeric(seacarb::rho(S = sal, T = temp)) / 1e6)
if(from == 'umol_per_kg') perc_a.s. = o2 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * (as.numeric(seacarb::rho(S = sal, T = temp)) / 1000)
if(from == 'mmol_per_kg') perc_a.s. = o2 * 100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * (as.numeric(seacarb::rho(S = sal, T = temp)))
if(from == 'ml_per_kg') perc_a.s. = measurements::conv_unit(o2, 'ml', 'l') / marelac::molvol(t = temp, P = air_pres, species = 'O2', quantity = 1 / measurements::conv_unit(100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2'), 'mol', 'umol')) * (as.numeric(seacarb::rho(S = sal, T = temp)) / 1000)
if(from == 'volumes_percent') perc_a.s. = measurements::conv_unit(o2, 'ml', 'l') / marelac::molvol(t = temp, P = air_pres, species = 'O2', quantity = 1 / measurements::conv_unit(100 / marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2'), 'mol', 'umol')) * 10
x = list()
if(to == 'percent_a.s.' | to == 'all') x$percent_a.s. = perc_a.s.
if(to == 'percent_o2' | to == 'all') x$percent_o2 = marelac::atmComp('O2') * perc_a.s.
if(to == 'hPa' | to == 'all') x$hPa = measurements::conv_unit((air_pres - marelac::vapor(S = sal, t = temp)) * marelac::atmComp('O2') * perc_a.s. / 100, 'atm', 'hPa')
if(to == 'kPa' | to == 'all') x$kPa = measurements::conv_unit((air_pres - marelac::vapor(S = sal, t = temp)) * marelac::atmComp('O2') * perc_a.s. / 100, 'atm', 'kPa')
if(to == 'torr' | to == 'all') x$torr = measurements::conv_unit((air_pres - marelac::vapor(S = sal, t = temp)) * marelac::atmComp('O2') * perc_a.s. / 100, 'atm', 'torr')
if(to == 'mmHg' | to == 'all') x$mmHg = measurements::conv_unit((air_pres - marelac::vapor(S = sal, t = temp)) * marelac::atmComp('O2') * perc_a.s. / 100, 'atm', 'mmHg')
if(to == 'inHg' | to == 'all') x$inHg = measurements::conv_unit((air_pres - marelac::vapor(S = sal, t = temp)) * marelac::atmComp('O2') * perc_a.s. / 100, 'atm', 'inHg')
if(to == 'mg_per_l' | to == 'all') x$mg_per_l = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * 1e-6 * marelac::molweight('O2') * 1e3 * perc_a.s. / 100
if(to == 'ug_per_l' | to == 'all') x$ug_per_l = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * 1e-6 * marelac::molweight('O2') * 1e6 * perc_a.s. / 100
if(to == 'umol_per_l' | to == 'all') x$umol_per_l = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * perc_a.s. / 100
if(to == 'mmol_per_l' | to == 'all') x$mmol_per_l = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * perc_a.s. / 100 / 1000
if(to == 'ml_per_l' | to == 'all') x$ml_per_l = measurements::conv_unit(as.numeric(marelac::molvol(t = temp, P = air_pres, species = 'O2', quantity = measurements::conv_unit(marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * perc_a.s. / 100, 'umol', 'mol'))), 'l', 'ml')
if(to == 'mg_per_kg' | to == 'all') x$mg_per_kg = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * 1e-6 * marelac::molweight('O2') * 1e3 * perc_a.s. / 100 / (as.numeric(seacarb::rho(S = sal, T = temp)) / 1000)
if(to == 'ug_per_kg' | to == 'all') x$ug_per_kg = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * 1e-6 * marelac::molweight('O2') * 1e3 * perc_a.s. / 100 / (as.numeric(seacarb::rho(S = sal, T = temp)) / 1e6)
if(to == 'umol_per_kg' | to == 'all') x$umol_per_kg = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * perc_a.s. / 100 / (as.numeric(seacarb::rho(S = sal, T = temp)) / 1000)
if(to == 'mmol_per_kg' | to == 'all') x$mmol_per_kg = marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * perc_a.s. / 100 / 1000 / (as.numeric(seacarb::rho(S = sal, T = temp)) / 1000)
if(to == 'ml_per_kg' | to == 'all') x$ml_per_kg = measurements::conv_unit(as.numeric(marelac::molvol(t = temp, P = air_pres, species = 'O2', quantity = measurements::conv_unit(marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * perc_a.s. / 100, 'umol', 'mol'))), 'l', 'ml') / (as.numeric(seacarb::rho(S = sal, T = temp)) / 1000)
if(to == 'volumes_percent' | to == 'all') x$volumes_percent = measurements::conv_unit(as.numeric(marelac::molvol(t = temp, P = air_pres, species = 'O2', quantity = measurements::conv_unit(marelac::gas_satconc(S = sal, t = temp, P = air_pres, species = 'O2') * perc_a.s. / 100, 'umol', 'mol'))), 'l', 'ml') / 10
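# Drop the 'names' attribute inherited from marelac::gas_satconc()/atmComp() so each element is returned as a plain numeric vector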
attr(x$percent_o2, 'names') = NULL
attr(x$hPa, 'names') = NULL
attr(x$kPa, 'names') = NULL
attr(x$torr, 'names') = NULL
attr(x$mmHg, 'names') = NULL
attr(x$inHg, 'names') = NULL
attr(x$mg_per_l, 'names') = NULL
attr(x$ug_per_l, 'names') = NULL
attr(x$umol_per_l, 'names') = NULL
attr(x$mmol_per_l, 'names') = NULL
attr(x$ml_per_l, 'names') = NULL
attr(x$mg_per_kg, 'names') = NULL
attr(x$ug_per_kg, 'names') = NULL
attr(x$umol_per_kg, 'names') = NULL
attr(x$mmol_per_kg, 'names') = NULL
attr(x$ml_per_kg, 'names') = NULL
attr(x$volumes_percent, 'names') = NULL
if(to != 'all') x = unlist(x, use.names = FALSE)
return(x)
} |
library("tram")
library("tbm")
library("partykit")
source("setup.R")
myFUN <- function(ldata, lweights, model = c("normal", "logistic", "minextrval"), order) {
bctrl <- boost_control(mstop = 100, risk = "oob", nu = 0.01)
model <- match.arg(model)
m0 <- switch(model,
"normal" = BoxCox(y ~ 1, data = ldata, order = order, support = sup, bounds = bds),
"logistic" = Colr(y ~ 1, data = ldata, order = order, support = sup, bounds = bds),
"minextrval" = Coxph(y ~ 1, data = ldata, order = order, support = sup, bounds = bds))
fm <- "y ~ x1 + x2"
nm <- colnames(ldata)
nx <- nm[grep("^nx", nm)]
if (length(nx) > 0)
fm <- paste(fm, "+", paste(nx, collapse = "+"))
fm <- as.formula(fm)
tctrl <- ctree_control(minsplit = 4, minbucket = 2, mincriterion = 0,
maxdepth = 6, splittest = FALSE,
testtype = "Teststatistic")
l1 <- stmboost(m0, formula = fm,
data = ldata, method = quote(mboost::blackboost),
weights = lweights, control = bctrl, tree_control = tctrl)
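# Grow the boosting path (doubling mstop, capped at 2000) until the out-of-bag risk minimum lies strictly before the last iteration, then refit at that mstop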
while(!(which.min(risk(l1)) < mstop(l1)) && mstop(l1) < 2000)
l1 <- l1[2 * mstop(l1)]
mstop <- which.min(risk(l1))
l1 <- stmboost(m0, formula = fm,
data = ldata, method = quote(mboost::blackboost),
control = bctrl, tree_control = tctrl)[mstop]
return(l1)
}
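# Simulation grid: tNOBS, tPNON, tTD, tOR and 'order' are expected to come from setup.R; run.R evaluates FUN on each setting and returns 'res'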
ret <- c()
for (NOBS in tNOBS) {
for (PNON in tPNON) {
for (TD in tTD) {
for (OR in tOR * order) {
FUN <- function(...) myFUN(..., model = TD, order = OR)
source("run.R", echo = TRUE)
res$model <- "tram_tree"
res$PNON <- PNON
res$NOBS <- NOBS
res$order <- OR
res$todistr <- TD
ret <- rbind(ret, res)
}
}
}
}
save(ret, file = "tram_tree.rda")
sessionInfo() |
is.prim <-
function(y){
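# Element-wise primality test based on trial division; relies on the helpers
# is.negative(), is.whole() and is.even() (e.g. from the 'schoolmath' package).
# Note that 1 is reported as prime here: is.prim(c(7, 8, 13)) gives TRUE FALSE TRUE.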
starten <- 1
enden <- length(y)+1
while(starten<enden){
x <- y[starten]
error <- 0
test <- is.negative(x)
if(test==TRUE){
x <- x*(-1)
}
test <- is.whole(x)
if(test==FALSE) {
error <- error+1
}
if(x==0){
error <- error+1
}
test <- is.even(x)
if(test==TRUE & x!=2){
error <- error+1
}
test <- is.whole(sqrt(x))
if(test==TRUE & x!=1){
error <- error+1
}
if(error>0){
if (starten==1){
result=FALSE
}else{
result <- c(result, FALSE)
}
}else{
if(x==1 |x==2|x==3|x==5|x==7){
if (starten==1){
result=TRUE
}else{
result <- c(result, TRUE)
}
}else{
anfang <- 3
ende <- ceiling(sqrt(x))
while(anfang<ende){
test1 <- x/anfang
test2 <- floor(test1)
if(test1==test2){
error <- error+1
}
anfang <- anfang+2
}
if(error==0){
if (starten==1){
result=TRUE
}else{
result <- c(result, TRUE)
}
} else{
if (starten==1){
result=FALSE
}else{
result <- c(result, FALSE)
}
}
}
}
starten <- starten+1
}
return(result)
} |
fname <- fxfer("show4calls.R")
file.edit(fname) |
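# Scalar math helpers (link functions, log-sum-exp utilities, softmax, ...); these appear to mirror Stan's math functions for use from plain R.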
logit <- function(p) {
log(p / (1 - p))
}
inv_logit <- function(x) {
1 / (1 + exp(-x))
}
cloglog <- function(x) {
log(-log(1 - x))
}
inv_cloglog <- function(x) {
1 - exp(-exp(x))
}
Phi <- function(x) {
pnorm(x)
}
incgamma <- function(a, x) {
pgamma(x, shape = a) * gamma(a)
}
square <- function(x) {
x^2
}
cbrt <- function(x) {
x^(1/3)
}
exp2 <- function(x) {
2^x
}
pow <- function(x, y) {
x^y
}
inv <- function(x) {
1/x
}
inv_sqrt <- function(x) {
1/sqrt(x)
}
inv_square <- function(x) {
1/x^2
}
hypot <- function(x, y) {
stopifnot(all(x >= 0))
stopifnot(all(y >= 0))
sqrt(x^2 + y^2)
}
log1m <- function(x) {
log(1 - x)
}
step <- function(x) {
ifelse(x > 0, 1, 0)
}
logm1 <- function(x, base = exp(1)) {
log(x - 1, base = base)
}
expp1 <- function(x) {
exp(x) + 1
}
logit_scaled <- function(x, lb = 0, ub = 1) {
logit((x - lb) / (ub - lb))
}
inv_logit_scaled <- function(x, lb = 0, ub = 1) {
inv_logit(x) * (ub - lb) + lb
}
multiply_log <- function(x, y) {
ifelse(x == y & x == 0, 0, x * log(y))
}
log1p_exp <- function(x) {
log(1 + exp(x))
}
log1m_exp <- function(x) {
ifelse(x < 0, log(1 - exp(x)), NaN)
}
log_diff_exp <- function(x, y) {
stopifnot(length(x) == length(y))
ifelse(x > y, log(exp(x) - exp(y)), NaN)
}
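# Numerically stable log(exp(x) + exp(y)) via the max-shift trick, e.g. log_sum_exp(1000, 1000) gives 1000 + log(2) without overflow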
log_sum_exp <- function(x, y) {
max <- pmax(x, y)
max + log(exp(x - max) + exp(y - max))
}
log_mean_exp <- function(x) {
max_x <- max(x)
max_x + log(sum(exp(x - max_x))) - log(length(x))
}
expm1 <- function(x) {
exp(x) - 1
}
log_expm1 <- function(x) {
log(expm1(x))
}
log_inv_logit <- function(x) {
log(inv_logit(x))
}
log1m_inv_logit <- function(x) {
log(1 - inv_logit(x))
}
scale_unit <- function(x, lb = min(x), ub = max(x)) {
(x - lb) / (ub - lb)
}
fabs <- function(x) {
abs(x)
}
softmax <- function(x) {
ndim <- length(dim(x))
if (ndim <= 1) {
x <- matrix(x, nrow = 1)
ndim <- length(dim(x))
}
x <- exp(x)
dim_noncat <- dim(x)[-ndim]
marg_noncat <- seq_along(dim(x))[-ndim]
catsum <- array(apply(x, marg_noncat, sum), dim = dim_noncat)
sweep(x, marg_noncat, catsum, "/")
}
log_softmax <- function(x) {
ndim <- length(dim(x))
if (ndim <= 1) {
x <- matrix(x, nrow = 1)
ndim <- length(dim(x))
}
dim_noncat <- dim(x)[-ndim]
marg_noncat <- seq_along(dim(x))[-ndim]
catsum <- log(array(apply(exp(x), marg_noncat, sum), dim = dim_noncat))
sweep(x, marg_noncat, catsum, "-")
}
inv_odds <- function(x) {
x / (1 + x)
} |
library(glmmTMB)
salamander1 <- glmmTMB(count ~ mined + (1|site),
zi=~mined,
family=poisson, data=Salamanders)
saveRDS(salamander1,"salamander1.rds",version=2) |
plot.PVE <- function(x,xlab="Number of Features",ylab="PVE",...) {
if (!inherits(x,"PVE")) {
stop("'x' must be of class 'PVE'")
}
plot(x=x$J,y=x$PVEs,xlab=xlab,ylab=ylab,...)
} |
fitted_LB <- function(object, type = c("link", "response")){
type <- match.arg(type[1], c("link","response"))
n = nrow(object$Ahat)
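# Linear predictor: bind an intercept column to the estimated scores Ahat, then multiply by t(Bhat)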
theta = as.matrix(cbind(rep(1,n),object$Ahat))%*%t(object$Bhat)
P = plogis(theta)
if (type == "link") {
return(theta)
} else if (type == "response") {
return(P)
}
} |
context("checkstyle_output")
test_that("return lint report as checkstyle xml", {
lints <- structure(
list(
Lint(filename = "test_file",
line_number = 1,
column_number = 2,
type = "error",
line = "a line",
message = "foo"),
Lint(filename = "test_file",
line_number = 2,
column_number = 1,
type = "style",
line = "another line",
message = "bar"),
Lint(filename = "test_file2",
line_number = 1,
column_number = 1,
type = "warning",
line = "yet another line",
message = "baz")
),
class = "lints")
tmp <- tempfile()
checkstyle_output(lints, tmp)
expect_equal(readLines(tmp), readLines("checkstyle.xml"))
}) |
test_that("path.dist called safely", {
library("TreeTools")
expect_equal(c(5.66, 6, 6, 6.32, 6.32, 5.74),
PathDist(as.phylo(0:5, 6), BalancedTree(6)),
tolerance = 2)
expect_equal(c(5.66, 6, 6, 6.32, 6.32, 5.74),
PathDist(BalancedTree(6), as.phylo(0:5, 6)),
tolerance = 2)
expect_equal(PathDist(BalancedTree(6), PectinateTree(6)),
PathDist(list(BalancedTree(6), PectinateTree(6)))[1],
ignore_attr = TRUE)
}) |
N <- 20000
M <- 2000
K <- 5
S <- tcrossprod(matrix(rnorm(M * K), M)) + 5 * diag(M)
X <- MASS::mvrnorm(N, mu = rep(0, M), Sigma = S)
X <- scale(X)
k <- 10
L <- k + 50
n <- nrow(X)
m <- ncol(X)
I <- 5
tol <- 1e-3
true <- svd(X, nu = k, nv = k)
diffPCs <- function(test, rot) {
k <- ncol(test)
diff1 <- 2 * abs(test - rot[, 1:k]) / (abs(test) + abs(rot[, 1:k]))
diff2 <- 2 * abs(test + rot[, 1:k]) / (abs(test) + abs(rot[, 1:k]))
diff <- pmin(diff1, diff2)
mean(diff)
}
set.seed(1)
G <- list(matrix(rnorm(n * L), n, L))
R <- list(crossprod(X, G[[1]]))
conv <- FALSE
it <- 0
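# Alternate power iterations on the left sketch G (n x L) and right sketch R (m x L); after each pass, restart from the latest singular-vector estimates until the left and right solutions agree within 'tol' (or 5 passes)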
while (!conv && it < 5) {
print(it <- it + 1)
for (i in 1:I) {
G[[i + 1]] <- X %*% (crossprod(X, G[[i]])) / m
R[[i + 1]] <- crossprod(X, X %*% R[[i]]) / n
}
U1 <- svd(do.call(cbind, G), nv = 0)$u
U2 <- svd(do.call(cbind, R), nv = 0)$u
T1.t <- crossprod(X, U1)
T2.t <- X %*% U2
T1.svd <- svd(T1.t, nu = L, nv = L)
T2.svd <- svd(T2.t, nu = L, nv = L)
u1 = U1 %*% T1.svd$v
v1 = T1.svd$u
u2 = T2.svd$u
v2 = U2 %*% T2.svd$v
diff1 <- diffPCs(u1[, 1:k], u2)
diff2 <- diffPCs(v1[, 1:k], v2)
print(m1 <- max(diff1, diff2))
conv <- (m1 < tol)
G <- list(u2)
R <- list(v2)
}
test <- list(d = T2.svd$d[1:k], u = u2[, 1:k], v = v2[, 1:k])
print(all.equal(test$d, true$d[1:k]))
plot(test$u, true$u)
plot(test$v, true$v)
require(foreach)
R2 <- foreach(i = 1:k, .combine = 'cbind') %do% {
R2.1 <- summary(lm(true$u[, i] ~ test$u[, i] - 1))$r.squared
R2.2 <- summary(lm(true$v[, i] ~ test$v[, i] - 1))$r.squared
c(R2.1, R2.2)
}
print(R2)
print(diffPCs(true$u, test$u))
print(diffPCs(test$v, true$v)) |
"big_word_club" |
context("generateDesignOfDefaults")
test_that("generateDesignOfDefaults", {
ps = makeParamSet(
makeNumericParam("x", lower = 1, upper = 5, default = 1),
makeIntegerParam("y", lower = 2, upper = 6, default = 3)
)
d = generateDesignOfDefaults(ps)
e = data.frame(x = 1, y = 3)
attr(e, "trafo") = FALSE
expect_equal(d, e)
ps = makeParamSet(
makeNumericParam("u", lower = 1, upper = 5, default = 1),
makeIntegerParam("v", lower = 2, upper = 6, default = 3),
makeLogicalParam("w", default = TRUE),
makeDiscreteParam("x", values = c("a", "b"), default = "a")
)
d = generateDesignOfDefaults(ps)
e = data.frame(u = 1, v = 3, w = TRUE, x = factor("a", levels = c("a", "b")))
attr(e, "trafo") = FALSE
expect_equal(d, e)
ps = makeParamSet(
makeNumericVectorParam("x", len = 2L, lower = 1, upper = 2, default = c(1, 2)),
makeIntegerVectorParam("y", len = 2L, lower = 3, upper = 4, default = c(3, 3)),
makeLogicalVectorParam("z", len = 2L, default = c(TRUE, FALSE))
)
d = generateDesignOfDefaults(ps)
e = data.frame(x1 = 1, x2 = 2, y1 = 3, y2 = 3, z1 = TRUE, z2 = FALSE)
attr(e, "trafo") = FALSE
expect_equal(d, e)
ps = makeParamSet(
makeNumericParam("x", lower = 0, upper = 1, default = 0),
makeNumericParam("y", lower = 3, upper = 4, trafo = function(x) 2 * x, default = 3)
)
d = generateDesignOfDefaults(ps, trafo = TRUE)
e = data.frame(x = 0, y = 6)
attr(e, "trafo") = TRUE
expect_equal(d, e)
ps = makeParamSet(
makeNumericParam("x", lower = 1, upper = 5),
makeIntegerParam("y", lower = 2, upper = 6, default = 3)
)
expect_error(generateDesignOfDefaults(ps), regexp = "No default parameter setting for: x")
ps = makeParamSet(
makeNumericParam("x", lower = 1, upper = 3, default = 2),
makeNumericParam("y", lower = 1, upper = 3, default = 1, requires = quote(x > 2))
)
d = generateDesignOfDefaults(ps)
e = data.frame(x = 2, y = NA_real_)
attr(e, "trafo") = FALSE
expect_equal(d, e)
ps = makeParamSet(
makeIntegerParam("x", lower = 1L, upper = 3L, default = 2)
)
d = generateDesignOfDefaults(ps)
e = data.frame(x = 2L)
attr(e, "trafo") = FALSE
expect_identical(class(d[, 1]), class(e[, 1]))
ps = makeParamSet(
makeNumericParam(id = "x1", lower = 0, upper = 2, default = "BLA", special.vals = list("BLA")),
makeNumericParam(id = "x2", lower = 0, upper = 2, default = iris, special.vals = list(iris))
)
expect_error(generateDesignOfDefaults(ps), regexp = "special.vals as default for Parameter(s): x1,x2", fixed = TRUE)
})
test_that("generateDesignOfDefaults works with discrete params and complex values", {
ps = makeParamSet(
makeDiscreteParam("p", values = c("a", "b"), default = "b")
)
d = generateDesignOfDefaults(ps)
expect_equal(d, data.frame(p = factor("b", levels = c("a", "b"))), check.attributes = FALSE)
ps = makeParamSet(
makeDiscreteParam("p", values = c(ir = "ir", foo = "bar"), default = "ir")
)
d = generateDesignOfDefaults(ps)
expect_equal(d, data.frame(p = factor("ir", levels = c("ir", "foo"))), check.attributes = FALSE)
p = makeDiscreteParam("p", values = c(ir = "ir", foo = "bar"), default = "bar")
ps = makeParamSet(p)
d = generateDesignOfDefaults(ps)
expect_equal(d, data.frame(p = factor("foo", levels = c("ir", "foo"))), check.attributes = FALSE)
p = makeDiscreteParam("p", values = list(ir = "ir", foo = iris), default = iris)
ps = makeParamSet(p)
d = generateDesignOfDefaults(ps)
expect_equal(d, data.frame(p = factor("foo", levels = c("ir", "foo"))), check.attributes = FALSE)
}) |
fieldRotate <- function(mosaic, theta = NULL, clockwise = TRUE, h = FALSE, n.core = NULL, extentGIS = FALSE,
DSMmosaic = NULL, plot = TRUE, type = "l", lty = 2, lwd = 3, fast.plot = FALSE) {
mosaic <- raster::stack(mosaic)
num.band<-length(mosaic@layers)
print(paste(num.band," layers available", sep = ""))
par(mfrow=c(1,2))
if(!is.null(DSMmosaic)){
par(mfrow=c(1,3))
if(raster::projection(DSMmosaic)!=raster::projection(mosaic)){stop("DSMmosaic and RGBmosaic must have the same projection CRS")}}
if(plot|is.null(theta)){
if(fast.plot){
raster::plot(mosaic[[1]], col=grey(1:100/100), axes=FALSE, box=FALSE, legend=FALSE)}
if(!fast.plot){
if(num.band>2){raster::plotRGB(RGB.rescale(mosaic,num.band=3), r = 1, g = 2, b = 3)}
if(num.band<3){raster::plot(mosaic, axes=FALSE, box=FALSE)}}}
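# Helper: rotate a raster by 'angle' degrees by assigning a polar azimuthal equidistant CRS and re-projecting with a shifted central meridian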
rotate <- function(x, angle=0, resolution=res(x)) {
y <- x
raster::crs(y) <- "+proj=aeqd +ellps=sphere +lat_0=90 +lon_0=0"
raster::projectRaster(y, res=resolution, crs=paste0("+proj=aeqd +ellps=sphere +lat_0=90 +lon_0=", -angle))}
if(is.null(theta)){
print("Select 2 points from left to right on image in the plots space. Use any horizontal line in the field trial of interest as a reference.")
c1.a <- locator(type="p",n = 1, col="red",pch=19)
c1.b <- locator(type="p",n = 1, col="red",pch=19)
c1<-as.data.frame(mapply(c, c1.a, c1.b))
colnames(c1)<-c("x","y")
lines(c1, col= "red", type=type, lty=lty, lwd=lwd)
if((c1$y[1]>=c1$y[2])&(c1$x[2]>=c1$x[1])){theta = (atan2((c1$y[1] - c1$y[2]), (c1$x[2] - c1$x[1])))*(180/pi)}
if((c1$y[2]>=c1$y[1])&(c1$x[2]>=c1$x[1])){theta = (atan2((c1$y[2] - c1$y[1]), (c1$x[2] - c1$x[1])))*(180/pi)}
if((c1$y[1]>=c1$y[2])&(c1$x[1]>=c1$x[2])){theta = (atan2((c1$y[1] - c1$y[2]), (c1$x[1] - c1$x[2])))*(180/pi)}
if((c1$y[2]>=c1$y[1])&(c1$x[1]>=c1$x[2])){theta = (atan2((c1$y[2] - c1$y[1]), (c1$x[1] - c1$x[2])))*(180/pi)}
if(!h){theta=90-theta}
if(clockwise){theta=-theta}
theta=round(theta,3)
print(paste("Theta rotation: ",theta,sep = ""))
}
if (is.null(n.core)) {
r<-rotate(mosaic,angle = theta)
}
if (!is.null(n.core)) {
if (n.core > parallel::detectCores()) {
stop(paste(" 'n.core' must be less than ", parallel::detectCores(),sep = ""))
}
cl <- parallel::makeCluster(n.core, output = "", setup_strategy = "sequential")
registerDoParallel(cl)
r <- foreach(i=1:length(mosaic@layers), .packages = c("raster")) %dopar% {rotate(mosaic[[i]], angle = theta)}
parallel::stopCluster(cl)
}
r <- raster::stack(r)
if(extentGIS){
m11<-apply(matrix(as.numeric(as.matrix(raster::extent(mosaic))),2),1,function(x){mean(x)})
m22<-apply(matrix(as.numeric(as.matrix(raster::extent(r))),2),1,function(x){abs(diff(c(x[2],x[1]))/2)})
raster::extent(r)<-c(as.numeric(c(m11[1]-m22[1])), as.numeric(c(m11[1]+m22[1])), as.numeric(c(m11[2]-m22[2])), as.numeric(c(m11[2]+m22[2])))
raster::crs(r)<-raster::crs(mosaic)
}
Out<-r
if(plot){
if(fast.plot){
raster::plot(r[[1]], col=grey(1:100/100), axes=FALSE, box=FALSE, legend=FALSE)}
if(!fast.plot){
if(num.band > 2){
X_GB <- RGB.rescale(r,num.band=3)
raster::plotRGB(X_GB, r = 1, g = 2, b = 3)
}
if(num.band<3){raster::plot(r, axes=FALSE, box=FALSE)}}}
if(!is.null(DSMmosaic)){
DSMmosaic <- raster::stack(DSMmosaic)
DSMmosaic <- rotate(DSMmosaic,angle = theta)
raster::plot(DSMmosaic, axes=FALSE, box=FALSE)
if(extentGIS){
raster::extent(DSMmosaic)<-c(as.numeric(c(m11[1]-m22[1])), as.numeric(c(m11[1]+m22[1])), as.numeric(c(m11[2]-m22[2])), as.numeric(c(m11[2]+m22[2])))
raster::crs(DSMmosaic)<-raster::crs(mosaic)
}
Out<-list(rotatedMosaic=r,rotatedDSM=DSMmosaic)
}
par(mfrow=c(1,1))
return(Out)
} |
expected <- eval(parse(text="structure(c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE), .Dim = c(20L, 5L), .Dimnames = list(NULL, c(\"
test(id=0, code={
argv <- eval(parse(text="list(structure(c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE), .Dim = c(5L, 20L), .Dimnames = list(c(\"
.Internal(aperm(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected); |
testthat::test_that("FinalPred: initialize function works", {
testthat::expect_is(FinalPred$new(),
"FinalPred")
})
testthat::test_that("FinalPred: set function works", {
prob <- data.frame(c(0.2, 0.5, 0.6), c(0.8, 0.5, 0.4))
raw <- factor(c("Negative", "Positive", "Positive"))
class.values <- c("Positive", "Negative")
positive.class <- "Positive"
testthat::expect_silent(FinalPred$new()$set(prob = prob,
raw = raw,
class.values = class.values,
positive.class = positive.class))
prob <- data.frame(c(0.2, 0.5, 0.6), c(0.8, 0.5, 0.4))
raw <- c("Negative", "Positive", "Positive")
class.values <- c("Positive", "Negative")
positive.class <- "Positive"
testthat::expect_silent(FinalPred$new()$set(prob = prob,
raw = raw,
class.values = class.values,
positive.class = positive.class))
})
testthat::test_that("FinalPred: set function checks parameter type", {
prob <- data.frame(c(0.2, 0.5, 0.6), c(0.8, 0.5, 0.4))
raw <- factor(c("Negative", "Positive", "Positive"))
class.values <- c("Positive", "Negative")
positive.class <- "wrong"
testthat::expect_error(FinalPred$new()$set(prob = prob,
raw = raw,
class.values = class.values,
positive.class = positive.class),
"[FinalPred][FATAL] Positive class is invalid. Must be one of (Positive, Negative). Aborting...",
fixed = TRUE)
prob <- NULL
raw <- c("Negative", "Positive", "Positive")
class.values <- c("Positive", "Negative")
positive.class <- "Positive"
testthat::expect_error(FinalPred$new()$set(prob = prob,
raw = raw,
class.values = class.values,
positive.class = positive.class),
"[FinalPred][FATAL] Predictions were not computed. Aborting...",
fixed = TRUE)
prob <- data.frame(c(0.2, 0.5, 0.6), c(0.8, 0.5, 0.4))
raw <- data.frame(c("Negative", "Positive", "Positive"))
class.values <- c("Positive", "Negative")
positive.class <- "Positive"
testthat::expect_error(FinalPred$new()$set(prob = prob,
raw = raw,
class.values = class.values,
positive.class = positive.class),
"[FinalPred][FATAL] Class values contains NA's. Aborting...",
fixed = TRUE)
})
testthat::test_that("FinalPred: getProb function works", {
testthat::expect_null(FinalPred$new()$getProb())
})
testthat::test_that("FinalPred: getRaw function works", {
testthat::expect_null(FinalPred$new()$getRaw())
})
testthat::test_that("FinalPred: getClassValues function works", {
testthat::expect_null(FinalPred$new()$getClassValues())
})
testthat::test_that("FinalPred: getPositiveClass function works", {
testthat::expect_null(FinalPred$new()$getPositiveClass())
})
testthat::test_that("FinalPred: getNegativeClass function works", {
testthat::expect_null(FinalPred$new()$getNegativeClass())
}) |
declare_rs <- function(N = NULL,
strata = NULL,
clusters = NULL,
n = NULL,
n_unit = NULL,
prob = NULL,
prob_unit = NULL,
strata_n = NULL,
strata_prob = NULL,
simple = FALSE,
check_inputs = TRUE) {
all_args <- mget(names(formals(sys.function())))
if (check_inputs) {
input_check <- check_samplr_arguments_new(all_args)
for (i in names(input_check))
all_args[[i]] <- input_check[[i]]
all_args$check_inputs <- FALSE
}
is_strata <- is.vector(strata) || is.factor(strata)
is_clust <- is.vector(clusters) || is.factor(clusters)
if (is_strata && is_clust) {
rs_type <- "stratified_and_clustered"
} else if (is_clust) {
rs_type <- "clustered"
} else if (is_strata) {
rs_type <- "stratified"
} else if (simple == FALSE) {
rs_type <- "complete"
} else {
rs_type <- "simple"
}
return_object <- list2env(all_args, parent = emptyenv())
return_object$rs_function <- function() {
.Deprecated("draw_rs")
rs_function(return_object)
}
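# 'rs_type' and 'cleaned_arguments' are deprecated: assign them lazily so the warning only fires if they are actually accessed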
delayedAssign("rs_type", {
warning("rs_type is deprecated; check the class attribute instead.")
rs_type
}, assign.env = return_object)
delayedAssign("cleaned_arguments", {
warning("cleaned_arguments is deprecated")
input_check
}, assign.env = return_object)
delayedAssign("probabilities_vector",
rs_probabilities(return_object),
assign.env = return_object)
delayedAssign("probabilities_matrix",
cbind((1 - rs_probabilities(return_object)),
rs_probabilities(return_object)),
assign.env = return_object)
class(return_object) <-
c("rs_declaration", paste0("rs_", rs_type))
attr(return_object, "call") <- match.call()
return(return_object)
}
draw_rs <- function(declaration = NULL) {
if (is.null(declaration)) {
all_args <- mget(names(formals(declare_rs)))
declaration <- do.call(declare_rs, all_args)
}
rs_function(declaration)
}
formals(draw_rs) <- c(formals(draw_rs), formals(declare_rs))
obtain_inclusion_probabilities <- function(declaration = NULL) {
if (is.null(declaration)) {
all_args <- mget(names(formals(declare_rs)))
declaration <- do.call(declare_rs, all_args)
} else if (!inherits(declaration, "rs_declaration")) {
stop("You must provide a random sampling declaration created by declare_rs().")
}
declaration$probabilities_vector
}
formals(obtain_inclusion_probabilities) <-
c(formals(obtain_inclusion_probabilities),
formals(declare_rs))
summary.rs_declaration <- function(object, ...) {
print(object, ... = ...)
}
print.rs_declaration <- function(x, ...) {
S <- draw_rs(x)
n <- length(S)
cat("Random sampling procedure:",
switch(
class(x)[2],
"rs_stratified" = "Stratified",
"rs_clustered" = "Cluster",
"rs_simple" = "Simple",
"rs_stratified_and_clustered" = "Stratified and clustered",
"rs_complete" = "Complete"
),
"random sampling",
"\n")
cat("Number of units:", n, "\n")
if (!is.null(x$strata)) {
cat("Number of strata:", length(unique(x$strata)), "\n")
}
if (!is.null(x$clusters)) {
cat("Number of clusters:", length(unique(x$clusters)), "\n")
}
if (is_constant(x$probabilities_vector)) {
cat("The inclusion probabilities are constant across units.")
} else{
cat(
"The inclusion probabilities are NOT constant across units.",
"Your analysis strategy must account for differential inclusion probabilities,",
"typically by employing inverse probability weights."
)
}
invisible(x)
} |
t1 <- read.table(text = "ID1 Name1 Type1 Name2
1 A1 T1 B1
2 A2 T2 B2
3 A3 T1 B3_a
4 A4 T3 B4_a", header = T)
t1
t2 <- read.table(text = "NameBank TypeBank
A1 T1
A2 T2
A3 T1
A4 T3
B1 T1
B2 T4
B3 T2
B4 T3", header = T)
t2
t1;t2
library(dplyr)
t2[1,]
t1[1,]
chrs <- charmatch(t2$NameBank, t1$Name2)
chrs
cbind(
t1[chrs[!is.na(chrs)],],
t2[which(!is.na(chrs)),]
)
t1$Type2 <- NA_character_
t1
library(stringr)
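# For each NameBank entry, set Type2 on the rows of t1 whose Name2 starts with that name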
for( row in seq_len( nrow( t2 ) ) ) {
t1$Type2[ substr( t1$Name2, 0, nchar( t2$NameBank[row] ) ) == t2$NameBank[row] ] <- t2$TypeBank[row]
} |
lama_write <- function(x, yaml_path) {
err_handler <- composerr("Error while calling 'lama_write'")
tryCatch(
yaml::write_yaml(dictionary_to_yaml(x), yaml_path),
error = function(e) err_handler(e),
warning = function(w) err_handler(w)
)
} |
kc_s2 = function(doy, RG, Ta, a, b){
b2 <- raster("B2.tif")
b3 <- raster("B3.tif")
b4 <- raster("B4.tif")
b8 <- raster("B8.tif")
mask <- readOGR("mask.shp")
b2_crop <- crop(b2, extent(mask))
b2_mascara <- mask(b2_crop, mask)
b3_crop <- crop(b3, extent(mask))
b3_mascara <- mask(b3_crop, mask)
b4_crop <- crop(b4, extent(mask))
b4_mascara <- mask(b4_crop, mask)
b8_crop <- crop(b8, extent(mask))
b8_mascara <- mask(b8_crop, mask)
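# Sentinel-2 reflectances are delivered scaled by 10000; rescale to 0-1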
b2_mascara <- b2_mascara/10000
b3_mascara <- b3_mascara/10000
b4_mascara <- b4_mascara/10000
b8_mascara <- b8_mascara/10000
Alb_Top = b2_mascara*0.32+b3_mascara*0.26+b4_mascara*0.25+b8_mascara*0.17
Alb_sur = 0.6054*Alb_Top + 0.0797
Alb_24 = 1.0223*Alb_sur + 0.0149
writeRaster(Alb_24, "Alb_24", format = "GTiff", overwrite=TRUE)
NDVI =(b8_mascara-b4_mascara)/(b8_mascara+b4_mascara)
writeRaster(NDVI, "NDVI", format = "GTiff", overwrite=TRUE)
lati <- long <- b2_mascara
xy <- coordinates(b2_mascara)
long[] <- xy[, 1]
long <- crop(long, extent(mask))
lati[] <- xy[, 2]
lati <- crop(lati, extent(mask))
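# Solar geometry: day angle, equation of time, declination and sunset hour angle, used to estimate top-of-atmosphere radiation (RsTOP)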
map1 <- (long/long)*((2*pi)/365)*(doy-1)
Et <- (0.000075+0.001868*cos(map1)-0.032077*sin(map1)-0.014615*cos(2*map1)-0.04089*sin(2*map1))
LAT <- (13+(4*long/60)+(Et/60))
Dec <- 0.006918-0.399912*cos(map1)+0.070257*sin(map1)+0.006758*cos(2*map1)+0.000907*sin(2*map1)-0.002697*cos(3*map1)+0.00148*sin(3*map1)
W <- 15*(LAT-12)*(pi/180)
cos_zwn <- sin(lati*pi/180)*sin(Dec)+cos(lati*pi/180)*cos(Dec)*cos(W)
E0 <- (1.00011+0.034221*cos(map1)+0.00128*sin(map1)+0.000719*cos(2*map1)+0.000077*sin(2*map1))
Ws = acos(((-1)*tan(lati*pi/180))*tan(Dec))
R =(Ws*sin(lati*pi/180)*sin(Dec))+(cos(lati*pi/180)*cos(Dec)*sin(Ws))
RsTOP_aux =(1367/pi)*E0*R
RsTOP = resample(RsTOP_aux, b2_mascara, method="bilinear")
Transm =(RG*11.6)/RsTOP
Rn_coeff =6.99*Ta-39.99
Rn =((1-Alb_24)*(RG*11.6))-(Rn_coeff*Transm)
Rn_MJ =Rn/11.6
writeRaster(Rn_MJ, "Rn_MJ", format = "GTiff", overwrite=TRUE)
slope =(4098*(0.6108*exp((17.27*(Ta))/((Ta)+237.3)))/((Ta)+237.3)^2)
LEeq = (slope*Rn)/(slope+0.066)
rm(b2, b3, b4, b8, b2_mascara, b3_mascara, b4_mascara, b8_mascara, slope, Rn_coeff, RsTOP, RsTOP_aux, R, Ws, E0, cos_zwn, W, Dec, LAT, Et, map1, lati, long)
RR =Alb_24*RG
Emiss_atm = 0.9364*(((-1)*log(Transm))^0.1135)
Emiss_atm[Emiss_atm > 1] <- 1
RLdown_wm2 =(Emiss_atm*5.67*(10^(-8))*((Ta +273.15)^4))
RL_down =RLdown_wm2/11.6
RL_up =(RG-RR+RL_down-Rn_MJ)
Esurf_r1 <- NDVI
Esurf_r1[NDVI < 0] <- 1
Esurf_r1[NDVI >= 0] <- NA
Esurf_r2 <- 1.0035+0.0589*log(NDVI)
Esurf <- merge(Esurf_r1, Esurf_r2)
TS24 =((RL_up*11.6)/((Esurf*5.67)*(10^(-8))))^(0.25)
TS24[TS24 < 273.15] = NA
writeRaster(TS24, "LST", format = "GTiff", overwrite=TRUE)
NDVI[NDVI <= 0] = NA
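# Crop coefficient from the calibrated exponential model: kc = exp(a + b * LST[deg C] / (albedo * NDVI))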
kc=exp((a)+(b*((TS24-273.15)/(Alb_24*NDVI))))
writeRaster(kc, "kc", format = "GTiff", overwrite=TRUE)
}
evapo_s2 = function(doy, RG, Ta, ET0, a, b){
b2 <- raster("B2.tif")
b3 <- raster("B3.tif")
b4 <- raster("B4.tif")
b8 <- raster("B8.tif")
mask <- readOGR("mask.shp")
b2_crop <- crop(b2, extent(mask))
b2_mascara <- mask(b2_crop, mask)
b3_crop <- crop(b3, extent(mask))
b3_mascara <- mask(b3_crop, mask)
b4_crop <- crop(b4, extent(mask))
b4_mascara <- mask(b4_crop, mask)
b8_crop <- crop(b8, extent(mask))
b8_mascara <- mask(b8_crop, mask)
b2_mascara <- b2_mascara/10000
b3_mascara <- b3_mascara/10000
b4_mascara <- b4_mascara/10000
b8_mascara <- b8_mascara/10000
Alb_Top = b2_mascara*0.32+b3_mascara*0.26+b4_mascara*0.25+b8_mascara*0.17
Alb_sur = 0.6054*Alb_Top + 0.0797
Alb_24 = 1.0223*Alb_sur + 0.0149
writeRaster(Alb_24, "Alb_24", format = "GTiff", overwrite=TRUE)
NDVI =(b8_mascara-b4_mascara)/(b8_mascara+b4_mascara)
writeRaster(NDVI, "NDVI", format = "GTiff", overwrite=TRUE)
lati <- long <- b2_mascara
xy <- coordinates(b2_mascara)
long[] <- xy[, 1]
long <- crop(long, extent(mask))
lati[] <- xy[, 2]
lati <- crop(lati, extent(mask))
map1 <- (long/long)*((2*pi)/365)*(doy-1)
Et <- (0.000075+0.001868*cos(map1)-0.032077*sin(map1)-0.014615*cos(2*map1)-0.04089*sin(2*map1))
LAT <- (13+(4*long/60)+(Et/60))
Dec <- 0.006918-0.399912*cos(map1)+0.070257*sin(map1)+0.006758*cos(2*map1)+0.000907*sin(2*map1)-0.002697*cos(3*map1)+0.00148*sin(3*map1)
W <- 15*(LAT-12)*(pi/180)
cos_zwn <- sin(lati*pi/180)*sin(Dec)+cos(lati*pi/180)*cos(Dec)*cos(W)
E0 <- (1.00011+0.034221*cos(map1)+0.00128*sin(map1)+0.000719*cos(2*map1)+0.000077*sin(2*map1))
Ws = acos(((-1)*tan(lati*pi/180))*tan(Dec))
R =(Ws*sin(lati*pi/180)*sin(Dec))+(cos(lati*pi/180)*cos(Dec)*sin(Ws))
RsTOP_aux =(1367/pi)*E0*R
RsTOP = resample(RsTOP_aux, b2_mascara, method="bilinear")
Transm =(RG*11.6)/RsTOP
Rn_coeff =6.99*Ta-39.99
Rn =((1-Alb_24)*(RG*11.6))-(Rn_coeff*Transm)
Rn_MJ =Rn/11.6
writeRaster(Rn_MJ, "Rn_MJ", format = "GTiff", overwrite=TRUE)
slope =(4098*(0.6108*exp((17.27*(Ta))/((Ta)+237.3)))/((Ta)+237.3)^2)
LEeq = (slope*Rn)/(slope+0.066)
rm(b2, b3, b4, b8, b2_mascara, b3_mascara, b4_mascara, b8_mascara, slope, Rn_coeff, RsTOP, RsTOP_aux, R, Ws, E0, cos_zwn, W, Dec, LAT, Et, map1, lati, long)
RR =Alb_24*RG
Emiss_atm = 0.9364*(((-1)*log(Transm))^0.1135)
Emiss_atm[Emiss_atm > 1] <- 1
RLdown_wm2 =(Emiss_atm*5.67*(10^(-8))*((Ta +273.15)^4))
RL_down =RLdown_wm2/11.6
RL_up =(RG-RR+RL_down-Rn_MJ)
Esurf_r1 <- NDVI
Esurf_r1[NDVI < 0] <- 1
Esurf_r1[NDVI >= 0] <- NA
Esurf_r2 <- 1.0035+0.0589*log(NDVI)
Esurf <- merge(Esurf_r1, Esurf_r2)
TS24 =((RL_up*11.6)/((Esurf*5.67)*(10^(-8))))^(0.25)
TS24[TS24 < 273.15] = NA
writeRaster(TS24, "LST", format = "GTiff", overwrite=TRUE)
NDVI[NDVI <= 0] = NA
kc=exp((a)+(b*((TS24-273.15)/(Alb_24*NDVI))))
writeRaster(kc, "kc", format = "GTiff", overwrite=TRUE)
ET=kc*ET0
writeRaster(ET, "evapo", format = "GTiff", overwrite=TRUE)
}
radiation_s2 = function(doy, RG, Ta, ET0, a, b){
b2 <- raster("B2.tif")
b3 <- raster("B3.tif")
b4 <- raster("B4.tif")
b8 <- raster("B8.tif")
mask <- readOGR("mask.shp")
b2_crop <- crop(b2, extent(mask))
b2_mascara <- mask(b2_crop, mask)
b3_crop <- crop(b3, extent(mask))
b3_mascara <- mask(b3_crop, mask)
b4_crop <- crop(b4, extent(mask))
b4_mascara <- mask(b4_crop, mask)
b8_crop <- crop(b8, extent(mask))
b8_mascara <- mask(b8_crop, mask)
b2_mascara <- b2_mascara/10000
b3_mascara <- b3_mascara/10000
b4_mascara <- b4_mascara/10000
b8_mascara <- b8_mascara/10000
Alb_Top = b2_mascara*0.32+b3_mascara*0.26+b4_mascara*0.25+b8_mascara*0.17
Alb_sur = 0.6054*Alb_Top + 0.0797
Alb_24 = 1.0223*Alb_sur + 0.0149
writeRaster(Alb_24, "Alb_24", format = "GTiff", overwrite=TRUE)
NDVI =(b8_mascara-b4_mascara)/(b8_mascara+b4_mascara)
writeRaster(NDVI, "NDVI", format = "GTiff", overwrite=TRUE)
lati <- long <- b2_mascara
xy <- coordinates(b2_mascara)
long[] <- xy[, 1]
long <- crop(long, extent(mask))
lati[] <- xy[, 2]
lati <- crop(lati, extent(mask))
map1 <- (long/long)*((2*pi)/365)*(doy-1)
Et <- (0.000075+0.001868*cos(map1)-0.032077*sin(map1)-0.014615*cos(2*map1)-0.04089*sin(2*map1))
LAT <- (13+(4*long/60)+(Et/60))
Dec <- 0.006918-0.399912*cos(map1)+0.070257*sin(map1)+0.006758*cos(2*map1)+0.000907*sin(2*map1)-0.002697*cos(3*map1)+0.00148*sin(3*map1)
W <- 15*(LAT-12)*(pi/180)
cos_zwn <- sin(lati*pi/180)*sin(Dec)+cos(lati*pi/180)*cos(Dec)*cos(W)
E0 <- (1.00011+0.034221*cos(map1)+0.00128*sin(map1)+0.000719*cos(2*map1)+0.000077*sin(2*map1))
Ws = acos(((-1)*tan(lati*pi/180))*tan(Dec))
R =(Ws*sin(lati*pi/180)*sin(Dec))+(cos(lati*pi/180)*cos(Dec)*sin(Ws))
RsTOP_aux =(1367/pi)*E0*R
RsTOP = resample(RsTOP_aux, b2_mascara, method="bilinear")
Transm =(RG*11.6)/RsTOP
Rn_coeff =6.99*Ta-39.99
Rn =((1-Alb_24)*(RG*11.6))-(Rn_coeff*Transm)
Rn_MJ =Rn/11.6
writeRaster(Rn_MJ, "Rn_MJ", format = "GTiff", overwrite=TRUE)
slope =(4098*(0.6108*exp((17.27*(Ta))/((Ta)+237.3)))/((Ta)+237.3)^2)
LEeq = (slope*Rn)/(slope+0.066)
rm(b2, b3, b4, b8, b2_mascara, b3_mascara, b4_mascara, b8_mascara, slope, Rn_coeff, RsTOP, RsTOP_aux, R, Ws, E0, cos_zwn, W, Dec, LAT, Et, map1, lati, long)
RR =Alb_24*RG
Emiss_atm = 0.9364*(((-1)*log(Transm))^0.1135)
Emiss_atm[Emiss_atm > 1] <- 1
RLdown_wm2 =(Emiss_atm*5.67*(10^(-8))*((Ta +273.15)^4))
RL_down =RLdown_wm2/11.6
RL_up =(RG-RR+RL_down-Rn_MJ)
Esurf_r1 <- NDVI
Esurf_r1[NDVI < 0] <- 1
Esurf_r1[NDVI >= 0] <- NA
Esurf_r2 <- 1.0035+0.0589*log(NDVI)
Esurf <- merge(Esurf_r1, Esurf_r2)
TS24 =((RL_up*11.6)/((Esurf*5.67)*(10^(-8))))^(0.25)
TS24[TS24 < 273.15] = NA
writeRaster(TS24, "LST", format = "GTiff", overwrite=TRUE)
NDVI[NDVI <= 0] = NA
kc=exp((a)+(b*((TS24-273.15)/(Alb_24*NDVI))))
ET=kc*ET0
LE_MJ =ET*2.45
writeRaster(LE_MJ, "LE_MJ", format = "GTiff", overwrite=TRUE)
G_Rn =3.98*exp(-25.47*Alb_24)
G_MJ =G_Rn*Rn_MJ
writeRaster(G_MJ, "G_MJ", format = "GTiff", overwrite=TRUE)
H_MJ =Rn_MJ-LE_MJ-G_MJ
writeRaster(H_MJ, "H_MJ", format = "GTiff", overwrite=TRUE)
}
albedo_s2 = function(){
b2 <- raster("B2.tif")
b3 <- raster("B3.tif")
b4 <- raster("B4.tif")
b8 <- raster("B8.tif")
mask <- readOGR("mask.shp")
b2_crop <- crop(b2, extent(mask))
b2_mascara <- mask(b2_crop, mask)
b3_crop <- crop(b3, extent(mask))
b3_mascara <- mask(b3_crop, mask)
b4_crop <- crop(b4, extent(mask))
b4_mascara <- mask(b4_crop, mask)
b8_crop <- crop(b8, extent(mask))
b8_mascara <- mask(b8_crop, mask)
b2_mascara <- b2_mascara/10000
b3_mascara <- b3_mascara/10000
b4_mascara <- b4_mascara/10000
b8_mascara <- b8_mascara/10000
Alb_Top = b2_mascara*0.32+b3_mascara*0.26+b4_mascara*0.25+b8_mascara*0.17
Alb_sur = 0.6054*Alb_Top + 0.0797
Alb_24 = 1.0223*Alb_sur + 0.0149
writeRaster(Alb_24, "Alb_24", format = "GTiff", overwrite=TRUE)
} |
load("claytonRho.rda")
load("gumbelRho.rda")
load("plackettTau.rda")
load("galambos.rda")
load("huslerReiss.rda")
load("tev.rda")
save(.claytonRhoNeg, .claytonRhoPos,
.gumbelRho,
.plackettTau,
.galambosTau, .galambosRho,
.huslerReissTau, .huslerReissRho,
.tevTau, .tevRho,
file = "sysdata.rda", compress=TRUE) |
library(BayesianFROC)
data.example <- list(
c=c(3,2,1),
h=c(97,32,31),
f=c(1,14,74),
NL=259,
NI=57,
C=3)
data.example <- give_name_srsc_data(data.example)
viewdata(data.example)
draw.CFP.CTP.from.dataList(data.example)
fit <- fit_Bayesian_FROC(data.example,cha = 3)
fit.stanfit <- as(fit, "stanfit")
summary(fit.stanfit)
print(fit.stanfit)
rstan::traceplot(fit.stanfit,par=c("A"))
rstan::stan_dens(fit.stanfit,par=c("A"))
check_divergences(fit.stanfit)
check_hmc_diagnostics(fit.stanfit)
pairs(fit.stanfit,pars=c("A","lp__","m"))
get_posterior_mean(fit.stanfit)
check_rhat(fit.stanfit)
rstan::stan_hist(fit.stanfit)
rstan::stan_rhat(fit.stanfit)
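# Echo the commands used above (styled with crayon) so they can be copied and re-run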
message("
", crayon::bgBlack$cyan$bold$italic$underline("data.example <- list( "),"
", crayon::bgBlack$cyan$bold$italic$underline(" c=c(3,2,1),
", crayon::bgBlack$cyan$bold$italic$underline("h=c(97,32,31),
", crayon::bgBlack$cyan$bold$italic$underline("f=c(1,14,74),
", crayon::bgBlack$cyan$bold$italic$underline(" NL=259,
", crayon::bgBlack$cyan$bold$italic$underline(" NI=57,
", crayon::bgBlack$cyan$bold$italic$underline(" C=3)
", crayon::bgBlack$cyan$bold$italic$underline("data.example <- give_name_srsc_data(data.example)"),"
", crayon::bgBlack$cyan$bold$italic$underline("viewdata(data.example)"),"
", crayon::bgBlack$cyan$bold$italic$underline("draw.CFP.CTP.from.dataList(data.example)"),"
", crayon::bgBlack$cyan$bold$italic$underline("fit <- fit_Bayesian_FROC(data.example,cha = 3)"),"
", crayon::bgBlack$cyan$bold$italic$underline("fit.stanfit <- as(fit, \"stanfit\")"),"
", crayon::bgBlack$cyan$bold$italic$underline("summary(fit.stanfit)"),"
", crayon::bgBlack$cyan$bold$italic$underline("print(fit.stanfit)"),"
", crayon::bgBlack$cyan$bold$italic$underline("rstan::traceplot(fit.stanfit,par=c(\"A\"))"),"
", crayon::bgBlack$cyan$bold$italic$underline("rstan::stan_dens(fit.stanfit,par=c(\"A\"))"),"
", crayon::bgBlack$cyan$bold$italic$underline("check_divergences(fit.stanfit)"),"
", crayon::bgBlack$cyan$bold$italic$underline("check_hmc_diagnostics(fit.stanfit)"),"
", crayon::bgBlack$cyan$bold$italic$underline("pairs(fit.stanfit,pars=c(\"A\",\"lp__\",\"m\"))"),"
", crayon::bgBlack$cyan$bold$italic$underline("get_posterior_mean(fit.stanfit)"),"
", crayon::bgBlack$cyan$bold$italic$underline("check_rhat(fit.stanfit)"),"
", crayon::bgBlack$cyan$bold$italic$underline("rstan::stan_hist(fit.stanfit)"),"
", crayon::bgBlack$cyan$bold$italic$underline("rstan::stan_rhat(fit.stanfit)"),"
") |
BY <- function(x, ...) UseMethod("BY")
BY.default <- function(x, g, FUN, ..., use.g.names = TRUE, sort = TRUE,
expand.wide = FALSE, parallel = FALSE, mc.cores = 1L,
return = c("same", "vector", "list")) {
if(!is.atomic(x)) stop("x needs to be an atomic vector")
if(is.matrix(x) && !inherits(x, "matrix")) return(UseMethod("BY", unclass(x)))
if(!(is.function(FUN) || is.character(FUN))) stop("FUN needs to be a function")
aplyfun <- if(parallel) function(...) mclapply(..., mc.cores = mc.cores) else lapply
simplify <- switch(return[1L], same = 1L, vector = 2L, list = 3L, stop("BY.default only supports same, vector and list output!"))
g <- GRP(g, return.groups = use.g.names, sort = sort, call = FALSE)
res <- aplyfun(gsplit(x, g), FUN, ...)
if(use.g.names) names(res) <- GRPnames(g, FALSE)
if(simplify == 3L) return(res)
if(expand.wide) return(do.call(rbind, res))
if(use.g.names) {
res <- unlist(res, recursive = FALSE)
if(simplify == 1L) return(copyMostAttributes(res, x))
} else {
if(simplify == 1L) {
res <- unlist(res, FALSE, FALSE)
if(length(res) == length(x) && typeof(res) == typeof(x) && isTRUE(g$ordered[2L])) return(duplAttributes(res, x))
return(copyMostAttributes(res, x))
}
ll <- length(res)
nr1 <- names(res[[1L]])
res <- unlist(res, FALSE, FALSE)
if(length(res) != ll && length(nr1) && length(res) == length(nr1)*ll)
names(res) <- rep(nr1, ll)
}
res
}
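# Helpers: split one column by group, apply FUN and recombine; copysplaplfun additionally copies most attributes of the original column onto the result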
copysplaplfun <- function(x, g, FUN, ...) copyMostAttributes(unlist(lapply(gsplit(x, g), FUN, ...), FALSE, FALSE), x)
splaplfun <- function(x, g, FUN, ...) unlist(lapply(gsplit(x, g), FUN, ...), FALSE, FALSE)
BY.data.frame <- function(x, g, FUN, ..., use.g.names = TRUE, sort = TRUE,
expand.wide = FALSE, parallel = FALSE, mc.cores = 1L,
return = c("same", "matrix", "data.frame", "list")) {
if(!is.list(x)) stop("x needs to be a list")
if(!(is.function(FUN) || is.character(FUN))) stop("FUN needs to be a function")
aplyfun <- if(parallel) function(...) mclapply(..., mc.cores = mc.cores) else lapply
return <- switch(return[1L], same = 1L, matrix = 3L, data.frame = 2L, list = 0L,
stop("Unknown return option!"))
g <- GRP(g, return.groups = use.g.names, sort = sort, call = FALSE)
if(return != 0L) {
ax <- attributes(x)
if(expand.wide) {
if(return < 3L) {
splitfun <- function(y) .Call(Cpp_mctl, do.call(rbind, lapply(gsplit(y, g), FUN, ...)), TRUE, 0L)
res <- unlist(aplyfun(x, splitfun), recursive = FALSE, use.names = TRUE)
if(return == 1L) {
isDTl <- inherits(x, "data.table")
ax[["names"]] <- names(res)
ax[["row.names"]] <- if(use.g.names && !inherits(x, "data.table") && length(gn <- GRPnames(g))) gn else
.set_row_names(length(res[[1L]]))
} else {
isDTl <- FALSE
ax <- list(names = names(res),
row.names = if(use.g.names && length(gn <- GRPnames(g))) gn else .set_row_names(length(res[[1L]])),
class = "data.frame")
}
} else {
attributes(x) <- NULL
splitfun <- function(y) do.call(rbind, lapply(gsplit(y, g), FUN, ...))
res <- do.call(cbind, aplyfun(x, splitfun))
cn <- dimnames(res)[[2L]]
namr <- rep(ax[["names"]], each = ncol(res)/length(x))
dimnames(res) <- list(if(use.g.names) GRPnames(g) else NULL,
if(length(cn)) paste(namr, cn, sep = ".") else namr)
return(res)
}
} else {
matl <- return == 3L
isDTl <- !matl && return != 2L && inherits(x, "data.table")
attributes(x) <- NULL
if(return == 2L) ax <- list(names = ax[["names"]], row.names = ax[["row.names"]], class = "data.frame")
if(use.g.names && !isDTl && length(gn <- GRPnames(g))) {
res <- vector("list", length(x))
res1 <- lapply(gsplit(x[[1L]], g), FUN, ...)
names(res1) <- gn
res[[1L]] <- unlist(res1, FALSE, TRUE)
namres1 <- names(res[[1L]])
if(matl) dn <- list(namres1, ax[["names"]]) else
if(length(namres1)) ax[["row.names"]] <- namres1 else
if(length(res[[1L]]) != length(x[[1L]])) ax[["row.names"]] <- .set_row_names(length(res[[1L]]))
if(length(namres1)) names(res[[1L]]) <- NULL
if(matl) {
if(length(res) > 1L) res[-1L] <- aplyfun(x[-1L], splaplfun, g, FUN, ...)
res <- do.call(cbind, res)
dimnames(res) <- dn
return(res)
} else {
copyMostAttributes(res[[1L]], x[[1L]])
if(length(res) > 1L) res[-1L] <- aplyfun(x[-1L], copysplaplfun, g, FUN, ...)
}
} else {
if(matl) {
res <- do.call(cbind, aplyfun(x, splaplfun, g, FUN, ...))
sl <- isTRUE(g$ordered[2L]) && nrow(res) == length(x[[1L]])
if(sl) rn1 <- ax[["row.names"]][1L]
dimnames(res) <- list(if(sl && length(rn1) && is.character(rn1) && rn1 != "1")
ax[["row.names"]] else NULL, ax[["names"]])
return(res)
} else {
res <- aplyfun(x, copysplaplfun, g, FUN, ...)
if(length(res[[1L]]) != length(x[[1L]]) || !isTRUE(g$ordered[2L]))
ax[["row.names"]] <- .set_row_names(length(res[[1L]]))
}
}
}
return(condalcSA(res, ax, isDTl))
}
if(expand.wide) return(aplyfun(x, function(y) do.call(rbind, lapply(gsplit(y, g, use.g.names), FUN, ...))))
return(aplyfun(x, function(y) lapply(gsplit(y, g, use.g.names), FUN, ...)))
}
BY.list <- function(x, ...) BY.data.frame(x, ...)
BY.matrix <- function(x, g, FUN, ..., use.g.names = TRUE, sort = TRUE,
expand.wide = FALSE, parallel = FALSE, mc.cores = 1L,
return = c("same", "matrix", "data.frame", "list")) {
if(!is.matrix(x)) stop("x needs to be a matrix")
if(!(is.function(FUN) || is.character(FUN))) stop("FUN needs to be a function")
aplyfun <- if(parallel) function(...) parallel::mclapply(..., mc.cores = mc.cores) else lapply
return <- switch(return[1L], same = 3L, matrix = 2L, data.frame = 1L, list = 0L,
stop("Unknown return option!"))
g <- GRP(g, return.groups = use.g.names, sort = sort, call = FALSE)
if(return != 0L) {
if(expand.wide) {
if(return == 1L) {
splitfun <- function(y) .Call(Cpp_mctl, do.call(rbind, lapply(gsplit(y, g), FUN, ...)), TRUE, 0L)
res <- unlist(aplyfun(.Call(Cpp_mctl, x, TRUE, 0L), splitfun), recursive = FALSE, use.names = TRUE)
ax <- list(names = names(res),
row.names = if(use.g.names && length(gn <- GRPnames(g))) gn else .set_row_names(length(res[[1L]])),
class = "data.frame")
} else {
splitfun2 <- function(y) do.call(rbind, lapply(gsplit(y, g), FUN, ...))
res <- do.call(cbind, aplyfun(.Call(Cpp_mctl, x, FALSE, 0L), splitfun2))
cn <- dimnames(res)[[2L]]
namr <- rep(dimnames(x)[[2L]], each = ncol(res)/ncol(x))
dn <- list(if(use.g.names) GRPnames(g) else NULL,
if(length(cn)) paste(namr, cn, sep = ".") else namr)
if(return == 2L) return(`dimnames<-`(res, dn))
ax <- attributes(x)
ax[["dim"]] <- dim(res)
ax[["dimnames"]] <- dn
}
} else {
if(use.g.names && length(gn <- GRPnames(g))) {
res <- vector("list", ncol(x))
res1 <- lapply(gsplit(`names<-`(x[, 1L], NULL), g), FUN, ...)
names(res1) <- gn
res[[1L]] <- unlist(res1, FALSE, TRUE)
namres1 <- names(res[[1L]])
if(length(namres1)) names(res[[1L]]) <- NULL
if(length(res) > 1L) res[-1L] <- aplyfun(.Call(Cpp_mctl, x[, -1L, drop = FALSE], FALSE, 0L), splaplfun, g, FUN, ...)
if(return > 1L) {
res <- do.call(cbind, res)
dn <- list(namres1, dimnames(x)[[2L]])
if(return == 2L) return(`dimnames<-`(res, dn))
ax <- attributes(x)
ax[["dim"]] <- dim(res)
ax[["dimnames"]] <- dn
} else {
ax <- list(names = dimnames(x)[[2L]],
row.names = if(length(namres1)) namres1 else .set_row_names(length(res[[1L]])),
class = "data.frame")
}
} else {
res <- aplyfun(.Call(Cpp_mctl, x, TRUE, 0L), splaplfun, g, FUN, ...)
if(return > 1L) {
res <- do.call(cbind, res)
if(return == 2L) return(res)
ax <- attributes(x)
if(length(dimnames(x)[[1L]]) && !(isTRUE(g$ordered[2L]) && nrow(res) == nrow(x))) {
ax[["dimnames"]][1L] <- list(NULL)
ax[["dim"]] <- dim(res)
}
} else {
lr1 <- length(res[[1L]])
ax <- list(names = names(res),
row.names = if(lr1 == nrow(x) && length(rn <- dimnames(x)[[1L]]) && isTRUE(g$ordered[2L])) rn else .set_row_names(lr1),
class = "data.frame")
}
}
}
return(setAttributes(res, ax))
}
if(expand.wide) return(aplyfun(.Call(Cpp_mctl, x, TRUE, 0L), function(y) do.call(rbind, lapply(gsplit(y, g, use.g.names), FUN, ...))))
return(aplyfun(.Call(Cpp_mctl, x, TRUE, 0L), function(y) lapply(gsplit(y, g, use.g.names), FUN, ...)))
}
BY.grouped_df <- function(x, FUN, ..., keep.group_vars = TRUE, use.g.names = FALSE) {
g <- GRP.grouped_df(x, call = FALSE)
gn <- which(attr(x, "names") %in% g[[5L]])
res <- BY.data.frame(if(length(gn)) fcolsubset(x, -gn) else x, g, FUN, ..., use.g.names = use.g.names)
if(!is.data.frame(res)) return(res)
nrr <- fnrow2(res)
same_size <- nrr == fnrow2(x)
if(!keep.group_vars) return(if(same_size && isTRUE(g$ordered[2L])) res else fungroup(res))
if(!((same_size && isTRUE(g$ordered[2L])) || nrr == g[[1L]])) return(fungroup(res))
if(same_size) {
ar <- attributes(res)
ar[["names"]] <- c(g[[5L]], ar[["names"]])
return(condalcSA(c(.subset(x, gn), res), ar, any(ar$class == "data.table")))
}
ar <- attributes(fungroup2(res, oldClass(res)))
attributes(res) <- NULL
ar[["names"]] <- c(g[[5L]], ar[["names"]])
condalcSA(c(g[[4L]], res), ar, any(ar$class == "data.table"))
} |
rue_no_split <- function(x_train,
x_test,
num_trees = 500L,
response_name = "label") {
rue_args <- classifier_args(
data = x_train,
num_trees = num_trees,
response_name = response_name
)
rue_model <- do.call(what = ranger::ranger, args = rue_args)
rue_train <- se_predicted(rue_model, x_train, is_training = TRUE)
rue_test <- se_predicted(rue_model, x_test, is_training = FALSE)
return(list(test = rue_test, train = rue_train))
}
rue_pt <- function(x_train,
x_test,
R = 1e3,
sub_ratio = 1 / 2,
num_trees = 500L,
response_name = "label") {
scorer <- function(x_train, x_test) {
rue_no_split(
x_train = x_train,
x_test = x_test,
response_name = response_name,
num_trees = num_trees
)
}
result <- exchangeable_null(x_train, x_test, scorer, R = R, is_oob = TRUE)
return(result)
} |
expected <- eval(parse(text="102:112"));
test(id=0, code={
argv <- eval(parse(text="list(102L, 112L, 1L)"));
do.call(`seq.int`, argv);
}, o=expected); |
iorder <- function(obj = NULL, var_name = NULL) {
styler::cache_deactivate(verbose = FALSE)
recoding_styles <- c(
"factor" = "factor",
"fct_relevel (forcats)" = "forcats"
)
selected_recoding_style <- "factor"
if (exists("fct_relevel")) {
selected_recoding_style <- "forcats"
}
run_as_addin <- ifunc_run_as_addin()
if (is.null(obj)) {
if (ifunc_run_as_addin()) {
context <- rstudioapi::getActiveDocumentContext()
obj <- context$selection[[1]]$text
if (obj == "") obj <- NULL
}
obj_name <- NULL
var_name <- NULL
}
if (!is.null(obj)) {
if (is.character(obj) && length(obj) == 1) {
obj_name <- obj
try(
{
obj <- get(obj_name, envir = .GlobalEnv)
},
silent = TRUE
)
} else {
obj_name <- deparse(substitute(obj))
}
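# Support calls of the form iorder(df$var): split the expression into the data frame name and the column name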
if (grepl("\\$", obj_name)) {
s <- strsplit(obj_name, "\\$")
obj_name <- gsub("^\\s*", "", s[[1]][1])
var_name <- gsub("\\s*$", "", s[[1]][2])
var_name <- gsub("`", "", var_name)
obj <- get(obj_name, envir = .GlobalEnv)
}
if (inherits(obj, "tbl_df") || inherits(obj, "data.table")) obj <- as.data.frame(obj)
if (!is.data.frame(obj) && !is.vector(obj) && !is.factor(obj)) {
stop(sQuote(paste0(obj_name, " must be a vector, a factor or a data frame.")))
}
if (is.data.frame(obj)) {
is_char <- FALSE
is_null <- FALSE
try(
{
if (is.character(var_name)) is_char <- TRUE
if (is.null(var_name)) is_null <- TRUE
},
silent = TRUE
)
if (!is_char && !is_null) {
var_name <- deparse(substitute(var_name))
}
if (!is.null(var_name) && !(var_name %in% names(obj))) {
stop(sQuote(paste0(var_name, " must be a column of ", obj_name, ".")))
}
}
}
jquery.ui.file <- system.file(file.path("shiny", "js", "jquery-ui.js"), package = "questionr")
jquery.ui.content <- paste(readLines(jquery.ui.file), collapse = "\n")
js.file <- system.file(file.path("shiny", "js", "iorder.js"), package = "questionr")
js.content <- paste(readLines(js.file), collapse = "\n")
ui <- miniUI::miniPage(
tags$head(
tags$script(HTML(jquery.ui.content)),
tags$script(HTML(js.content)),
tags$style(ifunc_get_css())
),
miniUI::gadgetTitleBar(gettext("Interactive levels ordering", domain = "R-questionr")),
miniUI::miniTabstripPanel(
miniUI::miniTabPanel(
gettext("Variable and settings", domain = "R-questionr"),
icon = icon("sliders-h"),
miniUI::miniContentPanel(
ifunc_show_alert(run_as_addin),
tags$h4(icon("columns"), gettext("Variable to be recoded", domain = "R-questionr")),
wellPanel(
fluidRow(
column(
6,
selectizeInput(
"obj_name",
gettext("Data frame or vector to recode from", domain = "R-questionr"),
choices = Filter(
function(x) {
inherits(get(x, envir = .GlobalEnv), "data.frame") ||
is.vector(get(x, envir = .GlobalEnv)) ||
is.factor(get(x, envir = .GlobalEnv))
}, ls(.GlobalEnv)
),
selected = obj_name, multiple = FALSE
)
),
column(6, uiOutput("varInput"))
)
),
uiOutput("nblevelsAlert"),
tags$h4(icon("sliders-h"), gettext("Recoding settings", domain = "R-questionr")),
wellPanel(
fluidRow(
column(4, uiOutput("newvarInput")),
column(4, selectInput("recstyle", gettext("Recoding style", domain = "R-questionr"),
recoding_styles,
selected = selected_recoding_style
)),
)
),
uiOutput("alreadyexistsAlert"),
uiOutput("loadedforcatsAlert")
)
),
miniUI::miniTabPanel(
gettext("Ordering", domain = "R-questionr"),
icon = icon("arrows-alt"),
miniUI::miniContentPanel(
wellPanel(htmlOutput("levelsInput"))
)
),
miniUI::miniTabPanel(
gettext("Code and result", domain = "R-questionr"),
icon = icon("code"),
miniUI::miniContentPanel(
tags$h4(icon("code"), gettext("Code", domain = "R-questionr")),
htmlOutput("codeOut"),
tags$h4(icon("table"), gettext("Check", domain = "R-questionr")),
p(
class = "header",
gettext("Old variable as rows, new variable as columns.", domain = "R-questionr")
),
tableOutput("tableOut")
)
)
)
)
server <- function(input, output) {
robj <- reactive({
obj <- get(req(input$obj_name), envir = .GlobalEnv)
if (inherits(obj, "tbl_df") || inherits(obj, "data.table")) obj <- as.data.frame(obj)
obj
})
rvar <- reactive({
invisible(input$obj_name)
if (is.data.frame(robj())) {
return(robj()[[req(input$var_name)]])
}
if (is.vector(robj()) || is.factor(robj())) {
return(robj())
}
return(NULL)
})
src_var <- reactive({
if (is.data.frame(robj())) {
result <- ifelse(grepl(" ", req(input$var_name)),
sprintf('%s[,"%s"]', req(input$obj_name), req(input$var_name)),
sprintf("%s$%s", req(input$obj_name), req(input$var_name))
)
}
if (is.vector(robj()) || is.factor(robj())) {
result <- req(input$obj_name)
}
return(result)
})
output$levelsInput <- renderText({
out <- "<ol id='sortable' class='sortable'>"
if (is.factor(rvar())) {
levs <- levels(rvar())
} else {
levs <- sort(stats::na.omit(unique(rvar())))
}
for (l in levs) {
out <- paste0(
out,
'<li><span class="glyphicon glyphicon-move"> </span> <span class="level">',
htmltools::htmlEscape(l),
"</span></li>"
)
}
out <- paste0(out, "</ol>")
HTML(out)
})
output$varInput <- renderUI({
if (is.data.frame(robj())) {
selectizeInput("var_name",
gettext("Data frame column to recode", domain = "R-questionr"),
choices = names(robj()),
selected = var_name,
multiple = FALSE
)
}
})
output$newvarInput <- renderUI({
new_name <- NULL
if (is.data.frame(robj())) {
new_name <- req(input$var_name)
}
if (is.vector(robj()) || is.factor(robj())) {
new_name <- req(input$obj_name)
}
if (!is.null(new_name)) {
textInput(
"newvar_name",
gettext("New variable name", domain = "R-questionr"),
new_name
)
}
})
output$nblevelsAlert <- renderUI({
if (length(unique(rvar())) > 50) {
div(
class = "alert alert-warning alert-dismissible",
HTML('<button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>'),
HTML(gettext("<strong>Warning :</strong> The variable to be recoded has more than 50 levels.", domain = "R-questionr"))
)
}
})
output$loadedforcatsAlert <- renderUI({
if (input$recstyle == "forcats" && !exists("fct_recode")) {
div(
class = "alert alert-warning alert-dismissible",
HTML('<button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>'),
HTML(gettext("<strong>Warning :</strong> The <tt>forcats</tt> package must be installed and loaded for the generated code to be run.", domain = "R-questionr"))
)
}
})
output$alreadyexistsAlert <- renderUI({
exists <- FALSE
if (is.data.frame(robj()) && req(input$newvar_name) %in% names(robj())) {
exists <- TRUE
orig_name <- req(input$var_name)
}
if (is.vector(robj()) && exists(req(input$newvar_name), envir = .GlobalEnv)) {
exists <- TRUE
orig_name <- req(input$obj_name)
}
if (exists && req(input$newvar_name) != orig_name) {
div(
class = "alert alert-warning alert-dismissible",
HTML('<button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>'),
HTML(gettext("<strong>Warning :</strong> This new variable already exists.", domain = "R-questionr"))
)
}
})
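# Generate the reordering code, base-R factor() style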
generate_code_factor <- function(dest_var) {
newlevels <- paste0(utils::capture.output(dput(input$sortable)), collapse = "\n")
sprintf("%s <- factor(%s,\n levels=%s)", dest_var, src_var(), newlevels)
}
generate_code_forcats <- function(dest_var) {
out <- sprintf("%s <- %s %%>%%\n ", dest_var, src_var())
if (is.numeric(rvar())) {
out <- paste0(out, "as.character() %>%\n ")
}
newlevels <- paste0(utils::capture.output(dput(input$sortable)), collapse = "\n")
newlevels <- gsub("(^c\\(|\\)$)", "", newlevels)
paste0(out, sprintf("fct_relevel(\n %s\n )", newlevels))
}
generate_code <- function(check = FALSE) {
if (is.data.frame(robj())) {
dest_var <- ifelse(grepl(" ", req(input$newvar_name)),
sprintf('%s[,"%s"]', req(input$obj_name), req(input$newvar_name)),
sprintf("%s$%s", req(input$obj_name), req(input$newvar_name))
)
}
if (is.vector(robj()) || is.factor(robj())) {
dest_var <- req(input$newvar_name)
}
if (check) dest_var <- ".iorder_tmp"
out <- gettextf("
if (src_var() != dest_var) out <- paste0(out, gettextf(" into %s", dest_var, domain = "R-questionr"))
out <- paste0(out, "\n")
recstyle <- input$recstyle
if (recstyle == "factor") out <- paste0(out, generate_code_factor(dest_var))
if (recstyle == "forcats") out <- paste0(out, generate_code_forcats(dest_var))
return(out)
}
output$codeOut <- renderText({
if (is.data.frame(robj())) {
header <- HTML(gettextf("<p class='header'>Reordering <tt>%s</tt> from <tt>%s</tt> of class <tt>%s</tt>.</p>",
req(input$var_name), req(input$obj_name), class(rvar()),
domain = "R-questionr"
))
}
if (is.vector(robj()) || is.factor(robj())) {
header <- HTML(gettextf("<p class='header'>Reordering <tt>%s</tt> of class <tt>%s</tt>.</p>",
req(input$obj_name), class(rvar()),
domain = "R-questionr"
))
}
out <- generate_code()
out <- styler::style_text(out)
out <- paste(highr::hi_html(out), collapse = "\n")
out <- paste0(header, "<pre class='r'><code class='r' id='codeout'>", out, "</code></pre>")
out
})
observeEvent(input$done, {
out <- generate_code()
out <- styler::style_text(out)
out <- paste(out, collapse = "\n")
if (run_as_addin) {
rstudioapi::insertText(text = out)
} else {
out <- paste0(
gettext("\n-------- Start recoding code --------\n\n", domain = "R-questionr"),
out,
gettext("\n--------- End recoding code ---------\n", domain = "R-questionr")
)
cat(out)
}
stopApp()
})
observeEvent(input$cancel, {
invisible(stopApp())
})
output$tableOut <- renderTable(
{
code <- generate_code(check = TRUE)
if (!exists("fct_relevel") && input$recstyle == "forcats") {
return(NULL)
}
eval(parse(text = code), envir = .GlobalEnv)
tab <- freq(get(".iorder_tmp"))
tab
},
rownames = TRUE
)
}
runGadget(ui, server, viewer = dialogViewer("iorder", width = 800, height = 700))
} |
bigGP.init <- function(P = NULL, parallelRNGpkg = "rlecuyer", seed = 0){
if(.bigGP$P == 0) {
cat("Initializing processes:")
if(mpi.comm.size() == 0){
if(is.null(P)){
stop("bigGP.init: You must specify the number of worker processes to initiate (potentially each having multiple cores to allow for threading) and which should be equal to D(D+1)/2 for some integer D.")
} else{
mpi.spawn.Rslaves(nslaves = P)
}
} else{
if(!is.null(P) && P != mpi.comm.size() - 1)
warning(paste("bigGP.init: Number of worker processes requested, ", P, " does not match number of active MPI slave processes, ", mpi.comm.size()-1, "; ignoring 'P' argument.", sep = ""))
}
.bigGP.fill()
if(sum(unlist(mpi.remote.exec(require, "bigGP", ret = TRUE))) != .bigGP$P)
stop("bigGP.init: error in loading bigGP on slaves.")
mpi.remote.exec(.Call("init_comms", as.integer(mpi.comm.c2f()), PACKAGE="bigGP"), ret = FALSE)
out <- .Call("init_comms", as.integer(mpi.comm.c2f()), PACKAGE="bigGP")
if(out != .bigGP$D)
stop("bigGP.init: number of processes may not be consistent with the partition number, D.")
mpi.bcast.cmd(.bigGP.fill())
cat("... Done.\n")
cat("Using ", .bigGP$P, " processes with a partition size (D) of ", .bigGP$D, ".\n", sep = "")
if(is.null(parallelRNGpkg) || parallelRNGpkg == ""){
warning("bigGP.init: Initializing process seeds sequentially; not guaranteed to give independent streams; please use rlecuyer or rsprng to be certain.")
mpi.bcast.Robj2slave(seed)
mpi.bcast.cmd(set.seed(mpi.comm.rank() + seed))
} else{
if(parallelRNGpkg == "rlecuyer"){
if(sum(unlist(mpi.remote.exec(requireNamespace, "rlecuyer", ret = TRUE))) != .bigGP$P)
stop("bigGP.init: error in using rlecuyer on slaves.")
RNGkind("L'Ecuyer-CMRG")
mpi.setup.rngstream(iseed = seed)
} else{
if(parallelRNGpkg == "rsprng"){
stop("bigGP.init: the rsprng package is no longer available on CRAN. Advanced users who install rsprng from the CRAN archived packages can uncomment the appropriate lines in bigGP.init() below this stop() call and rebuild the bigGP package.")
} else {
warning('bigGP.init: parallelRNGpkg ', parallelRNGpkg, ' not recognized. Initializing process seeds sequentially; not guaranteed to give independent streams; please use rlecuyer or rsprng to be certain.')
mpi.bcast.Robj2slave(seed)
mpi.bcast.cmd(set.seed(mpi.comm.rank() + seed))
}
}
}
}
invisible(NULL)
}
calcIJ <- function(D) {
'Finds row and column indices of subblock assigned to the process within the h=1th part of the overall matrix'
rank <- mpi.comm.rank()
if( rank < 1 || rank > D * (D + 1) / 2 )
warning(paste("calcIJ: Invalid rank", rank, ".", sep = " "))
I <- 0
J <- 0
Dd <- D
rd <- rank - 1
while( rd >= Dd ) {
J <- J + 1
rd <- rd - Dd
Dd <- Dd - 1
}
I <- J+rd
return(list(I = as.integer(I), J = as.integer(J)))
}
calcD <- function(P){
' Calculates the partition number D given number of processes P; i.e., D such that D*(D+1)/2 <= P '
D <- floor((sqrt(1+8*P) - 1) / 2)
if(P != D*(D+1)/2)
stop(paste("calcD: Number of slave processes, ", P, " is not equal to D(D+1)/2 for integer D.", sep = ""))
return(as.integer(D))
}
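## Hedged worked example (not part of the original source): how calcD() recovers
## the partition size D from the number of worker processes P, and how calcIJ()'s
## loop walks the lower triangle of blocks. The loop below mirrors calcIJ() for
## each rank without needing an MPI communicator.
if (FALSE) {
  P <- 6 # 6 workers => D = 3 because 3 * (3 + 1) / 2 == 6
  D <- calcD(P) # returns 3L
  for (rank in 1:P) { # rank 0 is the master and owns no block
    I <- 0; J <- 0; Dd <- D; rd <- rank - 1
    while (rd >= Dd) { # step to the next block column of the lower triangle
      J <- J + 1; rd <- rd - Dd; Dd <- Dd - 1
    }
    I <- J + rd
    cat("rank", rank, "-> block (I, J) = (", I, ",", J, ")\n")
  }
}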
getDistributedVectorLength <- function(n, h = 1){
if(.bigGP$I == .bigGP$J) return(h * ceiling(n / (.bigGP$D * h))) else return(0)
}
getDistributedTriangularMatrixLength <- function(n, h = 1){
if(.bigGP$I == .bigGP$J){
return(h * (h+1) / 2 * (ceiling(n / (.bigGP$D * h)))^2)
} else{
return(h^2 * (ceiling(n / (.bigGP$D * h)))^2)
}
}
getDistributedRectangularMatrixLength <- function(n1, n2, h1 = 1, h2 = 1){
len <- h1 * h2 * ceiling(n1 / (.bigGP$D * h1)) * ceiling(n2 / (.bigGP$D * h2))
if(.bigGP$I == .bigGP$J) return(len) else return(2*len)
}
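## Hedged worked example (not part of the original source): the local storage sizes
## implied by the three length functions above, written with explicit values of D
## and h instead of reading the .bigGP environment.
if (FALSE) {
  n <- 10; D <- 3; h <- 2
  bs <- ceiling(n / (D * h)) # per-block side length: 2
  h * bs # vector entries held by a diagonal process: 4
  h * (h + 1) / 2 * bs^2 # triangular-matrix entries on a diagonal process: 12
  h^2 * bs^2 # triangular-matrix entries on an off-diagonal process: 16
  2 * (h * h * bs * bs) # rectangular-matrix entries on an off-diagonal process (n1 = n2 = n): 32
}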
remoteGetIndices <- function(type = "vector", objName, objPos = ".GlobalEnv", n1, n2 = NULL, h1 = 1, h2 = 1) {
if(!is.element(type, c("vector", "triangular", "symmetric", "rectangular")))
stop("remoteGetIndices: type must be one of 'vector', 'triangular', 'symmetric', 'rectangular'.")
.n1 <- n1; .n2 <- n2; .h1 <- h1; .h2 <- h2
mpi.bcast.Robj2slave(.n1)
mpi.bcast.Robj2slave(.n2)
mpi.bcast.Robj2slave(.h1)
mpi.bcast.Robj2slave(.h2)
if(type == "vector")
mpi.bcast.cmd(.tmp <- localGetVectorIndices(.n1, .h1))
if(type == "symmetric" || type == "triangular")
mpi.bcast.cmd(.tmp <- localGetTriangularMatrixIndices(.n1, .h1))
if(type == "rectangular")
mpi.bcast.cmd(.tmp <- localGetRectangularMatrixIndices(.n1, .n2, .h1, .h2))
mpi.remote.exec(localAssign, objName, ".tmp", objPos)
remoteRm(.tmp)
return(NULL)
}
localGetVectorIndices <- function(n, h = 1){
' Finds the indices of the entries this processor owns of a vector'
if(.bigGP$I == .bigGP$J){
bs <- (n + .bigGP$D * h - 1)%/%(.bigGP$D *h)
ind <- matrix(nrow = bs*h, ncol = 1)
for( JJ in 0:(h-1) ) {
ind[JJ*bs+(0:(bs-1))+1, 1] <- JJ*bs*.bigGP$D + .bigGP$J*bs + (0:(bs-1))
}
ind[ind >= n] <- 0
} else{
ind <- numeric(0)
}
return(ind + 1)
}
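## Hedged worked example (not part of the original source): the block-cyclic layout
## produced by localGetVectorIndices(), recomputed with explicit values instead of
## the .bigGP environment (n = 10, D = 3, h = 1, diagonal process in block column J = 1).
if (FALSE) {
  n <- 10; D <- 3; h <- 1; J <- 1
  bs <- (n + D * h - 1) %/% (D * h) # block size: ceiling(10 / 3) = 4
  ind <- J * bs + (0:(bs - 1)) # 0-based owned positions: 4 5 6 7
  ind[ind >= n] <- 0 # positions beyond n are flagged as padding
  ind + 1 # 1-based indices: 5 6 7 8
}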
localGetTriangularMatrixIndices <- function(n, h = 1){
' Finds the indices of the entries this processor owns of a lower-triangular matrix, as a two-column matrix'
bs <- (n+.bigGP$D*h-1) %/% (.bigGP$D*h)
if( .bigGP$I == .bigGP$J ) {
ind <- matrix(nrow = bs*bs*h*(h+1)/2, ncol = 2)
} else {
ind <- matrix(nrow = bs*bs*h*h, ncol = 2)
}
start <- 1
for( JJ in 0:(h-1) ) {
for( II in JJ:(h-1) ) {
if( (II == JJ) || (.bigGP$I == .bigGP$J) ) {
ind[start:(start+(bs*bs-1)), 1] <- II*bs*.bigGP$D+.bigGP$I*bs + rep(0:(bs-1), bs)
ind[start:(start+(bs*bs-1)), 2] <- JJ*bs*.bigGP$D+.bigGP$J*bs + rep(0:(bs-1), each = bs)
start <- start + bs*bs
} else {
ind[start:(start+(bs*bs-1)), 1] <- II*bs*.bigGP$D+.bigGP$I*bs + rep(0:(bs-1), bs)
ind[start:(start+(bs*bs-1)), 2] <- JJ*bs*.bigGP$D+.bigGP$J*bs + rep(0:(bs-1), each = bs)
start <- start + bs*bs
ind[start:(start+(bs*bs-1)), 1] <- II*bs*.bigGP$D+.bigGP$J*bs + rep(0:(bs-1), bs)
ind[start:(start+(bs*bs-1)), 2] <- JJ*bs*.bigGP$D+.bigGP$I*bs + rep(0:(bs-1), each = bs)
start <- start + bs*bs
}
}
}
ind[ind >= n] <- 0
return(ind + 1)
}
localGetRectangularMatrixIndices <- function(n1, n2, h1 = 1, h2 = 1) {
bsr <- (n2 + .bigGP$D*h2 - 1) %/% (.bigGP$D*h2)
bsc <- (n1 + .bigGP$D*h1 - 1) %/% (.bigGP$D*h1)
if( .bigGP$I == .bigGP$J ) {
ind <- matrix(nrow=bsr*bsc*h2*h1,ncol=2)
} else {
ind <- matrix(nrow=2*bsr*bsc*h2*h1,ncol=2)
}
start <- 1
for( JJ in 0:(h1-1) ) {
for( II in 0:(h2-1) ) {
if( (.bigGP$I == .bigGP$J) ) {
ind[start:(start+bsc*bsr-1),2] = II*bsr*.bigGP$D+.bigGP$I*bsr + rep(0:(bsr-1) , bsc)
ind[start:(start+bsc*bsr-1),1] = JJ*bsc*.bigGP$D+.bigGP$J*bsc + rep(0:(bsc-1) , each = bsr)
start = start + bsc*bsr
} else {
ind[start:(start+bsc*bsr-1),2] = II*bsr*.bigGP$D+.bigGP$I*bsr + rep(0:(bsr-1) , bsc)
ind[start:(start+bsc*bsr-1),1] = JJ*bsc*.bigGP$D+.bigGP$J*bsc + rep(0:(bsc-1) , each = bsr)
start = start + bsc*bsr
ind[start:(start+bsc*bsr-1),2] = II*bsr*.bigGP$D+.bigGP$J*bsr + rep(0:(bsr-1) , bsc)
ind[start:(start+bsc*bsr-1),1] = JJ*bsc*.bigGP$D+.bigGP$I*bsc + rep(0:(bsc-1) , each = bsr)
start = start + bsc*bsr
}
}
}
ind[ind[ , 2] >= n2, ] <- 0
ind[ind[ , 1] >= n1, ] <- 0
return(ind + 1)
}
alloc <- function(input, inputPos = '.GlobalEnv'){
if(is.numeric(input)){
tmp <- 0; length(tmp) <- input
return(tmp)
} else{
if(is.character(input)){
return(1*get(input, pos = eval(as.name(inputPos))))
} else stop("alloc: 'input' must be a numeric value or character string.")
}
}
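## Hedged usage sketch (not part of the original source): alloc() either builds a
## zero vector of a requested length or copies an existing numeric object found at
## 'inputPos'.
if (FALSE) {
  alloc(3) # numeric zero vector: 0 0 0
  y <- c(1.5, 2.5)
  alloc("y", inputPos = ".GlobalEnv") # copy of y: 1.5 2.5
}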
.bigGP.fill <- function(init = FALSE) {
'initializes .bigGP object holding the information on the distributed setup'
if(init) {
.bigGP$P <- 0
.bigGP$D <- 0
.bigGP$I <- -1
.bigGP$J <- -1
} else {
.bigGP$P <- mpi.comm.size() - 1
.bigGP$D <- calcD(.bigGP$P)
if(mpi.comm.rank()) {
tmp <- calcIJ(.bigGP$D)
.bigGP$I <- tmp$I
.bigGP$J <- tmp$J
}
}
invisible(NULL)
}
".bigGP" <- new.env()
.bigGP.fill(init = TRUE)
".onAttach" <- function (lib, pkg) {
packageStartupMessage("
=========================================================================================
Loading bigGP.\n
Warning: before using bigGP, you must initialize the slave processes using bigGP.init() \n
(which can also be done indirectly via initializing a krigeProblem object).\n
If R was started through mpirun/orterun/mpiexec, please quit by using bigGP.quit().
=========================================================================================\n
")
}
bigGP.quit <- function(save = "no"){
if (is.loaded("mpi_initialize")){
if (mpi.comm.size(1) > 0) {
mpi.close.Rslaves()
}
mpi.quit(save)
}
}
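## Hedged workflow sketch (not part of the original source): the init/quit sequence
## described in the startup message above. Requires a working Rmpi installation, and
## P must equal D * (D + 1) / 2 for some integer D.
if (FALSE) {
  library(bigGP)
  bigGP.init(P = 3, seed = 1) # spawns 3 workers, giving a partition size D = 2
  ## ... distributed computations (e.g., via a krigeProblem object) go here ...
  bigGP.quit() # closes the workers and quits R through mpi.quit()
}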
bigGP.exit <- function(){
if (is.loaded("mpi_initialize")){
if (mpi.comm.size(1) > 0) {
mpi.close.Rslaves()
}
mpi.exit()
detach(package:bigGP, unload = TRUE)
}
}
if(FALSE) {
.Last.lib <- function(libpath){
if (is.loaded("mpi_initialize")){
if (mpi.comm.size(1) > 0) {
print("Please use mpi.close.Rslaves() to close slaves.")
mpi.close.Rslaves()
}
print("Please use mpi.quit() to quit R")
.Call("mpi_finalize")
}
}
}
check.graph <- function(){
if(requireNamespace("graph", quietly=TRUE)){
TRUE
}else{
FALSE
}
}
make.graph <- function(){
if (!check.graph()){ skip("graph package cannot be loaded"); }
V <- LETTERS[1:10];
edL <- vector("list", length=length(V));
names(edL) <- V;
edL[[1]] <- list(edges=c(2,3,7,5));
edL[[2]] <- list(edges=c(4,6));
edL[[3]] <- list(edges=c(6,8));
edL[[4]] <- list(edges=c(6,9));
edL[[5]] <- list(edges=c(8));
edL[[6]] <- list(edges=c(8,9,10));
edL[[7]] <- list(edges=c(8,5));
edL[[8]] <- list(edges=c());
edL[[9]] <- list(edges=c());
g <- graph::graphNEL(nodes=V, edgeL=edL, edgemode="directed");
return(g);
}
make.scores <- function(){
set.seed(12345);
S <- matrix(round(runif(40),2), nrow=4);
dimnames(S) <- list(paste0("pr",1:4), LETTERS[1:ncol(S)]);
return(S);
}
make.spec.ann <- function(){
pr1 <- c(1,0,0);
pr2 <- c(0,1,0);
pr3 <- c(0,0,1);
pr4 <- c(1,0,0);
spec.ann <- rbind(pr1,pr2,pr3,pr4);
colnames(spec.ann) <- c("I","H","J");
return(spec.ann);
}
make.ann <- function(){
g <- make.graph();
spec.ann <- make.spec.ann();
anc <- build.ancestors(g);
ann <- transitive.closure.annotations(spec.ann, anc);
return(ann);
}
tk_get_row_and_columns <- function(widget, ...) {
UseMethod("tk_get_row_and_columns", widget)
}
tk_get_row_and_columns.loon <- function(widget, ...) {
list(
row = 20,
column = 20
)
}
tk_get_row_and_columns.l_pairs <- function(widget, ...) {
args <- list(...)
span <- args$span
histspan <- args$histspan
histAdjust <- args$histAdjust
row_column <- extract_num(names(widget))
column <- max(vapply(row_column, function(x) x[1L], as.numeric(1L)) * span,
na.rm = TRUE) - span + histspan
row <- max((vapply(row_column, function(x) x[2L], as.numeric(2L)) + histAdjust) * span,
na.rm = TRUE) - span + histspan
list(
row = row,
column = column
)
}
tk_get_row_and_columns.l_facet_wrap <- function(widget, ...) {
args <- list(...)
by <- args$by
title <- args$title
loc <- l_getLocations(widget)
nrow <- loc$nrow
ncol <- loc$ncol
if(is.atomic(by)) len_by <- 1
else len_by <- length(by)
span <- 10
column <- ncol * span + 1
row <- nrow * (span + len_by) + if(is.null(title)) 0 else 1 + 1
list(
row = row,
column = column
)
}
tk_get_row_and_columns.l_facet_grid <- function(widget, ...) {
args <- list(...)
by <- args$by
title <- args$title
loc <- l_getLocations(widget)
nrow <- loc$nrow
ncol <- loc$ncol
n <- nrow * ncol
if(is.atomic(by)) len_by <- 1
else len_by <- length(by)
span <- 10
column <- ncol * span + 1 + floor(len_by/2)
row <- nrow * span + (floor(len_by/2) + len_by %% 2) +
if(is.null(title)) 0 else 1 + 2
list(
row = row,
column = column
)
}
mlnormal_equal_list_matrices <- function( list_mat1, list_mat2, dim_list,
eps=1E-30)
{
if ( is.null(dim_list) ){
dim_list <- length(list_mat1)
}
v1 <- 1
for ( dd in 1:dim_list ){
v1 <- v1 * mlnormal_equal_matrix( mat1=list_mat1[[dd]],
mat2=list_mat2[[dd]], eps=eps )
}
res <- if ( v1==1 ){ TRUE } else { FALSE }
return(res)
}
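## Hedged usage sketch (not part of the original source): comparing two lists of
## matrices entrywise. Assumes the companion helper mlnormal_equal_matrix(), which
## compares one pair of matrices within 'eps', is available in the session.
if (FALSE) {
  A <- diag(2)
  B <- matrix(1, 2, 2)
  mlnormal_equal_list_matrices(list(A, B), list(A, B), dim_list = NULL) # TRUE
  mlnormal_equal_list_matrices(list(A, B), list(A, A), dim_list = NULL) # FALSE
}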
writeRmd <-
function(..., file = "", append = TRUE, sep = " ", end = "\n", dump = FALSE, start = FALSE, stop = FALSE, options = NULL) {
if(!is.character(file)) {return(warning("the parameter 'file' has to be a character chain giving the name of the .Rmd file to write in"))}
if(!is.logical(append)) {return(warning("the argument 'append' must be logical"))}
if(!is.logical(dump)) {return(warning("the argument 'dump' must be logical"))}
if(!is.logical(start)) {return(warning("the argument 'start' must be logical"))}
if(!is.logical(stop)) {return(warning("the argument 'stop' must be logical"))}
if(!is.character(options) & !is.null(options)) {return(warning("the argument 'options' must be a character chain"))}
cat(file = file, append = append)
if(start) {
cat("```", file = file, append = TRUE)
if(!is.null(options)) {cat("{", options, "}", sep = "", file = file, append = TRUE)}
cat("\n", file = file, append = TRUE)
}
if(dump) {
dump(..., file = file, append = TRUE)
} else {
cat(..., file = file, append = TRUE, sep = sep)
}
if(stop) {cat("\n```", file = file, append = TRUE)}
cat(end, file = file, append = TRUE)
}
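## Hedged usage sketch (not part of the original source): writing a fenced R chunk
## to a temporary .Rmd file with writeRmd(); the chunk label and options are illustrative.
if (FALSE) {
  rmd <- tempfile(fileext = ".Rmd")
  writeRmd("plot(cars)", file = rmd, start = TRUE, stop = TRUE,
           options = "r cars-plot, echo=TRUE")
  cat(readLines(rmd), sep = "\n")
  ## ```{r cars-plot, echo=TRUE}
  ## plot(cars)
  ## ```
}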
context("Testing project_to_curve")
test_projection <- function(x, s, stretch, fit) {
expect_equal(names(fit), c("s", "ord", "lambda", "dist_ind", "dist"))
expect_equal(rownames(fit$s), rownames(x))
expect_equal(colnames(fit$s), colnames(x))
expect_equal(names(fit$lambda), rownames(x))
expect_equal(names(fit$dist_ind), rownames(x))
expect_equal(names(fit$ord), NULL)
expect_equal(names(fit$dist), NULL)
sord <- fit$s[fit$ord,]
slam <- cumsum(c(0, sqrt(rowSums((sord[-nrow(sord),] - sord[-1,])^2))))
expect_lte(sum((slam - fit$lambda[fit$ord])^2), 1e-10)
dist_ind <- rowSums((fit$s - x)^2)
expect_lte(sum((dist_ind - fit$dist_ind)^2), 1e-10)
dist <- sum(dist_ind)
expect_lte(sum((dist - fit$dist)^2), 1e-10)
fit2 <- project_to_curve(fit$s, fit$s, stretch = 0)
expect_lte(sum((fit2$s - fit$s)^2), 1e-10)
}
z <- seq(-1, 1, length.out = 100)
s <- cbind(z, z^2, z^3, z^4)
x <- s + rnorm(length(s), mean = 0, sd = .005)
ord <- sample.int(nrow(x))
test_that("Verify s is not modified", {
s_orig <- s + 0
fit <- project_to_curve(
x = x,
s = s,
stretch = 2
)
expect_equal(s, s_orig, tolerance = 1e-5)
})
test_that("Testing project_to_curve", {
fit <- project_to_curve(
x = x,
s = s,
stretch = 0
)
test_projection(x, s, stretch = 0, fit)
expect_gte(cor(as.vector(fit$s), as.vector(s)), .99)
expect_gte(cor(fit$ord, seq_len(100)), .99)
})
test_that("Checking whether project_to_curve retains dimnames", {
colnames(x) <- paste0("Comp", seq_len(ncol(x)))
fit <- project_to_curve(x = x, s = s, stretch = 0)
test_projection(x, s, stretch = 0, fit)
rownames(x) <- paste0("Sample", seq_len(nrow(x)))
fit <- project_to_curve(x = x, s = s, stretch = 0)
test_projection(x, s, stretch = 0, fit)
colnames(x) <- NULL
fit <- project_to_curve(x = x, s = s, stretch = 0)
test_projection(x, s, stretch = 0, fit)
})
test_that("Testing project_to_curve with shuffled order", {
fit <- project_to_curve(
x = x[ord,],
s = s,
stretch = 0
)
test_projection(x[ord,], s, stretch = 0, fit)
expect_gte(cor(as.vector(fit$s[fit$ord,]), as.vector(s)), .99)
expect_gte(cor(order(fit$ord), ord), .99)
})
test_that("Values are more or less correct", {
constant_s <- matrix(c(-1, 1, .1, .1, .2, .2, .3, .3), nrow = 2, byrow = FALSE)
x[,1] <- z
fit <- project_to_curve(
x = x,
s = constant_s,
stretch = 0
)
test_projection(x, constant_s, stretch = 0, fit)
expect_true(all(abs(fit$s[,1] - x[,1]) < 1e-6))
expect_true(all(abs(fit$s[,2] - .1) < 1e-6))
expect_true(all(abs(fit$s[,3] - .2) < 1e-6))
expect_true(all(abs(fit$s[,4] - .3) < 1e-6))
expect_equal(fit$ord, seq_along(z))
expect_true(all(abs(fit$lambda - seq(0, 2, length.out = 100)) < 1e-6))
dist_ind <-
(x[,2] - .1)^2 +
(x[,3] - .2)^2 +
(x[,4] - .3)^2
expect_true(all(abs(fit$dist_ind - dist_ind) < 1e-10))
expect_true(abs(sum(dist_ind) - fit$dist) < 1e-10)
})
test_that("Values are more or less correct, with stretch = 2 and a given ord", {
constant_s <- matrix(c(-.9, .9, .1, .1, .2, .2, .3, .3), nrow = 2, byrow = FALSE)
x[,1] <- z
fit <- project_to_curve(
x = x,
s = constant_s,
stretch = 2
)
test_projection(x, constant_s, stretch = 0, fit)
expect_true(all(abs(fit$s[,1] - x[,1]) < 1e-6))
expect_true(all(abs(fit$s[,2] - .1) < 1e-6))
expect_true(all(abs(fit$s[,3] - .2) < 1e-6))
expect_true(all(abs(fit$s[,4] - .3) < 1e-6))
expect_equal(fit$ord, seq_along(z))
expect_true(all(abs(fit$lambda - seq(0, 2, length.out = 100)) < 1e-3))
dist_ind <-
(x[,2] - .1)^2 +
(x[,3] - .2)^2 +
(x[,4] - .3)^2
expect_true(all(abs(fit$dist_ind - dist_ind) < 1e-10))
expect_true(abs(sum(dist_ind) - fit$dist) < 1e-10)
})
test_that("Values are more or less correct, without stretch", {
cut <- 0.89898990
constant_s <- matrix(c(-cut, cut, .1, .1, .2, .2, .3, .3), nrow = 2, byrow = FALSE)
x[,1] <- z
fit <- project_to_curve(
x = x,
s = constant_s,
stretch = 0
)
test_projection(x, constant_s, stretch = 2, fit)
f <- z < -cut | z > cut
expect_true(all(abs(fit$s[!f,1] - x[!f,1]) < 1e-6))
expect_false(any(abs(fit$s[f,1] - x[f,1]) < 1e-6))
expect_true(all(abs(fit$s[,2] - .1) < 1e-6))
expect_true(all(abs(fit$s[,3] - .2) < 1e-6))
expect_true(all(abs(fit$s[,4] - .3) < 1e-6))
expect_true(cor(fit$ord, seq_along(z)) > cut)
lambda <- apply(fit$s, 1, function(x) sqrt(sum((x - constant_s[1,])^2)))
expect_true(all(abs(fit$lambda - lambda) < 1e-8))
dist_ind <-
ifelse(f, (abs(z) - cut)^2, 0) +
(x[,2] - .1)^2 +
(x[,3] - .2)^2 +
(x[,4] - .3)^2
expect_true(all(abs(fit$dist_ind - dist_ind) < 1e-10))
expect_true(abs(sum(dist_ind) - fit$dist) < 1e-10)
})
test_that("Expect project_to_curve to error elegantly", {
expect_error(project_to_curve(x, s, stretch = -1), "larger than or equal to 0")
expect_error(project_to_curve(x, cbind(s, s)), "must have an equal number of columns")
})
test_that("Projecting to random data produces correct results", {
for (i in seq_len(10)) {
s <- matrix(runif(100), ncol = 2)
x <- matrix(runif(100), ncol = 2)
fit <- project_to_curve(
x = x,
s = s,
stretch = 0
)
test_projection(x, s, stretch = 0, fit)
}
})
setClass("CFunc",
representation(
code="character"
),
contains="function"
)
setClass( "CFuncList", contains = "list" )
cfunction <- function(sig=character(), body=character(), includes=character(), otherdefs=character(),
language=c("C++", "C", "Fortran", "F95", "ObjectiveC", "ObjectiveC++"),
verbose=FALSE, convention=c(".Call", ".C", ".Fortran"), Rcpp=FALSE,
cppargs=character(), cxxargs=character(), libargs=character(),
dim = NULL, implicit = NULL, module = NULL, name = NULL) {
if (missing (convention) & !missing(language))
convention <- switch (EXPR = language, "Fortran" = ".Fortran", "F95" = ".Fortran", ".C" = ".C", ObjectiveC = ".Call", "ObjectiveC++" = ".Call", "C++" = ".Call")
convention <- match.arg(convention)
if ( missing(language) ) language <- ifelse(convention == ".Fortran", "Fortran", "C++")
else language <- match.arg(language)
language <- switch(EXPR=tolower(language), cpp="C++", f="Fortran", f95="F95",
objc="ObjectiveC", objcpp= ,"objc++"="ObjectiveC++", language)
f <- basename(tempfile())
if (is.null(name)) {
name <- f
}
if ( !is.list(sig) ) {
sig <- list(sig)
names(sig) <- name
names(body) <- name
}
if( length(sig) != length(body) )
stop("mismatch between the number of functions declared in 'sig' and the number of function bodies provided in 'body'")
if (is.null(dim))
dim <- as.list(rep("(*)", length(sig)))
else {
if (!is.list(dim))
dim <- list(dim)
if (length(dim) != length(sig))
stop("mismatch between the number of functions declared in 'sig' and the number of dimensions declared in 'dim'")
}
if (Rcpp) {
if (!requireNamespace("Rcpp", quietly=TRUE))
stop("Rcpp cannot be loaded, install it or use the default Rcpp=FALSE", call.=FALSE)
rcppdir <- system.file("include", package="Rcpp")
if (.Platform$OS.type == "windows") rcppdir <- utils::shortPathName(normalizePath(rcppdir))
cxxargs <- c(paste("-I", rcppdir, sep=""), cxxargs)
}
if (length(cppargs) != 0) {
args <- paste(cppargs, collapse=" ")
if (verbose) cat("Setting PKG_CPPFLAGS to", args, "\n")
Sys.setenv(PKG_CPPFLAGS=args)
}
if (length(cxxargs) != 0) {
args <- paste(cxxargs, collapse=" ")
if (verbose) cat("Setting PKG_CXXFLAGS to", args, "\n")
Sys.setenv(PKG_CXXFLAGS=args)
}
if (length(libargs) != 0) {
args <- paste(libargs, collapse=" ")
if (verbose) cat("Setting PKG_LIBS to", args, "\n")
Sys.setenv(PKG_LIBS=args)
}
types <- vector(mode="list", length=length(sig))
for ( i in seq_along(sig) ) {
if ( convention == ".Call" ) {
if (i == 1) {
code <- ifelse(Rcpp,
               "#include <Rcpp.h>\n",
               paste("#include <R.h>\n#include <Rdefines.h>\n",
                     "#include <R_ext/Error.h>\n", sep = ""))
code <- paste(c(code, includes, ""), collapse="\n")
code <- paste(c(code, otherdefs, ""), collapse="\n")
}
if ( length(sig[[i]]) > 0 ) {
funCsig <- paste("SEXP", names(sig[[i]]), collapse=", " )
}
else funCsig <- ""
funCsig <- paste("SEXP", names(sig)[i], "(", funCsig, ")", sep=" ")
if ( language == "C++" || language == "ObjectiveC++")
code <- paste( code, "extern \"C\" {\n ", funCsig, ";\n}\n\n", sep="")
code <- paste( code, funCsig, " {\n", sep="")
code <- paste( code, paste(body[[i]], collapse="\n"), sep="")
code <- paste(code, "\n ",
ifelse(Rcpp, "Rf_warning", "warning"),
"(\"your C program does not return anything!\");\n return R_NilValue;\n}\n", sep="");
}
else if ( convention == ".C" ) {
if (i == 1) {
code <- ifelse(Rcpp,"
code <- paste(c(code, includes, ""), collapse="\n")
code <- paste(c(code, otherdefs, ""), collapse="\n")
}
if ( length(sig[[i]]) > 0 ) {
types[[i]] <- pmatch(sig[[i]], c("logical", "integer", "double", "complex",
"character", "raw", "numeric"), duplicates.ok = TRUE)
if ( any(is.na(types[[i]])) ) stop( paste("Unrecognized type", sig[[i]][is.na(types[[i]])]) )
decls <- c("int *", "int *", "double *", "Rcomplex *", "char **",
"unsigned char *", "double *")[ types[[i]] ]
funCsig <- paste(decls, names(sig[[i]]), collapse=", ")
}
else funCsig <- ""
funCsig <- paste("void", names(sig)[i], "(", funCsig, ")", sep=" ")
if ( language == "C++" || language == "ObjectiveC++" )
code <- paste( code, "extern \"C\" {\n ", funCsig, ";\n}\n\n", sep="")
code <- paste( code, funCsig, " {\n", sep="")
code <- paste( code, paste(body[[i]], collapse="\n"), sep="")
code <- paste( code, "\n}\n", sep="")
}
else {
lead <- ifelse (language == "Fortran", " ","")
if (i == 1) {
code <- paste(includes, collapse="\n")
code <- paste(c(code, otherdefs, ""), collapse="\n")
}
if ( length(sig[[i]]) > 0 ) {
types[[i]] <- pmatch(sig[[i]], c("logical", "integer", "double", "complex",
"character", "raw", "numeric"), duplicates.ok = TRUE)
if ( any(is.na(types[[i]])) ) stop( paste("Unrecognized type", sig[[i]][is.na(types[[i]])]) )
if (6 %in% types[[i]]) stop( "raw type unsupported by .Fortran()" )
decls <- c("INTEGER", "INTEGER", "DOUBLE PRECISION", "DOUBLE COMPLEX",
"CHARACTER*255", "Unsupported", "DOUBLE PRECISION")[ types[[i]] ]
decls <- paste(lead, decls, " ", names(sig[[i]]), dim[[i]], sep="", collapse="\n")
funCsig <- paste(names(sig[[i]]), collapse=", ")
}
else {
decls <- ""
funCsig <- ""
}
funCsig <- paste(lead,"SUBROUTINE", names(sig)[i], "(", funCsig, ")\n", sep=" ")
if (language == "Fortran") {
if ((cl <- nchar(funCsig)) >= 72) {
fstring <- substr(funCsig, 72, cl)
funCsig <- substr(funCsig, 1, 71)
while ((cf <- nchar(fstring)) > 66) {
funCsig <- paste(funCsig, "\n &", substr(fstring, 1, 66), sep = "")
fstring <- substr(fstring, 67, cf)
}
if (cf > 0) funCsig <- paste(funCsig, "\n &", fstring, sep = "")
funCsig <- paste(funCsig, "\n")
}
}
if (is.character(module)) funCsig <- paste(funCsig, lead, "USE ", module, "\n", sep = "")
if (is.character(implicit)) funCsig <- paste(funCsig, lead, "IMPLICIT ", implicit, "\n", sep = "")
code <- paste( code, funCsig, decls, "\n", collapse="\n", sep="")
code <- paste( code, paste(body[[i]], collapse="\n"), sep="")
code <- paste( code, "\n", lead, "RETURN\n", lead, "END\n\n", sep="")
}
}
libLFile <- compileCode(f, code, language, verbose)
libLFile_orig <- libLFile
cleanup <- function(env) {
if ( f %in% names(getLoadedDLLs()) ) dyn.unload(libLFile_orig)
unlink(libLFile_orig)
}
reg.finalizer(environment(), cleanup, onexit=TRUE)
res <- vector("list", length(sig))
names(res) <- names(sig)
for ( i in seq_along(sig) ) {
res[[i]] <- new("CFunc", code = code)
fn <- function(arg) {
NULL
}
DLL <- dyn.load( libLFile )
args <- formals(fn)[ rep(1, length(sig[[i]])) ]
names(args) <- names(sig[[i]])
formals(fn) <- args
if (convention == ".Call") {
body <- quote( CONVENTION("EXTERNALNAME", ARG) )[ c(1:2, rep(3, length(sig[[i]]))) ]
for ( j in seq_along(sig[[i]]) ) body[[j+2]] <- as.name(names(sig[[i]])[j])
}
else {
body <- quote( CONVENTION("EXTERNALNAME", as.logical(ARG), as.integer(ARG),
as.double(ARG), as.complex(ARG), as.character(ARG),
as.raw(ARG), as.double(ARG)) )[ c(1:2,types[[i]]+2) ]
names(body) <- c( NA, "", names(sig[[i]]) )
for ( j in seq_along(sig[[i]]) ) body[[j+2]][[2]] <- as.name(names(sig[[i]])[j])
}
body[[1]] <- get(convention)
body[[2]] <- getNativeSymbolInfo( names(sig)[i], DLL )$address
body(fn) <- body
res[[i]]@.Data <- fn
}
if ( verbose ) {
cat("Program source:\n")
lines <- strsplit(code, "\n")
for ( i in 1:length(lines[[1]]) )
cat(format(i,width=3), ": ", lines[[1]][i], "\n", sep="")
}
remove(list = c("args", "body", "fn", "funCsig", "i", "includes", "j"))
if (length(res) == 1 && names(res) == name) return( res[[1]] )
else return( new( "CFuncList", res ) )
}
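## Hedged usage sketch (not part of the original source): a minimal .C-convention
## function compiled with cfunction(); requires a working compiler toolchain, so it
## is kept inert inside if (FALSE).
if (FALSE) {
  dbl <- cfunction(sig = c(x = "numeric", n = "integer"),
                   body = "for (int i = 0; i < *n; i++) x[i] = 2 * x[i];",
                   convention = ".C", language = "C")
  dbl(x = as.numeric(1:5), n = 5L)$x # 2 4 6 8 10
}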
compileCode <- function(f, code, language, verbose) {
wd = getwd()
on.exit(setwd(wd))
extension <- switch(language, "C++"=".cpp", C=".c", Fortran=".f", F95=".f95",
ObjectiveC=".m", "ObjectiveC++"=".mm")
libCFile <- file.path(tempdir(), paste0(f, extension))
libLFile <- file.path(tempdir(), paste0(f, .Platform$dynlib.ext))
write(code, libCFile)
if ( file.exists(libLFile) ) file.remove( libLFile )
setwd(dirname(libCFile))
errfile <- paste( basename(libCFile), ".err.txt", sep = "" )
cmd <- paste0(R.home(component="bin"), "/R")
if ( verbose ) system2(cmd, args = paste(" CMD SHLIB --dry-run", basename(libCFile)))
compiled <- system2(cmd, args = paste(" CMD SHLIB", basename(libCFile)),
stdout = FALSE, stderr = errfile)
errmsg <- readLines( errfile )
unlink( errfile )
if ( !file.exists(libLFile) ) {
cat("\nERROR(s) during compilation: source code errors or compiler configuration errors!\n")
if ( !verbose ) system2(cmd, args = paste(" CMD SHLIB --dry-run --preclean", basename(libCFile)))
cat("\nProgram source:\n")
code <- strsplit(code, "\n")
for (i in 1:length(code[[1]])) cat(format(i,width=3), ": ", code[[1]][i], "\n", sep="")
cat("\nCompilation ERROR, function(s)/method(s) not created!\n")
if ( sum(nchar(errmsg)) > getOption("warning.length") ) stop(tail(errmsg))
else stop(errmsg)
}
return( libLFile )
}