variance.par <- function(penden.env) {
  # Sandwich-type covariance: A^-1 %*% B %*% A^-1, with A the penalized and
  # B the unpenalized Hessian stored in the environment.
  A.inv <- my.positive.definite.solve(get("Derv2.pen", penden.env))
  return(A.inv %*% get("Derv2.cal", penden.env) %*% A.inv)
}
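# Usage sketch (hypothetical inputs): variance.par() expects an environment
# holding matrices named "Derv2.pen" and "Derv2.cal", the names used in the
# function body above. With plain solve() standing in for
# my.positive.definite.solve(), the result is the usual sandwich form:
penden.env <- new.env()
assign("Derv2.pen", 2 * diag(2), envir = penden.env)             # toy penalized Hessian
assign("Derv2.cal", matrix(c(1, 0.2, 0.2, 1), 2, 2), envir = penden.env)
solve(2 * diag(2)) %*% matrix(c(1, 0.2, 0.2, 1), 2, 2) %*% solve(2 * diag(2))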
library(DOPE)
library(dplyr)
library(stringr)

dea_brands <- DOPE::dea_brands %>%
  rename("brand" = brands) %>%
  mutate(across(where(is.character), tolower)) %>%
  mutate(brand = str_remove_all(brand, "®")) %>%
  mutate(brand = case_when(brand == "kadianms-contin" ~ "kadian",
                           TRUE ~ brand)) %>%
  bind_rows(c(category = "morphine", brand = "ms contin"))

dea_class_cat <- DOPE::dea_factsheets %>%
  mutate(across(where(is.character), tolower)) %>%
  mutate(category = case_when(
    category == "ghb - gamma-hydroxybutyric acid" ~ "ghb",
    category == "ecstasy or mdma (also known as molly)" ~ "mdma",
    TRUE ~ category))

dea_street_names <- DOPE::dea_street_names %>%
  mutate(across(where(is.character), tolower)) %>%
  mutate(category = case_when(
    category == "amphetamine" ~ "amphetamines",
    category == "fentanyl and fentanyl derivatives" ~ "fentanyl",
    category == "mescaline" ~ "peyote and mescaline",
    category == "peyote" ~ "peyote and mescaline",
    TRUE ~ category))

categories <- data.frame(allCategory = c(dea_brands$category,
                                         dea_street_names$category,
                                         dea_class_cat$category)) %>%
  distinct() %>%
  arrange(allCategory)

library(sqldf)

# `s2.brand <> NULL` is never true in SQL, so that branch of the union
# returned no rows; `is not null` is the intended filter.
lookup_df <- sqldf("select cc.class, a.category, a.syn synonym
                    from (select b.category, b.brand as syn from dea_brands as b
                          union
                          select s1.category, s1.slang as syn from dea_street_names as s1
                          union
                          select s2.category, s2.brand as syn from dea_street_names as s2
                          where s2.brand is not null) as a
                    left join dea_class_cat as cc on a.category = cc.category")

usethis::use_data(lookup_df, overwrite = TRUE)

x <- sqldf("select distinct c.allCategory, b.Brand, s.Slang, cc.CategoryCat
            from categories as c
            left join (select category, 'Yes' as Brand from dea_brands) as b
              on b.category = c.allCategory
            left join (select category, 'Yes' as Slang from dea_street_names) as s
              on s.category = c.allCategory
            left join (select category, 'Yes' as CategoryCat from dea_class_cat) as cc
              on cc.category = c.allCategory")

dea_controlled <- DOPE::dea_controlled %>%
  mutate(across(where(is.character), tolower))

synPlusSubstance <- sqldf("select b.*, c.substance
                           from (select cc.class, a.category, a.syn
                                 from (select b.category, b.brand as syn from dea_brands as b
                                       union
                                       select s1.category, s1.slang as syn from dea_street_names as s1
                                       union
                                       select s2.category, s2.brand as syn from dea_street_names as s2
                                       where s2.brand is not null) as a
                                 left join dea_class_cat as cc on a.category = cc.category) as b
                           left join dea_controlled as c
                             on b.syn = c.synonym or b.class = c.synonym")
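# Quick check of the lookup table built above (the exact rows depend on the
# DOPE package data; columns are class, category, synonym per the query):
head(lookup_df)
subset(lookup_df, category == "fentanyl")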
EHI_2d <- function(x, model, critcontrol=NULL, type = "UK", paretoFront = NULL){ n.obj <- length(model) d <- model[[1]]@d if (!is.matrix(x)) x <- matrix(x, 1, d) n.candidates <- nrow(x) if(is.null(paretoFront) || is.null(critcontrol$refPoint)){ observations <- Reduce(cbind, lapply(model, slot, "y")) if(is.null(paretoFront)) paretoFront <- t(nondominated_points(t(observations))) } if (is.unsorted(paretoFront[,1])){ paretoFront <- paretoFront[order(paretoFront[,1]),] } refPoint <- critcontrol$refPoint if (is.null(refPoint)){ if(is.null(critcontrol$extendper)) critcontrol$extendper <- 0.2 PF_range <- apply(paretoFront, 2, range) refPoint <- matrix(PF_range[2,] + pmax(1, (PF_range[2,] - PF_range[1,]) * critcontrol$extendper), 1, n.obj) cat("No refPoint provided, ", signif(refPoint, 3), "used \n") } if (n.obj!=2){ print("Analytical hypervolume EI only works with 2 objectives") return(NULL) } else { pred <- predict_kms(model, newdata=x, type=type, checkNames = FALSE, light.return = TRUE, cov.compute = FALSE) mu <- t(pred$mean) sigma <- t(pred$sd) check <- checkPredict(x, model, threshold = critcontrol$threshold, distance = critcontrol$distance, type = type) resu <- rep(0, n.candidates) resu[check] <- -1 resu[!check] <- EHI_2d_wrap_Rcpp(paretoFront, refPoint, mu[!check,,drop=FALSE], sigma[!check,,drop=FALSE]) return(resu) } }
context("pifMatch") library(raster) data(lsat) lsat_b <- log(lsat) for(m in c("cor", "sam", "ed")) { test_that("pifMatch return classes", { expect_is(lb <- pifMatch(lsat_b, lsat, method = m, returnPifMap = TRUE, returnSimMap = TRUE, returnModels = TRUE), "list", info = sprintf("method=%s",m)) expect_equal(names(lb), c("img", "simMap", "pifMap", "models")) expect_is(lb$models$B1_dn, "lm") expect_true( all(vapply(lb[2:3],inherits, logical(1), "RasterLayer"))) expect_is(lb$img, "RasterStack") }) } test_that("error messages", { expect_error(lb <- pifMatch(lsat_b, lsat, method = "ok", returnPifMap = TRUE, returnSimMap = TRUE, returnModels = TRUE), "method must be one of") })
NULL groundstation_cancel_contact <- function(contactId) { op <- new_operation( name = "CancelContact", http_method = "DELETE", http_path = "/contact/{contactId}", paginator = list() ) input <- .groundstation$cancel_contact_input(contactId = contactId) output <- .groundstation$cancel_contact_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$cancel_contact <- groundstation_cancel_contact groundstation_create_config <- function(configData, name, tags = NULL) { op <- new_operation( name = "CreateConfig", http_method = "POST", http_path = "/config", paginator = list() ) input <- .groundstation$create_config_input(configData = configData, name = name, tags = tags) output <- .groundstation$create_config_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$create_config <- groundstation_create_config groundstation_create_dataflow_endpoint_group <- function(endpointDetails, tags = NULL) { op <- new_operation( name = "CreateDataflowEndpointGroup", http_method = "POST", http_path = "/dataflowEndpointGroup", paginator = list() ) input <- .groundstation$create_dataflow_endpoint_group_input(endpointDetails = endpointDetails, tags = tags) output <- .groundstation$create_dataflow_endpoint_group_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$create_dataflow_endpoint_group <- groundstation_create_dataflow_endpoint_group groundstation_create_mission_profile <- function(contactPostPassDurationSeconds = NULL, contactPrePassDurationSeconds = NULL, dataflowEdges, minimumViableContactDurationSeconds, name, tags = NULL, trackingConfigArn) { op <- new_operation( name = "CreateMissionProfile", http_method = "POST", http_path = "/missionprofile", paginator = list() ) input <- .groundstation$create_mission_profile_input(contactPostPassDurationSeconds = contactPostPassDurationSeconds, contactPrePassDurationSeconds = contactPrePassDurationSeconds, dataflowEdges = dataflowEdges, minimumViableContactDurationSeconds = minimumViableContactDurationSeconds, name = name, tags = tags, trackingConfigArn = trackingConfigArn) output <- .groundstation$create_mission_profile_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$create_mission_profile <- groundstation_create_mission_profile groundstation_delete_config <- function(configId, configType) { op <- new_operation( name = "DeleteConfig", http_method = "DELETE", http_path = "/config/{configType}/{configId}", paginator = list() ) input <- .groundstation$delete_config_input(configId = configId, configType = configType) output <- .groundstation$delete_config_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$delete_config <- groundstation_delete_config groundstation_delete_dataflow_endpoint_group <- function(dataflowEndpointGroupId) { op <- new_operation( name = "DeleteDataflowEndpointGroup", http_method = "DELETE", http_path = 
"/dataflowEndpointGroup/{dataflowEndpointGroupId}", paginator = list() ) input <- .groundstation$delete_dataflow_endpoint_group_input(dataflowEndpointGroupId = dataflowEndpointGroupId) output <- .groundstation$delete_dataflow_endpoint_group_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$delete_dataflow_endpoint_group <- groundstation_delete_dataflow_endpoint_group groundstation_delete_mission_profile <- function(missionProfileId) { op <- new_operation( name = "DeleteMissionProfile", http_method = "DELETE", http_path = "/missionprofile/{missionProfileId}", paginator = list() ) input <- .groundstation$delete_mission_profile_input(missionProfileId = missionProfileId) output <- .groundstation$delete_mission_profile_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$delete_mission_profile <- groundstation_delete_mission_profile groundstation_describe_contact <- function(contactId) { op <- new_operation( name = "DescribeContact", http_method = "GET", http_path = "/contact/{contactId}", paginator = list() ) input <- .groundstation$describe_contact_input(contactId = contactId) output <- .groundstation$describe_contact_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$describe_contact <- groundstation_describe_contact groundstation_get_config <- function(configId, configType) { op <- new_operation( name = "GetConfig", http_method = "GET", http_path = "/config/{configType}/{configId}", paginator = list() ) input <- .groundstation$get_config_input(configId = configId, configType = configType) output <- .groundstation$get_config_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$get_config <- groundstation_get_config groundstation_get_dataflow_endpoint_group <- function(dataflowEndpointGroupId) { op <- new_operation( name = "GetDataflowEndpointGroup", http_method = "GET", http_path = "/dataflowEndpointGroup/{dataflowEndpointGroupId}", paginator = list() ) input <- .groundstation$get_dataflow_endpoint_group_input(dataflowEndpointGroupId = dataflowEndpointGroupId) output <- .groundstation$get_dataflow_endpoint_group_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$get_dataflow_endpoint_group <- groundstation_get_dataflow_endpoint_group groundstation_get_minute_usage <- function(month, year) { op <- new_operation( name = "GetMinuteUsage", http_method = "POST", http_path = "/minute-usage", paginator = list() ) input <- .groundstation$get_minute_usage_input(month = month, year = year) output <- .groundstation$get_minute_usage_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$get_minute_usage <- groundstation_get_minute_usage groundstation_get_mission_profile <- function(missionProfileId) { op <- new_operation( name = "GetMissionProfile", 
http_method = "GET", http_path = "/missionprofile/{missionProfileId}", paginator = list() ) input <- .groundstation$get_mission_profile_input(missionProfileId = missionProfileId) output <- .groundstation$get_mission_profile_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$get_mission_profile <- groundstation_get_mission_profile groundstation_get_satellite <- function(satelliteId) { op <- new_operation( name = "GetSatellite", http_method = "GET", http_path = "/satellite/{satelliteId}", paginator = list() ) input <- .groundstation$get_satellite_input(satelliteId = satelliteId) output <- .groundstation$get_satellite_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$get_satellite <- groundstation_get_satellite groundstation_list_configs <- function(maxResults = NULL, nextToken = NULL) { op <- new_operation( name = "ListConfigs", http_method = "GET", http_path = "/config", paginator = list() ) input <- .groundstation$list_configs_input(maxResults = maxResults, nextToken = nextToken) output <- .groundstation$list_configs_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$list_configs <- groundstation_list_configs groundstation_list_contacts <- function(endTime, groundStation = NULL, maxResults = NULL, missionProfileArn = NULL, nextToken = NULL, satelliteArn = NULL, startTime, statusList) { op <- new_operation( name = "ListContacts", http_method = "POST", http_path = "/contacts", paginator = list() ) input <- .groundstation$list_contacts_input(endTime = endTime, groundStation = groundStation, maxResults = maxResults, missionProfileArn = missionProfileArn, nextToken = nextToken, satelliteArn = satelliteArn, startTime = startTime, statusList = statusList) output <- .groundstation$list_contacts_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$list_contacts <- groundstation_list_contacts groundstation_list_dataflow_endpoint_groups <- function(maxResults = NULL, nextToken = NULL) { op <- new_operation( name = "ListDataflowEndpointGroups", http_method = "GET", http_path = "/dataflowEndpointGroup", paginator = list() ) input <- .groundstation$list_dataflow_endpoint_groups_input(maxResults = maxResults, nextToken = nextToken) output <- .groundstation$list_dataflow_endpoint_groups_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$list_dataflow_endpoint_groups <- groundstation_list_dataflow_endpoint_groups groundstation_list_ground_stations <- function(maxResults = NULL, nextToken = NULL, satelliteId = NULL) { op <- new_operation( name = "ListGroundStations", http_method = "GET", http_path = "/groundstation", paginator = list() ) input <- .groundstation$list_ground_stations_input(maxResults = maxResults, nextToken = nextToken, satelliteId = satelliteId) output <- .groundstation$list_ground_stations_output() config <- get_config() svc <- .groundstation$service(config) request <- 
new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$list_ground_stations <- groundstation_list_ground_stations groundstation_list_mission_profiles <- function(maxResults = NULL, nextToken = NULL) { op <- new_operation( name = "ListMissionProfiles", http_method = "GET", http_path = "/missionprofile", paginator = list() ) input <- .groundstation$list_mission_profiles_input(maxResults = maxResults, nextToken = nextToken) output <- .groundstation$list_mission_profiles_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$list_mission_profiles <- groundstation_list_mission_profiles groundstation_list_satellites <- function(maxResults = NULL, nextToken = NULL) { op <- new_operation( name = "ListSatellites", http_method = "GET", http_path = "/satellite", paginator = list() ) input <- .groundstation$list_satellites_input(maxResults = maxResults, nextToken = nextToken) output <- .groundstation$list_satellites_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$list_satellites <- groundstation_list_satellites groundstation_list_tags_for_resource <- function(resourceArn) { op <- new_operation( name = "ListTagsForResource", http_method = "GET", http_path = "/tags/{resourceArn}", paginator = list() ) input <- .groundstation$list_tags_for_resource_input(resourceArn = resourceArn) output <- .groundstation$list_tags_for_resource_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$list_tags_for_resource <- groundstation_list_tags_for_resource groundstation_reserve_contact <- function(endTime, groundStation, missionProfileArn, satelliteArn, startTime, tags = NULL) { op <- new_operation( name = "ReserveContact", http_method = "POST", http_path = "/contact", paginator = list() ) input <- .groundstation$reserve_contact_input(endTime = endTime, groundStation = groundStation, missionProfileArn = missionProfileArn, satelliteArn = satelliteArn, startTime = startTime, tags = tags) output <- .groundstation$reserve_contact_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$reserve_contact <- groundstation_reserve_contact groundstation_tag_resource <- function(resourceArn, tags) { op <- new_operation( name = "TagResource", http_method = "POST", http_path = "/tags/{resourceArn}", paginator = list() ) input <- .groundstation$tag_resource_input(resourceArn = resourceArn, tags = tags) output <- .groundstation$tag_resource_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$tag_resource <- groundstation_tag_resource groundstation_untag_resource <- function(resourceArn, tagKeys) { op <- new_operation( name = "UntagResource", http_method = "DELETE", http_path = "/tags/{resourceArn}", paginator = list() ) input <- .groundstation$untag_resource_input(resourceArn = resourceArn, tagKeys = tagKeys) output <- .groundstation$untag_resource_output() config <- 
get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$untag_resource <- groundstation_untag_resource groundstation_update_config <- function(configData, configId, configType, name) { op <- new_operation( name = "UpdateConfig", http_method = "PUT", http_path = "/config/{configType}/{configId}", paginator = list() ) input <- .groundstation$update_config_input(configData = configData, configId = configId, configType = configType, name = name) output <- .groundstation$update_config_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$update_config <- groundstation_update_config groundstation_update_mission_profile <- function(contactPostPassDurationSeconds = NULL, contactPrePassDurationSeconds = NULL, dataflowEdges = NULL, minimumViableContactDurationSeconds = NULL, missionProfileId, name = NULL, trackingConfigArn = NULL) { op <- new_operation( name = "UpdateMissionProfile", http_method = "PUT", http_path = "/missionprofile/{missionProfileId}", paginator = list() ) input <- .groundstation$update_mission_profile_input(contactPostPassDurationSeconds = contactPostPassDurationSeconds, contactPrePassDurationSeconds = contactPrePassDurationSeconds, dataflowEdges = dataflowEdges, minimumViableContactDurationSeconds = minimumViableContactDurationSeconds, missionProfileId = missionProfileId, name = name, trackingConfigArn = trackingConfigArn) output <- .groundstation$update_mission_profile_output() config <- get_config() svc <- .groundstation$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .groundstation$operations$update_mission_profile <- groundstation_update_mission_profile
SSgitay <- selfStart(
  ~ (k + slope * log(area))^2,
  function(mCall, data, LHS, ...) {
    xy <- sortedXyData(mCall[["area"]], LHS, data)
    value <- as.vector(coef(lm(sqrt(xy[, "y"]) ~ log(xy[, "x"]))))
    names(value) <- mCall[c("k", "slope")]
    value
  },
  c("k", "slope"))
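# Usage sketch for the SSgitay selfStart model above, on simulated data with
# hypothetical parameter values k = 5, slope = 1.5:
set.seed(1)
d <- data.frame(area = seq(1, 100, length.out = 40))
d$y <- (5 + 1.5 * log(d$area))^2 + rnorm(40, sd = 2)
fit <- nls(y ~ SSgitay(area, k, slope), data = d)
coef(fit)  # starting values come from the lm() inside the selfStart initializer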
context("Testing if project source pack creation works properly [test_proj_pack]") library(RSuite) library(testthat) source("R/test_utils.R") source("R/project_management.R") test_that_managed("Project source pack creation under Jenkins CI", { prj_path <- file.path(get_wspace_dir(), "TestProject") prj <- init_test_project(skip_rc = TRUE) create_test_package("TestPackage", prj = prj, ver = "0.1") RSuite::prj_install_deps(prj = prj) Sys.setenv(JOB_NAME = "TestProjectJob", BUILD_NUMBER = "7") on_test_exit(function() { Sys.unsetenv(c("JOB_NAME", "BUILD_NUMBER")) }) RSuite::prj_pack(prj = prj, path = prj_path) expect_true(file.exists(file.path(prj_path, "prjpack_TestProject_0.1_7.zip"))) })
"precipDeviation"
ComparisonMap <- function (matrix.list, MatrixCompFunc, ..., repeat.vector = NULL, parallel = FALSE){ n.matrix <- length(matrix.list) if(is.null(names(matrix.list))) {names(matrix.list) <- 1:n.matrix} matrix.names <- names (matrix.list) CompareToN <- function(n) ldply(matrix.list[(n+1):n.matrix], function(x) {MatrixCompFunc(x, matrix.list[[n]], ...)[1:2]}) comparisons <- adply(1:(n.matrix-1), 1, CompareToN, .parallel = parallel) corrs <- suppressMessages(acast(comparisons[-4], X1~.id)[,matrix.names[-1]]) probs <- suppressMessages(acast(comparisons[-3], X1~.id)[,matrix.names[-1]]) probabilities <- array (0, c(n.matrix, n.matrix)) correlations <- probabilities probabilities[upper.tri(probabilities)] <- probs[upper.tri(probs, diag=T)] correlations[upper.tri(correlations)] <- corrs[upper.tri(probs, diag=T)] if (!is.null (repeat.vector)) { repeat.matrix <- sqrt(outer(repeat.vector, repeat.vector)) correlations[lower.tri(correlations)] <- t(correlations/repeat.matrix)[lower.tri(correlations)] diag (correlations) <- repeat.vector } rownames (correlations) <- matrix.names colnames (correlations) <- matrix.names dimnames (probabilities) <- dimnames (correlations) output <- list ('correlations' = t(correlations), 'probabilities' = t(probabilities)) return (output) }
emfa <- function(dta, nbf, min.err = 1e-06, verbose = FALSE,
                 svd.method = c("fast.svd", "irlba")) {
  svd.method <- match.arg(svd.method)
  m <- ncol(dta)
  n <- nrow(dta)
  mdta <- t(rep(1, n)) %*% dta / n
  vdta <- (t(rep(1, n)) %*% dta^2 / n) - mdta^2
  sddta <- sqrt(n / (n - 1)) * sqrt(vdta)
  cdta <- dta - rep(1, n) %*% mdta
  crit <- NULL  # so Objective is defined even when nbf == 0 (was an error before)
  if (nbf == 0) {
    B <- NULL
    Psi <- rep(1, m)
    Factors <- NULL
  }
  if (nbf > 0) {
    if (svd.method == "fast.svd") svddta <- fast.svd(cdta / sqrt(n - 1))
    if (svd.method == "irlba") svddta <- irlba(cdta / sqrt(n - 1), nv = nbf)
    evalues <- (svddta$d[1:nbf])^2
    evectors <- svddta$v[, 1:nbf, drop = FALSE]
    B <- evectors %*% diag(sqrt(evalues), nrow = nbf, ncol = nbf)
    Psi <- as.vector(sddta^2 - (B^2 %*% rep(1, nbf))[, 1])
    Psi[Psi <= 1e-16] <- 1e-16
    crit <- 1
    while (crit > min.err) {
      iS <- ifa(Psi, B)
      xiSB <- cdta %*% iS$iSB
      Cyz <- t(cdta) %*% xiSB / (n - 1)
      Czz <- t(iS$iSB) %*% Cyz + diag(nbf) - t(B) %*% iS$iSB
      Bnew <- Cyz %*% solve(Czz)
      Psinew <- as.vector(sddta^2 - (Bnew^2 %*% rep(1, nbf))[, 1])
      Psinew[Psinew <= 1e-16] <- 1e-16
      crit <- mean((Psi - Psinew)^2)
      B <- Bnew
      Psi <- Psinew
      if (verbose)
        print(paste("Convergence criterion: ",
                    signif(crit, digits = ceiling(-log10(min.err))), sep = ""))
    }
    sB <- scale(t(B), center = FALSE, scale = sqrt(Psi))
    G <- solve(diag(nbf) + sB %*% t(sB))
    sB <- scale(t(B), center = FALSE, scale = Psi)
    Factors <- cdta %*% t(sB) %*% t(G)
  }
  res <- list(B = B, Psi = as.vector(Psi), Factors = Factors, Objective = crit)
  return(res)
}
make.path.matrix = function( res ) { stps = res$path[[1]] stps.name = res$path[[2]] fets = unique( stps.name ) mat = matrix( 0, ncol= length(fets), nrow=1+(length(stps)/2) ) cur = rep( 0, length(fets) ) names(cur) = fets for ( i in 1:(length(stps)/2) ) { cur[ stps.name[[i]] ] = cur[ stps.name[[i]] ] + stps[i] cur[ stps.name[[i+1]] ] = cur[ stps.name[[i+1]] ] + stps[i+1] mat[ i+1, ] = cur } attributes( mat )$features = fets mat } path.matrix.chart = function( path.matrix, xlab="step", ylab="beta", bty="n", ... ) { if ( is.textreg.result( path.matrix ) ) { path.matrix = make.path.matrix( path.matrix ) } fets = attributes(path.matrix)$features matplot( (1:nrow(path.matrix))/2, path.matrix, lty=1, col=1:length(fets), xlab=xlab, ylab=ylab, bty=bty, type="n", ...) abline( h=0, lty=3, col="grey" ) matplot( (1:nrow(path.matrix))/2, path.matrix, type="l", lwd=2, lty=1, col=1:length(fets), bty="n", pch=19, add=TRUE ) legend( "topright", fets, lty=1,col=1:length(fets), bty="n", cex=0.8, bg="white") } plot.textreg.result = function( x, ... ) { path.matrix.chart( x, ... ) }
[ { "title": "ChainLadder 0.1.6 released with chain-ladder factor models", "href": "http://www.magesblog.com/2013/08/chainladder-016-released-with-chain.html" }, { "title": "R package “fishdynr”", "href": "http://menugget.blogspot.com/2015/02/r-package-fishdynr.html" }, { "title": "Fortran and R – Speed Things Up", "href": "https://rollingyours.wordpress.com/2014/04/11/fortran-and-r-speed-things-up/" }, { "title": "Does anything NOT beat the GARCH(1,1)?", "href": "http://unstarched.net/2013/01/07/does-anything-not-beat-the-garch11/" }, { "title": "Kaplan-Meier Survival Plot – with at risk table", "href": "https://mcfromnz.wordpress.com/2011/11/06/kaplan-meier-survival-plot-with-at-risk-table/" }, { "title": "GNU R loop speed comparison", "href": "http://rsnippets.blogspot.com/2013/03/gnu-r-loop-speed-comparison.html" }, { "title": "Turn R (Shiny) Scripts Into Double-clickable OS X Applications With One Line of Code", "href": "http://datadrivensecurity.info/blog/posts/2014/Nov/os-x-yosemite-r-apps/" }, { "title": "ASReml-R: Storing A inverse as a sparse matrix", "href": "http://ggorjan.blogspot.com/2010/12/asreml-r-storing-inverse-as-matrix.html" }, { "title": "The R-Files: Jeff Ryan", "href": "http://blog.revolutionanalytics.com/2011/07/the-r-files-jeff-ryan.html" }, { "title": "Logistic Regression in R – Part One", "href": "https://mathewanalytics.com/2015/09/02/logistic-regression-in-r/" }, { "title": "gtrends 1.3.0 now on CRAN: Google Trends in R", "href": "http://dirk.eddelbuettel.com/blog/2015/11/29/" }, { "title": "Analyzing big data with Revolution R Enterprise", "href": "http://blog.revolutionanalytics.com/2011/03/analyzing-big-data-with-revolution-r-enterprise.html" }, { "title": "New Shiny website launched; Shiny 0.9 released", "href": "https://blog.rstudio.org/2014/03/27/shiny-website-and-0-9/" }, { "title": "SAS PROC MCMC example in R; Poisson Regression", "href": "http://wiekvoet.blogspot.com/2014/11/sas-proc-mcmc-example-in-r-poisson.html" }, { "title": "Calculate turning angles and step lengths from location data", "href": "http://quantitative-ecology.blogspot.com/2007/05/anglefun-function-xxyy-bearing-true-as.html" }, { "title": "Upgrading R (and packages)", "href": "http://www.quantumforest.com/2011/10/upgrading-r-and-packages/" }, { "title": "“Advanced R” Course – November 15-16, 2012", "href": "http://www.milanor.net/blog/advanced-r-course-november-15-16-2012/" }, { "title": "ggtree for outbreak data", "href": "http://guangchuangyu.github.io/2016/09/ggtree-for-outbreak-data/" }, { "title": "NeuralNetTools 1.0.0 now on CRAN", "href": "https://beckmw.wordpress.com/2014/12/20/neuralnettools-1-0-0-now-on-cran/" }, { "title": "An example of OOP in GNU R using S4 Classes", "href": "http://rsnippets.blogspot.com/2012/08/an-example-of-oop-in-gnu-r-using-s4.html" }, { "title": "R to Latex packages: Coverage", "href": "http://conjugateprior.org/2013/03/r-to-latex-packages-coverage/?utm_source=rss&utm_medium=rss&utm_campaign=r-to-latex-packages-coverage" }, { "title": "NFL Prediction – Algorithm 1", "href": "https://pirategrunt.wordpress.com/2012/12/06/nfl-prediction-algorithm-1/" }, { "title": "Le Monde puzzle [ "href": "https://xianblog.wordpress.com/2012/07/21/le-monde-puzzle-783/" }, { "title": "New RInside release", "href": "http://dirk.eddelbuettel.com/blog/2009/07/19/" }, { "title": "Moving The Earth (well, Alaska & Hawaii) With R", "href": "http://rud.is/b/2014/11/16/moving-the-earth-well-alaska-hawaii-with-r/" }, { "title": "A Shiny App for Experimenting with Dynamic 
Programming", "href": "http://www.econometricsbysimulation.com/2013/11/a-shiny-app-for-experimenting-with.html" }, { "title": "Abbreviations of R Commands Explained: 250+ R Abbreviations", "href": "http://jeromyanglim.blogspot.com/2010/05/abbreviations-of-r-commands-explained.html" }, { "title": "The end of the line for error bars in R", "href": "https://johnbaumgartner.wordpress.com/2013/11/06/line-endings/" }, { "title": "Using apply() to create a unique id", "href": "https://feedproxy.google.com/~r/CoffeeAndEconometricsInTheMorning/~3/qawLWLKzkPg/using-apply-to-create-unique-id.html" }, { "title": "Are new SEC rules enough to prevent another Flash Crash?", "href": "http://blog.revolutionanalytics.com/2011/09/flash-crash.html" }, { "title": "Etiquette on the mailing list, to RTFM or not to RTFM", "href": "http://www.numbertheory.nl/2011/11/01/etiquette-on-the-mailing-list-to-rtfm-or-not-to-rtfm/" }, { "title": "Species occurrence data", "href": "http://ropensci.org/blog/2014/03/17/spocc/" }, { "title": "Vacancies in the Netherlands", "href": "http://wiekvoet.blogspot.com/2015/12/vacancies-in-netherlands.html" }, { "title": "ABC in London", "href": "https://xianblog.wordpress.com/2011/02/16/abc-in-london/" }, { "title": "Busting gay stereotypes with data", "href": "http://blog.revolutionanalytics.com/2010/10/gay-stereotypes-busted.html" }, { "title": "Le Monde puzzle [52]", "href": "https://xianblog.wordpress.com/2010/12/31/le-monde-puzzle-52/" }, { "title": "Get your questions answered about Open Data", "href": "http://blog.revolutionanalytics.com/2013/05/get-your-questions-answered-about-open-data.html" }, { "title": "Modeling Match Results in La Liga Using a Hierarchical Bayesian Poisson Model: Part three.", "href": "http://www.sumsar.net/blog/2013/08/modeling-match-results-in-la-liga-part-three/" }, { "title": "Uncertainty in parameter estimates using multilevel models", "href": "http://andrewgelman.com/2013/08/03/uncertainty-in-parameter-estimates-using-multilevel-models/" }, { "title": "Comparing Stan to JAGS for Bayesian Inference (Part 1?)", "href": "http://www.personal.psu.edu/mar36/blogs/the_ubuntu_r_blog/2012/09/comparing-stan-to-jags-for-bayesian-inference-part-1.html" }, { "title": "PostgreSQL, Excel, R, and a Really Big Data Set!", "href": "https://rforwork.info/2012/04/20/postgresql-excel-r-and-a-really-big-data-set/" }, { "title": "Julia functions for the Rmath library", "href": "http://dmbates.blogspot.com/2012/03/julia-functions-for-rmath-library.html" }, { "title": "Open data sets you can use with R", "href": "http://blog.revolutionanalytics.com/2015/05/open-data-sets-you-can-use-with-r.html" }, { "title": "Commodities vs. 
commodity stocks", "href": "http://www.rcasts.com/2011/05/introducing-r-in-enterprise.html" }, { "title": "Dataset: Wisconsin Union Protester Tweets "href": "http://www.michaelbommarito.com/2011/02/21/dataset-wisconsin-union-protester-tweets-wiunion/" }, { "title": "LaTeX Typesetting – Basics", "href": "http://www.wekaleamstudios.co.uk/posts/latex-typesetting-basics/" }, { "title": "K-means Clustering (from “R in Action”)", "href": "https://www.r-statistics.com/2013/08/k-means-clustering-from-r-in-action/" }, { "title": "Converting strsplit() output to a data.frame", "href": "http://ggorjan.blogspot.com/2011/01/converting-strsplit-output-to-dataframe.html" }, { "title": "Using Jupyter Notebooks to Define Literate APIs", "href": "https://blog.ouseful.info/2016/02/02/using-jupyter-notebooks-to-define-literate-apis/" }, { "title": "Mathematical abstraction and the robustness to assumptions", "href": "https://bayesianbiologist.com/2013/04/12/mathematical-abstraction-and-the-robustness-to-assumptions/" } ]
context("bRacatus") input_data <- "Hemitriccus mirandae" test_that("Expected data structure",{ expect_equal(class(input_data),"character") })
context("SDP codes") test_that("calc_sdp works", { expect_error(suppressWarnings(calc_sdp(rbind(1,3,1)))) expect_equal(suppressWarnings( calc_sdp( rbind( c(1,1), c(3,1), c(1,3), c(3,3) ) ) ), c(1, 2) ) expect_equal(calc_sdp( rbind(c(3, 1, 1, 1), c(1, 3, 1, 1), c(1, 1, 3, 1), c(1, 1, 1, 3), c(3, 3, 1, 1), c(3, 1, 3, 1), c(3, 1, 1, 3), c(1, 3, 3, 1), c(1, 3, 1, 3), c(1, 1, 3, 3), c(3, 3, 3, 1), c(3, 3, 1, 3), c(3, 1, 3, 3), c(1, 3, 3, 3)) ), c(1, 2, 4, 8, 3, 5, 9, 6, 10, 12, 7, 11, 13, 14) ) g <- rbind(c(3,1,1,1,1,1,1,1), c(1,3,1,1,1,1,1,1), c(1,1,1,1,1,3,1,1), c(1,1,1,1,1,1,1,3), c(3,1,1,1,1,1,1,3), c(1,3,1,3,1,3,1,3), c(3,1,3,1,3,1,3,1), c(3,3,3,3,1,1,1,1), c(1,1,1,1,3,3,3,3), c(3,3,1,1,1,1,3,3)) expect_equal(calc_sdp(g), c(1,2,32,128,129,170,85,15,240,195) ) set.seed(38444584) g <- matrix(sample(c(1,3), 8*12, replace=TRUE), ncol=8) n_AA <- rowSums(g==1) g <- g[n_AA > 0 & n_AA < 8,] expect_equal(calc_sdp(g), apply(g, 1, function(a) sum(((a-1)/2)*2^(seq(along=a)-1)))) expect_equal( calc_sdp( c(1,1,1,3,1,1,1,1) ), 8) expect_equal( calc_sdp( data.frame(1,1,1,3,1,1,1,1)), 8) }) test_that("invert_sdp works", { expected <- rbind(c(3,1), c(1,3)) expect_equal(invert_sdp(c(1,2), 2), expected) g <- rbind(c(3, 1, 1, 1), c(1, 3, 1, 1), c(1, 1, 3, 1), c(1, 1, 1, 3), c(3, 3, 1, 1), c(3, 1, 3, 1), c(3, 1, 1, 3), c(1, 3, 3, 1), c(1, 3, 1, 3), c(1, 1, 3, 3), c(3, 3, 3, 1), c(3, 3, 1, 3), c(3, 1, 3, 3), c(1, 3, 3, 3)) expect_equal(invert_sdp(c(1, 2, 4, 8, 3, 5, 9, 6, 10, 12, 7, 11, 13, 14), 4), g) g <- rbind(c(3,1,1,1,1,1,1,1), c(1,3,1,1,1,1,1,1), c(1,1,1,1,1,3,1,1), c(1,1,1,1,1,1,1,3), c(3,1,1,1,1,1,1,3), c(1,3,1,3,1,3,1,3), c(3,1,3,1,3,1,3,1), c(3,3,3,3,1,1,1,1), c(1,1,1,1,3,3,3,3), c(3,3,1,1,1,1,3,3)) expect_equal(invert_sdp(c(1,2,32,128,129,170,85,15,240,195), 8), g) set.seed(38444584) g <- matrix(sample(c(1,3), 8*12, replace=TRUE), ncol=8) n_AA <- rowSums(g==1) g <- g[n_AA > 0 & n_AA < 8,] expect_equal(invert_sdp(calc_sdp(g), 8), g) }) test_that("sdp2char works", { expect_equal(sdp2char(c(1,2), 2), c("A|B", "B|A")) expect_equal(sdp2char(c(1, 2, 4, 8, 3, 5, 9, 6, 10, 12, 7, 11, 13, 14), 4), c("A|BCD", "B|ACD", "C|ABD", "D|ABC", "AB|CD", "AC|BD", "AD|BC", "BC|AD", "BD|AC", "CD|AB", "ABC|D", "ABD|C", "ACD|B", "BCD|A")) expect_equal(sdp2char(c(1, 2, 4, 8, 3, 5, 9, 6, 10, 12, 7, 11, 13, 14), strains=LETTERS[1:4]), c("A|BCD", "B|ACD", "C|ABD", "D|ABC", "AB|CD", "AC|BD", "AD|BC", "BC|AD", "BD|AC", "CD|AB", "ABC|D", "ABD|C", "ACD|B", "BCD|A")) expect_equal(sdp2char(c(1,2,32,128,129,170,85,15,240,195), 8), c("A|BCDEFGH", "B|ACDEFGH", "F|ABCDEGH", "H|ABCDEFG", "AH|BCDEFG", "BDFH|ACEG", "ACEG|BDFH", "ABCD|EFGH", "EFGH|ABCD", "ABGH|CDEF")) expect_equal(sdp2char(c(1,2,32,128,129,170,85,15,240,195), strains=LETTERS[1:8]), c("A|BCDEFGH", "B|ACDEFGH", "F|ABCDEGH", "H|ABCDEFG", "AH|BCDEFG", "BDFH|ACEG", "ACEG|BDFH", "ABCD|EFGH", "EFGH|ABCD", "ABGH|CDEF")) })
download_indicator_EUS <- function( indicator_code, fromTime, toTime, gender= c(NA,"T","F","M")[1], ageInterv = NA, countries = c("BE", "DK", "FR", "DE", "EL", "IE", "IT", "LU", "NL", "PT", "ES", "AT", "FI", "SE", "CY", "CZ", "EE", "HU", "LV", "LT", "MT", "PL", "SK", "SI", "BG", "RO", "HR"), rawDump=FALSE, uniqueIdentif = 1){ message_out<-list() out_obj <- convergEU_glb()$tmpl_out downTB <- NULL varFinali <- NULL test1 <- fromTime >= 1960 test2 <- toTime > fromTime if ((!test1) || (!test2)) { out_obj$err <- "Error: wrong time window." return(out_obj) } test3 <- indicator_code %in% convergEU_glb()$metaEUStat$selectorUser if (!test3) { out_obj$err <- "Error: indicator not available from Eurostat database." return(out_obj) }else{ posizName <- which(indicator_code == convergEU_glb()$metaEUStat$selectorUser) checkSubSel <- is.na(convergEU_glb()$metaEUStat$subSelection[posizName]) if(checkSubSel){ extName <- indicator_code }else{ extName <- convergEU_glb()$metaEUStat$Official_code_purified[posizName] } }; if ((!(gender %in% c("T","F","M"))) & !is.na(gender)) { out_obj$err <- "Error: Impossible gender selection." return(out_obj) } mytmp <- utils::capture.output( downTB <- purrr::possibly(eurostat::get_eurostat, NULL, quiet = FALSE)(extName, time_format = "num"), type = "message"); if(is.null(downTB)){ out_obj$err <- list("Error: data not available, check connection and indicator.", mytmp) return(out_obj) } if(rawDump) return(downTB) estrattore <- rep(FALSE,nrow(downTB)) namesDB <- unique(downTB$geo) test4 <- sapply(countries,function(vx){vx %in% namesDB}) if (any(!test4)) { out_obj$err <- "Error: at least one country not available." return(out_obj) }else{ varFinali <- countries for(aux in countries){ estrattore <- estrattore | (downTB$geo == aux) } } isTime <- "time" %in% names(downTB) if(isTime){ test91 <- fromTime <= downTB$time test92 <- toTime >= downTB$time if(sum(test91 & test92) < 1){ out_obj$err <- "Error: no observations in the selected time interval." return(out_obj) }else{}; varFinali <- c("time",varFinali) estrattore <- estrattore & ((downTB$time >= fromTime) & (downTB$time <= toTime)) }else{}; isGender <- "sex" %in% names(downTB) if(isGender ){ if(is.na(gender)){ gender <- "T" message_out[["gender"]] <- "Gender automatically set to 'T'." }else{}; test8 <- gender %in% unique(downTB$sex) if(!test8){ out_obj$err <- "Error: wrong gender selection." return(out_obj) }else{}; varFinali <- c("sex",varFinali) estrattore <- estrattore & (downTB$sex == gender) }else{}; isAgeClass <- "age" %in% names(downTB) if(isAgeClass){ if(is.na(ageInterv)){ ageInterv <- unique(downTB$age)[1] message_out[["Age"]] <- "Age automatically set." }else{}; test7 <- ageInterv %in% unique(downTB$age) if(!test7){ out_obj$err <- "Error: wrong age class." 
return(out_obj) }else{}; varFinali <- c("age",varFinali) estrattore <- estrattore & (downTB$age == ageInterv) }else{}; ttmp <- downTB[estrattore,] ttmp2 <- ttmp auxTag <- compo_cond_EUS(ttmp2) if(!is.null(auxTag)){ seleTagLs <- unique(auxTag) ttmp3 <- dplyr::mutate(ttmp2,auxTag = auxTag) ttmp3 <- dplyr::filter(ttmp3,auxTag == seleTagLs[uniqueIdentif]) resTB <- tidyr::spread(ttmp3, key = "geo", value = "values") message_out[["Further_Conditioning"]] <- list( current=paste0("Selected uniqueIdentif = ", uniqueIdentif," -> ",seleTagLs[uniqueIdentif]), available_seleTagLs= data.frame(uniqueIdentif = 1:length(seleTagLs), tags=seleTagLs) ) }else{ resTB <- tidyr::spread(ttmp2, key = "geo", value = "values") } message_out[["Conditioning"]] <- list( indicator_code = indicator_code, ageInterv = ageInterv, gender = gender ) if(isAgeClass && !("age" %in% varFinali)) varFinali <- c("age",varFinali); if(isGender && !("sex" %in% varFinali)) varFinali <- c("sex",varFinali); for(aux in varFinali){ if(!(aux %in% names(resTB) )){ resTB <- dplyr::mutate(resTB, !!aux := rep(NA,nrow(resTB))) }else{}; } out_obj$res <- resTB[,varFinali] if(length(message_out) > 0) out_obj$msg <- message_out return(out_obj) }
dcor.ttest <- function(x, y, distance = FALSE) {
  .Deprecated(new = "dcorT.test", package = "energy",
              msg = "dcor.ttest is deprecated, replaced by dcorT.test")
  if (distance == TRUE) {
    x <- as.dist(x)
    y <- as.dist(y)
  }
  return(dcorT.test(x, y))
}

dcor.t <- function(x, y, distance = FALSE) {
  .Deprecated(new = "dcorT", package = "energy",
              msg = "dcor.t is deprecated, replaced by dcorT")
  if (distance == TRUE) {
    x <- as.dist(x)
    y <- as.dist(y)
  }
  return(dcorT(x, y))
}

DCOR <- function(x, y, index = 1.0) {
  .Deprecated(new = "dcor", package = "energy",
              msg = "DCOR is deprecated, replaced by dcor or dcov")
  if (!inherits(x, "dist")) x <- dist(x)
  if (!inherits(y, "dist")) y <- dist(y)
  x <- as.matrix(x)
  y <- as.matrix(y)
  n <- nrow(x)
  m <- nrow(y)
  if (n != m) stop("Sample sizes must agree")
  if (!(all(is.finite(c(x, y)))))
    stop("Data contains missing or infinite values")
  if (index < 0 || index > 2) {
    warning("index must be in [0,2], using default index=1")
    index <- 1.0
  }
  stat <- 0
  dims <- c(n, ncol(x), ncol(y))
  # Double-centered distance matrices raised to the chosen index
  Akl <- function(x) {
    d <- as.matrix(x)^index
    m <- rowMeans(d)
    M <- mean(d)
    a <- sweep(d, 1, m)
    b <- sweep(a, 2, m)
    return(b + M)
  }
  A <- Akl(x)
  B <- Akl(y)
  dCov <- sqrt(mean(A * B))
  dVarX <- sqrt(mean(A * A))
  dVarY <- sqrt(mean(B * B))
  V <- sqrt(dVarX * dVarY)
  if (V > 0) dCor <- dCov / V else dCor <- 0
  return(list(dCov = dCov, dCor = dCor, dVarX = dVarX, dVarY = dVarY))
}
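# Usage sketch: the deprecated DCOR() above needs only base R, so it can be
# exercised directly on small random samples.
set.seed(42)
x <- matrix(rnorm(60), ncol = 2)
y <- matrix(rnorm(60), ncol = 2)
DCOR(x, y)  # list with dCov, dCor, dVarX, dVarY; cf. energy::dcor(x, y)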
# Sample of n = 6 observations, assumed Uniform(0, theta). The MLE of theta
# is max(x); since P(max(x) <= c * theta) = c^n, dividing the maximum by
# 0.05^(1/6) gives a one-sided 95% upper confidence bound for theta.
x <- c(1.6, 2.8, 6.2, 8.2, 8.5, 8.7)
c(max(x), max(x) / 0.05^(1/6))
flattenlist <- function(x) {
  # Assumes a depth() helper (not defined in this snippet) that returns the
  # nesting depth of a list.
  n <- depth(x)
  if (n == 2) {
    return(x)
  } else if (n == 3) {
    unlist(x, recursive = FALSE)
  } else {
    morelists <- sapply(x, function(xprime) class(xprime)[1] == "list")
    out <- c(x[!morelists], unlist(x[morelists], recursive = FALSE))
    if (sum(morelists)) {
      Recall(out)
    } else {
      return(out)
    }
  }
}
desviacion <- function(x, variable = NULL, pesos = NULL, tipo = c("muestral","cuasi")){ tipo <- tolower(tipo) tipo <- match.arg(tipo) x <- data.frame(x) varnames <- names(x) if(is.null(variable)){ varcuan <- names(x[unlist(lapply(x, is.numeric))]) seleccion = match(varcuan,varnames) x <- x[seleccion] varnames <- varcuan } else{ if(is.numeric(variable)){ if(all(variable <= length(x))){ variable <- variable } else{ stop("Selecci\u00f3n err\u00f3nea de variables") } } if(is.character(variable)){ if(all(variable %in% varnames)){ variable = match(variable,varnames) } else { stop("El nombre de la variable no es v\u00e1lido") } } } if(is.null(pesos) & !is.null(variable)){ x <- x[,variable] %>% as.data.frame() varnames <- varnames[variable] } if(!is.null(pesos) & !is.null(variable)){ if((length(variable) | length(pesos)) > 1){ stop("Para calcular la desviaci\u00f3n t\u00edpica a partir de la distribuci\u00f3n de frecuencias solo puedes seleccionar una variable y unos pesos") } if(is.numeric(pesos)){ pesos <- pesos } if(is.character(pesos)){ if(pesos %in% varnames){ pesos = match(pesos,varnames) } else { stop("El nombre de los pesos no es v\u00e1lido") } } x <- x[,c(variable,pesos)] %>% as.data.frame() varnames <- varnames[c(variable,pesos)] } clase <- sapply(x, class) if (!all(clase %in% c("numeric","integer"))) { stop("No puede calcularse la desviaci\u00f3n t\u00edpica, alguna variable que has seleccionado no es cuantitativa") } if(is.null(pesos) & tipo == "muestral"){ n <- nrow(x) factor = (n-1)/n } else{ factor <- 1 } if(is.null(pesos)){ desviacion <- apply(x,2,sd,na.rm=TRUE) desviacion <- sqrt(factor) * desviacion desviacion <- as.data.frame(t(desviacion)) } else{ desviacion <- x %>% na.omit %>% rename(variable2 = varnames[1], pesos = varnames[2]) %>% mutate(media = as.numeric(media(x,variable=1,pesos=2)), sumatorio = (variable2-media)^2*pesos) varnames <- varnames[1] if(tipo == "muestral"){ desviacion <- desviacion %>% summarize(desviacion = sqrt(sum(sumatorio)/sum(pesos))) } else{ desviacion <- desviacion %>% summarize(desviacion = sqrt(sum(sumatorio)/(sum(pesos)-1))) } names(desviacion) <- paste("desviacion_",varnames[1],sep="") } names(desviacion) <- paste("desviacion_",varnames,sep="") return(desviacion) }
geom_bar_interactive <- function(...) layer_interactive(geom_bar, ...) GeomInteractiveBar <- ggproto( "GeomInteractiveBar", GeomBar, default_aes = add_default_interactive_aes(GeomBar), parameters = interactive_geom_parameters, draw_key = interactive_geom_draw_key, draw_panel = function(self, data, panel_params, coord, width = NULL, flipped_aes = FALSE, .ipar = IPAR_NAMES) { GeomInteractiveRect$draw_panel(data, panel_params, coord, .ipar = .ipar) } )
GapsSpatPattern <- function(gap_SPDF_layer, chm_layer) { oldpar <- graphics::par(no.readonly = TRUE) on.exit(graphics::par(oldpar)) P <- spatstat.geom::as.ppp(sp::coordinates(gap_SPDF_layer), raster::extent(chm_layer)[]) K <- spatstat.core::envelope(P, spatstat.core::Kest, nsim = 99, verbose = F) L <- spatstat.core::envelope(P, spatstat.core::Lest, nsim = 99, verbose = F) graphics::par(mfrow = c(1, 2), mar = c(6, 5, 4, 2)) graphics::plot(K) graphics::plot(L) CE <- spatstat.core::clarkevans.test(P) return(CE) }
test_that("unglue works", { sentences <- c("666 is a number", "foo is a word", "42 is the answer", "Area 51 is unmatched") patterns <- c("{number=\\d+} is {what}", "{word=\\D+} is {what}") expect_equivalent(sapply(unglue(sentences, patterns), names), list(c("number", "what"), c("word", "what"), c("number", "what"), character(0))) }) test_that("unglue_sub works", { expect_equal( unglue_sub( c("a and b", "foo or BAR"), c("{x} and {y}", "{x} or {z}"), list(x= "XXX", y = ~toupper(.), z = tolower)), c("XXX and B", "XXX or bar")) })
expected <- eval(parse(text="FALSE")); test(id=0, code={ argv <- eval(parse(text="list(structure(list(usr = c(-4.82721591443179, -1.44459960821772, -4.82721591443179, -1.44459960821772)), .Names = \"usr\"))")); do.call(`is.character`, argv); }, o=expected);
`HWUppercl` <- function(r, verbose = FALSE, cex = 1, curvecol = "black", curtyp = "solid") {
  ll <- r / (1 + r)
  ul <- 1 / (1 + r)
  if (verbose) cat("Roots HW curve", ll, ul, "\n")
  if ((ll <= 0.5) & (ul >= 0.5)) {
    p <- seq(ll, ul, by = 0.005)
    q <- 1 - p
    pt <- 2 * (p - 0.5) / sqrt(3)
    fpup <- 2 * p * q + 2 * p * q * r
    points(pt, fpup, type = "l", lty = curtyp, col = curvecol, cex = cex)
  }
}
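# Usage sketch: HWUppercl() draws onto an existing plot via points(), so open
# a plotting region first (r = 0.05 is a hypothetical tolerance):
plot(NULL, xlim = c(-1, 1), ylim = c(0, 1),
     xlab = "2(p - 0.5)/sqrt(3)", ylab = "heterozygote frequency")
HWUppercl(r = 0.05, verbose = TRUE)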
chebyshev.u.polynomials <- function(n, normalized = FALSE) {
  recurrences <- chebyshev.u.recurrences(n, normalized)
  if (normalized) {
    h.0 <- pi / 2
    p.0 <- polynomial(c(1 / sqrt(h.0)))
    polynomials <- orthonormal.polynomials(recurrences, p.0)
  }
  else
    polynomials <- orthogonal.polynomials(recurrences)
  return(polynomials)
}
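# Usage sketch, assuming the orthopolynom package (which provides the
# chebyshev.u.recurrences() and orthogonal.polynomials() helpers used above):
library(orthopolynom)
chebyshev.u.polynomials(2)
# Expected: U_0(x) = 1, U_1(x) = 2x, U_2(x) = 4x^2 - 1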
cmatrcd.mae <- function(trt.N, col.N, theta, des) {
  k <- 2
  trtin <- contrasts(as.factor(des), contrasts = FALSE)[as.factor(des), ]
  col.1 <- rep(1:col.N, each = 2)
  colin <- contrasts(as.factor(col.1), contrasts = FALSE)[as.factor(col.1), ]
  vec.1 <- rep(1, col.N * 2)
  R.trt <- t(trtin) %*% trtin
  N.tb <- t(trtin) %*% colin
  r.trt <- t(trtin) %*% vec.1
  forrow.1 <- contrasts(as.factor(c(des[1, ], seq(1:trt.N))), contrasts = FALSE)[as.factor(c(des[1, ], seq(1:trt.N))), ]
  forrow.2 <- contrasts(as.factor(c(des[2, ], seq(1:trt.N))), contrasts = FALSE)[as.factor(c(des[2, ], seq(1:trt.N))), ]
  mmmat.r <- t(rbind(t(as.matrix(colSums(forrow.1) - 1)),
                     t(as.matrix(colSums(forrow.2) - 1)))) %*%
    rbind(t(as.matrix(colSums(forrow.1) - 1)),
          t(as.matrix(colSums(forrow.2) - 1)))
  cmat <- R.trt - (1 / k) * (N.tb %*% t(N.tb)) - (1 / col.N) * mmmat.r +
    (1 / (col.N * k)) * (r.trt %*% t(r.trt)) +
    theta * ((1 / k) * (N.tb %*% t(N.tb)) - (1 / (col.N * k)) * (r.trt %*% t(r.trt)))
  cmat
}
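# Hypothetical usage sketch: a row-column design with trt.N = 3 treatments in
# col.N = 3 columns of size k = 2 (each column of `des` is one block column):
des <- rbind(c(1, 2, 3),
             c(2, 3, 1))
cmatrcd.mae(trt.N = 3, col.N = 3, theta = 0.5, des = des)  # 3 x 3 C-matrix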
library(shiny) library(shiny.fluent) script <- tags$script(HTML(" const React = jsmodule['react']; const Fluent = jsmodule['@fluentui/react']; const theme = Fluent.getTheme(); const styles = Fluent.mergeStyleSets({ photoList: { display: 'inline-block', border: '1px solid ' + theme.palette.neutralTertiary, margin: 0, padding: 10, overflow: 'hidden', userSelect: 'none', }, photoCell: { position: 'relative', display: 'inline-block', margin: 2, boxSizing: 'border-box', background: theme.palette.neutralLighter, lineHeight: 100, verticalAlign: 'middle', textAlign: 'center', selectors: { '&.is-selected': { background: theme.palette.themeLighter, border: '1px solid ' + theme.palette.themePrimary, }, }, }, checkbox: { margin: '10px 0', }, }); const useForceUpdate = () => { const [, setIt] = React.useState(false); return () => setIt(it => !it); }; jsmodule.exampleApp = {} jsmodule.exampleApp.MarqueeSelectionExample = function(params) { const forceUpdate = useForceUpdate(); const name = params['name']; const photos = params['photos']; if(window.selection === undefined) { window.selection = new Fluent.Selection({ items: photos, onSelectionChanged: function() { Shiny.setInputValue(name, window.selection.getSelectedIndices()); forceUpdate(); } }); } const items = photos.map((photo, index) => { return React.createElement('div', { key: index, 'data-is-focusable': true, className: Fluent.css(styles.photoCell, window.selection.isIndexSelected(index) && 'is-selected'), 'data-selection-index': index, style: { width: photo.width, height: photo.height } }, index) }); return React.createElement( Fluent.MarqueeSelection, { selection: window.selection, isEnabled: true }, React.createElement('ul', { className: styles.photoList }, items) ); }; ")) if (interactive()) { MarqueeSelectionExample <- function(...) shiny.react::reactElement( module = "exampleApp", name = "MarqueeSelectionExample", props = shiny.react::asProps(...), ) shinyApp( ui = tagList( script, div( textOutput("marqueeResult"), Label("Drag a rectangle around the items below to select them"), reactOutput("marqueeSelection") ) ), server = function(input, output) { photos <- lapply(1:50, function(index) { randomWidth <- 50 + sample.int(150, 1) list( key = index, url = paste0('http://placehold.it/', randomWidth, 'x100'), width = randomWidth, height = 100 ) }) output$marqueeResult <- renderText({ paste("You have selected: ", paste(input$selectedIndices, collapse = ", ")) }) output$marqueeSelection <- renderReact({ MarqueeSelectionExample( name = "selectedIndices", photos = photos ) }) } ) }
risk.decision <- function(x, alpha = c(0.05), beta = 1, p = 2) {
  x <- as.matrix(x)
  D <- matrix(rep(0, 11 * length(alpha)), ncol = length(alpha))
  M <- array(rep(0, 11 * length(alpha) * dim(x)[2]),
             dim = c(11, length(alpha), dim(x)[2]))
  N <- rep(0, dim(x)[2])
  for (m in 1:dim(x)[2]) {
    M[, , m] <- mean(x[, m]) / risk(x[, m], alpha, beta, p)
    N[m] <- mean(x[, m])
  }
  for (j in 1:11) {
    for (k in 1:length(alpha)) {
      w <- which(M[j, k, ] == max(M[j, k, ]))
      # Break ties by the largest mean; w[...] keeps the original column
      # index rather than the position within w (the original returned the
      # position within w).
      D[j, k] <- if (length(w) == 1) w else w[which.max(N[w])]
    }
  }
  colnames(D) <- paste(round(100 * alpha, 2), "%", sep = "")
  rownames(D) <- c("StD", "VaR", "EL", "ELD", "ES", "SDR", "EVaR", "DEVaR",
                   "ENT", "DENT", "ML")
  return(D)
}
is.vector = function(x) { is.null(dim(x)) || (length(dim(x)) == 2) && (dim(x)[2] == 1) } is.real.number = function(x) { is.numeric(x) && (length(x) == 1) && is.finite(x) } is.real.vector = function(x) { is.numeric(x) && all(is.finite(x)) } is.positive = function(x) { is.numeric(x) && (length(x) == 1) && is.finite(x) && (x > 0) } is.non.negative = function(x) { is.numeric(x) && (length(x) == 1) && is.finite(x) && (x >= 0) } is.positive.integer = function(x) { is.positive(x) && ((x %/% 1) == x) } is.non.negative.integer = function(x) { is.non.negative(x) && ((x %/% 1) == x) } is.positive.vector = function(x) { is.numeric(x) && all(is.finite(x)) && all(x > 0) } is.nonnegative.vector = function(x) { is.numeric(x) && all(is.finite(x)) && all(x >= 0) } is.probability = function(x) { is.numeric(x) && (length(x) == 1) && is.finite(x) && (x >= 0) && (x <= 1) } is.probability.vector = function(x, zero = FALSE) { is.numeric(x) && all(is.finite(x)) && all(x >= 0) && all(x <= 1) && (zero || any(x > 0)) } is.string = function(x) { is.character(x) && (length(x) == 1) && !any(is.na(x)) && any(x != "") } is.string.vector = function(x) { is.character(x) && !any(is.na(x)) && any(x != "") } is.ndmatrix = function(x) { is(x, c("table", "matrix", "array")) } check.data.frame.finite = function(x) { .Call("data_frame_finite", data = x) } check.logical = function(bool) { if (!is.logical(bool) || is.na(bool) || (length(bool) != 1)) stop(sprintf("%s must be a logical value (TRUE/FALSE).", deparse(substitute(bool)))) } check.weights = function(weights, len) { if (missing(weights) || is.null(weights)) { weights = rep(1, len) } else { if (!is.nonnegative.vector(weights)) stop("missing or negative weights are not allowed.") if (length(weights) != len) stop("wrong number of weights, ", length(weights), " weights while ", len, " are needed.") weights = prop.table(weights) } return(weights) }
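# Quick checks of the predicate helpers above:
is.positive.integer(3)              # TRUE
is.positive.integer(3.5)            # FALSE
is.probability.vector(c(0.2, 0.8))  # TRUE
check.weights(c(1, 3), len = 2)     # normalised to c(0.25, 0.75)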
gsafgwc <- function(data, pop=NA, distmat=NA, ncluster=2, m=2, distance='euclidean', order=2, alpha=0.7, a=1, b=1, error=1e-5, max.iter=100,randomN=0,vi.dist="uniform",npar=10,par.no=2,par.dist='euclidean', par.order=2, gsa.same=10, G=1, vmax=0.7, new=F){ randomnn <- randomN ptm<-proc.time() n <- nrow(data) d <- ncol(data) iter=0 beta <- 1-alpha same=0 data <- as.matrix(data) if (alpha ==1) { pop <- rep(1,n) distmat <- matrix(1,n,n) } datax <- data pop <- matrix(pop,ncol=1) mi.mj <- pop%*%t(pop) par <- init.swarm(data, mi.mj, distmat, distance, order, vi.dist, ncluster, m, alpha, a, b, randomN, npar) par.swarm <- par$centroid par.other <- par$membership par.fit <- par$I par.finalpos <- par$centroid[[which.min(par.fit)]] par.finalpos.other <- par$membership[[which.min(par.fit)]] par.fit.finalbest <- par$I[[which.min(par.fit)]] v <- lapply(1:npar, function(x) matrix(0, ncluster, d)) pbest <- par$centroid pfit <- par$I conv <- c(par.fit[which.min(par.fit)]) repeat{ minmax <- c(which.min(par.fit)[1],which.max(par.fit)[1]) best <- minmax[1] worst <- minmax[2] G <- G*runif(1,0.95,1) v <- force_v(par,par.no,G,v,vmax,par.dist,par.order,randomN) par.swarm <- lapply(1:npar, function (x) v[[x]] + par.swarm[[x]]) if(new==TRUE){ par.swarm <- lapply(1:npar,function(x) new.move(par.swarm[[x]],pbest[[x]],par.finalpos,randomN+x)) } par.other <- lapply(1:npar, function(x) uij(data,par.swarm[[x]],m,distance,order)) par.other <- par$membership <- lapply(1:npar, function(x) renew_uij(data,par.other[[x]]$u,mi.mj,distmat,alpha,beta,a,b)) par.swarm <- par$centroid <- lapply(1:npar, function(x) vi(data,par.other[[x]],m)) par.fit <- par$I <- sapply(1:npar, function(x) jfgwcv(data,par.swarm[[x]],m,distance,order)) if(new==TRUE){ pbest.ind <- which(par.fit<pfit) if(length(pbest.ind)>0){ for(i in pbest.ind){ pbest[[i]] <- par.swarm[[i]] pfit[i] <- par.fit[i] } } } best <- which(par.fit==min(par.fit))[1] par.curbest <- par.swarm[[best]] par.curbest.other <- par.other[[best]] par.fit.curbest <- par.fit[best] conv <- c(conv,par.fit.finalbest) iter <- iter+1 if (abs(conv[iter+1]-conv[iter])<error) same <- same+1 else same <- 0 if (par.fit.curbest<=par.fit.finalbest) { par.finalpos <- par.curbest par.finalpos.other <- par.curbest.other par.fit.finalbest <- par.fit.curbest } randomN <- randomN+npar if (iter==max.iter || same==gsa.same) break } finaldata=determine_cluster(datax,par.finalpos.other) cluster=finaldata[,ncol(finaldata)] print(c(order, ncluster,m, randomN)) gsa <- list("converg"=conv,"f_obj"=jfgwcv(data,par.finalpos,m,distance,order),"membership"=par.finalpos.other,"centroid"=par.finalpos, "validation"=index_fgwc(data,cluster,par.finalpos.other,par.finalpos,m,exp(1)), "cluster"=cluster, "finaldata"=finaldata, "call"=match.call(),"iteration"=iter,"same"=same,"time"=proc.time()-ptm) class(gsa) <- 'fgwc' return(gsa) } force_v <- function(par,no,G,v,vmax,par.dist,par.order,randomN){ dd <- dim(par$centroid[[1]]) intel.par <- intel.ffly(par,no) mass <- (par$I-max(par$I))/(min(par$I)-max(par$I)) Mass <- mass/sum(mass) Mass.intel <- sort(Mass,decreasing=T)[1:no] v1 <- v for(i in 1:length(par$centroid)){ Fij <- lapply(1:no,c) for(j in 1:no){ r <- diag(cdist(par$centroid[[i]],intel.par$centroid[[j]],par.dist,par.order)) set.seed(randomN <- randomN+1) eps <- runif(length(r),0,1e-6) set.seed(randomN <- randomN+1) rand <- matrix(runif(dd[1]*dd[2]),ncol=dd[2]) Fij[[j]] <- rand*G*Mass[i]*Mass.intel[j]*(intel.par$centroid[[j]]-par$centroid[[i]])/(r+eps) } Fi <- Reduce("+",Fij) a <- Fi/Mass[i] set.seed(randomN <- randomN+1) 
# Draw a fresh rand after the set.seed() above (the original reused a stale
# value from the inner loop) and return v1, the updated velocities (the
# original returned the unmodified v).
rand <- matrix(runif(dd[1]*dd[2]), ncol = dd[2])
v1[[i]] <- rand * v1[[i]] + a }
return(v1) }

new.move <- function(par, pbest, gbest, randomN) {
  dd <- dim(par)
  mu <- (par + pbest + gbest) / 3
  sigma <- sqrt(((par - mu)^2 + (pbest - mu)^2 + (gbest - mu)^2) / 3)
  set.seed(randomN + 100)
  c1 <- matrix(runif(dd[1] * dd[2], 0, 1), ncol = dd[2])
  set.seed(randomN + 101)
  c2 <- matrix(runif(dd[1] * dd[2], 0, 1), ncol = dd[2])
  # Box-Muller transform: z ~ N(0, 1)
  z <- sqrt(-2 * log(c1)) * cos(2 * pi * c2)
  return(mu + sigma * z)
}
context("test cluster validate") test_clusterE <- qm_define(118600, 119101, 800000) test_clusterE2 <- qm_define("118600", "119101", "800000") test_clusterV <- qm_define(118600, 119101, 119300) test_sf <- stLouis test_sf <- dplyr::mutate(test_sf, TRACTCE = as.numeric(TRACTCE)) test_tbl <- dplyr::as_tibble(data.frame( x = c(1,2,3), y = c("a", "b", "a") )) expect_error(qm_validate(key = "TRACTCE", value = test_clusterV), "A reference, consisting of a simple features object, must be specified.") expect_error(qm_validate("TRACTCE", test_clusterV), "The reference object must be a simple features object.") expect_error(qm_validate(ref = test_tbl, key = "TRACTCE", value = test_clusterV), "The reference object must be a simple features object.") expect_error(qm_validate(ref = test_sf, value = test_clusterV), "A key identification variable must be specified.") expect_error(qm_validate(ref = test_sf, key = "test", value = test_clusterV), "The specified key test cannot be found in the reference data.") expect_error(qm_validate(ref = test_sf, key = test, value = test_clusterV), "The specified key test cannot be found in the reference data.") expect_error(qm_validate(ref = test_sf, key = "TRACTCE"), "A vector containing feature ids must be specified.") expect_error(qm_validate(test_sf, "TRACTCE"), "A vector containing feature ids must be specified.") expect_error(qm_validate(ref = test_sf, key = "TRACTCE", value = test_clusterE2), "Mismatch in class between TRACTCE (numeric) and test_clusterE2 (character). These must be the same class to create cluster object.", fixed = TRUE) resultE2 <- qm_validate(ref = test_sf, key = "TRACTCE", value = test_clusterE) resultV1 <- qm_validate(ref = test_sf, key = "TRACTCE", value = test_clusterV) resultV2 <- qm_validate(ref = test_sf, key = TRACTCE, value = test_clusterV) test_that("returns FALSE - value not in key", { expect_equal(resultE2, FALSE) }) test_that("returns TRUE - value is in key", { expect_equal(resultV1, TRUE) }) test_that("returns TRUE - value is in key with unquoted input", { expect_equal(resultV2, TRUE) })
summary.lyapunov <- function(object, ...) { if (object$nprint == 0) { cat("Call:\n") cat(object$estimator, "\n") cat("\nCoefficients:\n") if (object$procedure == "QR decomposition by full sample method" | object$procedure == "Norma-2 by full sample method") { print(object$exponent) } else { print(object$exponent.median) } cat("---\n") cat("Procedure:", object$procedure, "\n") cat("Embedding dimension: ", object$emb.m, ", ", "Time-delay: ", object$emb.lag, ", ", "No. hidden units: ", object$emb.h, sep = "") cat("\nSample size: ", object$sample, ", ", "Block length: ", object$block.length, ", ", "No. blocks: ", object$no.block, sep = "") } else { cat("Call:\n") cat(unlist(object[[21]][1]), "\n") cat("\nCoefficients:\n") if (unlist(object[[21]][[2]][1]) == "QR decomposition by full sample method" | unlist(object[[21]][[2]][1]) == "Norma-2 by full sample method") { print(object[[21]][[3]]) cat("---\n") cat("Procedure:", unlist(object[[21]][2]), "\n") cat("Embedding dimension: ", object$emb.m, ", ", "Time-delay: ", object$emb.lag, ", ", "No. hidden units: ", object$emb.h, sep = "") cat("\nSample size: ", unlist(object[[21]][4]), ", ", "Block length: ", unlist(object[[21]][5]), ", ", "No. blocks: ", unlist(object[[21]][6]), sep = "") cat("... only the first method is shown (see lyapunov object)\n") } else { print(object[[21]][[4]]) cat("---\n") cat("Procedure:", unlist(object[[21]][2]), "\n") cat("Embedding dimension: ", object$emb.m, ", ", "Time-delay: ", object$emb.lag, ", ", "No. hidden units: ", object$emb.h, sep = "") cat("\nSample size: ", unlist(object[[21]][5]), ", ", "Block length: ", unlist(object[[21]][6]), ", ", "No. blocks: ", unlist(object[[21]][7]), sep = "") cat("... only the first method is shown (see lyapunov object)\n") } } }
`pairscor.fnc` <- function(data, hist = TRUE, smooth = TRUE, cex.points = 1, col.points = "darkgrey") { panel.hist <- function(x, ...) { usr <- graphics::par("usr"); on.exit(graphics::par(usr)) graphics::par(usr = c(usr[1:2], 0, 1.5) ) h <- hist(x, plot = FALSE) breaks <- h$breaks; nB <- length(breaks) y <- h$counts; y <- y/max(y) graphics::rect(breaks[-nB], 0, breaks[-1], y, ...) } pairscor.lower <- function(x, y, ...) { usr <- graphics::par("usr"); on.exit(graphics::par(usr)) graphics::par(usr = c(0, 1, 0, 1)) m = stats::cor.test(x, y) r = round(m$estimate, 2) p = round(m$p.value, 4) rtxt = paste("r =", r) ptxt = paste("p =", p) options(warn=-1) m2 = stats::cor.test(x, y, method="spearman") r2 = round(m2$estimate, 2) p2 = round(m2$p.value, 4) rtxt2 = paste("rs =", r2) ptxt2 = paste("p =", p2) options(warn=0) graphics::text(0.5, 0.8, rtxt) graphics::text(0.5, 0.6, ptxt) graphics::lines(c(0.2,0.8),c(0.5,0.5)) graphics::text(0.5, 0.4, rtxt2) graphics::text(0.5, 0.2, ptxt2) } panel.smooth2 = function (x, y, col = graphics::par("col"), bg = NA, pch = graphics::par("pch"), cex = 1, span = 2/3, iter = 3, ...) { graphics::points(x, y, pch = pch, col = col, bg = bg, cex = cex) ok <- is.finite(x) & is.finite(y) if (any(ok)) graphics::lines(stats::lowess(x[ok], y[ok], f = span, iter = iter), col = "black", ...) } if (hist == TRUE) { if (smooth == TRUE) { graphics::pairs(data, diag.panel = panel.hist, lower.panel = pairscor.lower, upper.panel = panel.smooth2, col = col.points, cex = cex.points) } else { graphics::pairs(data, diag.panel = panel.hist, lower.panel = pairscor.lower) } } else { if (smooth == TRUE) { graphics::pairs(data, lower.panel = pairscor.lower, upper.panel = panel.smooth2, col = col.points, cex = cex.points) } else { graphics::pairs(data, lower.panel = pairscor.lower) } } }
# Fetches PubMed records through the NCBI E-utilities (epost + efetch).
# Requires the XML package (xmlTreeParse, getNodeSet, xmlValue) and
# RCurl (getURL), which the host package is expected to import.
input_for_find_intro_conc_html = function (y, all) {
  check0 = lapply(y@PMID, function(b) {
    # post the PMID to obtain a WebEnv/QueryKey pair
    url = paste("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/epost.fcgi?db=", "pubmed", "&id=", b, sep = "")
    epost = xmlTreeParse(getURL(url), useInternalNodes = TRUE)
    webenv = xmlValue(getNodeSet(epost, "//WebEnv")[[1]])
    key = xmlValue(getNodeSet(epost, "//QueryKey")[[1]])
    # fetch the record and extract abstract, authors and title
    url1 = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
    query = "db=pubmed&retmode=xml&rettype=abstracts"
    efetch = xmlTreeParse(getURL(paste(url1, query, "&WebEnv=", webenv, "&query_key=", key, sep = "")), useInternalNodes = TRUE)
    abs = unlist(lapply(getNodeSet(efetch, "//Abstract"), function(x) {xmlValue(x)}))
    if (length(abs) == 0) abs = "No Abstract Found"
    authorsln = lapply(getNodeSet(efetch, "//LastName"), function(x) {xmlValue(x)})
    checkAA = unlist(authorsln)
    authorsini = lapply(getNodeSet(efetch, "//Initials"), function(x) {xmlValue(x)})
    if (length(checkAA) == 0) {authorsln = list("No authors name"); authorsini = list("No authors name")}
    authors = unlist(lapply(1:length(authorsln), function(x){return(paste(authorsln[[x]], authorsini[[x]], sep = " "))}))
    arttit = unlist(lapply(getNodeSet(efetch, "//ArticleTitle"), function(x) {xmlValue(x)}))
    if (all == FALSE) return(c(abs, b))
    else if (all == TRUE) return(c(arttit, paste(authors, collapse = " "), abs, b))
  })
}
require(OpenMx) data(myRegDataRaw) MultipleDataRaw<-myRegDataRaw[,c("x","y","z")] dataRaw <- mxData( observed=MultipleDataRaw, type="raw" ) matrA <- mxMatrix( type="Full", nrow=3, ncol=3, free= c(F,F,F, T,F,T, F,F,F), values=c(0,0,0, 1,0,1, 0,0,0), labels=c(NA,NA,NA, "betax",NA,"betaz", NA,NA,NA), byrow=TRUE, name="A" ) matrS <- mxMatrix( type="Symm", nrow=3, ncol=3, free=c(T,F,T, F,T,F, T,F,T), values=c(1,0,.5, 0,1,0, .5,0,1), labels=c("varx",NA,"covxz", NA,"residual",NA, "covxz",NA,"varz"), byrow=TRUE, name="S" ) matrF <- mxMatrix( type="Iden", nrow=3, ncol=3, name="F" ) matrM <- mxMatrix( type="Full", nrow=1, ncol=3, free=c(T,T,T), values=c(0,0,0), labels=c("meanx","beta0","meanz"), name="M" ) exp <- mxExpectationRAM("A","S","F","M", dimnames=c("x","y","z") ) funML <- mxFitFunctionML() multiRegModel <- mxModel("Multiple Regression Matrix Specification", dataRaw, matrA, matrS, matrF, matrM, exp, funML) multiRegFit<-mxRun(multiRegModel) summary(multiRegFit) multiRegFit$output omxCheckCloseEnough(coef(multiRegFit)[["beta0"]], 1.6332, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["betax"]], 0.4246, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["betaz"]], 0.2260, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["residual"]], 0.6267, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["varx"]], 1.1053, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["varz"]], 0.8275, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["covxz"]], 0.2862, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["meanx"]], 0.0542, 0.001) omxCheckCloseEnough(coef(multiRegFit)[["meanz"]], 4.0611, 0.001)
cv.cpalspath <- function(outlist, lambda, x, y, foldid, pred.loss, w, tau) { typenames <- "Coupled asymmetric squared error loss" if (!match(pred.loss, c("loss"), FALSE)) { warning("Only 'loss' available for coupled ALS regression; 'loss' used") pred.loss <- "loss" } y <- as.double(y) nfolds <- max(foldid) predmat1 <- matrix(NA, length(y), length(lambda)) predmat2 <- matrix(NA, length(y), length(lambda)) nlams <- double(nfolds) for (i in seq(nfolds)) { whichfold <- (foldid == i) fitobj <- outlist[[i]] nlami <- length(fitobj$lambda) preds <- predict(fitobj, x[whichfold, , drop = FALSE], type = "response") predmat1[whichfold, seq(nlami)] <- preds predmat2[whichfold, seq(nlami)] <- predict(fitobj, x[whichfold, , drop = FALSE], type = "scale") + preds nlams[i] <- nlami } cvraw <- w * ercls(y-predmat1, 0.5) + ercls(y-predmat2, tau) N <- length(y) - apply(is.na(predmat2), 2, sum) cvm <- apply(cvraw, 2, mean, na.rm = TRUE) cvsd <- sqrt(apply(scale(cvraw, cvm, FALSE)^2, 2, mean, na.rm = TRUE)/(N - 1)) list(cvm = cvm, cvsd = cvsd, name = typenames) }
tth.control <- function(a = FALSE, c = FALSE, d = FALSE, e = 2, f = NULL, g = FALSE,
  i = FALSE, j = NULL, L = TRUE, n = NULL, p = NULL, r = TRUE, t = FALSE,
  u = FALSE, w = NULL, y = 2, xmakeindxcmd = NULL, v = FALSE)
{
  rval <- list(a = a, c = c, d = d, e = e, f = f, g = g, i = i, j = j, L = L,
    n = n, p = p, r = r, t = t, u = u, w = w, y = y,
    xmakeindxcmd = xmakeindxcmd, v = v)
  args_logical <- c("a", "c", "d", "g", "i", "r", "t", "u", "v", "V")
  args_numeric <- c("e", "f", "j", "n", "w", "y")
  args_character <- c("p", "xmakeindxcmd")
  if(is.character(rval[["L"]])) {
    args_character <- c(args_character, "L")
  } else {
    rval[["L"]] <- as.logical(rval[["L"]])
    args_logical <- c(args_logical, "L")
  }
  if(!is.null(rval[["v"]])) {
    if(is.numeric(rval[["v"]])) {
      if(rval[["v"]] > 1L) {
        rval[["V"]] <- TRUE
        rval[["v"]] <- NULL
      } else {
        rval[["v"]] <- as.logical(rval[["v"]])
        rval[["V"]] <- NULL
      }
    }
  }
  for(i in args_logical) {
    if(!is.null(rval[[i]])) {
      if(!is.logical(rval[[i]]) || length(rval[[i]]) != 1L) {
        warning(sprintf("argument %s needs to be a single logical, changed to default", i))
        rval[[i]] <- NULL
      }
    }
  }
  for(i in args_numeric) {
    if(!is.null(rval[[i]])) {
      if(!(is.numeric(rval[[i]]) || is.logical(rval[[i]])) || length(rval[[i]]) != 1L) {
        warning(sprintf("argument %s needs to be a single numeric, changed to default", i))
        rval[[i]] <- NULL
      }
    }
  }
  for(i in args_character) {
    if(!is.null(rval[[i]])) {
      if(!is.character(rval[[i]]) || length(rval[[i]]) != 1L) {
        warning(sprintf("argument %s needs to be a single character string, changed to default", i))
        rval[[i]] <- NULL
      }
    }
  }
  ## drop defaults and assemble the command line switches
  rval <- rval[!sapply(rval, is.null)]
  rval <- rval[!sapply(rval, identical, FALSE)]
  rval <- paste("-", names(rval), ifelse(sapply(rval, isTRUE), "", unlist(rval)),
    sep = "", collapse = " ")
  return(rval)
}

tth <- function(x, ..., fixup = TRUE, Sweave = TRUE, mode = NULL)
{
  if(Sweave) {
    ## map Sweave environments to plain verbatim before conversion
    tab <- rbind(
      c("\\\\begin\\{Sinput}",  "\\\\begin{verbatim}"),
      c("\\\\end\\{Sinput}",    "\\\\end{verbatim}"),
      c("\\\\begin\\{Soutput}", "\\\\begin{verbatim}"),
      c("\\\\end\\{Soutput}",   "\\\\end{verbatim}"),
      c("\\\\begin\\{Schunk}",  ""),
      c("\\\\end\\{Schunk}",    "")
    )
    for(i in 1:nrow(tab)) x <- gsub(tab[i,1L], tab[i,2L], x)
  }
  TTH <- file.path(find.package("tth", quiet = TRUE), "libs", .Platform$r_arch,
    if(.Platform$OS.type == "windows") "tth.exe" else "tth")
  y <- system(paste(shQuote(TTH), tth.control(...)), input = x, intern = TRUE,
    ignore.stderr = TRUE)
  if(fixup) {
    ## drop blank lines; the grep() result is guarded because y[-integer(0)]
    ## would otherwise drop every element
    blank <- grep("^ *$", y)
    if(length(blank)) y <- y[-blank]
    ## assumption: the entity strings in this table were truncated at '#' in
    ## the source; they are reconstructed here as numeric character
    ## references by analogy with the MathML table in ttm() below
    tab <- rbind(
      c("\\\\not +=",       "&#8800;"),
      c("\\\\not +&lt;",    "&#8814;"),
      c("\\\\not +&gt;",    "&#8815;"),
      c("\\\\not +&#8804;", "&#8816;"),
      c("\\\\nleq;",        "&#8816;"),
      c("\\\\not +&#8805;", "&#8817;"),
      c("\\\\ngeq",         "&#8817;")
    )
    for(i in 1:nrow(tab)) y <- gsub(tab[i,1L], tab[i,2L], y)
  }
  if(!is.null(mode)) y <- .fix_character_entity_references(y, mode = mode)
  return(y)
}

ttm <- function(x, ..., fixup = TRUE, Sweave = TRUE, mode = NULL)
{
  if(Sweave) {
    tab <- rbind(
      c("\\\\begin\\{Sinput}",  "\\\\begin{verbatim}"),
      c("\\\\end\\{Sinput}",    "\\\\end{verbatim}"),
      c("\\\\begin\\{Soutput}", "\\\\begin{verbatim}"),
      c("\\\\end\\{Soutput}",   "\\\\end{verbatim}"),
      c("\\\\begin\\{Schunk}",  ""),
      c("\\\\end\\{Schunk}",    "")
    )
    for(i in 1:nrow(tab)) x <- gsub(tab[i,1L], tab[i,2L], x)
  }
  TTM <- file.path(find.package("tth", quiet = TRUE), "libs", .Platform$r_arch,
    if(.Platform$OS.type == "windows") "ttm.exe" else "ttm")
  y <- system(paste(shQuote(TTM), tth.control(...)), input = x, intern = TRUE,
    ignore.stderr = TRUE)
  if(fixup) {
    blank <- grep("^ *$", y)
    if(length(blank)) y <- y[-blank]
    tab <- rbind(
      c("\\\\not *<mo>=</mo>",    "<mo>&ne;</mo>"),
      c("\\\\not *<mo>&lt;</mo>", "<mo>&nlt;</mo>"),
      c("\\\\not *<mo>&le;</mo>", "<mo>&nleq;</mo>"),
      c("\\\\nleq",               "<mo>&nleq;</mo>"),
      c("\\\\not *<mo>&gt;</mo>", "<mo>&ngt;</mo>"),
      c("\\\\not *<mo>&ge;</mo>", "<mo>&ngeq;</mo>"),
      c("\\\\ngeq",               "<mo>&ngeq;</mo>")
    )
    for(i in 1:nrow(tab)) y <- gsub(tab[i,1L], tab[i,2L], y)
  }
  if(!is.null(mode)) y <- .fix_character_entity_references(y, mode = mode)
  return(y)
}
is.infinite_df <- function(x) { do.call(cbind, lapply(x, is.infinite)) }
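## A quick usage sketch (not from the original source): applied to a data
## frame, is.infinite_df() returns a logical matrix flagging infinite
## entries column by column (NA is not infinite, so it maps to FALSE).
df <- data.frame(a = c(1, Inf, 3), b = c(-Inf, 0, NA))
is.infinite_df(df)  # column a: FALSE TRUE FALSE; column b: TRUE FALSE FALSE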
matrixToMarray <- function(tt, valuenames) { size <- lengths(valuenames) nbvar <- length(size) if ( ncol(tt) != prod(size) ) { stop("Product of size of variables not equal to number of columns of tt matrix.") } mtt0 <- array(as.vector(t(tt)), c(size[nbvar:1],nrow(tt)), dimnames = c((valuenames)[nbvar:1], list(ev=1:nrow(tt)))) mtt <- aperm(mtt0, perm = c(nbvar:1, nbvar+1)) return(mtt) }
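## Minimal example (hypothetical data, not from the package docs): a 2-row
## tt matrix over two variables x (2 values) and y (3 values) becomes a
## 2 x 3 x 2 array whose last dimension ("ev") indexes the rows of tt.
valuenames <- list(x = c("x1", "x2"), y = c("y1", "y2", "y3"))
tt <- matrix(1:12, nrow = 2, byrow = TRUE)  # 2 rows, prod(2, 3) = 6 columns
dim(matrixToMarray(tt, valuenames))  # 2 3 2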
lrCostFunction <- function(X, y, lambda) { function(theta) { m <- length(y) J <- 0 h <- sigmoid(X %*% theta) theta1 <- c(0,c(theta[-1])) p <- lambda * (t(theta1) %*% theta1) / (2 * m) J <- -(t(y) %*% log(h) + t(1 - y) %*% log(1 - h)) / m + p J } } lrGradFunction <- function(X, y, lambda) { function(theta) { m <- length(y) grad <- matrix(0,length(theta)) h <- sigmoid(X %*% theta) theta1 <- c(0,c(theta[-1])) grad <- (t(X) %*% (h - y) + lambda * theta1) / m grad } }
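## Usage sketch: the two factories above return closures suitable for a
## generic optimiser. 'sigmoid' is defined elsewhere in the source; the
## standard logistic function is assumed here, and the data are simulated.
sigmoid <- function(z) 1 / (1 + exp(-z))
set.seed(1)
X <- cbind(1, matrix(rnorm(200), 100, 2))  # design matrix with intercept column
y <- rbinom(100, 1, 0.5)                   # binary labels
fit <- optim(par = rep(0, 3),
             fn = lrCostFunction(X, y, lambda = 1),
             gr = lrGradFunction(X, y, lambda = 1),
             method = "BFGS")
fit$par  # regularized logistic-regression coefficients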
Mee_asymptotic_score_CI_2x2 <- function(n, alpha=0.05, printresults=TRUE) { n11 <- n[1, 1] n21 <- n[2, 1] n1p <- n[1, 1] + n[1, 2] n2p <- n[2, 1] + n[2, 2] pi1hat <- n[1, 1] / n1p pi2hat <- n[2, 1] / n2p estimate <- pi1hat - pi2hat tol <- 0.0000001 delta0 <- -0.99999 delta1 <- 0.99999 if (estimate == -1) { L <- -1 } else { L <- uniroot( calculate_limit_lower.Mee, c(delta0, estimate), n11=n11, n21=n21, n1p=n1p, n2p=n2p, pi1hat=pi1hat, pi2hat=pi2hat, alpha=alpha, tol=tol )$root } if (estimate == 1) { U <- 1 } else { U <- uniroot( calculate_limit_upper.Mee, c(estimate, delta1), n11=n11, n21=n21, n1p=n1p, n2p=n2p, pi1hat=pi1hat, pi2hat=pi2hat, alpha=alpha, tol=tol )$root } if (printresults) { print( sprintf( 'Mee asymptotic score CI: estimate = %6.4f (%g%% CI %6.4f to %6.4f)', estimate, 100 * (1 - alpha), L, U ), quote=FALSE ) } res <- data.frame(lower=L, upper=U, estimate=estimate) invisible(res) }
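## Usage sketch, assuming the helper functions calculate_limit_lower.Mee()
## and calculate_limit_upper.Mee() from the same package are on the search
## path: a 2x2 table with 7/34 events in group 1 and 1/34 in group 2.
n <- rbind(c(7, 27),
           c(1, 33))
Mee_asymptotic_score_CI_2x2(n, alpha = 0.05)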
# assumption: the comment prefix string was cut at '#' in the source;
# knitr's conventional vignette prefix "#>" is used here
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
library(bayesrules)
plot_beta(alpha = 3, beta = 13, mean = TRUE, mode = TRUE)
summarize_beta(alpha = 3, beta = 13)
plot_binomial_likelihood(y = 3, n = 15, mle = TRUE)
plot_beta_binomial(alpha = 3, beta = 13, y = 5, n = 10,
                   prior = TRUE, likelihood = TRUE, posterior = TRUE)
summarize_beta_binomial(alpha = 3, beta = 13, y = 5, n = 10)
plot_gamma_poisson(shape = 3, rate = 4, sum_y = 3, n = 9,
                   prior = TRUE, likelihood = TRUE, posterior = TRUE)
summarize_normal_normal(mean = 3.8, sd = 1.12, sigma = 5.8, y_bar = 3.35, n = 8)
is.tframed.zoo <- function(x) {TRUE} tframe.zoo <- function (x) { tf <- zoo::index(x) class(tf) <- c( "zootframe", class(tf), "tframe") tf } tfUnSet.zoo <- function(x) {zoo::coredata(x)} tfSet.zootframe <- function(value, x){ if(Tobs(value) != Tobs(x)) stop("number of Tobs of observations must correspond to number of Tobs indicated by tframe.") class(value) <- class(value)[class(value) != "zootframe"] r <- zoo::zoo(x, order.by = value) seriesNames(r) <- seriesNames(x) r } tfSet.Date <- function(value, x){ if(Tobs(value) != Tobs(x)) stop("number of Tobs of observations must correspond to number of Tobs indicated by tframe.") r <- zoo::zoo(x, order.by = value) seriesNames(r) <- seriesNames(x) r } tfSet.POSIXct <- function(value, x){ if(Tobs(value) != Tobs(x)) stop("number of Tobs of observations must correspond to number of Tobs indicated by tframe.") r <- zoo::zoo(x, order.by = value) seriesNames(r) <- seriesNames(x) r } "seriesNames<-.zoo" <- function (x, value) {if (is.matrix(x)) dimnames(x) <- list(NULL, value) else attr(x, "seriesNames") <- value x } Tobs.zoo <- function(x) NROW(x) tfstart.zootframe <- function(x) x[1] tfend.zootframe <- function(x) x[length(x)] Tobs.zootframe <- function(x) length(x) tfL.zoo <- function (x, p = 1) lag(x, k = -p) tfwindow.zoo <- function(x, tf=NULL, start=tfstart(tf), end=tfend(tf), warn=TRUE) { if (!warn) {opts <- options(warn = -1) on.exit(options(opts)) } y <- window(x, start=start, end=end) seriesNames(y) <- seriesNames(x) attr(y, "TSrefperiod") <- attr(x, "TSrefperiod") y } tfExpand.zoo <- function(x, add.start = 0, add.end = 0){ idx <- time(x) r <- as.matrix(zoo::coredata(x)) if (add.start > 0 ) { idx <- c(start(x) - seq(add.start), idx) r <- rbind(matrix(NA, add.start, ncol(r)), r) } if (add.end > 0 ) { idx <- c(idx, end(x) + seq(add.end)) r <- rbind(r, matrix(NA,add.end, ncol(r))) } zoo::zoo(r, order.by = idx) } tbind.zoo <- function(x, ..., pad.start=TRUE, pad.end=TRUE, warn=TRUE) {nm <- seriesNames(x) ref <- attr(x, "TSrefperiod") for (z in list(...)) { if (!is.null(z)) { nm <- c(nm, seriesNames(z)) ref <- c(ref, attr(z, "TSrefperiod")) x <- cbind(x, z) } } if (!pad.start | !pad.end) x <- trimNA(x, startNAs= !pad.start, endNAs= !pad.end) seriesNames(x) <- nm attr(x, "TSrefperiod") <- ref x }
test_that("integer_ generates integers", { for_all( a = integer_(), property = \(a) is.integer(a) |> expect_true() ) }) test_that("integer_ doesn't generate NAs by default", { for_all( a = integer_(), property = \(a) a |> is.na() |> any() |> expect_false() ) }) test_that("integer_ generates vectors of length 1 by default", { for_all( a = integer_(), property = \(a) length(a) |> expect_equal(1L) ) }) test_that("integer_ generates vectors of specific length", { for_all( len = integer_bounded(1L, 10L), property = \(len) { for_all( a = integer_(len = len), property = \(a) length(a) |> expect_equal(len), tests = 10L ) }, tests = 10L ) }) test_that("integer_ generates vectors within a range of lengths", { for_all( min = integer_bounded(1L, 5L), max = integer_bounded(5L, 10L), property = \(min, max) { for_all( a = integer_(len = c(min, max)), property = \(a) expect_true(length(a) >= min && length(a) <= max), tests = 10L ) }, tests = 10L ) }) test_that("integer_ can generate vectors with NAs", { for_all( a = integer_(len = 10L, frac_na = 1), property = \(a) is_na_integer(a) |> all() |> expect_true() ) }) test_that("integer_ generates integers small enough to be squared", { for_all( a = integer_(), property = \(a) is.integer(a * a) |> expect_true() ) }) test_that("integer_bounded generates bounded integers", { left <- -100L right <- 100L for_all( a = integer_bounded(left = left, right = right), property = \(a) expect_true(a >= left && a <= right) ) }) test_that("integer_left_bounded generates left bounded integers", { left <- 100L for_all( a = integer_left_bounded(left = left), property = \(a) expect_true(a >= left) ) }) test_that("integer_right_bounded generates right bounded integers", { right <- 100L for_all( a = integer_right_bounded(right = right), property = \(a) expect_true(a <= right) ) }) test_that("integer_positive generates positive integers", { for_all( a = integer_positive(), property = \(a) expect_true(a > 0L) ) }) test_that("integer_negative generates negative integers", { for_all( a = integer_negative(), property = \(a) expect_true(a < 0L) ) }) test_that("max_positive_integer can't be squared when big_int = TRUE", { max_int <- max_positive_integer(big_int = TRUE) expect_warning( max_int * max_int, regexp = "NAs produced by integer overflow" ) }) test_that("max_positive_integer can be squared when big_int = FALSE", { max_int <- max_positive_integer(big_int = FALSE) is.integer(max_int * max_int) |> expect_true() })
enginesH <- sort(c(sub("\\.cc$", "", list.files("src/trng", "(yarn|mrg|lcg|mt).*\\.cc$")), sub("\\.hpp$", "", list.files("inst/include/trng", "lagfib.*\\.hpp$")))) engines <- sub("^(lagfib.*)$", "\\1_19937_64", enginesH) cat(engines) cat(deparse(engines)) cat(paste(engines, collapse = ", ")) cat(paste0("Rcpp_", engines, "-class")) cat(paste0("\\code{", engines, "}", collapse = ", ")) cat(paste0(" cat(paste0(" cat(paste0("typedef Engine<", engines, "> ", engines, "_Engine;", collapse = "\n")) cat(paste0("template class Engine<", engines, ">;", collapse = "\n")) cat(paste0("PARALLEL_ENGINE_MODULE(", engines, ");", collapse = "\n")) cat(paste0("PARALLEL_ENGINE_CASE(", engines, ")", collapse = "\n")) cat(paste0(" cat(paste0("engineIDMap[\"", engines, "\"] = ", engines, "_ID;", collapse = "\n"))
adk_lakes = function(){ return(adk_data('meta')) }
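## Usage sketch: adk_lakes() is a thin wrapper returning the lake metadata
## table shipped with the package (via adk_data('meta')).
lakes <- adk_lakes()
head(lakes)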
`vcov.mlcm` <- function(object, ...) if (object$method == "glm") vcov(object$obj, ...) else solve(object$hess)
howbig <- function(nrow=1, ncol=1, representation="dense", unit="best", prefix="IEC", names="short", sparsity=0.05, type="double", intsize=4) { type <- match.arg(tolower(type), c("double", "float", "integer")) x <- internal.mu(size=1, unit="b", unit.prefix=prefix, unit.names=names) bytes <- check_type(type=type, intsize=intsize) x@size <- nrow*ncol*bytes representation <- match.arg(tolower(representation), c("dense", "sparse")) if (representation == "sparse") { if (sparsity < 0 || sparsity > 1) stop("argument 'sparsity' should be between 0 and 1") else x <- sparsity * x } swap.unit(x, unit) }
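## Usage sketch (internal.mu(), check_type() and swap.unit() are package
## internals assumed to be loaded): estimated memory for a dense
## 10000 x 5000 double-precision matrix, then a sparse one at 5% density.
howbig(nrow = 10000, ncol = 5000)
howbig(nrow = 10000, ncol = 5000, representation = "sparse", sparsity = 0.05)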
require(TCIU) require(DT) require(AnalyzeFMRI) fmri_generate = fmri_simulate_func(dim_data = c(64, 64, 40), mask = mask, ons = c(1, 21, 41, 61, 81, 101, 121, 141), dur = c(10, 10, 10, 10, 10, 10, 10, 10)) dim(fmri_generate$fmri_data) fmri_time_series(sample[[5]], voxel_location = NULL, is.4d = FALSE, ref = sample[[4]]) datatable(fmri_kimesurface(fmri_generate$fmri_data, c(44,30,33))[[1]]) fmri_kimesurface(fmri_generate$fmri_data, c(44,30,33))[[2]] fmri_image(fmri_generate$fmri_data, option="manually", voxel_location = c(40,22,33), time=4) smoothmod<-GaussSmoothArray(fmri_generate$fmri_data, sigma = diag(3,3)) fmri_ts_forecast(smoothmod, voxel_location=c(41,44,33)) p_simulate_t_test = fmri_stimulus_detect(fmridata= fmri_generate$fmri_data, mask = fmri_generate$mask, stimulus_idx = fmri_generate$on_time, method = "t-test" , ons = fmri_generate$ons, dur = fmri_generate$dur) dim(p_simulate_t_test) summary(p_simulate_t_test) fmri_3dvisual(p_simulate_t_test, fmri_generate$mask, p_threshold = 0.05, method="scale_p", multi_pranges=TRUE)$plot fmri_pval_comparison_2d(list(p_simulate_t_test, phase3_pval), list('pval_simulated', 'pval_posthoc'), list(list(35, 33, 22), list(40, 26, 33)), hemody_data = NULL, mask = mask, p_threshold = 0.05, legend_show = FALSE, method = 'scale_p', color_pal = "YlOrRd", multi_pranges=FALSE)
test_that("an empty environment behaves as expected", { output <- paste0( "simmer environment: SuperDuperSim | now: 0 | next: 0", ".*Monitor: in memory.*", ".*Resource: asdf | monitored: TRUE | server status: 0(1) | queue status: 0(Inf).*", ".*Source: dummy | monitored: 1 | n_generated: 0.*") env <- simmer("SuperDuperSim", verbose = TRUE) %>% add_resource("asdf") %>% add_generator("dummy", trajectory() %>% timeout(1), at(0)) expect_output(print(env), output) expect_is(env, "simmer") expect_equal(env %>% now(), 0) expect_equal(env %>% peek(), 0) env %>% stepn() %>% run() expect_equal(env %>% now(), 1) expect_equal(env %>% peek(), numeric(0)) }) t0 <- trajectory("") %>% seize("server", 1) %>% set_attribute("dummy", 1) %>% timeout(1) %>% release("server", 1) test_that("the simulator is reset (1)", { t1 <- trajectory() %>% seize("server", 1) %>% set_attribute("dummy", 1) %>% timeout(1) %>% release("server", 1) inf_sch <- schedule(c(0.5, 1), c(1, 1), Inf) env <- simmer(verbose = TRUE) %>% add_resource("server", inf_sch, queue_size = 1, preemptive = TRUE) %>% add_generator("entity0", t0, function() 0.5) %>% add_generator("entity1", t1, function() 0.5, mon = 2, preemptible = 10, priority = 10) %>% run(4) %>% reset() expect_equal(env %>% now(), 0) expect_equal(env %>% peek(), 0) expect_equal(nrow(get_mon_arrivals(env)), 0) expect_equal(nrow(get_mon_arrivals(env, TRUE)), 0) expect_equal(nrow(get_mon_resources(env)), 1) expect_equal(nrow(get_mon_attributes(env)), 0) }) test_that("the simulator is reset (2)", { t1 <- trajectory() %>% renege_in(3) %>% seize("res") %>% renege_abort() %>% timeout(5) %>% release("res") env <- simmer(verbose = TRUE) %>% add_resource("res") %>% add_generator("dummy", t1, at(0, 0)) %>% run(2) expect_silent(reset(env)) expect_equal(env %>% now(), 0) expect_equal(env %>% peek(), 0) expect_equal(nrow(get_mon_arrivals(env)), 0) expect_equal(nrow(get_mon_arrivals(env, TRUE)), 0) expect_equal(nrow(get_mon_resources(env)), 0) expect_equal(nrow(get_mon_attributes(env)), 0) }) test_that("the progress is reported", { progress <- NULL record <- function(x) progress <<- c(progress, x) env <- simmer() %>% add_generator("dummy", trajectory(), at(0)) %>% run(progress=record) expect_equal(progress, seq(0, 1, 0.1)) }) test_that("the simulator stops if there are no more events", { env <- simmer(verbose = TRUE) %>% add_resource("server", 1) %>% add_generator("entity", t0, at(0)) %>% run(10) expect_equal(env %>% now(), 1) }) test_that("a negative simulation time is converted to positive", { env <- simmer(verbose = TRUE) %>% add_resource("server", 1) %>% add_generator("entity", t0, at(10)) %>% run(-10) expect_equal(env %>% now(), 10) }) test_that("a stopped simulation can be resumed", { env <- simmer(verbose = TRUE) %>% add_resource("server", 1) %>% add_generator("entity", t0, function() 1) %>% run(10) expect_equal(env %>% now(), 10) env %>% run(20) expect_equal(env %>% now(), 20) env %>% run(30) expect_equal(env %>% now(), 30) }) test_that("there is verbose output", { output <- paste0( ".*(", ".*1.*arrival0.*Seize.*server", ".*1.*arrival0.*Release.*server", ").*") expect_output( env <- simmer(verbose = TRUE) %>% add_resource("server", 1) %>% add_generator("arrival", t0, at(1)) %>% run(), output ) }) test_that("we can force some errors (just to complete coverage)", { expect_error(simmer(0)) expect_error(simmer(verbose = TRUE) %>% add_resource(0)) env <- simmer(verbose = TRUE) %>% add_resource("dummy") %>% add_generator("dummy", trajectory(), function() 1, mon = 1000) env$sim_obj <- NULL 
env$mon$xptr <- NULL expect_error(env %>% reset()) expect_error(env %>% now()) expect_error(env %>% peek()) expect_error(env %>% stepn()) expect_error(env %>% get_mon_arrivals(FALSE)) expect_error(env %>% get_mon_arrivals(TRUE)) expect_error(env %>% get_mon_attributes()) expect_error(env %>% get_mon_resources()) sch <- schedule(c(1, 2), c(1, 2), Inf) sch$schedule$period <- "asdf" expect_error(simmer(verbose = TRUE) %>% add_resource("dummy", sch)) env <- simmer(verbose = TRUE) expect_equal(env %>% get_mon_resources() %>% nrow(), 0) })
NULL setClass( Class = "Bertrand", contains="Antitrust", representation=representation( shares = "numeric", mcDelta = "numeric", slopes = "matrixOrList", subset = "logical", diversion = "matrix" ), prototype=prototype( slopes = matrix(), mcDelta = numeric(), subset = logical(), diversion = matrix() ), validity=function(object){ if(is.list(object@labels)){ nprods <- length(object@labels[[1]])} else{nprods <- length(object@labels)} if(!is.list(object@labels) && (nprods != length(object@shares) || nprods != length(object@subset))){ stop("'labels', 'shares', and 'subset' must all have the same length")} if(any(object@shares < 0 | object@shares > 1,na.rm=TRUE)){ stop("'shares' values must be between 0 and 1")} if(!(sum(object@shares,na.rm=TRUE) < 1 || isTRUE(all.equal(sum(object@shares),1,check.names=FALSE, tolerance = 1e-6)))){ stop("The sum of 'shares' values must be less than or equal to 1")} if(nprods != length(object@mcDelta) || any(is.na(object@mcDelta))){ stop("'mcDelta' must be a numeric vector with the same length as 'shares' and no element of 'mcDelta' can equal NA")} if(any(object@mcDelta>0,na.rm=TRUE)){ warning("positive values of 'mcDelta' imply an INCREASE in marginal costs")} diversion <- object@diversion if(!all(is.na(diversion))){ if(!isTRUE(all.equal(diag(diversion),rep(-1,nprods), check.names=FALSE))){ stop("'diversions' diagonal elements must all equal -1")} allhavezeros <- all(apply(diversion,1,function(x){any(x==0)})) if(allhavezeros){stop("every row of 'diversions' contains zeros. Cannot calibrate demand parameters!")} diag(diversion)=1 if(any(diversion > 1 | diversion<0)){ stop("'diversions' off-diagonal elements must be between 0 and 1")} if (!isTRUE(all.equal(rowSums(object@diversion,na.rm=TRUE),rep(0,nprods),check.names=FALSE,tolerance=1e-3)) && any(rowSums(object@diversion,na.rm=TRUE)>0,na.rm=TRUE)){ stop("'diversions' rows cannot sum to greater than 0")} if(nprods != nrow(object@diversion) || nprods != ncol(object@diversion)){ stop("'diversions' must be a square matrix") } } return(TRUE) } ) setClass( Class = "Linear", contains="Bertrand", representation=representation( intercepts = "vector", prices = "vector", quantities = "numeric", margins = "numeric", priceStart = "numeric", symmetry = "logical" ), prototype=prototype( intercepts = numeric(), symmetry = TRUE ), validity=function(object){ nprods <- length(object@shares) if(nprods != length(object@quantities) || nprods != length(object@margins) || nprods != length(object@prices)){ stop("'prices', 'quantities', 'margins', and 'shares' must all be vectors with the same length")} if(any(object@prices<0,na.rm=TRUE)) stop("'prices' values must be positive") if(any(object@quantities<0,na.rm=TRUE)) stop("'quantities' values must be positive") if(any(object@margins<0 | object@margins>1,na.rm=TRUE)) stop("'margins' values must be between 0 and 1") if(any(is.na(object@diversion))){stop("'diversions' matrix cannot contain NA")} if(nprods != length(object@priceStart)){ stop("'priceStart' must have the same length as 'shares'")} if(!is.logical(object@symmetry) || length(object@symmetry)!=1){stop("'symmetry' must equal TRUE or FALSE")} if(!object@symmetry && length(object@margins[!is.na(object@margins)])!= nprods){ stop("When 'symmetry' is FALSE, all product margins must be supplied") } return(TRUE) } ) setClass( Class = "LogLin", contains="Linear", prototype=prototype( symmetry=FALSE ), validity=function(object){ nprods <- length(object@prices) if(any(is.na(object@margins))){ stop("'margins' cannot contain NA values") } 
if(nprods != length(object@priceStart)){ stop("'priceStart' must have the same length as 'prices'")} }) setClass( Class = "AIDS", contains="Linear", representation=representation( priceStart = "numeric", priceDelta = "numeric", mktElast = "numeric", parmStart="numeric", insideSize = "numeric" ), prototype=prototype( insideSize = NA_real_, priceDelta = numeric(), mktElast = numeric(), parmStart = numeric(), control.slopes = list( ) ), validity=function(object){ if(!length(object@parmStart) %in% c(0,2) || any(object@parmStart > 0,na.rm=TRUE)){stop("'parmStart' must be a length-2 non-positive numeric vector")} nprods <- length(object@shares) if(!isTRUE(all.equal(rowSums(object@diversion,na.rm=TRUE),rep(0,nprods),check.names=FALSE,tolerance=1e-3))){ stop("'diversions' rows must sum to 0")} if(!isTRUE(all.equal(sum(object@shares),1,check.names=FALSE,tolerance=1e-3))){ stop("The sum of 'shares' values must equal 1")} nMargins <- length(object@margins[!is.na(object@margins)]) if(nMargins<2 && isTRUE(is.na(object@mktElast))){stop("At least 2 elements of 'margins' must not be NA in order to calibrate demand parameters")} if(nMargins<1 && !isTRUE(is.na(object@mktElast))){stop("At least 1 element of 'margins' must not be NA in order to calibrate demand parameters")} return(NULL) } ) setClass( Class = "PCAIDS", contains="AIDS", representation=representation( knownElast = "numeric", knownElastIndex = "numeric" ), validity=function(object){ nprods <- length(object@shares) if(length(object@knownElastIndex) != 1 ){stop("'knownElastIndex' must be length 1")} if(length(object@knownElast) != 1 ){stop("'knownElast' must be length 1")} if(length(object@mktElast) != 1 ){stop("'mktElast' must be length 1")} if(!(object@knownElastIndex %in% seq(1,nprods)) ){ stop("'knownElastIndex' value must be between 1 and the length of 'shares'")} if(nprods != length(object@mcDelta)){ stop("'mcDelta' must have the same length as 'shares'")} if(object@knownElast>0 || object@mktElast > 0 ){ stop("'mktElast', 'knownElast' must be non-positive")} if(abs(object@knownElast) < abs(object@mktElast) ){ stop("'mktElast' must be less than 'knownElast' in absolute value")} } ) setClass( Class = "PCAIDSNests", contains="PCAIDS", representation= representation( nests="factor", nestsParms="numeric"), validity=function(object){ nprods <- length(object@shares) if(nprods != length(object@nests)){ stop("'nests' length must equal the number of products")} nNestParm <- nlevels(object@nests) nNestParm <- nNestParm*(nNestParm -1)/2 nMargins <- length(object@margins[!is.na(object@margins)]) maxNests <- floor((sqrt(8 * nMargins + 1) + 1)/2) if(!is.vector(object@nestsParms) || nNestParm != length(object@nestsParms)){ stop(paste("'nestsParmStart' must be a vector of length",nNestParm))} if(nNestParm > nMargins){ stop(paste( "Impossible to calibrate nest parameters with the number of margins supplied.\n", "The maximum number of nests supported by the supplied margin information is" , maxNests,".")) } } )
expected <- eval(parse(text="TRUE")); test(id=0, code={ argv <- eval(parse(text="list(structure(list(title = structure(1L, .Label = c(\"An Introduction to R\", \"Exploratory Data Analysis\", \"Interactive Data Analysis\", \"LISP-STAT\", \"Modern Applied Statistics ...\", \"Spatial Statistics\", \"Stochastic Simulation\"), class = \"factor\"), other.author = structure(2L, .Label = c(\"Ripley\", \"Venables & Smith\"), class = \"factor\")), .Names = c(\"title\", \"other.author\"), row.names = 1L, class = \"data.frame\"))")); do.call(`is.list`, argv); }, o=expected);
test_that("Individual model functions", { data <- survival::lung mod <- build_cox_model(data, 'time', 'status', c('age', 'sex')) expect_equal(mod$nevent, 165) mod2 <- build_reg_model(data, 'age', 'sex') expect_equal(mod2$df.residual, 226) expect_equal(calculate_Uno_c(data, mod), 0.5848518, tolerance = 0.0001) expect_equal(redundancy_analysis(mod2, data)$In, c('age', 'sex')) expect_equal(table_predictors(data, mod, 'sex')$coef, -0.5132185, tolerance = 0.0001) }) test_that("Blanket stats /-ments", { data <- survival::lung models_to_run <- list('OS' = list('outcome' = 'time', 'modality' = 'cox', 'event_censor' = 'status'), 'weight_loss' = list('outcome' = 'wt.loss', 'modality' = 'linear', 'event_censor' = NA)) predictor_sets <- list('age' = c('age'), 'age_ecog' = c('age', 'ph.ecog')) covariates = c('sex') bl_stats <- blanket_statsments(data, models_to_run, predictor_sets, covariates) bl_redun <- blanket_redundancy_analysis(bl_stats, data) expect_equal(bl_stats$OS$age$nevent, 165) expect_equal(table_blanket_statsments(data, bl_stats)['weight_loss', 'age_ecog_R^2'], 0.04990419, tolerance = 0.0001) expect_equal(bl_redun$OS$age$rsq1[['time']], 0.02072846 , tolerance=0.0001) expect_equal(table_blanket_redundancies(bl_redun)$age_ecog_redundant_vars, c('', '')) })
FiellerC <- function (Data) { lmResults <- lm(y ~ x1 + x2 + x1*x2,data=Data) coefficient <- summary(lmResults)$coefficients covariance <- vcov(lmResults) B2 <- coefficient[3,1] B3 <- coefficient[4,1] COV22 <- covariance[3,3] COV33 <- covariance[4,4] COV23 <- covariance[3,4] k <- 3.84 a <- B3^2 - k*COV33 b <- 2*(B2*B3-k*COV23) c <- B2^2 - k*COV22 delta <- b^2-4*a*c if (a>0 && delta>0) { LowCI <- (-b-sqrt(b^2-4*a*c))/(2*a) UpperCI <- (-b+sqrt(b^2-4*a*c))/(2*a) } else { return(-1) } results <- list(LowCI = LowCI, UpperCI = UpperCI) return(results) }
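## Usage sketch with simulated data: FiellerC() expects a data frame with
## columns y, x1 and x2 and appears to compute a Fieller confidence interval
## for -B2/B3, the value of x1 at which the conditional effect of x2 on y is
## zero (k = 3.84 is approximately qchisq(0.95, 1)). It returns -1 when the
## interval is unbounded.
set.seed(1)
n <- 200
x1 <- rnorm(n); x2 <- rnorm(n)
y <- 1 + 0.5*x1 + 0.3*x2 + 0.8*x1*x2 + rnorm(n)
FiellerC(data.frame(y = y, x1 = x1, x2 = x2))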
adverb_manner <- c( "beautifully", "bravely", "brightly", "calmly", "carefully", "cautiously", "cheerfully", "clearly", "correctly", "courageously", "daringly", "deliberately", "doubtfully", "eagerly", "easily", "elegantly", "enormously", "enthusiastically", "faithfully", "fast", "fondly", "fortunately", "frankly", "frantically", "generously", "gently", "gladly", "gracefully", "happily", "healthily", "honestly", "joyously", "justly", "kindly", "neatly", "openly", "patiently", "perfectly", "politely", "powerfully", "quickly", "quietly", "rapidly", "really", "regularly", "repeatedly", "rightfully", "seriously", "sharply", "smoothly", "speedily", "successfully", "swiftly", "tenderly", "thoughtfully", "truthfully", "warmly", "well", "wisely" ) adverb <- adverb_manner
library('testthat') context("simulate_data_from_gcode") data("example_gcode_parsed") data("example_simulated_gcode_data") simulated_gcode_data = na.omit(simulate_data_from_gcode(example_gcode_parsed, start_time = 0, data_res = 0.1, data_type = "HH")) expect_equal(simulated_gcode_data,example_simulated_gcode_data)
MRM <- function(f, inputDimension, inputDistribution, dir.monot, N.calls, Method, silent = FALSE){ transformtionToInputSpace <- function(inputDistribution){ InputDist <- list() InputDist <- inputDistribution for(i in 1:inputDimension){ nparam <- length(inputDistribution[[i]][[2]]) for(j in 1:nparam){ InputDist[[i]]$q <- paste("q", InputDist[[i]][[1]], sep = ""); InputDist[[i]]$p <- paste("p", InputDist[[i]][[1]], sep = ""); InputDist[[i]]$d <- paste("d", InputDist[[i]][[1]], sep = ""); InputDist[[i]]$r <- paste("r", InputDist[[i]][[1]], sep = ""); } } InputDist } InputDist <- transformtionToInputSpace(inputDistribution) G <- function(X){ XU <- numeric() for(i in 1:inputDimension){ if(dir.monot[i] == -1){X[i] <- 1 - X[i]} XU[i] <- do.call(InputDist[[i]]$q,c(list(X[i, drop = FALSE]), InputDist[[i]][[2]])) } return(f(XU)) } Intersect <- function(inputDimension, FUNC){ a <- 2 k <- 2 res <- list() u.new <- 0 temp <- 0 u.dep <- list() out <- list() comp <- 2 u.dep[[1]] <- rep(1/2, inputDimension) temp <- FUNC(u.dep[[1]]) u.dep[[2]] <- sign(temp) cp <- 1 u.other <- u.dep u.new <- u.dep[[1]] LIST <- list() LIST[[1]] <- u.dep list.set <- LIST[[1]][2] if(temp > 0){ u.new <- u.dep[[1]] - 1/(a^k) }else{ u.new <- u.dep[[1]] + 1/(a^k) } eps <- ( u.new - u.dep[[1]] )%*%( u.new - u.dep[[1]] ) if( ( u.other[[2]] != sign(temp)) & (eps > 1e-7) ){ u.other <- list( u.dep[[1]], sign(temp) ) } k <- k + 1 sign.0 <- sign(temp) sign.other <- - sign.0 while(sign(temp) != sign.other){ u.dep[[1]] <- u.new temp <- FUNC(u.dep[[1]]) u.dep[[2]] <- sign(temp) cp <- cp + 1 if(temp > 0){ u.new <- u.dep[[1]] - 1/(a^k) }else{ u.new <- u.dep[[1]] + 1/(a^k) } eps <- ( u.new - u.dep[[1]] )%*%( u.new - u.dep[[1]] ) k <- k + 1 LIST[[comp]] <- u.dep list.set[comp] <- LIST[[comp]][[2]] comp <- comp + 1 } return(LIST) list.set <- as.numeric(list.set) if( abs(sum(list.set)) == length(LIST)){ res[[1]] <- LIST[[length(LIST)]][[1]] res[[2]] <- LIST[[length(LIST)]][[2]] out <- list(res, cp) return(out) }else{ u.dep[[1]] <- LIST[[max(which(list.set == -1))]][[1]] u.dep[[2]] <- LIST[[max(which(list.set == -1))]][[2]] u.other[[1]] <- LIST[[max(which(list.set == 1))]][[1]] u.other[[2]] <- LIST[[max(which(list.set == 1))]][[2]] res[[1]] <- rbind(u.dep[[1]], u.other[[1]]) res[[2]] <- c(u.dep[[2]], u.other[[2]]) out <- list(res, cp) return(out) } } is.dominant <- function(x, y, inputDimension, set){ dominant <- NULL; if( is.null(dim(x)) ){ if(set == -1){ if ( sum(x >= y) == inputDimension ){ return(TRUE) }else{ return(FALSE) } }else{ if( sum(x <= y) == inputDimension ){ return(TRUE) }else{ return(FALSE) } } } y.1 <- NULL Y.2 <- NULL y.1 <- rep(y,dim(x)[1]) y.2 <- matrix(y.1, ncol = inputDimension, byrow = TRUE) if(set == -1){ dominant <- apply(x >= y.2, 1, sum) == inputDimension }else{ dominant <- apply(x <= y.2, 1, sum) == inputDimension } return(dominant) } Volume.bounds <- function(S, set){ if(set == 1){ S <- 1 - S } if(is.null(dim(S))){ if(set == 1){ return(1 - prod(S)) } if(set == -1){ return(prod(S)) } } DS <- dim(S)[1] if(inputDimension == 2){ S <- S[order(S[,1]),] res <- diag(outer(S[,1], c(0,S[1:(DS - 1), 1]), "-")) res1 <- res%*%S[,2] res1 <- ifelse(set == 1, 1 - res1, res1) return(res1) } RES.VOL <- dominated_hypervolume(1 - t(S), rep(1, inputDimension)) if(set == 1){ RES.VOL <- 1 - RES.VOL } return(RES.VOL) } Frontier <- function(S, set){ if(is.null(dim(S)) |( dim(S)[1] == 1)){ return(S) } R <- NULL if(set == 1){ S <- 1 - S } while(!is.null(dim(S))){ aa <- apply(S, MARGIN = 1, prod) temp <- S[which.max(aa), ] R <- rbind(R, temp) S <- 
S[-which.max(aa), ] ss <- is.dominant(S, temp, inputDimension, -1) if(!is.null(dim(S))){ S <- S[which(ss == FALSE),] }else{ S <- matrix(S, ncol= inputDimension) S <- S[which(ss == FALSE), ] R <- rbind(R, S) if(set == 1){ return(1 - R) }else{ return(R) } } } if(set == 1){ return(1 - R) }else{ return(R) } } monteCarloMonotone <- function(N.calls){ NN <- 0 N.tot <- 0 res <- NULL Z.safe <- NULL Z.fail <- NULL X <- NULL Y <- NULL is.Call <- 0 while(NN < N.calls){ if(silent == FALSE){ if(N.tot%%100 == 0){print(NN);flush.console();} } U <- runif(inputDimension) X <- rbind(X, U) N.tot <- N.tot + 1 if( is.null(Z.safe)& is.null(Z.fail) ){ t.u <- G(U) NN <- NN + 1 is.Call[NN] <- N.tot } if(is.null(Z.safe)&( !is.null(Z.fail)) ){ ttf <- is.dominant(Z.fail, U, inputDimension, set = -1) if( sum(ttf) == 0 ){ t.u <- G(U) NN <- NN + 1 is.Call[NN] <- N.tot }else{ t.u <- -1 } } if(!is.null(Z.safe) & is.null(Z.fail) ){ tts <- is.dominant(Z.safe, U, inputDimension, set = 1) if(sum(tts) == 0){ t.u <- G(U) NN <- NN + 1 is.Call[NN] <- N.tot }else{ t.u <- 1 } } if((!is.null(Z.safe)) &( !is.null(Z.fail)) ){ ttf <- is.dominant(Z.fail, U, inputDimension, set = -1) tts <- is.dominant(Z.safe, U, inputDimension, set = 1) if( (sum(tts) == 0) & (sum(ttf) == 0) ){ t.u <- G(U) NN <- NN + 1 is.Call[NN] <- N.tot } if( (sum(tts) == 0)& (sum(ttf) != 0) ){ t.u <- -1 } if( (sum(tts) != 0)& (sum(ttf) == 0) ){ t.u <- 1 } } Y <- c(Y, t.u) if(t.u <= 0){ Z.fail <- rbind(Z.fail, U) res <- c(res, 1) }else{ Z.safe <- rbind(Z.safe, U) res <- c(res, 0) } } I <- 1:N.tot alpha <- 0.05 cum.res <- cumsum(res) estimation_MC <- cum.res/I Var_MC <- (estimation_MC)*(1 - estimation_MC)/I IC.inf <- estimation_MC - qnorm(1 - alpha/2)*sqrt(Var_MC) IC.sup <- estimation_MC + qnorm(1 - alpha/2)*sqrt(Var_MC) CV_MC <- 100*sqrt(Var_MC)/estimation_MC if(is.null(Z.fail)){ Um <- 0 }else{ ZF <- Frontier(Z.fail, -1) Um <- Volume.bounds(ZF, -1) } if(is.null(Z.safe)){ UM <-1 }else{ ZS <- Frontier(Z.safe, 1) UM <- Volume.bounds(ZS, 1) } return(list(cbind(IC.inf, IC.inf, estimation_MC, CV_MC, Var_MC)[is.Call, ], Um, UM, N.tot)) } log.likehood <- function(p , p.k, signature){ gamma <- (p - p.k[,1])/(p.k[,2] - p.k[,1]) u <- (gamma^signature)*((1 - gamma)^(1 - signature)) return(prod(u)) } as.binary <- function (x) { base <- 2; r <- numeric(inputDimension) for (i in inputDimension:1){ r[i] <- x%%base x <- x%/%base } return(r) } SIM <- function(x, W){ B <- 0 B <- apply( matrix(W, ncol = 1), MARGIN = 1, function(v){ Z <- as.binary(v) v <- 0 u <- 0 for(j in 1:inputDimension){ u[j] <- ifelse(Z[j] == 0, 1 - x[j], x[j]) } return(prod(u)) } ) B <- cumsum(B) U <- runif(1, 0, max(B)) pos <- ifelse(U < B[1], 1, which.max(B[B <= U]) + 1) Z <- as.binary(W[pos]) A <- 0 for(i in 1:inputDimension){ A[i] = ifelse(Z[i] == 0, runif(1, x[i], 1), runif(1, 0, x[i])) } return(A) } Sim.non.dominated.space <- function(CP, Z.safe, Z.fail, W){ CP1 <- 0; Y <- NULL Y.temp <- apply(1 - Z.safe, MARGIN = 1, prod) Y.temp1 <- Z.safe[which.max(Y.temp),] while(CP1 < CP){ Y.temp2 <- SIM(Y.temp1, W) tts1 <- is.dominant(Z.safe, Y.temp2, inputDimension, 1) ttf1 <- is.dominant(Z.fail, Y.temp2, inputDimension, -1) if( (sum(tts1) == 0 ) & ( sum(ttf1) == 0) ){ Y <- rbind(Y,Y.temp2) CP1 <- CP1 + 1 } } return(Y) } mrmEstimation <- function(N.calls, H){ V <- list() V <- Intersect(inputDimension, H) list.set <- 0 for(i in 1:length(V)){ list.set[i] <- V[[i]][[2]] } u.dep <- list() u.other <- list() u.dep[[1]] <- V[[max(which(list.set == -1))]][[1]] u.dep[[2]] <- V[[max(which(list.set == -1))]][[2]] u.other[[1]] <- 
V[[max(which(list.set == 1))]][[1]]
    u.other[[2]] <- V[[max(which(list.set == 1))]][[2]]
    Z.fail <- t(as.matrix(u.dep[[1]]))
    Z.safe <- t(as.matrix(u.other[[1]]))
    cp <- length(V)
    um <- 0; uM <- 1; Um <- 0; UM <- 1
    eps <- 1e-7
    alpha <- 0.05
    SIGN <- 0; ICinf <- 0; ICsup <- 0; VAR <- 0; CV.MLE <- 0; MLE <- 0; p.hat <- 0
    X <- NULL; Y <- NULL
    um <- prod(V[[cp]][[1]])
    uM <- 1 - prod(1 - V[[cp]][[1]])
    j <- 1
    Um <- um
    UM <- uM
    W <- 1:(2^(inputDimension) - 1)
    while(cp < N.calls){
      if(silent == FALSE){ print(paste("Current number of runs =", cp)); flush.console() }
      uu <- Sim.non.dominated.space(1, Z.safe, Z.fail, W)
      H.u <- H(uu)
      SIGN[j] <- (1 - sign(H.u))/2
      X <- rbind(X, uu)
      Y <- c(Y, H.u)
      if(H.u > 0){
        Z.safe.old <- rbind(uu, Z.safe)
        ss <- is.dominant(Z.safe, uu, inputDimension, -1)
        Z.safe <- Z.safe[which(ss == FALSE), ]
        Z.safe <- rbind(uu, Z.safe)
        vol <- Volume.bounds(Z.safe, 1)
        Um[j+1] <- Um[j]
        if(vol >= UM[j]){ UM[j + 1] <- UM[j] }else{ UM[j + 1] <- vol }
        CC <- ifelse(cp == N.calls, 1, 0)
      }else{
        Z.fail.old <- rbind(uu, Z.fail)
        ff <- is.dominant(Z.fail, uu, inputDimension, 1)
        Z.fail <- Z.fail[which(ff == FALSE), ]
        Z.fail <- rbind(uu, Z.fail)
        vol <- Volume.bounds(Z.fail, -1)
        UM[j+1] <- UM[j]
        if(vol <= Um[j]){ Um[j+1] <- Um[j] }else{ Um[j+1] <- vol }
      }
      cp <- cp + 1
      MLE.test <- optimize(f = log.likehood, interval = c(Um[j], UM[j]), maximum = TRUE,
                           signature = SIGN, p.k = cbind(Um[1:j], UM[1:j]))
      MLE[j] <- as.numeric(MLE.test[1])
      VAR <- sum(1/((MLE - Um[1:j])*(UM[1:j] - MLE)))
      bn <- 1/VAR
      an <- eps*VAR^(5/2)/abs(sum(1/((MLE + eps - Um[1:j])*(UM[1:j] - MLE - eps))) -
                              sum(1/((MLE - Um[1:j])*(UM[1:j] - MLE))))
      ICinf[j] <- MLE[j] - qnorm(1 - alpha/2)/sqrt(VAR - alpha/an)
      ICsup[j] <- MLE[j] + qnorm(1 - alpha/2)/sqrt(VAR + alpha/an)
      CV.MLE[j] <- 100/(sqrt(VAR)*MLE[j])
      # running estimate: for each step, take the bound selected by its signature
      p.hat[j] <- mean(Um[1:j] + (UM[1:j] - Um[1:j])*SIGN[1:j])
      j <- j + 1
    }
    RR <- list(cbind(Um[1:(j-1)], UM[1:(j-1)], MLE[1:(j-1)], ICinf[1:(j-1)],
                     ICsup[1:(j-1)], CV.MLE[1:(j-1)], p.hat[1:(j-1)]), X, Y)
    return(RR)
  }
  if(Method == "MRM"){ RESULT <- mrmEstimation(N.calls, G) }
  if(Method == "MC"){ RESULT <- monteCarloMonotone(N.calls) }
  return(RESULT)
}
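## A minimal usage sketch for MRM() under stated assumptions: two standard
## normal inputs, a limit-state function increasing in each coordinate, and
## the plain monotone Monte Carlo driver (Method = "MC"; Method = "MRM" runs
## the maximum-likelihood bounds estimation instead). For inputDimension > 2
## the Volume.bounds() helper also needs dominated_hypervolume() on the
## search path.
set.seed(1)
f <- function(x) sum(x) - 1.5   # failure region is f(x) <= 0
inputDistribution <- list(list("norm", c(0, 1)), list("norm", c(0, 1)))
res <- MRM(f, inputDimension = 2, inputDistribution = inputDistribution,
           dir.monot = c(1, 1), N.calls = 50, Method = "MC", silent = TRUE)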
nlm.prob <- function(f, p, prob.vectors = list(1:length(p)), ..., lambda = 1, eta0max = 1e10, maximise = FALSE, maximize = maximise, hessian = FALSE, typsize = rep(1, length(p)), fscale = 1, print.level = 0, ndigit = 12, gradtol = 1e-06, stepmax = max(1000*sqrt(sum((p/typsize)^2)), 1000), steptol = 1e-06, iterlim = 100, check.analyticals = TRUE) { print.level <- as.integer(print.level) if (!(print.level %in% c(0,1,2))) { stop("'print.level' must be in {0,1,2}") } MSG <- ifelse(check.analyticals, c(9, 1, 17)[1+ print.level], c(15, 7, 23)[1+ print.level]) m <- length(p) s <- length(prob.vectors) for (i in 1:s) { INDICES <- prob.vectors[[i]] if (!is.numeric(INDICES)) { stop('Error: Each element of prob.vectors should be a vector of indices for the input p') } if (!all(as.integer(INDICES) == INDICES)) { stop('Error: Each element of prob.vectors should be a vector of indices for the input p') } if (min(INDICES) < 1) { stop('Error: Each element of prob.vectors should be a vector of indices for the input p') } if (max(INDICES) > m) { stop('Error: Each element of prob.vectors should be a vector of indices for the input p') } INDICES <- sort(unique(INDICES)) WARN <- FALSE if (min(p[INDICES]) < 0) { warning(paste0('Error: prob.vector ', i, ' has a negative element')) WARN <- TRUE } if (sum(p[INDICES]) != 1) { warning(paste0('Error: prob.vector ', i, ' does not sum to one')) WARN <- TRUE } if (WARN) { PVEC <- pmax(1e-10, p[INDICES]) PVEC <- PVEC/sum(PVEC) p[INDICES] <- PVEC warning(paste0('We have adjusted the starting values of prob.vector ', i, ' to use a valid probability vector\n')) } } if (length(unique(unlist(prob.vectors))) < length(unlist(prob.vectors))) { stop('Error: Lists of indices in prob.vectors must not overlap') } if (!is.numeric(lambda)) { stop('Error: Input lambda should be a numeric value') } if (min(lambda) <= 0) { stop('Error: Input lambda should be a positive value') } ss <- length(lambda) if ((ss != 1)&(ss != s)) { stop('Error: Input lambda should either be a single value, or it should have the same length as prob.vectors') } if (ss == 1) { lambda <- rep(lambda, s) } if (!missing(maximise) && !missing(maximize)) { if (maximise != maximize) { warning("Specify 'maximise' or 'maximize' but not both") } else { stop("Error: specify 'maximise' or 'maximize' but not both") } } MAX <- maximize if (!isTRUE(MAX) && ! 
isFALSE(MAX)) { stop('Error: Input maximise/maximize should be a single logical value') } ARGS.LENGTH <- rep(0, s+1) ARGS.INDEX <- vector(mode = "list", length = s+1) names(ARGS.INDEX)[1:s] <- sprintf('prob.vector.%s', 1:s) names(ARGS.INDEX)[s+1] <- 'other.args' OTHER <- rep(TRUE, m) for (i in 1:s) { IND <- prob.vectors[[i]] OTHER[IND] <- FALSE ARGS.INDEX[[i]] <- IND ARGS.LENGTH[i] <- length(IND) } ARGS.INDEX[[s+1]] <- which(OTHER) ARGS.LENGTH[s+1] <- sum(OTHER) eta_to_p <- function(eta) { if (!is.numeric(eta)) { stop('Error: Input eta must be numeric') } if (length(eta) != m-s) { stop('Error: Input eta must have length m-s') } ARGS <- vector(mode = "list", length = s+1) DD1 <- vector(mode = "list", length = s+1) DD2 <- vector(mode = "list", length = s+1) t <- 0 for (i in 1:s) { r <- ARGS.LENGTH[i] if (r == 1) { ARGS[[i]] <- 1 } if (r > 1) { SOFT <- softmax(eta[(t+1):(t+r-1)], lambda = lambda[i], gradient = TRUE, hessian = TRUE) ARGS[[i]] <- c(SOFT)/sum(c(SOFT)) DD1[[i]] <- attributes(SOFT)$gradient DD2[[i]] <- attributes(SOFT)$hessian } t <- t+r-1 } r <- ARGS.LENGTH[s+1] ARGS[[s+1]] <- eta[(t+1):(t+r)] DD1[[s+1]] <- diag(r) DD2[[s+1]] <- array(0, dim = c(r, r, r)) PPP <- rep(NA, m) D1 <- array(0, dim = c(m, m-s)) D2 <- array(0, dim = c(m, m-s, m-s)) t <- 0 for (i in 1:s) { r <- ARGS.LENGTH[i] IND <- ARGS.INDEX[[i]] PPP[IND] <- ARGS[[i]] if (r > 1) { D1[IND, (t+1):(t+r-1)] <- DD1[[i]] D2[IND, (t+1):(t+r-1), (t+1):(t+r-1)] <- DD2[[i]] } t <- t+r-1 } r <- ARGS.LENGTH[s+1] if (r > 0) { IND <- ARGS.INDEX[[s+1]] PPP[IND] <- ARGS[[s+1]] D1[IND, (t+1):(t+r)] <- DD1[[s+1]] D2[IND, (t+1):(t+r), (t+1):(t+r)] <- DD2[[s+1]] } attr(PPP, 'gradient') <- D1 attr(PPP, 'hessian') <- D2 PPP } p_to_eta <- function(p) { ARGS <- vector(mode = "list", length = s+1) DD1 <- vector(mode = "list", length = s+1) DD2 <- vector(mode = "list", length = s+1) for (i in 1:s) { IND <- ARGS.INDEX[[i]] SOFTINV <- softmaxinv(p[IND], lambda = lambda[i], gradient = TRUE, hessian = TRUE) ARGS[[i]] <- c(SOFTINV) DD1[[i]] <- attributes(SOFTINV)$gradient DD2[[i]] <- attributes(SOFTINV)$hessian } r <- ARGS.LENGTH[s+1] if (r > 0) { IND <- ARGS.INDEX[[s+1]] ARGS[[s+1]] <- p[IND] DD1[[s+1]] <- diag(r) DD2[[s+1]] <- array(0, dim = c(r, r, r)) } EEE <- unlist(ARGS) D1 <- array(0, dim = c(m-s, m)) D2 <- array(0, dim = c(m-s, m, m)) t <- 0 for (i in 1:s) { r <- ARGS.LENGTH[i] if (r > 1) { IND <- ARGS.INDEX[[i]] D1[(t+1):(t+r-1), IND] <- DD1[[i]] D2[(t+1):(t+r-1), IND, IND] <- DD2[[i]] } t <- t+r-1 } r <- ARGS.LENGTH[s+1] if (r > 0) { IND <- ARGS.INDEX[[s+1]] D1[(t+1):(t+r), IND] <- DD1[[s+1]] D2[(t+1):(t+r), IND, IND] <- DD2[[s+1]] } attr(EEE, 'gradient') <- D1 attr(EEE, 'hessian') <- D2 EEE } SGN <- ifelse(MAX, -1, 1) OBJ <- function(eta, ...) { PP <- eta_to_p(eta) GG <- SGN*f(c(PP), ...) 
GRAD.f <- SGN*attributes(GG)$gradient
    if (is.matrix(GRAD.f)) {
      GRAD.p <- attributes(PP)$gradient
      D1 <- GRAD.f %*% GRAD.p
      attr(GG, 'gradient') <- D1
    }
    HESS.f <- SGN*attributes(GG)$hessian
    if ((is.matrix(GRAD.f))&&(is.matrix(HESS.f))) {
      HESS.p <- attributes(PP)$hessian
      # chain rule: the Hessian in eta-space is (m-s) x (m-s), matching the
      # length of eta (one parameter fewer per probability vector)
      T1 <- (t(GRAD.p) %*% (HESS.f %*% GRAD.p))
      T2 <- matrix(0, m-s, m-s)
      for (i in 1:(m-s)) {
        for (j in 1:(m-s)) {
          T2[i,j] <- sum(GRAD.f*HESS.p[ ,i,j]) } }
      attr(GG, 'hessian') <- T1 + T2 }
    GG }
  eta0 <- c(p_to_eta(p))
  eta0 <- pmin(pmax(eta0, -eta0max), eta0max)
  NLM <- stats::nlm(f = OBJ, p = eta0, hessian = hessian, typsize = p_to_eta(typsize),
                    fscale = fscale, print.level = print.level, ndigit = ndigit,
                    gradtol = gradtol, stepmax = stepmax, steptol = steptol,
                    iterlim = iterlim, check.analyticals = check.analyticals, ...)
  ESTIMATE <- c(eta_to_p(NLM$estimate))
  OPT <- SGN*NLM$minimum
  GRAD.OPT <- attributes(f(ESTIMATE))$gradient
  HESS.OPT <- attributes(f(ESTIMATE))$hessian
  if (MAX) {
    if (hessian) {
      OUT <- list(maximum = OPT, estimate = ESTIMATE, gradient = GRAD.OPT, hessian = HESS.OPT, code = NLM$code, iterations = NLM$iterations)
    } else {
      OUT <- list(maximum = OPT, estimate = ESTIMATE, gradient = GRAD.OPT, code = NLM$code, iterations = NLM$iterations)
    }
  } else {
    if (hessian) {
      OUT <- list(minimum = OPT, estimate = ESTIMATE, gradient = GRAD.OPT, hessian = HESS.OPT, code = NLM$code, iterations = NLM$iterations)
    } else {
      OUT <- list(minimum = OPT, estimate = ESTIMATE, gradient = GRAD.OPT, code = NLM$code, iterations = NLM$iterations)
    }
  }
  OUT
}
expected <- eval(parse(text="list(structure(list(structure(\"vpl1\", class = c(\"vpListing\", \"gridVectorListing\", \"gridListing\")), structure(\"1\", class = c(\"vpUpListing\", \"gridVectorListing\", \"gridListing\"))), class = c(\"gridListListing\", \"gridListing\")), structure(\"vpl2\", class = c(\"vpListing\", \"gridVectorListing\", \"gridListing\")))")); test(id=0, code={ argv <- eval(parse(text="list(list(structure(list(structure(\"vpl1\", class = c(\"vpListing\", \"gridVectorListing\", \"gridListing\")), structure(\"1\", class = c(\"vpUpListing\", \"gridVectorListing\", \"gridListing\"))), class = c(\"gridListListing\", \"gridListing\"))), list(structure(\"vpl2\", class = c(\"vpListing\", \"gridVectorListing\", \"gridListing\"))))")); do.call(`c`, argv); }, o=expected);
site <- c("ftp://ftp.jax.org", "ftp://ftp.jax.org", "ftp://ftp-mouse.sanger.ac.uk") subdir <- c("SNPtools/variants", "SNPtools/variants", "current_svs") files <- c("mgp.v5.merged.snps_all.dbSNP142.vcf.gz", "mgp.v5.merged.indels.dbSNP142.normed.vcf.gz", "28strains.REL-1410-SV.sdp.tab.gz") date_source <- c("2015-09-20", "2015-09-20", "2014-10-20") genome_build <- rep("GRCm38/mm10", 3) for(i in seq_along(files)) { file <- files[i] url <- paste0(site[i], "/", subdir[i], "/", file) tbi_file <- paste0(file, ".tbi") tbi_url <- paste0(site[i], "/", subdir[i], "/", tbi_file) if(!file.exists(file)) { cat(" -Downloading", file, "\n") download.file(url, file) } if(!file.exists(tbi_file)) { cat(" -Downloading", tbi_file, "\n") download.file(tbi_url, tbi_file) } } format_consequence <- function(csq_record) { x <- strsplit(csq_record, "|", fixed=TRUE) genes <- sapply(x, "[", 2) csq <- sapply(x, "[", 5) csq <- unlist(lapply(seq_along(csq), function(i) { if(genes[i] == "") { return(csq[i]) } else { tmp <- unlist(strsplit(csq[i], "&", fixed=TRUE)) return(paste(genes[i], tmp, sep=":")) }})) c(paste(unique(genes), collapse=","), paste(unique(csq), collapse=",")) } chr <- c(1:19, "X", "Y", "MT") cc_founders <- c("A/J", "C57BL/6J", "129S1/SvImJ", "NOD/ShiLtJ", "NZO/HlLtJ", "CAST/EiJ", "PWK/PhJ", "WSB/EiJ") strains <- sub("/", "_", cc_founders[-2]) n_strains <- length(strains) library(VariantAnnotation) library(RSQLite) db_file <- "cc_variants.sqlite" db <- dbConnect(SQLite(), dbname=db_file) dbExecute(db, paste0("ATTACH '", db_file, "' AS NEW")) cat(" -SNPs\n") tabfile <- TabixFile(files[1], paste0(files[1], ".tbi")) db_started <- FALSE for(thechr in chr) { for(left in seq(0, 190, by=10)) { cat(thechr, left, "\n") gr <- GRanges(seqnames=thechr, ranges=IRanges(start=left*1e6, end=(left+10)*1e6-1)) param <- ScanVcfParam(geno = c("GT", "FI"), samples = strains, which = gr) snps <- readVcf(tabfile, genome = "mm10", param = param) if(nrow(snps)==0) next fi <- geno(snps)$FI snps <- snps[rowSums(!is.na(fi) & fi==1) == n_strains] g <- geno(snps)$GT snps <- snps[rowSums(is.na(g)) == 0 & rowSums(g=="0/0") < n_strains] g <- geno(snps)$GT if(nrow(snps)==0) next g <- geno(snps)$GT g <- cbind(g[,1,drop=FALSE], C57BL_6J="0/0", g[,-1]) colnames(g) <- cc_founders major <- as.character(ref(snps)) minor <- CharacterList(alt(snps)) alleles <- matrix("", nrow=nrow(snps), ncol=4) alleles[,1] <- major for(i in 2:4) { alleles[,i] <- sapply(minor, "[", i-1) } rs <- sapply(c("0/0", "1/1", "2/2", "3/3"), function(a) rowSums(g==a)) stopifnot(all(rowSums(rs)==8)) for(i in ncol(rs):2) { wh <- (rs[,i] > rs[,1] & rowSums(rs <= rs[,i]) == ncol(rs)) if(any(wh)) { tmp <- alleles[wh,i] alleles[wh,i] <- alleles[wh,1] alleles[wh,1] <- tmp pat <- paste0(i-1, "/", i-1) gg <- g[wh,,drop=FALSE] gg[gg==pat] <- "x/x" gg[gg=="0/0"] <- pat gg[gg=="x/x"] <- "0/0" g[wh,] <- gg } } glet <- gnum <- matrix(nrow=nrow(g), ncol=ncol(g)) dimnames(glet) <- dimnames(gnum) <- dimnames(g) for(i in 1:4) { pat <- paste0(i-1, "/", i-1) for(j in 1:ncol(g)) { wh <- (g[,j] == pat) glet[wh,j] <- alleles[wh,i] gnum[wh,j] <- i } } for(i in 1:4) { wh <- which(rowSums(gnum==i)==0) alleles[wh,i] <- NA } alleles_char <- paste(alleles[,1], apply(alleles[,-1,drop=FALSE], 1, function(a) paste(a[!is.na(a)], collapse="/")), sep="|") for(i in 3:2) { wh <- is.na(alleles[,i]) if(any(wh)) { tmp <- gnum[wh,] tmp[tmp >= i] <- tmp[tmp >= i] - 1 gnum[wh,] <- tmp } } gbin <- gnum gbin[gbin > 1] <- 3 csq <- sapply(info(snps)$CSQ, format_consequence) snps <- data.frame(snp_id=rownames(g), 
chr=as.vector(seqnames(snps)), pos=start(snps), alleles=alleles_char, sdp=qtl2::calc_sdp(gbin), ensembl_gene=csq[1,], consequence=csq[2,], gnum, type="snp", stringsAsFactors=FALSE) colnames(snps)[8:15] <- c(strains[1], "C57BL_6J", strains[-1]) dbWriteTable(db, "variants", snps, row.names=FALSE, overwrite=!db_started, append=db_started, field.types=NULL) db_started <- TRUE } } cat(" -InDels\n") chr <- c(1:19, "X", "Y") tabfile <- TabixFile(files[2], paste0(files[2], ".tbi")) for(thechr in chr) { for(left in seq(0, 190, by=10)) { cat(thechr, left, "\n") gr <- GRanges(seqnames=thechr, ranges=IRanges(start=left*1e6, end=(left+10)*1e6-1)) param <- ScanVcfParam(geno = c("GT", "FI"), samples = strains, which = gr) indels <- readVcf(tabfile, genome = "mm10", param = param) if(nrow(indels)==0) next fi <- geno(indels)$FI indels <- indels[rowSums(!is.na(fi) & fi==1) == n_strains] if(nrow(indels)==0) next g <- geno(indels)$GT indels <- indels[rowSums(is.na(g)) == 0 & rowSums(g=="0/0") < n_strains] g <- geno(indels)$GT if(nrow(indels)==0) next g <- cbind(g[,1,drop=FALSE], C57BL_6J="0/0", g[,-1,drop=FALSE]) colnames(g) <- cc_founders major <- as.character(ref(indels)) minor <- CharacterList(alt(indels)) max_minor <- max(sapply(minor, length)) alleles <- matrix("", nrow=nrow(g), ncol=max_minor+1) alleles[,1] <- major for(i in 2:(max_minor+1)) alleles[,i] <- sapply(minor, "[", i-1) pat <- paste0(0:max_minor, "/", 0:max_minor) rs <- sapply(pat, function(a) rowSums(g==a)) if(!is.matrix(rs)) { rs <- matrix(rs, nrow=1) dimnames(rs) <- list(rownames(g), pat) } stopifnot(all(rowSums(rs)==8)) for(i in ncol(rs):2) { wh <- (rs[,i] > rs[,1] & rowSums(rs <= rs[,i]) == ncol(rs)) if(any(wh)) { tmp <- alleles[wh,i] alleles[wh,i] <- alleles[wh,1] alleles[wh,1] <- tmp pat <- colnames(rs)[i] gg <- g[wh,,drop=FALSE] gg[gg==pat] <- "x/x" gg[gg=="0/0"] <- pat gg[gg=="x/x"] <- "0/0" g[wh,] <- gg } } glet <- gnum <- matrix(nrow=nrow(g), ncol=ncol(g)) dimnames(glet) <- dimnames(gnum) <- dimnames(g) for(i in 1:ncol(rs)) { pat <- paste0(i-1, "/", i-1) for(j in 1:ncol(g)) { wh <- (g[,j] == pat) glet[wh,j] <- alleles[wh,i] gnum[wh,j] <- i } } for(i in 1:ncol(alleles)) { wh <- which(rowSums(gnum==i)==0) alleles[wh,i] <- NA } alleles_char <- paste(alleles[,1], apply(alleles[,-1,drop=FALSE], 1, function(a) paste(a[!is.na(a)], collapse="/")), sep="|") if(ncol(alleles) >= 3) { for(i in 3:2) { wh <- is.na(alleles[,i]) if(any(wh)) { tmp <- gnum[wh,] tmp[tmp >= i] <- tmp[tmp >= i] - 1 gnum[wh,] <- tmp } } } gbin <- gnum gbin[gbin > 1] <- 3 csq <- sapply(info(indels)$CSQ, format_consequence) indels <- data.frame(snp_id=rownames(g), chr=thechr, pos=start(indels), alleles=alleles_char, sdp=qtl2::calc_sdp(gbin), ensembl_gene=csq[1,], consequence=csq[2,], gnum, type="indel", stringsAsFactors=FALSE) colnames(indels)[8:15] <- c(strains[1], "C57BL_6J", strains[-1]) dbWriteTable(db, "variants", indels, row.names=FALSE, overwrite=FALSE, append=TRUE, field.types=NULL) } } cat(" -Stuctural variants\n") tmpfile <- tempfile() system(paste0("gunzip -c ", files[3], " > ", tmpfile)) svs <- data.table::fread(tmpfile, data.table=FALSE) unlink(tmpfile) g <- svs[,colnames(svs) %in% strains] g <- g[,strains] g <- cbind(g[,1,drop=FALSE], C57BL_6J="0", g[,-1,drop=FALSE], stringsAsFactors=FALSE) n_allele <- apply(g, 1, function(a) length(unique(a))) svs <- svs[n_allele > 1,] g <- g[n_allele > 1,] g[g=="0"] <- "-" alleles <- apply(g, 1, function(a) { tab <- table(a) result <- names(sort(tab, decreasing=TRUE)) if(length(result) < 8) result <- c(result, rep(NA, 
8-length(result))) result }) alleles <- t(alleles) gnum <- matrix(nrow=nrow(g), ncol=ncol(g)) dimnames(gnum) <- dimnames(g) for(i in 1:ncol(alleles)) { for(j in 1:ncol(g)) { wh <- (!is.na(alleles[,i]) & g[,j] == alleles[,i]) gnum[wh,j] <- i } } alleles_char <- paste(alleles[,1], apply(alleles[,-1,drop=FALSE], 1, function(a) paste(a[!is.na(a)], collapse="/")), sep="|") gbin <- gnum gbin[gbin > 1] <- 3 svs <- data.frame(snp_id=paste0("SV_", svs[," chr=svs[," pos=round((svs[,"START"] + svs[,"END"])/2), alleles=alleles_char, sdp=qtl2::calc_sdp(gbin), ensembl_gene=NA, consequence=NA, gnum, type="SV", stringsAsFactors=FALSE) colnames(svs)[8:15] <- c(strains[1], "C57BL_6J", strains[-1]) dbWriteTable(db, "variants", svs, row.names=FALSE, overwrite=FALSE, append=TRUE, field.types=NULL) description <- data.frame(description=c("SNPs in Collaborative Cross founders", "Indels in Collaborative Cross founders", "SVs in Collaborative Cross founders"), source=c("Mouse Genome Informatics (MGI), Jackson Lab", "Mouse Genome Informatics (MGI), Jackson Lab", "Sanger"), url=paste0(site, "/", subdir, "/", files), date_created=rep(as.character(Sys.Date()), 3), date_source=date_source, genome_build=genome_build, stringsAsFactors=FALSE) dbWriteTable(db, "description", description, append=TRUE) dbExecute(db, "CREATE INDEX chr_pos ON variants(chr, pos)") dbDisconnect(db)
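## Sanity-check sketch for the finished database (an assumption-laden extra,
## not part of the original script: it assumes the build above ran to
## completion so that db_file points at a populated cc_variants.sqlite).
db <- dbConnect(SQLite(), dbname=db_file)
dbGetQuery(db, "SELECT type, COUNT(*) AS n FROM variants GROUP BY type")
dbGetQuery(db, "SELECT snp_id, chr, pos, alleles FROM variants LIMIT 5")
dbDisconnect(db)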
options(width=180,max.print=200,mc.cores=1,rf.cores=1) suppressPackageStartupMessages(library(randomForestSRC,quietly=TRUE)) library(salbm,lib.loc="../../../libs/") args <- commandArgs(trailingOnly=TRUE) if ( length(args) < 3 ) stop("need at least 3 arguments\n") No <- as.numeric(args[1]) NumB <- as.numeric(args[2]) bBS <- as.numeric(args[3]) seeds <- c(21+No, 481+No, 221+No) seeds2 <- c(-53 - (No-1) * NumB, -388 - (No-1) * NumB, -271 - (No-1) * NumB ) iname <- sprintf("LRDS/salbmResults0.rds") obj <- readRDS(iname) oname <- sprintf("LRDS/salbmResults_part%03d.rds", No ) tm0 <- proc.time() print(c(No,NumB,bBS)) set.seed(233+No) Rup <- addSamples( obj = obj, nseeds=seeds,nseeds2=seeds2, bBS = bBS, NBootstraps = NumB, ReturnJP = FALSE ) saveRDS(Rup,oname) print(Rup) tm1 <- proc.time() print(tm0) print(tm1) print(tm1 - tm0)
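## Invocation sketch for the driver script above (the script file name is
## illustrative; argument meanings follow the variable names No, NumB, bBS):
##   Rscript run_salbm_part.R 1 200 20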
spline.bbase <- function(knots, X., BDEG.) { dx <- diff(knots)[1] P <- outer(X., knots, tpower, BDEG.) n <- dim(P)[2] D <- diff(diag(n), diff = BDEG. + 1)/(gamma(BDEG. + 1)*dx^BDEG.) B <- (-1) ^ (BDEG. + 1) * P %*% t(D) B }
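## Usage sketch for spline.bbase(). It relies on a tpower() helper that is
## not shown here; the definition below is the standard truncated power
## function from the Eilers & Marx P-spline literature, and is an assumption
## about the missing code.
tpower <- function(x, t, p) (x - t)^p * (x > t)
knots <- seq(-0.3, 1.3, by = 0.1)  # equally spaced knots extended past [0, 1]
B <- spline.bbase(knots, seq(0, 1, length.out = 50), BDEG. = 3)
dim(B)        # 50 evaluation points x one column per B-spline basis function
rowSums(B)    # each row should be ~1 on [0, 1] (partition of unity)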
lg.mu.sig = function(m, v){ mu = log(m^2/sqrt(m^2+v)) sig = sqrt(log((m^2+v)/m^2)) return(list(mu=mu, sig=sig)) }
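## Quick check of the moment matching above: a lognormal with the returned
## parameters reproduces the requested mean m and variance v.
p <- lg.mu.sig(m = 2, v = 0.5)
exp(p$mu + p$sig^2/2)                       # should equal 2 (the mean)
(exp(p$sig^2) - 1) * exp(2*p$mu + p$sig^2)  # should equal 0.5 (the variance)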
context("UNFv6: Dataframes") test_that("Variable order irrelevant", { expect_equal(unf(data.frame(1:3,4:6,7:9), version = 6)$unf, unf(data.frame(7:9,1:3,4:6), version = 6)$unf, "ukDZSJXck7fn4SlPJMPFTQ==") }) test_that("Variable names irrelevant", { expect_equal(unf(data.frame(x=1:3,y=4:6,z=7:9), version = 6)$unf, unf(data.frame(z=1:3,y=4:6,x=7:9), version = 6)$unf) }) test_that("Sort order relevant", { expect_false(identical(unf(iris, version = 6), unf(iris[order(iris$Sepal.Length),], version = 6))) }) test_that("Subsetting relevant", { expect_false(identical(unf(iris, version = 6), unf(head(iris), version = 6))) }) test_that("UNF for one-variable dataframe is univariate UNF", { expect_equal(unf(data.frame(x=-3:3), version = 6), unf6(-3:3)) })
context("planedtw and friends") test_that("in-/decrement", { rw <- function(nn) cumsum(rnorm(nn)) Q <- sin(1:100) C <- Q[1:90] + rnorm(90, 0, 0.1) WS <- 40 x <- initialize_plane(Q, C, ws = WS) y1 <- Q[91:92] + rnorm(2, 0, 0.1) x <- increment(x, newObs = y1) y2 <- Q[93:95] + rnorm(3, 0, 0.1) x <- increment(x, newObs = y2) y3 <- c(Q[96:100] + rnorm(5, 0, 0.1), rw(10)) x <- increment(x, newObs = y3) ist <- x$control$nC soll <- 110 expect_equal(ist, soll) ist <- x$normalized_distance soll <- dtw2vec(Q, c(C, y1, y2, y3) , ws = WS)$normalized_distance expect_equal(ist, soll) x <- decrement(x, direction = "C", refresh_dtw = TRUE) ist <- x$normalized_distance soll <- dtw2vec(Q, c(C, y1, y2, y3[1:5]) , ws = WS)$normalized_distance expect_lte(ist, soll) }) test_that("reverse increment", { rw <- function(nn) cumsum(rnorm(nn)) Q <- rw(100) C <- Q[11:90] + rnorm(80, 0, 0.1) WS <- 40 x <- initialize_plane(Q, C, ws = WS) y1 <- Q[91:100] + rnorm(10, 0, 0.1) x <- increment(x, newObs = y1) x <- reverse(x) y2 <- Q[10:6] + rnorm(5, 0, 0.1) x <- increment(x, newObs = y2) y3 <- Q[5:1] + rnorm(5, 0, 0.1) x <- increment(x, newObs = y3) ist <- x$distance soll <- dtw2vec(rev(Q), rev(c(rev(y3), rev(y2), C, y1)),ws = WS)$distance expect_equal(ist, soll) }) test_that("refresh", { rw <- function(nn) cumsum(rnorm(nn)) Q <- rw(100) C <- Q[11:90] + rnorm(80, 0, 0.1) WS <- 40 x <- initialize_plane(Q, C, ws = WS) x <- refresh(x) ist <- x$normalized_distance soll <- dtw2vec(Q, C, ws = WS)$normalized_distance expect_equal(ist, soll) x <- decrement(x, refresh_dtw = FALSE) x <- refresh(x) ist <- x$normalized_distance soll <- dtw2vec(Q, C[1:x$control$nC], ws = WS)$normalized_distance expect_equal(ist, soll) }) test_that("primitive decrement", { rw <- function(nn) cumsum(rnorm(nn)) Q <- rw(100) C <- Q[11:90] + rnorm(80, 0, 0.1) WS <- 100 x <- initialize_plane(Q, C, ws = WS) x <- decrement(x, nC = 20) expect_equal(x$gcm_lc_new, NULL) expect_equal(x$gcm_lr_new, NULL) expect_equal(x$control$nC, 20) x <- initialize_plane(Q, C, ws = WS) x <- decrement(x, nC = 20, refresh_dtw = TRUE) expect_equal(x$normalized_distance, dtw2vec(Q, C[1:20], ws = WS)$normalized_distance) expect_equal(x$control$nC, 20) }) test_that("partial decrement", { rw <- function(nn) cumsum(rnorm(nn)) Q <- rw(100) C <- c(Q[1:90] + rnorm(90, 0, 0.1), rnorm(20)) WS <- 30 x <- initialize_plane(Q, C, ws = WS) par <- dtw_partial(x, partial_Q = FALSE, partial_C = TRUE) x1 <- decrement(x, refresh_dtw = TRUE) x2 <- decrement(x, nC = par$rangeC[2], refresh_dtw = TRUE) expect_equal(x1, x2) x1 <- decrement(x, refresh_dtw = FALSE) x2 <- decrement(x, nC = par$rangeC[2], refresh_dtw = FALSE) expect_equal(x1, x2) x <- decrement(x, direction = "C") expect_equal(x$gcm_lc_new, NULL) expect_equal(x$gcm_lr_new, NULL) x <- initialize_plane(Q, C, ws = WS) x <- decrement(x, nC = 75, refresh_dtw = TRUE) tmp <- dtw2vec(Q, C[1:75], ws = WS) expect_equal(x$normalized_distance, tmp$normalized_distance) expect_equal(x$distance, tmp$distance) expect_equal(x$control$nC, 75) })
NULL repr_function_generic <- function(f, fmt, escape, high_wrap, norm_wrap, highlight) { code <- deparse(f) if (highlight) { if (!requireNamespace('highr')) stop(sprintf('Tried to create a %s representation of a function with highlighting, but the `highr` package is not installed!', fmt)) code <- highr::hilight(code, fmt) wrap <- high_wrap } else { code <- escape(code) wrap <- norm_wrap } sprintf(wrap, paste(code, collapse = '\n')) } repr_html.function <- function(obj, highlight = getOption('repr.function.highlight'), ...) { wrap <- '<pre class=language-r><code>%s</code></pre>' repr_function_generic(obj, 'html', html_escape, wrap, wrap, highlight, ...) } repr_latex.function <- function(obj, highlight = getOption('repr.function.highlight'), ...) { minted_wrap <- '\\begin{minted}{r}\n%s\n\\end{minted}' repr_function_generic(obj, 'latex', latex_escape, '%s', minted_wrap, highlight, ...) } repr_markdown.function <- function(obj, fenced = TRUE, ...) { code <- deparse(obj) if (fenced) { code <- c('```r', code, '```') } else { code <- paste0('\t', code) } paste(code, collapse = '\n') }
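## Usage sketch: these are S3 methods for the repr package's generics; the
## markdown method is self-contained, so it can be called directly.
f <- function(x) x^2 + 1
cat(repr_markdown.function(f))                  # fenced code block
cat(repr_markdown.function(f, fenced = FALSE))  # tab-indented fallback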
ibdhap.seg.lengths <- function( x, position=NA ){ n.marker<- length(x) if(is.element(position[1],NA)){position <- 1:(n.marker) } change.points<-c(1) for(imarker in 2:n.marker) { prev.val<-x[imarker-1] val <- x[imarker] if( prev.val!=val) { change.points=c(change.points, imarker) } } if(change.points[length(change.points)]!= n.marker){change.points=c(change.points, n.marker)} change.points.pos<-position[change.points] seg.lengths<-diff(change.points.pos) ibd.state<-x[change.points[1:length(seg.lengths)] ] return( as.data.frame(cbind(ibd.state = ibd.state, seg.lengths = seg.lengths))) }
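## Worked example: a run of state 1, then 2, then 1 again over 10 markers.
x <- c(1, 1, 1, 2, 2, 2, 2, 1, 1, 1)
ibdhap.seg.lengths(x)                      # segment lengths in marker counts
ibdhap.seg.lengths(x, position = seq(0, 90, by = 10))  # in map positions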
library(testthat) context("Matrix Estimation") test_that("Anand et al", { L <- c(a = 4, b = 5, c = 5, d = 0, e = 0, f = 2, g = 4) A <- c(a = 7, b = 5, c = 3, d = 1, e = 3, f = 0, g = 1) M <- matrix_estimation(A, L, verbose = F) M2 <- round(M, 2) check_m <- c(0, 1.72, 0.98, 0.25, 0.75, 0, 0.3, 2.53, 0, 1.06, 0.27, 0.81, 0, 0.32, 2.18, 1.6, 0, 0.23, 0.7, 0, 0.28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.74, 0.54, 0.31, 0.08, 0.24, 0, 0.09, 1.55, 1.14, 0.65, 0.17, 0.5, 0, 0) expect_equal(c(M2), c(check_m)) expect_equal(sum(rowSums(M)), sum(A)) expect_equal(sum(colSums(M)), sum(L)) check_md <- c(0,3,0,0,0,0,1, 3,0,2,0,0,0,0, 0,2,0,0,3,0,0, rep(0, 7), rep(0, 7), 0,0,1,1,0,0,0, 4,0,0,0,0,0,0) set.seed(192) md <- matrix_estimation(A, L, method = "md", verbose = F) expect_equal(c(md), c(check_md)) expect_equal(sum(rowSums(md)), sum(A)) expect_equal(sum(colSums(md)), sum(L)) } ) test_that("Capture printing", { L <- c(a = 4, b = 5, c = 5, d = 0, e = 0, f = 2, g = 4) A <- c(a = 7, b = 5, c = 3, d = 1, e = 3, f = 0, g = 1) print.me <- capture.output(max_ent(A, L)) set.seed(192) print.md <- capture.output(min_dens(A, L)) } )
library(stplanr) r1 <- route_graphhopper("Yeadon, UK", to = "Leeds", silent = FALSE) r2 <- route_graphhopper("Leeds", "Yeadon, UK") if(require(leaflet)) { leaflet() %>% addTiles() %>% addPolylines(data = r1) } r1@data r2@data r1 <- route_cyclestreets("Yeadon", to = "Leeds") r2 <- route_cyclestreets("Leeds", "Yeadon") if(require(leaflet)) { leaflet() %>% addTiles() %>% addPolylines(data = r1) } r1@data r2@data
VaR = function(x, alpha = 0.05, type = "sample", tail = c("lower", "upper")) { x = as.matrix(x) tail = match.arg(tail) if (type == "sample") { if (tail == "upper") alpha = 1-alpha VaR = quantile(x, probs = alpha, type = 1) } else if (type == "gpd") { VaR = "Not yet Implemented" } else if (type == "obre") { VaR = "Not yet Implemented" } VaR } CVaR = function(x, alpha = 0.05, type = "sample", tail = c("lower", "upper")) { x = as.matrix(x) tail = match.arg(tail) VaR = VaR(x, alpha, type, tail) if (tail == "upper") alpha = 1-alpha if (type == "sample") { CVaR = NULL for (i in 1:ncol(x)) { X = as.vector(x[, i]) CVaR = c(CVaR, VaR[i] - 0.5 * mean(((VaR[i]-X) + abs(VaR[i]-X))) / alpha ) } } CVaR }
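## Usage sketch on a single simulated return series (the "sample" estimator
## is an empirical quantile; the "gpd" and "obre" branches above are stubs).
set.seed(1)
r <- rnorm(1000, mean = 0, sd = 0.01)
VaR(r, alpha = 0.05)   # lower-tail 5% value-at-risk
CVaR(r, alpha = 0.05)  # expected shortfall beyond that quantile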
bcbExpectMTop5 <- function(indicator = 'IGP-DI',limit = 100, variables = c("tipoCalculo","Media","Mediana","DesvioPadrao","CoeficienteVariacao","Minimo","Maximo"), start, end ){ indicator = str_replace_all(indicator," ","%20") if(limit > 10000 | limit < 0)stop("You need to provide a limit between 0 and 10000!") variaveis_a <- paste("filter=Indicador%20eq%20'",indicator,"'",sep="") variaveis_b <- paste("top=",limit,sep="") variaveis_c <- paste("Indicador", "IndicadorDetalhe", "Data", "DataReferencia", variables, sep = ",") if(missing(start) & missing(end)){ timespan <- "" }else if(missing(start) & !missing(end)){ timespan <- paste0("%20and%20Data%20lt%20'", end,"'") }else if(!missing(start) & !missing(end)){ timespan <- paste0("%20and%20Data%20gt%20'", start, "'%20and%20", "Data%20lt%20'", end,"'") }else{ timespan <- paste0("%20and%20Data%20gt%20'", start,"'") } baseurl <- "https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/odata/" query_url <- paste(baseurl, "ExpectativasMercadoTop5Mensais", "?$",variaveis_b,"&$",variaveis_a,timespan, "&$select=",variaveis_c, sep = "", collapse = "") data <- fromJSON(file = query_url)$value data <- do.call("rbind", lapply(data, as.data.frame)) return(data) }
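## Usage sketch (network call; assumes stringr and rjson are attached, which
## supply the str_replace_all() and fromJSON(file = ...) helpers used above):
# library(stringr); library(rjson)
# top5 <- bcbExpectMTop5(indicator = "IGP-DI", limit = 10,
#                        start = "2020-01-01", end = "2020-12-31")
# head(top5)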
library(SuperLearner) if(all(sapply(c("testthat", "glmnet", "mlbench"), requireNamespace))){ testthat::context("Learner: glmnet") data(PimaIndiansDiabetes2, package = "mlbench") data = PimaIndiansDiabetes2 data = na.omit(data) Y = as.numeric(data$diabetes == "pos") X = subset(data, select = -diabetes) set.seed(1, "L'Ecuyer-CMRG") glmnet = SuperLearner::SL.glmnet(Y, X, X, family = binomial(), obsWeights = rep(1, nrow(X)), id = NULL) pred = predict(glmnet$fit, X) summary(pred) sl = SuperLearner(Y, X, family = binomial(), cvControl = list(V = 2), SL.library = c("SL.mean", "SL.glm", "SL.glmnet")) sl glmnet = SuperLearner::SL.glmnet(Y, X, X, family = binomial(), alpha = 0, obsWeights = rep(1, nrow(X)), id = NULL) glmnet = SuperLearner::SL.glmnet(Y, X, X, family = gaussian(), alpha = 0, obsWeights = rep(1, nrow(X)), id = NULL) glmnet = SuperLearner::SL.glmnet(Y, X, X, family = binomial(), useMin = F, obsWeights = rep(1, nrow(X)), id = NULL) glmnet = SuperLearner::SL.glmnet(Y, X, X, family = gaussian(), useMin = F, obsWeights = rep(1, nrow(X)), id = NULL) glmnet = SuperLearner::SL.glmnet(Y, X, X, family = binomial(), nfolds = 3, obsWeights = rep(1, nrow(X)), id = NULL) glmnet = SuperLearner::SL.glmnet(Y, X, X, family = gaussian(), nfolds = 3, obsWeights = rep(1, nrow(X)), id = NULL) glmnet = SuperLearner::SL.glmnet(Y, X, X, family = binomial(), loss = "auc", obsWeights = rep(1, nrow(X)), id = NULL) glmnet = SuperLearner::SL.glmnet(Y, X, X, family = gaussian(), loss = "mae", obsWeights = rep(1, nrow(X)), id = NULL) newdata = X glmnet = SuperLearner::SL.glmnet(Y, X, X, family = binomial(), obsWeights = rep(1, nrow(X)), id = NULL) pred = predict(glmnet$fit, cbind(newdata, extra_column = 5)) summary(pred) tryCatch({ pred = predict(glmnet$fit, cbind(newdata, extra_column = 5), remove_extra_cols = F) }, error = function(e) { cat("Got an error, as expected.\n") print(e) }) summary(pred) pred = predict(glmnet$fit, newdata[, -5]) summary(pred) tryCatch({ pred = predict(glmnet$fit, newdata[, -5], add_missing_cols = F) }, error = function(e) { cat("Got an error, as expected.\n") print(e) }) summary(pred) }
colnames.row <- function(colnames.obj) { max.row <- tapply(colnames.obj$col.row, list(colnames.obj$col.logical.row), function(x) max(x, na.rm=T)) d.max.row <- data.frame(max.row) d.max.row$col.logical.row <- rownames(d.max.row) d.max.row$lag.max.row.adj <- c(0, d.max.row$max.row[-nrow(d.max.row)]-1) colnames.obj <- merge(colnames.obj, d.max.row, by ="col.logical.row") colnames.obj$row <- colnames.obj$col.logical.row + colnames.obj$col.row -1 + colnames.obj$lag.max.row.adj colnames.obj$row <- max(colnames.obj$row) - (colnames.obj$row-1) colnames.obj } colnames.linebreak <- function(colnames.obj ) { grep.linebreak <- grep("\n",colnames.obj$cname) if (length(grep.linebreak > 0)) { strsplit.linebreak <- strsplit(colnames.obj$cname[grep.linebreak[1]], "\n") n.linebreak <- length(strsplit.linebreak[[1]]) d.temp <- colnames.obj[rep(grep.linebreak[1], n.linebreak), ] d.temp$cname <- strsplit.linebreak[[1]] d.temp$col.row <- n.linebreak:1 if (grep.linebreak[1] == 1) { d.after <- colnames.obj[(grep.linebreak[1]+1):nrow(colnames.obj) , ] colnames.obj <- rbind( d.temp, d.after) } else if (grep.linebreak[1] == nrow(colnames.obj)) { d.before <- colnames.obj[1:(grep.linebreak[1]-1) , ] colnames.obj <- rbind(d.before, d.temp) } else { d.before <- colnames.obj[1:(grep.linebreak[1]-1) , ] d.after <- colnames.obj[(grep.linebreak[1]+1):nrow(colnames.obj) , ] colnames.obj <- rbind(d.before, d.temp, d.after) } grep.linebreak <- grep("\n",colnames.obj$cname) if (length(grep.linebreak) > 0) {colnames.obj <- colnames.linebreak(colnames.obj)} } colnames.obj } colnames.struct <- function(col.names, linebreak=TRUE) { col.grp.dx <- grep(":", col.names) col.grp.split <- strsplit(col.names, ":") orig.names <- unlist(lapply(col.grp.split, FUN=function(x) {x[length(x)]})) lst.names <- lapply(col.grp.split, FUN=function(x) {x[-length(x)]}) lnames <- length(orig.names) column.heading <- data.frame(cname = orig.names, col.logical.row = rep(1, lnames), col.row=1, span.beg=1:lnames, span.end=1:lnames, stringsAsFactors =F) r.i <- nrow(column.heading) cname.hierc.df <- list.to.df(lst.names) if(!is.null(cname.hierc.df)) { for (hier.i in ncol(cname.hierc.df):1) { d.i <- consect.struct(cname.hierc.df[, hier.i]) n.d <- nrow(d.i$consec.begend) column.heading[r.i+ (1:n.d), "col.logical.row"] <- ncol(cname.hierc.df)-hier.i+2 column.heading[r.i+ (1:n.d), "col.row"] <-1 column.heading[r.i+(1:n.d), c("cname", "span.beg", "span.end")] <- d.i$consec.begend r.i <- nrow(column.heading) } } column.heading$cname <- kill.multiregx(column.heading$cname, "`") if (linebreak) { column.heading <- colnames.linebreak(column.heading) column.heading <- colnames.row(column.heading) } column.heading }
lime.data.frame <- function(x, model, preprocess = NULL, bin_continuous = TRUE, n_bins = 4, quantile_bins = TRUE, use_density = TRUE, ...) { if (is.null(preprocess)) preprocess <- function(x) x assert_that(is.function(preprocess)) explainer <- c(as.list(environment()), list(...)) explainer$x <- NULL explainer$feature_type <- setNames(sapply(x, function(f) { if (is.integer(f)) { if (length(unique(f)) == 1) 'constant' else 'integer' } else if (is.numeric(f)) { if (length(unique(f)) == 1) 'constant' else 'numeric' } else if (is.character(f)) { 'character' } else if (is.factor(f)) { 'factor' } else if (is.logical(f)) { 'logical' } else if (inherits(f, 'Date') || inherits(f, 'POSIXt')) { 'date_time' } else { stop('Unknown feature type', call. = FALSE) } }), names(x)) if (any(explainer$feature_type == 'constant')) { warning('Data contains numeric columns with zero variance', call. = FALSE) } explainer$bin_cuts <- setNames(lapply(seq_along(x), function(i) { if (explainer$feature_type[i] %in% c('numeric', 'integer')) { if (quantile_bins) { bins <- quantile(x[[i]], seq(0, 1, length.out = n_bins + 1), na.rm = TRUE) bins <- bins[!duplicated(bins)] if (length(bins) < 3) { warning(names(x)[i], ' does not contain enough variance to use quantile binning. Using standard binning instead.', call. = FALSE) d_range <- range(x[[i]], na.rm = TRUE) bins <- seq(d_range[1], d_range[2], length.out = n_bins + 1) } bins } else { d_range <- range(x[[i]], na.rm = TRUE) seq(d_range[1], d_range[2], length.out = n_bins + 1) } } }), names(x)) explainer$feature_distribution <- setNames(lapply(seq_along(x), function(i) { switch( explainer$feature_type[i], integer = , numeric = if (bin_continuous) { table(cut(x[[i]], unique(explainer$bin_cuts[[i]]), labels = FALSE, include.lowest = TRUE))/nrow(x) } else if (use_density) { density(x[[i]]) } else { c(mean = mean(x[[i]], na.rm = TRUE), sd = sd(x[[i]], na.rm = TRUE)) }, character = , logical = , factor = table(x[[i]])/nrow(x), NA ) }), names(x)) structure(explainer, class = c('data_frame_explainer', 'explainer', 'list')) } explain.data.frame <- function(x, explainer, labels = NULL, n_labels = NULL, n_features, n_permutations = 5000, feature_select = 'auto', dist_fun = 'gower', kernel_width = NULL, gower_pow = 1, ...) { assert_that(is.data_frame_explainer(explainer)) m_type <- model_type(explainer) o_type <- output_type(explainer) if (m_type == 'regression') { if (!is.null(labels) || !is.null(n_labels)) { warning('"labels" and "n_labels" arguments are ignored when explaining regression models') } n_labels <- 1 labels <- NULL } assert_that(is.null(labels) + is.null(n_labels) == 1, msg = "You need to choose between labels and n_labels parameters.") assert_that(is.count(n_features)) assert_that(is.count(n_permutations)) if (is.null(kernel_width)) { kernel_width <- sqrt(ncol(x)) * 0.75 } kernel <- exp_kernel(kernel_width) case_perm <- permute_cases(x, n_permutations, explainer$feature_distribution, explainer$bin_continuous, explainer$bin_cuts, explainer$use_density) case_res <- predict_model(explainer$model, explainer$preprocess(case_perm), type = o_type, ...) 
case_res <- set_labels(case_res, explainer$model) case_ind <- split(seq_len(nrow(case_perm)), rep(seq_len(nrow(x)), each = n_permutations)) res <- lapply(seq_along(case_ind), function(ind) { i <- case_ind[[ind]] if (dist_fun == 'gower') { sim <- 1 - (gower_dist(case_perm[i[1], , drop = FALSE], case_perm[i, , drop = FALSE])) ^ gower_pow } perms <- numerify(case_perm[i, ], explainer$feature_type, explainer$bin_continuous, explainer$bin_cuts) if (dist_fun != 'gower') { sim <- kernel(c(0, dist(feature_scale(perms, explainer$feature_distribution, explainer$feature_type, explainer$bin_continuous), method = dist_fun)[seq_len(n_permutations-1)])) } res <- model_permutations(as.matrix(perms), case_res[i, , drop = FALSE], sim, labels, n_labels, n_features, feature_select) res$feature_value <- unlist(case_perm[i[1], res$feature]) res$feature_desc <- describe_feature(res$feature, case_perm[i[1], ], explainer$feature_type, explainer$bin_continuous, explainer$bin_cuts) guess <- which.max(abs(case_res[i[1], ])) res$case <- rownames(x)[ind] res$label_prob <- unname(as.matrix(case_res[i[1], ]))[match(res$label, colnames(case_res))] res$data <- list(as.list(case_perm[i[1], ])) res$prediction <- list(as.list(case_res[i[1], ])) res$model_type <- m_type res }) res <- do.call(rbind, res) res <- res[, c('model_type', 'case', 'label', 'label_prob', 'model_r2', 'model_intercept', 'model_prediction', 'feature', 'feature_value', 'feature_weight', 'feature_desc', 'data', 'prediction')] if (m_type == 'regression') { res$label <- NULL res$label_prob <- NULL res$prediction <- unlist(res$prediction) } as_tibble(res) } is.data_frame_explainer <- function(x) inherits(x, 'data_frame_explainer') numerify <- function(x, type, bin_continuous, bin_cuts) { setNames(as.data.frame(lapply(seq_along(x), function(i) { if (type[i] %in% c('character', 'factor', 'logical')) { as.numeric(x[[i]] == x[[i]][1]) } else if (type[i] == 'date_time' || type[i] == 'constant') { rep(0, nrow(x)) } else { if (bin_continuous) { cuts <- bin_cuts[[i]] cuts[1] <- -Inf cuts[length(cuts) + 1] <- Inf xi <- cut(x[[i]], unique(cuts), include.lowest = T) as.numeric(xi == xi[1]) } else { x[[i]] } } }), stringsAsFactors = FALSE), names(x)) } feature_scale <- function(x, distribution, type, bin_continuous) { setNames(as.data.frame(lapply(seq_along(x), function(i) { if (type[i] == 'numeric' && !bin_continuous) { scale(x[, i], distribution[[i]]['mean'], distribution[[i]]['sd']) } else { x[, i] } }), stringsAsFactors = FALSE), names(x)) } describe_feature <- function(feature, case, type, bin_continuous, bin_cuts) { sapply(feature, function(f) { if (type[[f]] == 'logical') { paste0(f, ' is ', tolower(as.character(case[[f]]))) } else if (type[[f]] %in% c('character', 'factor')) { paste0(f, ' = ', as.character(case[[f]])) } else if (bin_continuous) { cuts <- bin_cuts[[f]] cuts[1] <- -Inf cuts[length(cuts)] <- Inf bin <- cut(case[[f]], unique(cuts), labels = FALSE, include.lowest = TRUE) cuts <- trimws(format(cuts, digits = 3)) if (bin == 1) { paste0(f, ' <= ', cuts[bin + 1]) } else if (bin == length(cuts) - 1) { paste0(cuts[bin], ' < ', f) } else { paste0(cuts[bin], ' < ', f, ' <= ', cuts[bin + 1]) } } else { f } }) }
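## Usage sketch via the exported generics (hypothetical objects: train_df,
## test_df, and a fitted model that lime's predict_model() understands):
# explainer <- lime(train_df, model, bin_continuous = TRUE, n_bins = 4)
# explanation <- explain(test_df[1:2, ], explainer,
#                        n_labels = 1, n_features = 3)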
read.corp.LCC <- function(LCC.path, format="flatfile", fileEncoding="UTF-8", n=-1, keep.temp=FALSE, prefix=NULL, bigrams=FALSE, cooccurence=FALSE, caseSens=TRUE){ if(identical(format, "flatfile")){ if(!as.logical(file_test("-d", LCC.path))){ if(file.exists(LCC.path)){ if(any(bigrams, cooccurence)){ lookForFiles <- "(words|meta|co_n|co_s).txt$" } else { lookForFiles <- "(words|meta).txt$" } tmp.path <- tempfile("koRpus.LCC") if(!dir.create(tmp.path, recursive=TRUE)) stop(simpleError("Can't create temporary directory!")) if(isTRUE(keep.temp)){ message(paste("Unpacked data will be kept in\n", tmp.path)) } else { on.exit(unlink(tmp.path, recursive=TRUE)) } if(grepl("(\\.zip|\\.ZIP)$", LCC.path)){ message("Unzipping LCC archive... ", appendLF=FALSE) LCC.zip.content <- as.character(unzip(LCC.path, list=TRUE)$Name) LCC.zip.wanted <- LCC.zip.content[grep(lookForFiles, LCC.zip.content)] unzip(LCC.path, files=LCC.zip.wanted, junkpaths=TRUE, exdir=tmp.path) message("done.") } else if(grepl("(\\.tar|\\.tar.gz|\\.tgz)$", tolower(LCC.path))){ message("Fetching needed files from LCC archive... ", appendLF=FALSE) LCC.tar.content <- as.character(untar(LCC.path, list=TRUE)) LCC.tar.wanted <- LCC.tar.content[grep(lookForFiles, LCC.tar.content)] if(is.null(prefix)){ prefix <- gsub(lookForFiles, "", LCC.tar.wanted)[1] } else {} untar(LCC.path, files=LCC.tar.wanted, exdir=tmp.path) message("done.") } else { stop(simpleError(paste("Unknown LCC data format:", LCC.path))) } LCC.path <- tmp.path } else { stop(simpleError(paste("Cannot access LCC data:", LCC.path))) } } else { check.file(LCC.path, mode="dir") } have <- c() LCC.files <- c() for (thisFile in c("meta", "co_n", "co_s")){ LCC.files[thisFile] <- file.path(LCC.path, paste0(prefix, thisFile, ".txt")) have[thisFile] <- check.file(LCC.files[thisFile], mode="exist", stopOnFail=FALSE) } LCC.files["words"] <- file.path(LCC.path, paste0(prefix, "words.txt")) check.file(LCC.files["words"], mode="exist") } else if(identical(format, "MySQL")){ stop(simpleMessage("Sorry, not implemented yet...")) } else { stop(simpleError(paste("Unknown format:", format))) } dscrpt.meta <- table.meta <- NULL num.running.words <- NA if(isTRUE(have[["meta"]])){ table.meta <- read.delim(LCC.files["meta"], header=FALSE, col.names=c(1,"meta","value"), strip.white=TRUE, fill=FALSE, stringsAsFactors=FALSE, fileEncoding=fileEncoding)[,-1] if(all(c("number of distinct word forms", "average sentence length in characters") %in% table.meta[,1])){ LCC.archive.format <- "zip" } else if(all(c("SENTENCES", "WORD_TOKENS", "WORD_TYPES") %in% table.meta[,1])){ LCC.archive.format <- "tar" } else { stop(simpleError("Sorry, this format is not supported! 
Please contact the authors.")) } if(identical(LCC.archive.format, "zip")){ num.distinct.words <- as.numeric(table.meta[table.meta[,1] == "number of distinct word forms", 2]) num.running.words <- as.numeric(table.meta[table.meta[,1] == "number of running word forms", 2]) avg.sntclgth.words <- as.numeric(table.meta[table.meta[,1] == "average sentence length in words", 2]) avg.sntclgth.chars <- as.numeric(table.meta[table.meta[,1] == "average sentence length in characters", 2]) avg.wrdlgth.form <- as.numeric(table.meta[table.meta[,1] == "average word form length", 2]) avg.wrdlgth.running <- as.numeric(table.meta[table.meta[,1] == "average running word length", 2]) fileEncoding <- table.meta[table.meta[,1] == "database encoding", 2] } else if(identical(LCC.archive.format, "tar")) { num.distinct.words <- as.numeric(table.meta[table.meta[,1] == "WORD_TYPES", 2]) num.running.words <- as.numeric(table.meta[table.meta[,1] == "WORD_TOKENS", 2]) avg.sntclgth.words <- avg.sntclgth.chars <- avg.wrdlgth.form <- avg.wrdlgth.running <- NA fileEncoding <- table.meta[table.meta[,1] == "database encoding", 2] } dscrpt.meta <- data.frame( tokens=num.running.words, types=num.distinct.words, words.p.sntc=avg.sntclgth.words, chars.p.sntc=avg.sntclgth.chars, chars.p.wform=avg.wrdlgth.form, chars.p.word=avg.wrdlgth.running) } else {} table.cooccur <- table.bigrams <- NULL if(any(bigrams, cooccurence) & !identical(n, -1)){ warning("Importing bigrams and co-occurrences is only possible while 'n = -1'!") } else {} if(isTRUE(bigrams) & identical(n, -1)){ if(isTRUE(have[["co_n"]])){ message("Importing bigrams... ", appendLF=FALSE) LCC.file.con <- file(LCC.files[["co_n"]], open="r") rL.words <- readLines(LCC.file.con, encoding=fileEncoding) close(LCC.file.con) table.bigrams <- matrix(unlist(strsplit(rL.words, "\t")), ncol=4, byrow=TRUE, dimnames=list(c(),c("token1","token2","freq","sig"))) rm(rL.words) message("done.") } else { warning("'bigrams' is TRUE, but no *-co_n.txt file was found in the LCC archive!") } } else {} if(isTRUE(cooccurence) & identical(n, -1)){ if(isTRUE(have[["co_s"]])){ message("Importing co-occurrence in one sentence... ", appendLF=FALSE) LCC.file.con <- file(LCC.files[["co_s"]], open="r") rL.words <- readLines(LCC.file.con, encoding=fileEncoding) close(LCC.file.con) table.cooccur <- matrix(unlist(strsplit(rL.words, "\t")), ncol=4, byrow=TRUE, dimnames=list(c(),c("token1","token2","freq","sig"))) rm(rL.words) message("done.") } else { warning("'cooccurence' is TRUE, but no *-co_s.txt file was found in the LCC archive!") } } else {} LCC.file.con <- file(LCC.files[["words"]], open="r") rL.words <- readLines(LCC.file.con, n=n, encoding=fileEncoding) close(LCC.file.con) words.num.cols <- length(unlist(strsplit(rL.words[1], "\t"))) if(isTRUE(words.num.cols == 3)){ table.words <- matrix(unlist(strsplit(rL.words, "\t")), ncol=3, byrow=TRUE, dimnames=list(c(),c("num","word","freq"))) } else if(isTRUE(words.num.cols == 4)){ table.words <- matrix(unlist(strsplit(rL.words, "\t")), ncol=4, byrow=TRUE, dimnames=list(c(),c("num","word","word2","freq"))) rm(rL.words) if(!identical(table.words[,2], table.words[,3])){ warning( paste0( "This looks like a newer LCC archive with four columns in the *-words.txt file.\n", "The two word columns did not match, but we'll only use the first one!"
) ) } else {} table.words <- table.words[,c("num","word","freq")] } else { stop(simpleError( paste0("It seems the LCC archive format has changed:\n", " koRpus supports *-words.txt files with 3 or 4 columns, found ", words.num.cols, ".\n", " Please inform the package author(s) so that the problem gets fixed soon!" ) )) } results <- create.corp.freq.object( matrix.freq=table.words, num.running.words=num.running.words, df.meta=table.meta, df.dscrpt.meta=dscrpt.meta, matrix.table.bigrams=table.bigrams, matrix.table.cooccur=table.cooccur, caseSens=caseSens ) return(results) }
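## Usage sketch: point the function at a downloaded Leipzig Corpora Collection
## archive (file name illustrative; n limits the number of word rows read).
# LCC.en <- read.corp.LCC("eng_news_2020_100K.tar.gz", n = 50000)
# str(LCC.en)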
context("Available data") test_that("Error message regbar when data not provided", { expect_error(regbar(),"'data' must be provided") expect_error(regbar(hfdata), "Both 'x' and 'y' should be specified") expect_error(regbar(hfdata, x = centre), "Both 'x' and 'y' should be specified") expect_error(regbar(hfdata, y = case1), "Both 'x' and 'y' should be specified") }) test_that("Error message regrad when data not provided", { expect_error(regbar(),"'data' must be provided") })
is_pgs_id <- function(str, convert_NA_to_FALSE = TRUE) { if (!is.character(str)) stop("str argument must be a character vector.") if (identical(length(str), 0L)) stop("str contains no values, it must contain at least one string.") if (convert_NA_to_FALSE) { str2 <- str str2[is.na(str)] <- "" } else { str2 <- str } is_accession <- stringr::str_detect(str2, "^PGS\\d{6}$") return(is_accession) }
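## Worked example: PGS Catalog accessions are "PGS" followed by 6 digits.
is_pgs_id(c("PGS000001", "PGS42", NA))
## TRUE FALSE FALSE (NA maps to FALSE by default)
is_pgs_id(c("PGS000001", NA), convert_NA_to_FALSE = FALSE)
## TRUE NA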
context("Testing ciccr") test_that("The default option for avg_RR_logit is 'control'", { y = ACS_CC$topincome t = ACS_CC$baplus x = ACS_CC$age results_default = avg_RR_logit(y, t, x) results_control = avg_RR_logit(y, t, x, 'control') expect_equal( results_default$est, results_control$est) })
context("Colors") bw <- c("black", "white") test_that("Edgy col_bin scenarios", { expect_equal(col_bin(bw, NULL)(1), " expect_equal(col_bin(bw, 1)(1), " }) test_that("Outside of domain returns na.color", { suppressWarnings({ expect_identical(" expect_identical(" expect_identical(" expect_identical(" expect_identical(" expect_true(is.na(col_factor(bw, letters, na.color = NA)("foo"))) expect_true(is.na(col_quantile(bw, 0:1, na.color = NA)(-1))) expect_true(is.na(col_quantile(bw, 0:1, na.color = NA)(2))) expect_true(is.na(col_numeric(bw, c(0, 1), na.color = NA)(-1))) expect_true(is.na(col_numeric(bw, c(0, 1), na.color = NA)(2))) }) expect_warning(col_factor(bw, letters, na.color = NA)("foo")) expect_warning(col_quantile(bw, 0:1, na.color = NA)(-1)) expect_warning(col_quantile(bw, 0:1, na.color = NA)(2)) expect_warning(col_numeric(bw, c(0, 1), na.color = NA)(-1)) expect_warning(col_numeric(bw, c(0, 1), na.color = NA)(2)) }) test_that("Basic color accuracy", { expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(rev(c(" expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(c(" expect_identical(rev(c(" }) test_that("col_numeric respects alpha", { expect_equal( col_numeric(c(" " ) }) test_that("CIELab overflow", { expect_identical(c(" }) test_that("factors match by name, not position", { full <- factor(letters[1:5]) pal <- col_factor("magma", na.color = NA, levels = full) partial <- full[2:4] expect_identical(pal(partial), pal(droplevels(partial))) col <- expect_warning(pal(letters[10:20])) expect_true(all(is.na(col))) }) test_that("qualitative palettes don't interpolate", { pal <- col_factor("Accent", na.color = NA, levels = letters[1:5]) allColors <- RColorBrewer::brewer.pal( n = RColorBrewer::brewer.pal.info["Accent", "maxcolors"], name = "Accent") expect_identical(pal(letters[1:5]), allColors[1:5]) expect_identical( col_factor("Accent", domain = rep(letters[1:5], 2))(letters[1:5]), allColors[1:5] ) expect_identical( col_factor("Accent", domain = factor(rep(letters[5:1], 2)))(letters[1:5]), allColors[1:5] ) expect_identical( col_factor("Accent", domain = rep(letters[5:1], 2), ordered = TRUE)(letters[5:1]), allColors[1:5] ) expect_identical( col_factor("Accent", NULL)(letters[1:5]), allColors[1:5] ) expect_warning(pal(letters[6])) expect_true(suppressWarnings(is.na(pal(letters[6])))) }) test_that("OK, qualitative palettes sometimes interpolate", { pal <- col_factor("Accent", na.color = NA, levels = letters[1:20]) allColors <- RColorBrewer::brewer.pal( n = RColorBrewer::brewer.pal.info["Accent", "maxcolors"], name = "Accent") result <- expect_warning(pal(letters[1:20])) expect_true(all(result[c(1, 20)] %in% allColors)) expect_true(!any(result[-c(1, 20)] %in% allColors)) }) verifyReversal <- function(colorFunc, values, ..., filter = identity) { f1 <- filter(colorFunc("Blues", domain = values, ...)(values)) f2 <- filter(colorFunc("Blues", domain = NULL, ...)(values)) f3 <- filter(colorFunc("Blues", domain = values, reverse = FALSE, ...)(values)) f4 <- filter(colorFunc("Blues", domain = NULL, reverse = FALSE, ...)(values)) r1 <- filter(colorFunc("Blues", domain = values, reverse = TRUE, ...)(values)) r2 <- filter(colorFunc("Blues", domain = NULL, reverse = TRUE, ...)(values)) expect_identical(f1, f2) expect_identical(f1, f3) expect_identical(f1, f4) expect_identical(r1, r2) expect_identical(f1, rev(r1)) } test_that("col_numeric can be 
reversed", { verifyReversal(col_numeric, 1:10) }) test_that("col_bin can be reversed", { verifyReversal(col_bin, 1:10, filter = unique) }) test_that("col_quantile can be reversed", { verifyReversal(col_quantile, 1:10, n = 7) }) test_that("col_factor can be reversed", { verifyReversal(col_factor, letters, filter = expect_warning) accent <- suppressWarnings(RColorBrewer::brewer.pal(Inf, "Accent")) result1 <- col_factor("Accent", NULL)(letters[1:5]) expect_identical(result1, head(accent, 5)) result2 <- col_factor("Accent", NULL, reverse = TRUE)(letters[1:5]) expect_identical(result2, rev(head(accent, 5))) }) test_that("Palettes with ncolor < 3 work properly", { test_palette <- function(palette) { colors <- col_factor(palette, letters[1:2])(letters[1:2]) expected_colors <- suppressWarnings(RColorBrewer::brewer.pal(2, palette))[1:2] expect_identical(colors, expected_colors) colors <- col_bin(palette, 1:2, bins = 2)(1:2) expect_identical(colors, expected_colors) } test_palette("Accent") test_palette("Blues") test_palette("Spectral") }) test_that("Arguments to `cut` are respected", { colors1 <- col_bin("Greens", 1:3, 1:3)(1:3) expect_identical(colors1, c(" colors2 <- col_bin("Blues", 1:3, 1:3, right = TRUE)(1:3) expect_identical(colors2, c(" pal <- col_factor("Reds", domain = NULL, na.color = NA) colorsTT <- pal(cut(1:3, 1:3, include.lowest = TRUE, right = TRUE)) expect_identical(colorsTT, c(" colorsTF <- pal(cut(1:3, 1:3, include.lowest = TRUE, right = FALSE)) expect_identical(colorsTF, c(" colorsFT <- pal(cut(1:3, 1:3, include.lowest = FALSE, right = TRUE)) expect_identical(colorsFT, c(NA, " colorsFF <- pal(cut(1:3, 1:3, include.lowest = FALSE, right = FALSE)) expect_identical(colorsFF, c(" })
blandr.data.preparation <- function(method1, method2, sig.level) { method.comparison <- data.frame(method1, method2) method.comparison <- na.omit(method.comparison) if (length(method.comparison$method1) != length(method.comparison$method2)) stop("Method comparison analysis error: the 2 methods must have paired values.") if (!is.numeric(method.comparison$method1)) stop("Method comparison analysis error: the first method is not a number.") if (!is.numeric(method.comparison$method2)) stop("Method comparison analysis error: the second method is not a number.") if (sig.level < 0) stop("Method comparison analysis error: you can't have a significance level less than 0.") if (sig.level < 0.8) warning("Method comparison analysis warning: do you really want a significance level <0.8?") if (sig.level > 1) stop("Method comparison analysis error: you can't have a significance level greater than 1.") if (sig.level == 1) warning("Method comparison analysis warning: selecting a significance level of 1 suggests that probability testing might not be what you need.") return(method.comparison) }
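## Usage sketch: paired measurements from two methods on the same subjects.
m1 <- c(5.1, 4.9, 6.0, 5.5, 5.8)
m2 <- c(5.3, 4.7, 6.2, 5.4, 6.0)
blandr.data.preparation(m1, m2, sig.level = 0.95)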
jacobian_gaussian_group_sigmaVersion_meanPart <- function(sigma,mu,means,kappa,...){ grad_mean <- -2 * t(means - mu) %*% kappa grad_mean } jacobian_gaussian_group_sigmaVersion_sigmaPart <- function(S,means,mu,sigma,D,kappa,...){ n <- ncol(S) mat <- S + (means - mu) %*% t(means - mu) - sigma grad_sigma <- t(-t(D) %*% Vec(kappa %*% mat %*% kappa)) as.matrix(grad_sigma) } jacobian_gaussian_group_sigma <- function(...,Drawts,mu,sigma, meanstructure = TRUE, corinput = FALSE){ grad_sigma <- jacobian_gaussian_group_sigmaVersion_sigmaPart(mu=mu,sigma=sigma,...,Drawts=Drawts) if (corinput){ keep <- diag(ncol(sigma))[lower.tri(diag(ncol(sigma)),diag=TRUE)] != 1 grad_sigma <- as(grad_sigma[,keep, drop=FALSE], "matrix") } if (meanstructure){ grad_mean <- jacobian_gaussian_group_sigmaVersion_meanPart(mu=mu,sigma=sigma,...) Out <- cbind(grad_mean,grad_sigma) } else { Out <- grad_sigma } Out } jacobian_gaussian_sigma <- function(prep){ g_per_group <- lapply(prep$groupModels,do.call,what=jacobian_gaussian_group_sigma) for (i in 1:length(prep$groupModels)){ g_per_group[[i]] <- (prep$nPerGroup[i] / prep$nTotal) * g_per_group[[i]] } Reduce("cbind",g_per_group) }
.plotTOC <- function(object, labelThres=FALSE, modelLeg="Model", digits=3, nticks=5, digitsL=1, posL = NULL, offsetL = 0.5, ...){ old.opt <- options() options(digits=digits) old.par <- par(no.readonly = TRUE) par(oma = c(0, 0, 0, 4)) par(mgp = c(1.5, 1, 0)) population <- object@population prevalence <- object@prevalence/population units <- object@units tocd <- object@table if((!is.null(tocd$HitsP) & !is.null(tocd$"Hits+FalseAlarmsP"))==TRUE){ tocd$Hits <- tocd$HitsP tocd$"Hits+FalseAlarms" <- tocd$"Hits+FalseAlarmsP" } graphics::plot(c(0, population*(1-prevalence), population), c(0, 0, prevalence * population), type="l", lty="dashed", xlab=paste0("Hits+False Alarms (", units, ")"), ylab=paste0("Hits (", units, ")"), lwd=2, col=rgb(128,100,162, maxColorValue=255), bty="n", xaxt="n", yaxt="n", xlim=c(0, population), ylim=c(0, prevalence * population), asp=1/prevalence, ...) xlabels <- c(0, format((1:nticks)*population/nticks, digits)) ylabels <- c(0, format((1:nticks)*prevalence * population/nticks, digits)) axis(1, pos = 0, labels=xlabels, at=xlabels, xaxp = c(0, population, nticks), cex.axis=0.9, ...) axis(2, pos = 0, labels=ylabels, at=ylabels, yaxp = c(0, prevalence * population, nticks), cex.axis=0.9, ...) lines(c(0, prevalence * population, population), c(0, prevalence * population, prevalence * population), lty="dotdash", lwd=2, col=rgb(79,129,189, maxColorValue=255)) lines(c(0, population), rep(prevalence*population, 2), lwd=3, col=rgb(146,208,80, maxColorValue=255)) lines(c(0, population), c(0, prevalence*population), lty="dotted", lwd=2, col=rgb(0,0,255, maxColorValue=255)) lines(tocd$"Hits+FalseAlarms", tocd$Hits, lwd=2, col=rgb(255,0,0, maxColorValue=255)) points(tocd$"Hits+FalseAlarms", tocd$Hits, pch=17, col=rgb(255,0,0, maxColorValue=255)) if(labelThres == TRUE) text(tocd$"Hits+FalseAlarms", tocd$Hits, round(as.numeric(tocd$Threshold), digitsL), pos = posL, offset = offsetL, ...) par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE) graphics::plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n") legend("right", c("Hits+Misses", "Maximum", modelLeg, "Uniform", "Minimum"), col = c(rgb(146,208,80, maxColorValue=255), rgb(79,129,189, maxColorValue=255), rgb(255,0,0, maxColorValue=255), rgb(0,0,255, maxColorValue=255), rgb(128,100,162, maxColorValue=255)), lty = c(1, 4, 1, 3, 2), pch = c(NA, NA, 17, NA, NA), merge = TRUE, bty="n", lwd=c(3, 2, 2, 2, 2)) par(old.par) options(old.opt) }
repan <- function(n, mu = 0, r = 5^0.5) { if (any(r <= 0)) { stop("Range must be strictly positive") } if (n%%1 != 0 | n < 1) { stop("n must be a positive integer") } qepan(runif(n, min = 0, max = 1), mu, r) }
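## Quick check (assumes the package's qepan() quantile function is loaded):
## an Epanechnikov variate on [mu - r, mu + r] has variance r^2/5, so the
## default r = sqrt(5) gives unit variance.
set.seed(42)
x <- repan(1e5)
c(mean(x), var(x))  # approximately 0 and 1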
n <- 1000 ar1 <- 0.6 ar2 <- 0.2 ma1 <- -0.2 sigma <- sqrt(0.2) a <- arima.sim(model = list(ar = c(ar1, ar2), ma = ma1), n = n, innov = rnorm(n) * sigma) arma21ss <- function(ar1, ar2, ma1, sigma) { Tt <- matrix(c(ar1, ar2, 1, 0), ncol = 2) Zt <- matrix(c(1, 0), ncol = 2) ct <- matrix(0) dt <- matrix(0, nrow = 2) GGt <- matrix(0) H <- matrix(c(1, ma1), nrow = 2) * sigma HHt <- H %*% t(H) a0 <- c(0, 0) P0 <- matrix(1e6, nrow = 2, ncol = 2) return(list(a0 = a0, P0 = P0, ct = ct, dt = dt, Zt = Zt, Tt = Tt, GGt = GGt, HHt = HHt)) } objective <- function(theta, yt) { sp <- arma21ss(theta["ar1"], theta["ar2"], theta["ma1"], theta["sigma"]) ans <- fkf(a0 = sp$a0, P0 = sp$P0, dt = sp$dt, ct = sp$ct, Tt = sp$Tt, Zt = sp$Zt, HHt = sp$HHt, GGt = sp$GGt, yt = yt) return(-ans$logLik) } theta <- c(ar = c(0, 0), ma1 = 0, sigma = 1) fit <- optim(theta, objective, yt = rbind(a), hessian = TRUE) fit rbind(fit$par - qnorm(0.975) * sqrt(diag(solve(fit$hessian))), fit$par + qnorm(0.975) * sqrt(diag(solve(fit$hessian)))) sp <- arma21ss(fit$par["ar1"], fit$par["ar2"], fit$par["ma1"], fit$par["sigma"]) ans <- fkf(a0 = sp$a0, P0 = sp$P0, dt = sp$dt, ct = sp$ct, Tt = sp$Tt, Zt = sp$Zt, HHt = sp$HHt, GGt = sp$GGt, yt = rbind(a)) plot(ans, at.idx = 1, att.idx = NA, CI = NA) lines(a, lty = "dotted") plot(ans, at.idx = NA, att.idx = 1, CI = NA) lines(a, lty = "dotted") plot(ans, type = "resid.qq") plot(ans, type = "acf") y <- Nile y[c(3, 10)] <- NA dt <- ct <- matrix(0) Zt <- Tt <- matrix(1) a0 <- y[1] P0 <- matrix(100) fit.fkf <- optim(c(HHt = var(y, na.rm = TRUE) * .5, GGt = var(y, na.rm = TRUE) * .5), fn = function(par, ...) -fkf(HHt = matrix(par[1]), GGt = matrix(par[2]), ...)$logLik, yt = rbind(y), a0 = a0, P0 = P0, dt = dt, ct = ct, Zt = Zt, Tt = Tt) fkf.obj <- fkf(a0, P0, dt, ct, Tt, Zt, HHt = matrix(fit.fkf$par[1]), GGt = matrix(fit.fkf$par[2]), yt = rbind(y)) fit.stats <- StructTS(y, type = "level") fit.fkf$par fit.stats$coef plot(y, main = "Nile flow") lines(fitted(fit.stats), col = "green") lines(ts(fkf.obj$att[1, ], start = start(y), frequency = frequency(y)), col = "blue") legend("top", c("Nile flow data", "Local level (StructTS)", "Local level (fkf)"), col = c("black", "green", "blue"), lty = 1) y <- treering y[c(3, 10)] <- NA dt <- ct <- matrix(0) Zt <- Tt <- matrix(1) a0 <- y[1] P0 <- matrix(100) fit.fkf <- optim(c(HHt = var(y, na.rm = TRUE) * .5, GGt = var(y, na.rm = TRUE) * .5), fn = function(par, ...) -fkf(HHt = matrix(par[1]), GGt = matrix(par[2]), ...)$logLik, yt = rbind(y), a0 = a0, P0 = P0, dt = dt, ct = ct, Zt = Zt, Tt = Tt) fkf.obj <- fkf(a0, P0, dt, ct, Tt, Zt, HHt = matrix(fit.fkf$par[1]), GGt = matrix(fit.fkf$par[2]), yt = rbind(y)) plot(y, main = "Treering data") lines(ts(fkf.obj$att[1, ], start = start(y), frequency = frequency(y)), col = "blue") legend("top", c("Treering data", "Local level"), col = c("black", "blue"), lty = 1) plot(fkf.obj, type = "resid.qq") plot(fkf.obj, type = "acf", na.action = na.pass)
tar_force <- function( name, command, force, tidy_eval = targets::tar_option_get("tidy_eval"), packages = targets::tar_option_get("packages"), library = targets::tar_option_get("library"), format = targets::tar_option_get("format"), iteration = targets::tar_option_get("iteration"), error = targets::tar_option_get("error"), memory = targets::tar_option_get("memory"), garbage_collection = targets::tar_option_get("garbage_collection"), deployment = targets::tar_option_get("deployment"), priority = targets::tar_option_get("priority"), resources = targets::tar_option_get("resources"), storage = targets::tar_option_get("storage"), retrieval = targets::tar_option_get("retrieval"), cue = targets::tar_option_get("cue") ) { name <- targets::tar_deparse_language(substitute(name)) name_change <- paste0(name, "_change") envir <- tar_option_get("envir") command <- targets::tar_tidy_eval(substitute(command), envir, tidy_eval) force <- targets::tar_tidy_eval(substitute(force), envir, tidy_eval) change <- as.call(list(call_ns("tarchetypes", "tar_force_change"), force)) tar_change_raw( name = name, name_change = name_change, command = command, change = change, packages = packages, library = library, format = format, iteration = iteration, error = error, memory = memory, garbage_collection = garbage_collection, deployment = deployment, priority = priority, resources = resources, storage = storage, retrieval = retrieval, cue = cue ) } tar_force_change <- function(condition) { path <- targets::tar_path() new <- basename(tempfile(pattern = "")) old <- if_any(file.exists(path), readRDS(path), new) if_any(condition, new, old) }
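## Usage sketch inside a _targets.R pipeline (tarchetypes convention;
## get_data() and rerun_flag() are hypothetical user functions):
# library(targets)
# library(tarchetypes)
# list(
#   tar_force(data, get_data(), force = rerun_flag())
# )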
emul.lik <- function(parvec, Y.mat, X.mat, t.vec, Theta.mat, n.par, p.par, fix.betas, limits.lower=NULL, limits.upper=NULL, beta.vec=NULL) { if ((fix.betas) && (is.null(beta.vec))) { stop("***ERROR*** Betas are fixed, yet the beta vector was not provided!\n") } if ( (!fix.betas) && (!is.null(beta.vec))) { warning('beta.vec argument is ignored') } if (!is.null(limits.lower)) { if (any(parvec < limits.lower)) { llik <- -Inf return(llik) } } if (!is.null(limits.upper)) { if (any(parvec > limits.upper)) { llik <- -Inf return(llik) } } rho <- parvec[1] kappa <- parvec[2] zeta <- parvec[3] if (!fix.betas) { beta.ind <- names(parvec) == "beta" beta.vec <- parvec[beta.ind] } phi.ind <- names(parvec) == "phi" phi.vec <- parvec[phi.ind] if (kappa == 0 && zeta == 0) stop("***ERROR*** Kappa and zeta can't be both 0!\n") Sigma.mats <- sep.cov(Theta.mat, t.vec, rho, kappa, phi.vec, zeta) Sigma.theta.Chol.mat <- chol(Sigma.mats$Sigma.theta.mat) Sigma.theta.inv.mat <- chol2inv(Sigma.theta.Chol.mat) Sigma.t.Chol.mat <- chol(Sigma.mats$Sigma.t.mat) Sigma.t.inv.mat <- chol2inv(Sigma.t.Chol.mat) mu.vec <- X.mat%*%as.matrix(beta.vec) vec.C <- Y.mat - mu.vec C.mat <- matrix(as.vector(vec.C), nrow=p.par, ncol=n.par) T1.mat <- Sigma.theta.inv.mat%*%C.mat%*%Sigma.t.inv.mat T2.mat <- C.mat*T1.mat Term1 <- -0.5*sum(T2.mat) Det10 <- 2*(sum(log(diag(Sigma.t.Chol.mat)))) Det1 <- p.par*Det10 Det20 <- 2*(sum(log(diag(Sigma.theta.Chol.mat)))) Det2 <- n.par*Det20 Term2 <- -0.5*(Det1+Det2) Term3 <- -0.5*n.par*p.par*log(2*pi) llik <- Term1 + Term2 + Term3 llik }
print.gsummary.dmm <- function(x, ...) { cat("Call:\n") print(x$call) cat("\nProportion of phenotypic var/covariance partitioned by DME:\n") cat(" to each component (OLS-b):\n\n") for(i in 1:length(x$ftables)) { print(x$ftables[[i]],digits=x$digits) cat("\n") } cat("\nCorrelation corresponding to each var/covariance component:\n") cat(" partitioned by DME (OLS-b):\n\n") for(i in 1:length(x$rtables)) { print(x$rtables[[i]],digits=x$digits) cat("\n") } cat("\nPhenotypic var/covariance from components partitioned by DME (OLS-b):\n\n") print(x$ptables[[1]],digits=x$digits) cat("\n") if(x$gls) { cat("\nProportion of phenotypic var/covariance partitioned by DME:\n") cat(" to each component (GLS-b):\n\n") for(i in 1: length(x$gftables)){ print(x$gftables[[i]],digits=x$digits) cat("\n") } cat("\nCorrelation corresponding to each var/covariance component:\n") cat(" partitioned by DME (GLS-b):\n\n") for(i in 1:length(x$grtables)) { print(x$grtables[[i]],digits=x$digits) cat("\n") } cat("\nPhenotypic var/covariance from components partitioned by DME (GLS-b):\n\n") print(x$gptables[[1]],digits=x$digits) cat("\n") } }
Lg <- function(Xj,Mj,D=diag(nrow(Xj))/nrow(Xj),Xk=Xj,Mk=Mj) { Wj <- Xj %*% Mj %*% t( Xj) Wk <- Xk %*% Mk %*% t( Xk) Lg <- sum(diag(Wj%*%D %*%Wk %*%D)) return(Lg) } wibca2mfa <- function(ACww) { rbl <- ACww$rbl cbl <- ACww$cbl J <- nrow(ACww$cbvar) L <- nrow(ACww$lbvar) nf <- ACww$nf homJ <- ACww$hom[2] colb <- NULL colb$eig<-ACww$eig*homJ homL <- ACww$hom[1] rowb <- NULL rowb$eig<-ACww$eig*homL colb$coor <-homJ*(ACww$cbvar * (ACww$cbw %*% t(rep(1,nf)))) rowb$coor <-homL*(ACww$lbvar * (ACww$lbw %*% t(rep(1,nf)))) X <- as.matrix(ACww$tab) cbl.fac <- rep(1:J,cbl) rbl.fac <- rep(1:L,rbl) D <- diag(ACww$lw) M<- diag(ACww$cw) Mbc <- M*homJ ev1 <- eigen(t(X)%*%D%*%X%*%M,symmetric=FALSE, only.values = TRUE)$values[1] Lgbc <- matrix(NA,J+1,J+1) for (j in 1:J) { Xj <- X[,cbl.fac==j];Mj <- Mbc[cbl.fac==j,cbl.fac==j] Lgbc[J+1,j]<- Lg(Xj,Mj,D,X,M/ev1) for (k in 1:J) { Xk <- X[,cbl.fac==k];Mk <- Mbc[cbl.fac==k,cbl.fac==k] Lgbc[j,k] <- Lg(Xj,Mj,D,Xk,Mk) } } Lgbc[J+1,J+1]<- Lg(X,M/ev1,D) Lgbc[,J+1]<- Lgbc[J+1,] rownames(Lgbc)<-colnames(Lgbc)<- c(rownames(ACww$cbvar),"MFA") RVbc <- (diag(1/(sqrt(diag(Lgbc)))))%*%Lgbc%*% (diag(1/sqrt(diag(Lgbc)))) rownames(RVbc)<-rownames(Lgbc); colnames(RVbc)<-colnames(Lgbc) colb$Lg <- Lgbc colb$RV <- RVbc Dbl <- D*homL Lgbl <- matrix(NA,L+1,L+1) for (l in 1:L) { Xl <- t(X[rbl.fac==l,]) Dl <- Dbl[rbl.fac==l,rbl.fac==l] Lgbl[L+1,l]<- Lg(Xl,Dl,M,t(X),D/ev1) for (m in 1:L) { Xm <- t(X[rbl.fac==m,]);Dm <- Dbl[rbl.fac==m,rbl.fac==m] Lgbl[l,m] <- Lg(Xl,Dl,M,Xm,Dm) } } Lgbl[L+1,L+1]<- Lg(t(X),D/ev1,M) Lgbl[,L+1]<- Lgbl[L+1,] rownames(Lgbl)<-colnames(Lgbl)<- c(rownames(ACww$lbvar),"MFA") RVbl <- (diag(1/sqrt(diag(Lgbl))))%*%Lgbl%*% (diag(1/sqrt(diag(Lgbl)))) rownames(RVbl)<-rownames(Lgbl); colnames(RVbl)<-colnames(Lgbl) rowb$Lg <- Lgbl rowb$RV <- RVbl mfa <- list(colb=colb,rowb=rowb) return(mfa) }
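## Worked example for Lg(): with uniform row weights (the default D) and an
## identity column metric, Lg of a centred table with itself equals the sum
## of squared eigenvalues of the statistical triplet (X, M, D).
X <- scale(matrix(rnorm(20), 5, 4), center = TRUE, scale = FALSE)
Lg(X, diag(ncol(X)))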
"as.dudi" <- function (df, col.w, row.w, scannf, nf, call, type, tol = 1e-07, full = FALSE) { if (!is.data.frame(df)) stop("data.frame expected") lig <- nrow(df) col <- ncol(df) if (length(col.w) != col) stop("Non convenient col weights") if (length(row.w) != lig) stop("Non convenient row weights") if (any(col.w < 0)) stop("col weight < 0") if (any(row.w < 0)) stop("row weight < 0") if (full) scannf <- FALSE transpose <- FALSE if(lig<col) transpose <- TRUE res <- list(tab = df, cw = col.w, lw = row.w) df <- as.matrix(df) df.ori <- df df <- df * sqrt(row.w) df <- sweep(df, 2, sqrt(col.w), "*") if(!transpose){ df <- crossprod(df,df) } else{ df <- tcrossprod(df,df) } eig1 <- eigen(df,symmetric=TRUE) eig <- eig1$values rank <- sum((eig/eig[1]) > tol) if (scannf) { if (exists("ade4TkGUIFlag")) { nf <- ade4TkGUI::chooseaxes(eig, rank) } else { barplot(eig[1:rank]) cat("Select the number of axes: ") nf <- as.integer(readLines(n = 1)) messageScannf(call, nf) } } if (nf <= 0) nf <- 2 if (nf > rank) nf <- rank if (full) nf <- rank res$eig <- eig[1:rank] res$rank <- rank res$nf <- nf col.w[which(col.w == 0)] <- 1 row.w[which(row.w == 0)] <- 1 dval <- sqrt(res$eig)[1:nf] if(!transpose){ col.w <- 1/sqrt(col.w) auxi <- eig1$vectors[, 1:nf] * col.w auxi2 <- sweep(df.ori, 2, res$cw, "*") auxi2 <- data.frame(auxi2%*%auxi) auxi <- data.frame(auxi) names(auxi) <- paste("CS", (1:nf), sep = "") row.names(auxi) <- make.unique(names(res$tab)) res$c1 <- auxi names(auxi2) <- paste("Axis", (1:nf), sep = "") row.names(auxi2) <- row.names(res$tab) res$li <- auxi2 res$co <- sweep(res$c1,2,dval,"*") names(res$co) <- paste("Comp", (1:nf), sep = "") res$l1 <- sweep(res$li,2,dval,"/") names(res$l1) <- paste("RS", (1:nf), sep = "") } else { row.w <- 1/sqrt(row.w) auxi <- eig1$vectors[, 1:nf] * row.w auxi2 <- t(sweep(df.ori,1,res$lw,"*")) auxi2 <- data.frame(auxi2%*%auxi) auxi <- data.frame(auxi) names(auxi) <- paste("RS", (1:nf), sep = "") row.names(auxi) <- row.names(res$tab) res$l1 <- auxi names(auxi2) <- paste("Comp", (1:nf), sep = "") row.names(auxi2) <- make.unique(names(res$tab)) res$co <- auxi2 res$li <- sweep(res$l1,2,dval,"*") names(res$li) <- paste("Axis", (1:nf), sep = "") res$c1 <- sweep(res$co,2,dval,"/") names(res$c1) <- paste("CS", (1:nf), sep = "") } res$call <- call class(res) <- c(type, "dudi") return(res) } "is.dudi" <- function (x) { inherits(x, "dudi") } "print.dudi" <- function (x, ...) 
{ cat("Duality diagramm\n") cat("class: ") cat(class(x)) cat("\n$call: ") print(x$call) cat("\n$nf:", x$nf, "axis-components saved") cat("\n$rank: ") cat(x$rank) cat("\neigen values: ") l0 <- length(x$eig) cat(signif(x$eig, 4)[1:(min(5, l0))]) if (l0 > 5) cat(" ...\n") else cat("\n") sumry <- array("", c(3, 4), list(1:3, c("vector", "length", "mode", "content"))) sumry[1, ] <- c("$cw", length(x$cw), mode(x$cw), "column weights") sumry[2, ] <- c("$lw", length(x$lw), mode(x$lw), "row weights") sumry[3, ] <- c("$eig", length(x$eig), mode(x$eig), "eigen values") print(sumry, quote = FALSE) cat("\n") sumry <- array("", c(5, 4), list(1:5, c("data.frame", "nrow", "ncol", "content"))) sumry[1, ] <- c("$tab", nrow(x$tab), ncol(x$tab), "modified array") sumry[2, ] <- c("$li", nrow(x$li), ncol(x$li), "row coordinates") sumry[3, ] <- c("$l1", nrow(x$l1), ncol(x$l1), "row normed scores") sumry[4, ] <- c("$co", nrow(x$co), ncol(x$co), "column coordinates") sumry[5, ] <- c("$c1", nrow(x$c1), ncol(x$c1), "column normed scores") print(sumry, quote = FALSE) cat("other elements: ") if (length(names(x)) > 11) cat(names(x)[12:(length(x))], "\n") else cat("NULL\n") } "t.dudi" <- function (x) { if (!inherits(x, "dudi")) stop("Object of class 'dudi' expected") res <- list() res$tab <- data.frame(t(x$tab)) res$cw <- x$lw res$lw <- x$cw res$eig <- x$eig res$rank <- x$rank res$nf <- x$nf res$c1 <- x$l1 res$l1 <- x$c1 res$co <- x$li res$li <- x$co res$call <- match.call() class(res) <- c("transpo", "dudi") return(res) } "redo.dudi" <- function (dudi, newnf = 2) { if (!inherits(dudi, "dudi")) stop("Object of class 'dudi' expected") appel <- as.list(dudi$call) if (appel[[1]] == "t.dudi") { dudiold <- eval.parent(appel[[2]]) appel <- as.list(dudiold$call) appel$nf <- newnf appel$scannf <- FALSE dudinew <- eval.parent(as.call(appel)) return(t.dudi(dudinew)) } appel$nf <- newnf appel$scannf <- FALSE eval.parent(as.call(appel)) } screeplot.dudi <- function (x, npcs = length(x$eig), type = c("barplot","lines"), main = deparse(substitute(x)), col = c(rep("black",x$nf),rep("grey",npcs-x$nf)), ...){ type <- match.arg(type) pcs <- x$eig xp <- seq_len(npcs) if (type == "barplot") barplot(pcs[xp], names.arg = 1:npcs, main = main, ylab = "Inertia", xlab = "Axis", col = col, ...) else { plot(xp, pcs[xp], type = "b", axes = FALSE, main = main, xlab = "Axis", ylab = "Inertia", col = col, ...) axis(2) axis(1, at = xp, labels = 1:npcs) } invisible() } biplot.dudi <- function (x, ...){ scatter(x, ...) 
} summary.dudi <- function(object, ...){ cat("Class: ") cat(class(object)) cat("\nCall: ") print(object$call) cat("\nTotal inertia: ") cat(signif(sum(object$eig), 4)) cat("\n") l0 <- length(object$eig) cat("\nEigenvalues:\n") vec <- object$eig[1:(min(5, l0))] names(vec) <- paste("Ax",1:length(vec), sep = "") print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE) cat("\nProjected inertia (%):\n") vec <- (object$eig / sum(object$eig) * 100)[1:(min(5, l0))] names(vec) <- paste("Ax",1:length(vec), sep = "") print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE) cat("\nCumulative projected inertia (%):\n") vec <- (cumsum(object$eig) / sum(object$eig) * 100)[1:(min(5, l0))] names(vec)[1] <- "Ax1" if(l0>1) names(vec)[2:length(vec)] <- paste("Ax1:",2:length(vec),sep="") print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE) if (l0 > 5) { cat("\n") cat(paste("(Only 5 dimensions (out of ",l0, ") are shown)\n", sep="",collapse="")) } cat("\n") } "[.dudi" <- function (x, i, j) { res <- unclass(x) if(!missing(i)){ res$tab <- res$tab[i, , drop = FALSE] res$li <- res$li[i, , drop = FALSE] res$l1 <- res$l1[i, , drop = FALSE] res$lw <- res$lw[i, drop = FALSE] res$lw <- res$lw / sum(res$lw) } if(!missing(j)){ res$tab <- res$tab[, j, drop = FALSE] res$co <- res$co[j, , drop = FALSE] res$c1 <- res$c1[j, , drop = FALSE] res$cw <- res$cw[j, drop = FALSE] } class(res) <- class(x) res$call <- match.call() return(res) }
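## Usage sketch: these methods apply to any 'dudi' object, e.g. one produced
## by ade4::dudi.pca().
# library(ade4)
# pca1 <- dudi.pca(USArrests, scannf = FALSE, nf = 2)
# summary(pca1)
# screeplot(pca1)
# pca1.3 <- redo.dudi(pca1, newnf = 3)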
plum.its <- function(ssize=2e3, set=get('info'), ACCEP_EV=20, EVERY_MULT=5, BURN_IN_MULT=20) {
  dims <- set$K + 4
  if(set$ra.case == 2)
    dims <- dims + nrow(set$supportedData)
  store.every <- dims * EVERY_MULT
  MCMC.size <- ACCEP_EV * store.every * (ssize + BURN_IN_MULT)
  MCMC.kept <- MCMC.size - (store.every * BURN_IN_MULT)
  MCMC.stored <- round((MCMC.kept / store.every / ACCEP_EV) * 3.5)
  message(" Will run around ", MCMC.size, " iterations and store around ", MCMC.stored)
}

Plum_runs <- function(coredir=get('info')$coredir)
  list.files(coredir)

check.equi <- function(dets, suggest=TRUE) {
  rawdata <- dets[,4]
  rawsd <- dets[,5]
  lendat <- length(rawdata)
  numdat <- as.integer(.5*lendat)
  usedat <- rawdata[(lendat-3):lendat]
  usesd <- rawsd[(lendat-3):lendat]
  usex <- 1:length(usedat)
  usereg <- lm(usedat ~ usex, weights=1/(usesd^2))
  reg <- coef(summary(usereg))[2,4]
  est <- coef(summary(usereg))[1,1]
  coe <- 3
  ## extend the tail one sample at a time and keep the flattest regression
  for(i in 1:numdat) {
    usedat <- rawdata[(lendat-3-i):lendat]
    usesd <- rawsd[(lendat-3-i):lendat]
    usex <- 1:length((lendat-3-i):lendat)
    usereg <- lm(usedat ~ as.numeric(scale(usex)), weights=1/(usesd^2))
    reg1 <- coef(summary(usereg))[2,4]
    est1 <- mean(usedat)
    if(reg1 > reg) {
      reg <- reg1
      coe <- (3+i)
      est <- est1
    }
  }
  stopQuietly <- function() {
    blankMsg <- sprintf("\r%s\r", paste(rep(" ", getOption("width")-1L), collapse=" "))
    stop(simpleError(blankMsg))
  }
  if(suggest) {
    ans <- readline(message("The regression process proposes using the last ", as.integer(coe),
      " data points as estimates of the supported activity, with a p-value of ",
      round(reg, 3), ", OK? (Y/n) "))
    if(!(ans=="y" || ans=="")) {
      message(" OK. Please provide correct n.supp (as Plum option or in the .csv file).")
      stopQuietly()
    }
  }
  return(as.integer(coe))
}

read.dets.plum <- function(core, coredir, n.supp=c(), date.sample, sep=",", dec=".", cc=1,
  Bqkg=TRUE, ra.case=c(), suggest=TRUE) {
  csv.file <- paste0(coredir, core, "/", core, ".csv")
  changed <- FALSE
  if(file.exists(csv.file)) {
    dets <- read.table(csv.file, header=TRUE, sep=sep)
    message("Reading ", csv.file)
  } else {
    if(file.exists(paste0(csv.file, ".txt"))) {
      file.rename(paste0(csv.file, ".txt"), csv.file)
      message("Removing .txt extension from .csv file")
      dets <- read.table(csv.file, header=TRUE, sep=sep)
      message("Reading ", csv.file, "\n")
    } else
      message("No .csv file found. Please check if the name and/or location is correct")
  }

  commas <- grep(",,", readLines(csv.file))
  if(length(commas) > 0) # fix: was length(!is.na(commas)) > 0
    stop("check the .csv file in a plain-text editor for 'orphan' commas", call.=FALSE)

  nas <- which(is.na(dets[,ncol(dets)]))
  if(length(nas) > 0)
    dets[nas,ncol(dets)] <- ""

  ## expected column layout of the dets file
  idColumn <- 1
  depthColumn <- 2
  rhoColumn <- 3 # density
  plumdataColumn <- 4
  stdColumn <- 5
  deltaColumn <- 6 # sample thickness
  raColumn <- 7
  sdRaColumn <- 8

  if(min(diff(dets[,depthColumn])) < 0) {
    message("Warning, the depths are not in ascending order, I will correct this.")
    dets <- dets[order(dets[,depthColumn]),]
    changed <- TRUE
  }

  date.infile <- NA; nsupp.infile <- NA; racase.infile <- NA
  if(ncol(dets) == 6 || ncol(dets) == 8)
    detsOrig <- dets else
      if(ncol(dets) == 7 || ncol(dets) == 9) {
        n <- ifelse(ncol(dets) == 7, 7, 9) # the settings column
        detsOrig <- dets[,-n]
        if(length(dets[1,n]) > 0)
          if(!is.na(dets[1,n]))
            if(dets[1,n] != "")
              date.infile <- dets[1,n]
        if(length(dets[2,n]) > 0)
          if(!is.na(dets[2,n]))
            if(dets[2,n] != "")
              nsupp.infile <- dets[2,n]
        if(length(dets[3,n]) > 0)
          if(!is.na(dets[3,n]))
            if(dets[3,n] != "")
              racase.infile <- dets[3,n]
      } else
        stop(paste(csv.file, "should have between 6 and 9 columns.\nPlease check."), call.=TRUE)

  date.asoption <- date.sample
  nsupp.asoption <- n.supp
  racase.asoption <- ra.case
  Bqkg.asoption <- Bqkg

  choice <- function(infile, asoption, string1, string2, testnumeric=TRUE, test=c()) {
    if(length(infile) == 0 || is.na(infile) || infile == "") {
      if(length(asoption) == 0) {
        if(length(test) > 0)
          ans <- test else
            ans <- readline(string2)
        if(testnumeric)
          if(grepl("^[0-9.][0-9]*[.]?[0-9]*[0-9.]$", ans[1]) == FALSE)
            if(grepl("^[0-9]+$", ans[1]) == FALSE)
              stop(paste(string1, "should be a numeric value"), call.=FALSE) # fix: was stop(cat(...))
        chosen <- as.numeric(ans)
      } else chosen <- as.numeric(asoption)
    } else
      if(length(asoption) == 0)
        chosen <- as.numeric(infile) else
          chosen <- as.numeric(asoption)
    message("Using for ", string1, ": ", chosen)
    return(chosen)
  }

  if(core=="HP1C") {
    if(length(date.asoption) == 0) {
      message("Core HP1C was sampled in summer 2018, setting date.sample to 2018.5")
      date.sample <- 2018.5
    }
  } else
    date.sample <- choice(date.infile, date.asoption, "sampling date",
      "Please provide a date (in AD) for when the Pb210 samples were measured: ")

  if(ncol(dets) == 6) {
    if(length(racase.asoption) == 0)
      message("No radium-226 data, setting ra.case to 0, using tail data to estimate supported Pb-210") else {
        if(racase.asoption != 0)
          message("Setting ra.case to 0 as no radium-226 data provided, using tail data to estimate supported Pb-210") else
            message("No radium-226 data, setting ra.case to 0")
      }
    ra.case <- 0
  }

  if(ncol(dets) == 7) {
    if(length(racase.asoption) == 0) {
      if(length(racase.infile) == 0 || is.na(racase.infile) || racase.infile > 0)
        message("Setting ra.case to 0 as no radium-226 data provided, using tail data to estimate supported Pb-210 (ra.case 0)") else
          message("No radium-226 data, using tail data to estimate supported Pb-210 (ra.case 0)")
    } else {
      if(racase.asoption != 0)
        message("Setting ra.case to 0 as no radium-226 data provided, using tail data to estimate supported Pb-210 (ra.case 0)") else
          message("No radium-226 data, using tail data to estimate supported Pb-210 (ra.case 0)")
    }
    ra.case <- 0
  }

  if(ncol(dets) == 8) {
    if(length(racase.asoption) == 0) {
      message("Radium-226 data provided. Should I assume constant (ra.case 1) or varying (ra.case 2) supported Pb-210? Note that using ra.case 2 will greatly increase the computing time and should only be used when clear patterns are observed in the radium-226 data.")
      ans <- readline(" Use ra.case (1 or 2):")
      if(ans == 1)
        ra.case <- 1 else
          if(ans == 2)
            ra.case <- 2 else
              stop("I do not understand this value for ra.case (should be 1 or 2). Please adapt the settings", call.=TRUE)
    } else {
      if(racase.asoption == 1)
        ra.case <- 1 else
          if(racase.asoption == 2)
            ra.case <- 2 else {
              message("Radium-226 data provided. Should I assume constant (ra.case 1) or varying (ra.case 2) supported Pb-210? Note that using ra.case 2 will greatly increase the computing time and should only be used when clear patterns are observed in the radium data.")
              ans <- readline("Use ra.case (1 or 2):")
              if(ans == 1)
                ra.case <- 1 else
                  if(ans == 2)
                    ra.case <- 2 else
                      stop("I do not understand this value for ra.case (should be 1 or 2). Please adapt the settings", call.=TRUE)
            }
    }
  }

  if(ncol(dets) == 9) {
    if(length(racase.asoption) == 0) {
      if(length(racase.infile) == 0 || is.na(racase.infile) || racase.infile == 0 || racase.infile == "") {
        message("Radium data provided so ra.case cannot be 0 or empty. Should I assume constant (ra.case 1) or varying (ra.case 2) supported Pb-210? Note that using ra.case 2 will greatly increase the computing time and should only be used when clear patterns are observed in the radium-226 data.")
        ans <- readline("Use ra.case (1 or 2):")
        if(ans == 1)
          ra.case <- 1 else
            if(ans == 2)
              ra.case <- 2 else
                stop("I do not understand this answer. Please adapt the settings", call.=TRUE)
      } else
        if(racase.infile == 1)
          ra.case <- 1 else
            if(racase.infile == 2)
              ra.case <- 2 else
                stop("I do not understand the radium-226 case value in the .csv file. Please adapt", call.=TRUE)
    } else {
      if(racase.asoption == 0) {
        message("Radium-226 data provided so ra.case cannot be 0. Should I assume constant (ra.case 1) or varying (ra.case 2) supported Pb-210? Note that using ra.case 2 will greatly increase the computing time and should only be used when clear patterns are observed in the radium-226 data.")
        ans <- readline("Use ra.case (1 or 2):")
        if(ans == 1)
          ra.case <- 1 else
            if(ans == 2)
              ra.case <- 2 else
                stop("I do not understand this value for ra.case (should be 1 or 2). Please adapt the settings", call.=TRUE)
      } else
        if(racase.asoption == 1) {
          message("ra.case 1")
          ra.case <- 1
        } else
          if(racase.asoption == 2) {
            message("ra.case 2")
            ra.case <- 2
          } else
            stop("I do not understand this value for ra.case (should be 1 or 2). Please adapt the settings", call.=TRUE)
    }
  }

  if(ra.case < 2) {
    if(ra.case == 1)
      message("Besides using the radium data, the tail Pb-210 data can also be used to estimate supported Pb-210.")
    n.supp <- choice(nsupp.infile, nsupp.asoption, "number of supported data", "", test=check.equi(dets))
  }
  if(ra.case == 2)
    n.supp <- 0

  if(length(Bqkg) == 0 || !(Bqkg %in% c(0, 1))) {
    message("Assuming that the Pb units are in Bq/kg, Bqkg=1")
    Bqkg <- 1
  }

  choices <- c(date.sample, n.supp, ra.case, rep("", nrow(dets)-3))
  suggested.names <- c("labID", "depth(cm)", "density(g/cm^3)", "210Pb(Bq/kg)", "sd(210Pb)",
    "thickness(cm)", "226Ra(Bq/kg)", "sd(226Ra)", "settings")
  if(ra.case == 0)
    suggested.names <- suggested.names[-(7:8)]
  if(ncol(dets) %in% c(6,8)) {
    changed <- TRUE
    dets <- cbind(dets, choices)
  } else {
    current <- c(date.infile, nsupp.infile, racase.infile)
    ## rewrite the file if any in-file setting is missing or differs from the
    ## choices made (fix: the original test was always TRUE)
    if(any(is.na(current)) || !all(choices[1:3] == current))
      changed <- TRUE
    dets[,ncol(dets)] <- choices
  }
  if(changed) {
    message("Writing changes to ", csv.file)
    write.table(dets, csv.file, sep=paste0(sep, "\t"), dec=dec, row.names=FALSE,
      col.names=suggested.names, quote=FALSE)
  }

  if(ncol(detsOrig) == 6) {
    raColumn <- 4
    sdRaColumn <- 5
    supportedData <- detsOrig[(nrow(detsOrig)-n.supp+1):nrow(detsOrig), c(raColumn, sdRaColumn, depthColumn, deltaColumn)]
    detsOrig <- detsOrig[1:(nrow(detsOrig)-n.supp),]
  } else
    if(ncol(detsOrig) == 8) {
      raColumn <- 7
      sdRaColumn <- 8
      supportedData <- detsOrig[,c(raColumn, sdRaColumn, depthColumn, deltaColumn)]
      detsOrig <- detsOrig[,-c(raColumn, sdRaColumn)]
      if(length(supportedData[is.na(supportedData)]) > 0) {
        message("Missing values are detected; the radium case is set to 1.")
        ra.case <- 1
        elim <- c()
        for(i in 1:nrow(supportedData))
          if(any(is.na(supportedData[i,]))) # fix: was length(is.na(...)) > 0, which flagged every row
            elim <- c(elim, i)
        if(length(elim) > 0)
          supportedData <- supportedData[-elim,]
      }
      if(length(n.supp) > 0)
        if(n.supp > 0) {
          raColumn <- 4
          sdRaColumn <- 5
          tmp <- detsOrig[(nrow(detsOrig)-n.supp+1):nrow(detsOrig), c(raColumn, sdRaColumn, depthColumn, deltaColumn)]
          names(tmp) <- colnames(supportedData)
          supportedData <- rbind(supportedData, tmp)
          detsOrig <- detsOrig[1:(nrow(detsOrig)-n.supp),]
        }
    } else
      if(n.supp > 0) { # defensive branch; detsOrig normally has 6 or 8 columns by this point
        raColumn <- 4
        sdRaColumn <- 5
        tmp <- detsOrig[(nrow(detsOrig)-n.supp+1):nrow(detsOrig), c(raColumn, sdRaColumn,
depthColumn, deltaColumn)] names(tmp) <- colnames(supportedData) supportedData <- rbind(supportedData, tmp) detsOrig <- detsOrig[1:(nrow(detsOrig)-n.supp),] } else stop("Unexpected column names, order or values in dets file. \nPlease check the manual for how to produce a correct dets file.", call.=FALSE) if(!is.numeric(dets[,plumdataColumn]) || !is.numeric(dets[,stdColumn]) || !is.numeric(dets[,depthColumn])) stop("unexpected values in dets file, I expected numbers. Check the manual.", call.=FALSE) if(!is.numeric(dets[,deltaColumn]) || !is.numeric(dets[,rhoColumn]) ) stop("unexpected values in dets file, I expected numbers. Check the manual.", call.=FALSE) dets <- dets[,c(idColumn, plumdataColumn, stdColumn, depthColumn, deltaColumn, rhoColumn)] if(ncol(detsOrig) == 6) { age.min <- min( c(detsOrig[,2]-(detsOrig[,6]/2)), c( detsOrig[,4]-detsOrig[,5]) ) age.max <- max( c(detsOrig[,2]-(detsOrig[,6]/2)), c( detsOrig[,4]+detsOrig[,5]) ) } else { age.min <- min( c(detsOrig[,2]-(detsOrig[,6]/2),detsOrig[,2]-(detsOrig[,6]/2)), c( detsOrig[,4]-detsOrig[,5],detsOrig[,7]-detsOrig[,8]) ) age.max <- max( c(detsOrig[,2]-(detsOrig[,6]/2),detsOrig[,2]-(detsOrig[,6]/2)), c( detsOrig[,4]+detsOrig[,5],detsOrig[,7]+detsOrig[,8]) ) } layout(1) oldpar <- par(mar=c(3,3,1,1), mgp=c(1.5,.7,.0), bty="l") on.exit(par(oldpar)) age.lim <- extendrange(c(age.min, age.max), f=0.01) dlim <- c(0, max(detsOrig[,depthColumn])) ylab <- ifelse(Bqkg, '210Pb (Bq/kg)', '210Pb (dpm/g)') plot(0, type='n', pch=16,col=c(rep('red',nrow(detsOrig)),rep('red',nrow(detsOrig))), cex=.3, ylab=ylab, xlab='depth(cm)', xlim=dlim, ylim=age.lim ) rect(detsOrig[,2]-detsOrig[,6], detsOrig[,4]-detsOrig[,5], detsOrig[,2], detsOrig[,4]+detsOrig[,5], lty=3, border=4) if(ncol(detsOrig) > 6) rect(detsOrig[,2], detsOrig[,7]-detsOrig[,8], detsOrig[,2]-detsOrig[,6], detsOrig[,7]+detsOrig[,8], lty=3, border=2) return(list(dets, supportedData, ra.case, date.sample, detsOrig, n.supp, Bqkg)) } Plum.cleanup <- function(set=get('info')) { files <- c(paste0(set$prefix, ".bacon"), paste0(set$prefix, ".out"), paste0(set$prefix, ".pdf"), paste0(set$prefix, "_ages.txt"), paste0(set$coredir,set$core, "/", set$core, "_settings.txt")) for(i in files) if(file.exists(i)) tmp <- file.remove(i) if(exists("tmp")) rm(tmp) message("Previous Plum runs of core ", set$core, " with thick=", set$thick, " deleted. 
Now try running the core again\n")
}

.plum.settings <- function(core, coredir, dets, thick, remember=TRUE, d.min, d.max, d.by,
  depths.file, slump, acc.mean, acc.shape, mem.mean, mem.strength, boundary, hiatus.depths,
  hiatus.max, hiatus.shape, BCAD, cc, postbomb, cc1, cc2, cc3, cc4, depth.unit, normal,
  t.a, t.b, delta.R, delta.STD, prob, defaults, runname, ssize, dark, MinAge, MaxAge,
  cutoff, age.res, after, age.unit, supportedData, date.sample, Al, phi.shape, phi.mean,
  s.shape, s.mean, ra.case, Bqkg, n.supp) {
  vals <- list(d.min, d.max, d.by, depths.file, slump, acc.mean, acc.shape, mem.mean,
    mem.strength, boundary, hiatus.depths, hiatus.max, BCAD, cc, postbomb, cc1, cc2, cc3,
    cc4, depth.unit, normal, t.a, t.b, delta.R, delta.STD, prob, age.unit)
  valnames <- c("d.min", "d.max", "d.by", "depths.file", "slump", "acc.mean", "acc.shape",
    "mem.mean", "mem.strength", "boundary", "hiatus.depths", "hiatus.max", "BCAD", "cc",
    "postbomb", "cc1", "cc2", "cc3", "cc4", "depth.unit", "normal", "t.a", "t.b",
    "delta.R", "delta.STD", "prob", "age.unit")

  extr <- function(i, def=deffile, pre=prevfile, exists.pre=prevf, rem=remember, sep=" ", isnum=TRUE) {
    if(length(vals[[i]]) > 0)
      if(any(is.na(vals[[i]]))) {
        ext.def <- strsplit(def[i], sep)[[1]]
        ext.def <- ext.def[-length(ext.def)] # strip the trailing comment field
        if(exists.pre) {
          ext.pre <- strsplit(pre[i], sep)[[1]]
          ext.pre <- ext.pre[-length(ext.pre)]
          if(def[i] == pre[i])
            ext <- ext.pre else
              if(rem) {
                if(i==13) ifelse(ext.pre, "using BC/AD", "using cal BP") else
                  if(i>2) message(" using previous run's value for ", valnames[i], ", ", ext.pre)
                ext <- ext.pre
              } else {
                if(i==13) ifelse(ext.def, "using BC/AD", "using cal BP") else
                  if(i>2) message(" using default value for ", valnames[i], ", ", ext.def)
                ext <- ext.def
              }
        } else ext <- ext.def
        if(any(ext=="NA") || any(is.na(ext))) NA else
          if(isnum) as.numeric(ext) else noquote(ext)
      } else
        if(isnum) as.numeric(vals[[i]]) else vals[[i]]
  }

  deffile <- readLines(defaults, n=-1)
  prevfile <- paste0(coredir, core, "/", core, "_settings.txt")
  prevf <- FALSE
  if(file.exists(prevfile)) {
    prevfile <- readLines(prevfile, n=-1)
    if(length(prevfile) > 0) prevf <- TRUE
  }

  if(is.na(d.min) || d.min=="NA") d.min <- min(dets[,4])
  if(is.na(d.max) || d.max=="NA") d.max <- max(dets[,4])
  if(length(acc.shape) < length(acc.mean))
    acc.shape <- rep(acc.shape, length(acc.mean)) else
      if(length(acc.shape) > length(acc.mean)) acc.mean <- rep(acc.mean, length(acc.shape))
  if(length(mem.strength) < length(mem.mean))
    mem.strength <- rep(mem.strength, length(mem.mean)) else
      if(length(mem.strength) > length(mem.mean)) mem.mean <- rep(mem.mean, length(mem.strength))

  ## write the settings file, one "value #name" entry per line; the original
  ## in-file comment strings were lost in extraction and are reconstructed here
  prevfile <- file(paste0(coredir, core, "/", core, "_settings.txt"), "w")
  scat <- function(m, n="") cat(m, n, sep="", file=prevfile)
  cat(d.min, " #d.min\n", d.max, " #d.max\n", d.by, " #d.by\n",
    depths.file, " #depths.file\n", slump, " #slump\n", sep="", file=prevfile)
  for(i in acc.mean) scat(i, " "); scat("#acc.mean\n")
  for(i in acc.shape) scat(i, " "); scat("#acc.shape\n")
  for(i in mem.mean) scat(i, " "); scat("#mem.mean\n")
  for(i in mem.strength) scat(i, " "); scat("#mem.strength\n")
  for(i in boundary) scat(i, " "); scat("#boundary\n")
  for(i in hiatus.depths) scat(i, " "); scat("#hiatus.depths\n")
  for(i in hiatus.max) scat(i, " "); scat("#hiatus.max\n")
  cat(BCAD, " #BCAD\n", cc, " #cc\n", postbomb, " #postbomb\n",
    cc1, " #cc1\n", cc2, " #cc2\n", cc3, " #cc3\n", cc4, " #cc4\n",
    depth.unit, " #depth.unit\n", normal, " #normal\n", t.a, " #t.a\n", t.b, " #t.b\n",
    delta.R, " #delta.R\n", delta.STD, " #delta.STD\n", prob, " #prob\n",
    sep="", file=prevfile)
  cat(date.sample, " #date.sample\n", Al, " #Al\n", phi.shape, " #phi.shape\n",
    phi.mean, " #phi.mean\n", s.shape, " #s.shape\n", s.mean, " #s.mean\n",
    sep="", file=prevfile)
  cat(n.supp, " #n.supp\n", ra.case, " #ra.case\n", Bqkg, " #Bqkg\n", sep="", file=prevfile)
  close(prevfile)

  if(length(MinAge) == 0)
    MinAge <- min(1950 - as.integer(format(Sys.time(), "%Y")), round(dets[,2] - (5*dets[,3])))
  if(length(MaxAge) == 0)
    MaxAge <- max(1e6, round(dets[,2] + (5*dets[,3])))
  theta0 <- 1950 - date.sample

  list(core=core, thick=thick, dets=dets, d.min=d.min, d.max=d.max, coredir=coredir,
    d.by=d.by, depths.file=depths.file, slump=slump, acc.mean=acc.mean,
    acc.shape=acc.shape, mem.mean=mem.mean, mem.strength=mem.strength, boundary=boundary,
    hiatus.depths=hiatus.depths, hiatus.max=hiatus.max, BCAD=BCAD, cc=cc, postbomb=postbomb,
    cc1=cc1, cc2=cc2, cc3=cc3, cc4=cc4, depth.unit=noquote(depth.unit), unit=depth.unit,
    age.unit=noquote(age.unit), normal=normal, t.a=t.a, t.b=t.b, delta.R=delta.R,
    delta.STD=delta.STD, prob=prob, date=date(), runname=runname, ssize=ssize, dark=dark,
    MinAge=MinAge, MaxAge=MaxAge, cutoff=cutoff, age.res=age.res, after=after,
    supportedData=supportedData, theta0=theta0, Al=Al, phi.shape=phi.shape,
    phi.mean=phi.mean, s.shape=s.shape, s.mean=s.mean, ra.case=ra.case, Bqkg=Bqkg)
}

merge.dets <- function(detsPlum, detsBacon, delta.R, delta.STD, t.a, t.b, cc) {
  if(ncol(detsBacon) >= 5) {
    cc <- detsBacon[,5]
    detsBacon <- detsBacon[,-5]
  } else
    cc <- array(cc, dim=c(nrow(detsBacon),1))

  if(ncol(detsBacon) < 9) {
    for(i in (ncol(detsBacon)+1):9) {
      if(i==5) {
        col <- array(delta.R, dim=c(nrow(detsBacon),1))
      } else if(i==6) {
        col <- array(delta.STD, dim=c(nrow(detsBacon),1))
      } else if(i==7) {
        col <- array(t.a, dim=c(nrow(detsBacon),1))
      } else if(i==8) {
        col <- array(t.b, dim=c(nrow(detsBacon),1))
      } else if(i==9) {
        col <- cc
      }
      detsBacon <- cbind(detsBacon, col)
    }
    colnames(detsBacon) <- c("labID", "X210Pb.Bq.kg.", "sd.210Pb.", "depth.cm.",
      "thickness.cm.", "density.g.cm.3.", "t.a", "t.b", "cc")
  }

  if(ncol(detsPlum) < 9) {
    for(i in (ncol(detsPlum)+1):9) {
      if(i==5) {
        col <- array(delta.R, dim=c(nrow(detsPlum),1))
      } else if(i==6) {
        col <- array(delta.STD, dim=c(nrow(detsPlum),1))
      } else if(i==7) {
        col <- array(t.a, dim=c(nrow(detsPlum),1))
      } else if(i==8) {
        col <- array(t.b, dim=c(nrow(detsPlum),1))
      } else if(i==9) {
        col <- array(5, dim=c(nrow(detsPlum),1)) # cc=5 marks Plum (210Pb) determinations
      }
      detsPlum <- cbind(detsPlum, col)
    }
    colnames(detsPlum) <- c("labID", "X210Pb.Bq.kg.", "sd.210Pb.", "depth.cm.",
      "thickness.cm.", "density.g.cm.3.", "t.a", "t.b", "cc")
  }

  dets <- rbind(detsPlum, detsBacon, make.row.names=FALSE)
  dets <- dets[order(dets[,4]),]
}

write.plum.file <- function(set=get('info')) {
  if(length(set$slump) > 0) {
    dets <- set$slumpdets
    hiatus.depths <- set$slumphiatus
    boundary <- set$slumpboundary
  } else {
    dets <- set$dets
    hiatus.depths <- set$hiatus.depths
    boundary <- set$boundary
  }
  depthColumn <- 4
  if(is.na(set$d.min) || set$d.min < min(dets[,depthColumn])) {
    ## repeat the shallowest date at d.min, with a huge uncertainty
    dets <- rbind(dets[which(dets[,depthColumn] == min(dets[,depthColumn]))[1],], dets,
      make.row.names=FALSE)
    dets[1,1] <- NA
    dets[1,3] <- max(1e5, 1e3*dets[,4], 1e3*dets[,3])
    dets[1,depthColumn] <- set$d.min
  }
  if(is.na(set$d.max) || set$d.max > max(dets[,depthColumn])) {
    ## repeat the deepest date at d.max, with a huge uncertainty
    dets <- rbind(dets, dets[which(dets[,depthColumn] == max(dets[,depthColumn]))[1],],
      make.row.names=FALSE)
    dets[nrow(dets),1] <- NA
    dets[nrow(dets),3] <- max(1e5, 1e3*dets[,4], 1e3*dets[,3])
    dets[nrow(dets),depthColumn] <- set$d.max
  }
  supportedData <- set$supportedData

  ## the comment headers written into the .bacon file below were lost during
  ## extraction; they are reconstructed here as plausible placeholders
  fl <- file(set$bacon.file, "w")
  cat("## Ran on ", set$date, "\n\n", sep="", file=fl)
  cat("Cal 0 : ConstCal;\nCal 1 : ",
    if(set$cc1=="IntCal20" || set$cc1=="\"IntCal20\"") "IntCal20" else noquote(set$cc1),
    ", ", set$postbomb, ";\nCal 2 : ",
    if(set$cc2=="Marine20" || set$cc2=="\"Marine20\"") "Marine20" else noquote(set$cc2),
    ";\nCal 3 : ",
    if(set$cc3=="SHCal20" || set$cc3=="\"SHCal20\"") "SHCal20" else noquote(set$cc3),
    ", ", set$postbomb, ";",
    if(set$cc4=="ConstCal" || set$cc4=="\"ConstCal\"") set$cc4 <- c() else
      paste0("\nCal 4 : GenericCal, ", set$cc4, ";"), sep="", file=fl)
  cat("\nCal 4 : ConstCal;", sep="", file=fl)
  cat("\n## Plum: phi.shape, phi.mean, s.shape, s.mean, Al, theta0, ra.case, data file", file=fl)
  cat("\nCal 5 : Plum, ", set$phi.shape, ", ", set$phi.mean, ", ", set$s.shape, ", ", set$s.mean, ", ",
    set$Al, ", ", set$theta0, ", ", set$ra.case, ", ", set$plum.file, ";", sep="", file=fl)
  cat("\n##   id.   yr    std   depth  delta.R  delta.STD     t.a   t.b   cc", file=fl)
  for(i in 1:nrow(dets))
    cat("\nDet ", i-1, " : ", as.character(dets[i,1]), " , ", dets[i,2], ", ", dets[i,3], ", ",
      dets[i,4], ", ", dets[i,5], ", ", dets[i,6], ", ", dets[i,7], ", ", dets[i,8], ", ",
      dets[i,9], ";", sep="", file=fl)

  if(!is.na(hiatus.depths[1])) {
    if(is.null(boundary[1]))
      message("\n Hiatus set at depth(s) ", hiatus.depths, "\n") else
        message("\n Boundary set at depth(s) ", boundary, "\n")
    if(length(set$acc.shape)==1) set$acc.shape <- rep(set$acc.shape, length(hiatus.depths)+1)
    if(length(set$acc.mean)==1) set$acc.mean <- rep(set$acc.mean, length(hiatus.depths)+1)
    if(length(set$hiatus.max)==1) set$hiatus.max <- rep(set$hiatus.max, length(hiatus.depths))
    assign_to_global("info", set)
    cat("\n\n## Depths and priors for fixed hiatuses, in descending order",
      "\n##  depth  alpha  beta  ha  hb", file=fl)
    for(i in length(hiatus.depths):1)
      cat("\nHiatus ", i-1, ": ", hiatus.depths[i], ", ", set$acc.shape[i+1], ", ",
        set$acc.shape[i+1]/set$acc.mean[i+1], ", ", .1, ", ", set$hiatus.max[i], ";",
        sep="", file=fl)
  }

  cK <- set$d.min+(set$thick*set$K)
  if(is.na(set$seed)) {
    wrapup <- paste0("\n\n## K  MinYr  MaxYr  th0  th0p  w.a  w.b  alpha(1)  beta(1)  dmin  dmax",
      "\nBacon 0: ", ifelse(set$normal, "FixNor", "FixT"), ", ", set$K,
      ", ", set$theta0-.02, ", ", 26500, ", ", set$theta0-0.01, ", ", set$theta0+0.01,
      ", ", set$mem.strength*set$mem.mean, ", ", set$mem.strength*(1-set$mem.mean), ", ",
      set$acc.shape[1], ", ", set$acc.shape[1]/set$acc.mean[1], ", ", set$d.min, ", ", cK, ";\n")
  } else {
    wrapup <- paste0("\n\n## K  MinYr  MaxYr  th0  th0p  w.a  w.b  alpha(1)  beta(1)  dmin  dmax  seed",
      "\nBacon 0: ", ifelse(set$normal, "FixNor", "FixT"), ", ", set$K,
      ", ", set$theta0-.02, ", ", 26500, ", ", set$theta0-0.01, ", ", set$theta0+0.01,
      ", ", set$mem.strength*set$mem.mean, ", ", set$mem.strength*(1-set$mem.mean), ", ",
      set$acc.shape[1], ", ", set$acc.shape[1]/set$acc.mean[1], ", ", set$d.min, ", ", cK,
      ", ", set$seed, ";\n")
  }
  cat(wrapup, file=fl)
  close(fl)

  fl <- file(set$plum.file, "w")
  if(length(supportedData) > 0)
    for(i in 1:nrow(supportedData)) {
      for(j in 1:2) cat(supportedData[i,j], " ", file=fl)
      cat("\n", file=fl)
    }
  close(fl)
}
SimTestRatHom <- function(trlist, grp, ntr, nep, ssmat, Num.Contrast, Den.Contrast, ncomp, alternative, Margin, meanmat, CorrMatDat) { if (any(meanmat<0)) { warning("At least one sample mean is negative; check whether the test direction", "\n", "is still correct", "\n") } estimate <- Num.Contrast%*%meanmat/(Den.Contrast%*%meanmat) defr <- matrix(sum(ssmat[,1])-ntr, nrow=ncomp, ncol=nep) CovMatDat <- Reduce("+", lapply(trlist, function(x) (nrow(x)-1)*cov(x)) )/defr[1,1] if (is.null(CorrMatDat)) { CorrMatDat <- cov2cor(CovMatDat) } else { sdmat <- sqrt( diag( diag(CovMatDat),nrow=nep ) ) CovMatDat <- sdmat%*%CorrMatDat%*%sdmat } M <- diag(1/ssmat[,1]) R <- NULL for (z in 1:ncomp) { Rrow <- NULL for (w in 1:ncomp) { Rpart <- matrix(nrow=nep,ncol=nep) for (i in 1:nep) { for (h in 1:nep) { Rpart[i,h] <- CorrMatDat[i,h] * ( t(Num.Contrast[z,]-Margin[z,i]*Den.Contrast[z,])%*% M%*%(Num.Contrast[w,]-Margin[w,h]*Den.Contrast[w,]) ) / sqrt( (t(Num.Contrast[z,]-Margin[z,i]*Den.Contrast[z,])%*%M%*% (Num.Contrast[z,]-Margin[z,i]*Den.Contrast[z,])) * (t(Num.Contrast[w,]-Margin[w,h]*Den.Contrast[w,])%*%M%*% (Num.Contrast[w,]-Margin[w,h]*Den.Contrast[w,])) ) } } Rrow <- cbind(Rrow,Rpart) } R <- rbind(R, Rrow) } diag(R) <- 1 statistic <- matrix(nrow=ncomp, ncol=nep) for (z in 1:ncomp) { for (i in 1:nep) { statistic[z,i] <- ( t(Num.Contrast[z,]-Margin[z,i]*Den.Contrast[z,])%*%meanmat[,i] ) / sqrt( diag(CovMatDat)[i] * ( t(Num.Contrast[z,]-Margin[z,i]*Den.Contrast[z,])%*%M%*% (Num.Contrast[z,]-Margin[z,i]*Den.Contrast[z,]) ) ) } } p.val <- SimTestP(ncomp=ncomp,nep=nep,alternative=alternative,statistic=statistic, defr.mul=defr,defr.uni=defr,R=R) p.val.adj <- p.val$p.val.adj; p.val.raw <- p.val$p.val.raw list(estimate=estimate, statistic=statistic, p.val.raw=p.val.raw, p.val.adj=p.val.adj, CovMatDat=CovMatDat, CorrMatDat=CorrMatDat, CorrMatComp=R, degr.fr=defr, Num.Contrast=Num.Contrast, Den.Contrast=Den.Contrast, alternative=alternative) }
subset.int <- function(timestamp, subset) {
  num.samples <- length(timestamp)
  tz <- attr(timestamp, "tzone")
  if(is.null(tz)) tz <- ""
  if((!is.character(subset) && !any(is.na(subset))) || length(subset)!=2)
    stop("Please specify 'subset' as a vector of start and end time stamps")
  if(is.na(subset[1])) subset[1] <- as.character(timestamp[1])
  if(is.na(subset[2])) subset[2] <- as.character(timestamp[num.samples])
  ## date-only entries are padded to midnight
  if(nchar(subset[1])==10) subset[1] <- paste(subset[1], "00:00:00")
  if(nchar(subset[2])==10) subset[2] <- paste(subset[2], "00:00:00")
  start <- strptime(subset[1], "%Y-%m-%d %H:%M:%S", tz[1])
  end <- strptime(subset[2], "%Y-%m-%d %H:%M:%S", tz[1])
  if(is.na(start)) stop("'start' time stamp in 'subset' not correctly formatted")
  if(is.na(end)) stop("'end' time stamp in 'subset' not correctly formatted")
  if(start<timestamp[1] || start>timestamp[num.samples]) stop("'start' time stamp in 'subset' not in period")
  if(end<timestamp[1] || end>timestamp[num.samples]) stop("'end' time stamp in 'subset' not in period")
  ## find the sample indices closest to the requested start and end times
  match.date <- difftime(timestamp, ISOdatetime(1,1,1,0,0,0), tz=tz[1], units="days") -
    difftime(start, ISOdatetime(1,1,1,0,0,0), tz=tz[1], units="days")
  start <- which(abs(as.numeric(match.date)) == min(abs(as.numeric(match.date))))
  match.date <- difftime(timestamp, ISOdatetime(1,1,1,0,0,0), tz=tz[1], units="days") -
    difftime(end, ISOdatetime(1,1,1,0,0,0), tz=tz[1], units="days")
  end <- which(abs(as.numeric(match.date)) == min(abs(as.numeric(match.date))))
  return(cbind(start, end))
}
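## Hedged usage sketch: subset.int() maps a character start/end pair onto the
## indices of the closest samples in a timestamp series (the data are made up).
if(interactive()) {
  ts <- seq(ISOdatetime(2020, 1, 1, 0, 0, 0), by = "30 min", length.out = 48)
  subset.int(ts, c("2020-01-01 06:00:00", "2020-01-01 18:00:00"))
}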
NULL prior_options <- function(prior_scale_for_dispersion = 5, min_prior_scale = 1e-12, scaled = TRUE) { warning( "'prior_options' is deprecated and will be removed in a future release.", "\n* Priors for auxiliary parameters should now be set using", " the new 'prior_aux' argument when calling ", "'stan_glm', 'stan_glmer', etc.", "\n* Instead of setting 'prior_options(scaled=FALSE)',", " internal rescaling is now toggled using the", " new 'autoscale' argument to 'normal', 'student_t', or 'cauchy'", " (the other prior distributions do not support 'autoscale').", call. = FALSE ) validate_parameter_value(prior_scale_for_dispersion) validate_parameter_value(min_prior_scale) out <- nlist(scaled, min_prior_scale, prior_scale_for_dispersion) structure(out, from_prior_options = TRUE) } .support_deprecated_prior_options <- function(prior, prior_intercept, prior_aux, prior_ops) { if (!isTRUE(attr(prior_ops, "from_prior_options"))) stop( "The 'prior_ops' argument must be a call to 'prior_options'. ", "But 'prior_options' is deprecated and will be removed in a future release. ", "See help('rstanarm-deprecated') for details on the functionality ", "that replaces 'prior_options'.", call. = FALSE ) po_disp_scale <- prior_ops[["prior_scale_for_dispersion"]] po_scaled <- prior_ops[["scaled"]] if (!is.null(prior_aux) && !is.null(po_disp_scale)) { if (po_disp_scale != prior_aux[["scale"]]) { warning( "Setting prior scale for aux to value specified in ", "'prior_options' rather than value specified in 'prior_aux'.", call. = FALSE ) prior_aux[["scale"]] <- po_disp_scale } } if (!is.null(po_scaled) && identical(po_scaled, FALSE)) { if (isTRUE(prior$dist %in% c("normal", "t"))) prior$autoscale <- FALSE if (!is.null(prior_intercept)) prior_intercept$autoscale <- FALSE } nlist(prior, prior_intercept, prior_aux) }
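## Hedged usage sketch: prior_options() is deprecated but still returns the
## old-style options list (after a warning pointing at 'prior_aux'/'autoscale').
## This assumes rstanarm's internal helpers validate_parameter_value() and
## nlist() are available, as they are inside the package namespace.
if(interactive()) {
  po <- prior_options(prior_scale_for_dispersion = 2)
  str(po)
}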
context("test normalisation for PNG write") test_that("raster normalisation works", { dummy_values <- rep(c(0, 64, 128, 255), 25) mat_5_20 <- raster::raster(nrows = 5, ncols = 20, vals = dummy_values) mat_20_5 <- raster::raster(nrows = 20, ncols = 5, vals = dummy_values) mat_10_10 <- raster::raster(nrows = 10, ncols = 10, vals = dummy_values) expect_equal( raster::brick(mat_5_20, mat_5_20 * 0.1, mat_5_20 * 10) %>% normalise_raster() %>% range(), c(0, 1)) expect_equal( raster::brick(mat_5_20, mat_5_20 * 0.1, mat_5_20 * 10, mat_5_20) %>% normalise_raster() %>% range(), c(0, 1)) expect_equal( raster::brick(mat_5_20, mat_5_20 * 10) %>% normalise_raster() %>% range(), c(0, 1)) expect_equal( raster::brick(mat_20_5, mat_20_5 * 0.1, mat_20_5 * 10) %>% normalise_raster() %>% range(), c(0, 1)) expect_equal( raster::brick(mat_10_10, mat_10_10 * 0.1, mat_10_10 * 10) %>% normalise_raster() %>% range(), c(0, 1)) })
summary.l1ce <- function(object, correlation = TRUE, type = c("OPT", "Tibshirani"),
                         gen.inverse.diag = 0, sigma = NULL, ...)
{
  type <- match.arg(type)
  coef <- coef(object)
  cnames <- labels(coef)
  resid <- resid(object)
  covdf <- vcov(object, type, gen.inverse.diag)
  sigma.provided <- !missing(sigma)
  if(!sigma.provided)
    sigma <- sqrt(deviance(object)/covdf$df[2])
  se <- as.vector(sqrt(diag(covdf$cov.unscaled)))
  p <- length(se) # fix: moved out of the if() block; 'p' is needed even when correlation = FALSE
  correl <- if(correlation) {
    correl <- covdf$cov.unscaled/se
    correl/se[rep(1:p, rep(p, p))]
  }
  coef <- array(coef, c(p, 4))
  dimnames(coef) <- list(cnames, c("Value", "Std. Error", "Z score", "Pr(>|Z|)"))
  coef[, 2] <- se %o% sigma
  coef[, 3] <- coef[, 1]/coef[, 2]
  coef[, 4] <- 2*(1 - pnorm(abs(coef[, 3])))
  keep <- c("call", "terms", "bound", "relative.bound", "Lagrangian")
  object <- object[keep[!is.na(match(keep, names(object)))]]
  object$residuals <- resid
  object$coefficients <- coef
  object$sigma <- sigma
  object$sigma.provided <- sigma.provided
  object$df <- covdf$df
  object$cov.unscaled <- covdf$cov.unscaled
  object$correlation <- correl
  class(object) <- "summary.l1ce"
  object
}

summary.gl1ce <- function(object, dispersion = NULL, correlation = FALSE, ...)
{
  if(correlation)
    stop("The `correlation' argument is not yet implemented for gl1ce objects")
  coef <- coef(object)
  if(is.null(cnames <- names(coef)))
    cnames <- c("(Intercept)", labels(object))
  names(coef) <- cnames
  coef <- cbind(Value = coef)
  keep <- c("call", "terms", "bound", "Lagrangian", "family", "iter")
  object <- c(object[keep[!is.na(match(keep, names(object)))]],
              list(residuals = residuals(object, type="deviance"), coefficients = coef))
  class(object) <- "summary.gl1ce"
  object
}
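## Hedged usage sketch: summary.l1ce() is the summary method for lasso2's
## 'l1ce' fits, so this assumes the (archived) lasso2 package and its
## 'Prostate' dataset are installed.
if(interactive()) {
  library(lasso2)
  data(Prostate)
  fit <- l1ce(lpsa ~ ., data = Prostate, bound = 0.5)
  summary(fit)
}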
hy.prior.update <- function(network, hy.prior.Eta, hy.prior.bl, hy.prior.cov, hy.prior){ with(network, { if(baseline.risk == "exchangeable"){ if(is.null(hy.prior.Eta)){ hy.prior.Eta <- hy.prior.default(network) } } if(baseline == "exchangeable"){ if(is.null(hy.prior.bl)){ hy.prior.bl <- hy.prior.default(network) } } if(!is.null(covariate)){ if(covariate.model == "exchangeable"){ if(is.null(hy.prior.cov)){ hy.prior.cov <- hy.prior.default(network) } } } if(type == "random"){ if(is.null(hy.prior)){ hy.prior <- hy.prior.default(network) } } return(list(hy.prior.Eta = hy.prior.Eta, hy.prior.bl = hy.prior.bl, hy.prior.cov = hy.prior.cov, hy.prior = hy.prior)) }) } network.prior.default <- function(network, mean.d, prec.d, mean.Eta, prec.Eta, hy.prior.Eta, mean.bl, prec.bl, hy.prior.bl, mean.cov, prec.cov, hy.prior.cov, hy.prior) { with(network, { if(response == "binomial" || response == "normal"){ if(is.null(mean.d)){ mean.d = 0 } if(is.null(prec.d)){ prec.d = 0.0001 } if(is.null(mean.Eta)){ mean.Eta = 0 } if(is.null(prec.Eta)){ prec.Eta = 0.0001 } prior.data = list(mean.d = mean.d, prec.d = prec.d, mean.Eta = mean.Eta, prec.Eta = prec.Eta) if(type == "random"){ prior.data$hy.prior.1 <- hy.prior[[2]] prior.data$hy.prior.2 <- hy.prior[[3]] } if(baseline != "none"){ if(is.null(mean.bl)){ mean.bl = 0 } if(is.null(prec.bl)){ prec.bl = 0.0001 } prior.data$mean.bl = mean.bl prior.data$prec.bl = prec.bl if(baseline == "exchangeable"){ prior.data$hy.prior.bl.1 <- hy.prior.bl[[2]] prior.data$hy.prior.bl.2 <- hy.prior.bl[[3]] } } if(baseline.risk == "exchangeable"){ prior.data$hy.prior.Eta.1 <- hy.prior.Eta[[2]] prior.data$hy.prior.Eta.2 <- hy.prior.Eta[[3]] } if(!is.null(covariate)){ if(is.null(mean.cov)){ mean.cov = 0 } if(is.null(prec.cov)){ prec.cov = 0.0001 } prior.data$mean.cov = mean.cov prior.data$prec.cov = prec.cov if(covariate.model == "exchangeable"){ prior.data$hy.prior.cov.1 <- hy.prior.cov[[2]] prior.data$hy.prior.cov.2 <- hy.prior.cov[[3]] } } } else if(response == "multinomial"){ if(is.null(mean.d)){ mean.d = rep(0, ncat - 1) } if(is.null(prec.d)){ prec.d = diag(0.0001, ncat - 1) } if(is.null(mean.Eta)){ mean.Eta = rep(0, ncat - 1) } if(is.null(prec.Eta)){ prec.Eta = diag(0.25, ncat - 1) } prior.data = list(mean.d = mean.d, prec.d = prec.d, mean.Eta = mean.Eta, prec.Eta = prec.Eta) if(type == "random"){ prior.data$hy.prior.1 <- hy.prior[[2]] prior.data$hy.prior.2 <- hy.prior[[3]] } if(baseline != "none"){ if(is.null(mean.bl)){ mean.bl = rep(0, network$ncat - 1) } if(is.null(prec.bl)){ prec.bl = diag(0.0001, network$ncat - 1) } prior.data$mean.bl = mean.bl prior.data$prec.bl = prec.bl if(baseline == "exchangeable"){ prior.data$hy.prior.bl.1 <- hy.prior.bl[[2]] prior.data$hy.prior.bl.2 <- hy.prior.bl[[3]] } } if(baseline.risk == "exchangeable"){ prior.data$hy.prior.Eta.1 <- hy.prior.Eta[[2]] prior.data$hy.prior.Eta.2 <- hy.prior.Eta[[3]] } if(!is.null(network$covariate)){ if(is.null(mean.cov)){ mean.cov = rep(0, network$ncat - 1) } if(is.null(prec.cov)){ prec.cov = diag(0.0001, network$ncat - 1) } prior.data$mean.cov = mean.cov prior.data$prec.cov = prec.cov if(covariate.model == "exchangeable"){ prior.data$hy.prior.cov.1 <- hy.prior.cov[[2]] prior.data$hy.prior.cov.2 <- hy.prior.cov[[3]] } } } return(prior.data) }) } hy.prior.default <- function(network){ with(network,{ hy.prior <- if(response == "binomial"){ list("dunif", 0, 5) } else if(response == "normal"){ list("dunif", 0, 100) } else if(response == "multinomial"){ list("dwish", diag(ncat - 1), ncat -1) } return(hy.prior) }) }
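## Hedged usage sketch: hy.prior.default() only inspects network$response (and
## ncat for multinomial outcomes), so a minimal made-up network list suffices
## to show the defaults.
if(interactive()) {
  hy.prior.default(list(response = "normal"))                 # list("dunif", 0, 100)
  hy.prior.default(list(response = "binomial"))               # list("dunif", 0, 5)
  hy.prior.default(list(response = "multinomial", ncat = 3))  # list("dwish", diag(2), 2)
}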
maxORFlength_RNA<-function(seqs,reverse=TRUE,normalized=FALSE,label=c()){ if(length(seqs)==1&&file.exists(seqs)){ seqs<-fa.read(seqs,alphabet="rna") seqs_Lab<-alphabetCheck(seqs,alphabet = "rna",label) seqs<-seqs_Lab[[1]] label<-seqs_Lab[[2]] } else if(is.vector(seqs)){ seqs<-sapply(seqs,toupper) seqs_Lab<-alphabetCheck(seqs,alphabet = "rna",label) seqs<-seqs_Lab[[1]] label<-seqs_Lab[[2]] } else { stop("ERROR: Input sequence is not in the correct format. It should be a FASTA file or a string vector.") } numSeqs<-length(seqs) listORF<-list() listSeqs<-list() orfLen<-vector(mode = "numeric",length = numSeqs) for(n in 1:numSeqs){ seq<-seqs[n] len<-nchar(seq) firstSeq<-substring(seq,seq(1,(len-2),3),seq(3,len,3)) secSeq<-substring(seq,seq(2,(len-3),3),seq(4,len,3)) thSeq<-substring(seq,seq(3,(len-4),3),seq(5,len,3)) listSeqs[[1]]<-firstSeq listSeqs[[2]]<-secSeq listSeqs[[3]]<-thSeq listORF[[1]]<-findORF_RNA(firstSeq) listORF[[2]]<-findORF_RNA(secSeq) listORF[[3]]<-findORF_RNA(thSeq) if(reverse==TRUE){ revSeq<-revComp(seq) revSeq<-paste(revSeq,collapse = "") firstSeq<-substring(revSeq,seq(1,(len-2),3),seq(3,len,3)) secSeq<-substring(revSeq,seq(2,(len-3),3),seq(4,len,3)) thSeq<-substring(revSeq,seq(3,(len-4),3),seq(5,len,3)) listORF[[4]]<-findORF_RNA(firstSeq) listORF[[5]]<-findORF_RNA(secSeq) listORF[[6]]<-findORF_RNA(thSeq) } orfVect=c() for(i in 1:length(listORF)){ orfVect<-c(orfVect,listORF[[i]][1]) } maxORF<-max(orfVect) orfLen[n]<-(maxORF*3) } if(normalized==TRUE){ lenSeqs<-sapply(seqs, nchar) orfLen<-orfLen/lenSeqs } names(orfLen)<-names(seqs) if(length(label)==numSeqs){ orfLen<-cbind(orfLen,label) } return(orfLen) }
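## Hedged usage sketch: maxORFlength_RNA() relies on package-internal helpers
## (alphabetCheck, findORF_RNA, revComp), so it only runs with the package that
## defines them (e.g. ftrCOOL) loaded; the RNA sequence below is made up.
if(interactive()) {
  seqs <- c(s1 = "AUGGCCAUGGCGCCCAGAACUGAGAUCAAUAGUACCCGUAUUAACGGGUGA")
  maxORFlength_RNA(seqs, reverse = FALSE, normalized = TRUE)
}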
GC.content <- function(fasta_file){
  requireNamespace("seqinr")
  ## compute the GC proportion of each sequence, rounded to 4 decimals
  ## (fix: GC() is now called via the seqinr namespace it belongs to)
  res <- sapply(fasta_file, function(x) round(seqinr::GC(x), 4))
  w <- as.matrix(res)
  colnames(w) <- "GC-content"
  return(w)
}
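## Hedged usage sketch: GC.content() expects the list of sequences returned by
## seqinr::read.fasta(); 'example.fasta' is a hypothetical file name.
if(interactive()) {
  fasta <- seqinr::read.fasta("example.fasta")
  GC.content(fasta)
}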
library(ggplot2) library(ggiraph) p <- ggplot(mtcars, aes(wt, mpg, label = rownames(mtcars))) + geom_label_interactive(aes(tooltip = paste(rownames(mtcars), mpg, sep = "\n"))) x <- girafe(ggobj = p) if( interactive() ) print(x) p <- ggplot(mtcars, aes(wt, mpg, label = rownames(mtcars))) + geom_label_interactive(aes(fill = factor(cyl), tooltip = paste(rownames(mtcars), mpg, sep = "\n")), colour = "white", fontface = "bold") x <- girafe(ggobj = p) if( interactive() ) print(x)
context("testKeyPhrases") docsText <- c( "Loved the food, service and atmosphere! We'll definitely be back.", "Very good food, reasonable prices, excellent service.", "It was a great restaurant.", "If steak is what you want, this is the place.", "The atmosphere is pretty bad but the food is quite good.", "The food is quite good but the atmosphere is pretty bad.", "I'm not sure I would come back to this restaurant.", "The food wasn't very good.", "While the food was good the service was a disappointment.", "I was very disappointed with both the service and my entree." ) docsText2 <- c( "", "Very good food, reasonable prices, excellent service.", "It was a great restaurant.", "If steak is what you want, this is the place.", "The atmosphere is pretty bad but the food is quite good.", "The food is quite good but the atmosphere is pretty bad.", "I'm not sure I would come back to this restaurant.", "The food wasn't very good.", "While the food was good the service was a disappointment.", "I was very disappointed with both the service and my entree." ) docsLanguage <- rep("en", length(docsText)) test_that("textaKeyPhrases returns expected result structure", { skip_on_cran() res <- textaKeyPhrases(docsText, docsLanguage) expect_that(res, is_a("texta")) expect_that(length(res), equals(3)) expect_that(res[["request"]], is_a("request")) expect_that(res[["json"]], is_a("character")) expect_that(res[["results"]], is_a("data.frame")) expect_that(nrow(res[["results"]]), equals(10)) expect_that(ncol(res[["results"]]), equals(2)) expect_that(names(res[["results"]])[1], equals("text")) expect_that(names(res[["results"]])[2], equals("keyPhrases")) }) test_that("textaKeyPhrases fails with an error", { skip_on_cran() expect_that(textaKeyPhrases(documents = 0), throws_error()) expect_that(textaKeyPhrases(docsText, languages = 0), throws_error()) expect_that(textaKeyPhrases(docsText2), throws_error()) url <- mscstexta4r:::textaGetURL() key <- mscstexta4r:::textaGetKey() mscstexta4r:::textaSetKey("invalid-key") expect_that(textaKeyPhrases(docsText, docsLanguage), throws_error()) mscstexta4r:::textaSetURL("invalid-URL") expect_that(textaKeyPhrases(docsText, docsLanguage), throws_error()) mscstexta4r:::textaSetKey(key) expect_that(textaKeyPhrases(docsText, docsLanguage), throws_error()) mscstexta4r:::textaSetURL(url) })