code
stringlengths 1
13.8M
|
---|
# Validate per-solution exception lists for a benchmark task.
#
# query: named list (one element per solution); each element maps an
#   exception reason to one or more query names excluded for that reason.
# data:  named list (one element per solution); each element maps an
#   exception reason to one or more data-case names.
#
# A query/data case may appear at most once per solution (otherwise the
# same benchmark entry would be excluded for two conflicting reasons), so
# any duplicate across a solution's entries aborts with an error.
# Returns list(query = query, data = data) unchanged on success.
task.exceptions <- function(query, data) {
  ex <- list(query = query, data = data)
  # TRUE when all values across one solution's exception entries are unique
  unq_in_list <- function(x) {
    y <- unlist(x, use.names = FALSE)
    length(unique(y)) == length(y)
  }
  # vapply instead of sapply: guarantees a logical vector even for empty input
  if (!all(vapply(ex$query, unq_in_list, logical(1L))))
    stop("task.exceptions detected invalid entries in 'query' exceptions")
  if (!all(vapply(ex$data, unq_in_list, logical(1L))))
    stop("task.exceptions detected invalid entries in 'data' exceptions")
  ex
}
# Compose the report header line describing the benchmark input table, e.g.
# "Input table: 1,000,000,000 rows x 9 columns ( 50 GB ) 5% NAs".
# The data name encodes task parameters as underscore-separated tokens
# (<task>_<nrow>_<k>_<na%>_<sorted>); nrow/na%/sorted are parsed from it and
# the table dimensions come from data_spec() on the matching CSV file.
header_title_fun = function(x) {
  stopifnot(is.data.table(x), "data" %in% names(x))
  dn = unique1(x[["data"]])
  csv = file.path("data", paste0(dn, ".csv"))
  tok = strsplit(as.character(dn), "_", fixed=TRUE)[[1L]]
  spec = data_spec(csv, nrow=as.numeric(tok[2]))
  na_pct = as.numeric(tok[4])
  sorted = as.integer(tok[5])
  # optional suffix describing NA share and/or pre-sortedness
  extra = ""
  if (na_pct > 0 && sorted > 0L) {
    extra = sprintf(" %d%% NAs, pre-sorted", na_pct)
  } else if (na_pct > 0) {
    extra = sprintf(" %d%% NAs", na_pct)
  } else if (sorted > 0L) {
    extra = " pre-sorted"
  }
  sprintf(
    "Input table: %s rows x %s columns ( %s GB )%s",
    format_comma(as.numeric(spec[["nrow"]])[1L]),
    as.numeric(spec[["ncol"]])[1L],
    as.numeric(spec[["gb"]])[1L],
    extra
  )
}
# Per-solution display metadata used by the benchmark report plots: each
# entry maps a solution id to short/long display names and a strong/light
# color pair used for shading.
# NOTE(review): several entries below are truncated mid string literal
# (data.table, dplyr, pandas, spark, duckdb are missing their light colors
# and closing quotes) -- this block does not parse as-is and must be
# restored from the original source before use.
solution.dict = {list(
"data.table" = list(name=c(short="data.table", long="data.table"), color=c(strong="blue", light="
"dplyr" = list(name=c(short="dplyr", long="dplyr"), color=c(strong="red", light="
"pandas" = list(name=c(short="pandas", long="pandas"), color=c(strong="green4", light="
"pydatatable" = list(name=c(short="pydatatable", long="(py)datatable"), color=c(strong="darkorange", light="orange")),
"spark" = list(name=c(short="spark", long="spark"), color=c(strong="
"dask" = list(name=c(short="dask", long="dask"), color=c(strong="slategrey", light="lightgrey")),
"juliadf" = list(name=c(short="DF.jl", long="DataFrames.jl"), color=c(strong="deepskyblue", light="darkturquoise")),
"clickhouse" = list(name=c(short="clickhouse", long="ClickHouse"), color=c(strong="hotpink4", light="hotpink1")),
"cudf" = list(name=c(short="cuDF", long="cuDF"), color=c(strong="peachpuff3", light="peachpuff1")),
"polars" = list(name=c(short="polars", long="Polars"), color=c(strong="deepskyblue4", light="deepskyblue3")),
"arrow" = list(name=c(short="arrow", long="Arrow"), color=c(strong="aquamarine3", light="aquamarine1")),
"duckdb" = list(name=c(short="duckdb", long="DuckDB"), color=c(strong="
)}
# Build one plot title line per groupby benchmark question, e.g.
# 'Query 1: "sum v1 by id1": 100 ad hoc groups of ~100,000 rows; result 100 x 2'.
# 'x' must hold exactly one row per question (checked) with the question
# text and input/output dimensions.
groupby_q_title_fun = function(x) {
stopifnot(c("question","iquestion","out_rows","out_cols","in_rows") %in% names(x),
uniqueN(x, by="iquestion")==nrow(x))
# work on a copy; renumber questions so they display as 1..n in order
x = copy(x)[, "top2":=FALSE][, "iquestion":=rev(seq_along(iquestion))]
# the 'largest two' query keeps ~2 result rows per group, so its group
# count and per-group size are reported differently (flagged via 'top2')
x[question=="largest two v3 by id6", "top2":=TRUE]
x[, sprintf("Query %s: \"%s\": %s%s ad hoc groups of ~%s rows; result %s x %s",
iquestion, as.character(question),
if (top2) "~" else "",
format_comma(if (top2) out_rows/2 else out_rows),
if (top2) "2" else format_comma(as.numeric(as.character(in_rows))/as.numeric(out_rows)),
format_comma(out_rows), out_cols),
by = "iquestion"]$V1
}
# Per-solution syntax shown in the groupby benchmark report: for every
# solution, a named character vector mapping each benchmark question to the
# exact code string executed for it. Empty strings mark questions a
# solution cannot run (see groupby.query.exceptions).
groupby.syntax.dict = {list(
"data.table" = {c(
"sum v1 by id1" = "DT[, .(v1=sum(v1, na.rm=TRUE)), by=id1]",
"sum v1 by id1:id2" = "DT[, .(v1=sum(v1, na.rm=TRUE)), by=.(id1, id2)]",
"sum v1 mean v3 by id3" = "DT[, .(v1=sum(v1, na.rm=TRUE), v3=mean(v3, na.rm=TRUE)), by=id3]",
"mean v1:v3 by id4" = "DT[, lapply(.SD, mean, na.rm=TRUE), by=id4, .SDcols=v1:v3]",
"sum v1:v3 by id6" = "DT[, lapply(.SD, sum, na.rm=TRUE), by=id6, .SDcols=v1:v3]",
"median v3 sd v3 by id4 id5" = "DT[, .(median_v3=median(v3, na.rm=TRUE), sd_v3=sd(v3, na.rm=TRUE)), by=.(id4, id5)]",
"max v1 - min v2 by id3" = "DT[, .(range_v1_v2=max(v1, na.rm=TRUE)-min(v2, na.rm=TRUE)), by=id3]",
"largest two v3 by id6" = "DT[order(-v3, na.last=NA), .(largest2_v3=head(v3, 2L)), by=id6]",
"regression v1 v2 by id2 id4" = "DT[, .(r2=cor(v1, v2, use=\"na.or.complete\")^2), by=.(id2, id4)]",
"sum v3 count by id1:id6" = "DT[, .(v3=sum(v3, na.rm=TRUE), count=.N), by=id1:id6]"
)},
"dplyr" = {c(
"sum v1 by id1" = "DF %>% group_by(id1) %>% summarise(v1=sum(v1, na.rm=TRUE))",
"sum v1 by id1:id2" = "DF %>% group_by(id1, id2) %>% summarise(v1=sum(v1, na.rm=TRUE))",
"sum v1 mean v3 by id3" = "DF %>% group_by(id3) %>% summarise(v1=sum(v1, na.rm=TRUE), v3=mean(v3, na.rm=TRUE))",
"mean v1:v3 by id4" = "DF %>% group_by(id4) %>% summarise_at(.funs=\"mean\", .vars=c(\"v1\",\"v2\",\"v3\"), na.rm=TRUE)",
"sum v1:v3 by id6" = "DF %>% group_by(id6) %>% summarise_at(.funs=\"sum\", .vars=c(\"v1\",\"v2\",\"v3\"), na.rm=TRUE)",
"median v3 sd v3 by id4 id5" = "DF %>% group_by(id4, id5) %>% summarise(median_v3=median(v3, na.rm=TRUE), sd_v3=sd(v3, na.rm=TRUE))",
"max v1 - min v2 by id3" = "DF %>% group_by(id3) %>% summarise(range_v1_v2=max(v1, na.rm=TRUE)-min(v2, na.rm=TRUE))",
"largest two v3 by id6" = "DF %>% select(id6, largest2_v3=v3) %>% filter(!is.na(largest2_v3)) %>% arrange(desc(largest2_v3)) %>% group_by(id6) %>% filter(row_number() <= 2L)",
"regression v1 v2 by id2 id4" = "DF %>% group_by(id2, id4) %>% summarise(r2=cor(v1, v2, use=\"na.or.complete\")^2)",
"sum v3 count by id1:id6" = "DF %>% group_by(id1, id2, id3, id4, id5, id6) %>% summarise(v3=sum(v3, na.rm=TRUE), count=n())"
)},
"pandas" = {c(
"sum v1 by id1" = "DF.groupby('id1', as_index=False, sort=False, observed=True, dropna=False).agg({'v1':'sum'})",
"sum v1 by id1:id2" = "DF.groupby(['id1','id2'], as_index=False, sort=False, observed=True, dropna=False).agg({'v1':'sum'})",
"sum v1 mean v3 by id3" = "DF.groupby('id3', as_index=False, sort=False, observed=True, dropna=False).agg({'v1':'sum', 'v3':'mean'})",
"mean v1:v3 by id4" = "DF.groupby('id4', as_index=False, sort=False, observed=True, dropna=False).agg({'v1':'mean', 'v2':'mean', 'v3':'mean'})",
"sum v1:v3 by id6" = "DF.groupby('id6', as_index=False, sort=False, observed=True, dropna=False).agg({'v1':'sum', 'v2':'sum', 'v3':'sum'})",
"median v3 sd v3 by id4 id5" = "DF.groupby(['id4','id5'], as_index=False, sort=False, observed=True, dropna=False).agg({'v3': ['median','std']})",
"max v1 - min v2 by id3" = "DF.groupby('id3', as_index=False, sort=False, observed=True, dropna=False).agg({'v1':'max', 'v2':'min'}).assign(range_v1_v2=lambda x: x['v1']-x['v2'])[['id3','range_v1_v2']]",
"largest two v3 by id6" = "DF[~DF['v3'].isna()][['id6','v3']].sort_values('v3', ascending=False).groupby('id6', as_index=False, sort=False, observed=True, dropna=False).head(2)",
"regression v1 v2 by id2 id4" = "DF[['id2','id4','v1','v2']].groupby(['id2','id4'], as_index=False, sort=False, observed=True, dropna=False).apply(lambda x: pd.Series({'r2': x.corr()['v1']['v2']**2}))",
"sum v3 count by id1:id6" = "DF.groupby(['id1','id2','id3','id4','id5','id6'], as_index=False, sort=False, observed=True, dropna=False).agg({'v3':'sum', 'v1':'size'})"
)},
"pydatatable" = {c(
"sum v1 by id1" = "DT[:, {'v1': sum(f.v1)}, by(f.id1)]",
"sum v1 by id1:id2" = "DT[:, {'v1': sum(f.v1)}, by(f.id1, f.id2)]",
"sum v1 mean v3 by id3" = "DT[:, {'v1': sum(f.v1), 'v3': mean(f.v3)}, by(f.id3)]",
"mean v1:v3 by id4" = "DT[:, {'v1': mean(f.v1), 'v2': mean(f.v2), 'v3': mean(f.v3)}, by(f.id4)]",
"sum v1:v3 by id6" = "DT[:, {'v1': sum(f.v1), 'v2': sum(f.v2), 'v3': sum(f.v3)}, by(f.id6)]",
"median v3 sd v3 by id4 id5" = "DT[:, {'median_v3': median(f.v3), 'sd_v3': sd(f.v3)}, by(f.id4, f.id5)]",
"max v1 - min v2 by id3" = "DT[:, {'range_v1_v2': max(f.v1)-min(f.v2)}, by(f.id3)]",
"largest two v3 by id6" = "DT[~isna(f.v3),:][:2, {'largest2_v3': f.v3}, by(f.id6), sort(-f.v3)]",
"regression v1 v2 by id2 id4" = "DT[:, {'r2': corr(f.v1, f.v2)**2}, by(f.id2, f.id4)]",
"sum v3 count by id1:id6" = "DT[:, {'v3': sum(f.v3), 'count': count()}, by(f.id1, f.id2, f.id3, f.id4, f.id5, f.id6)]"
)},
"dask" = {c(
"sum v1 by id1" = "DF.groupby('id1', dropna=False, observed=True).agg({'v1':'sum'}).compute()",
"sum v1 by id1:id2" = "DF.groupby(['id1','id2'], dropna=False, observed=True).agg({'v1':'sum'}).compute()",
"sum v1 mean v3 by id3" = "DF.groupby('id3', dropna=False, observed=True).agg({'v1':'sum', 'v3':'mean'}).compute()",
"mean v1:v3 by id4" = "DF.groupby('id4', dropna=False, observed=True).agg({'v1':'mean', 'v2':'mean', 'v3':'mean'}).compute()",
"sum v1:v3 by id6" = "DF.groupby('id6', dropna=False, observed=True).agg({'v1':'sum', 'v2':'sum', 'v3':'sum'}).compute()",
"median v3 sd v3 by id4 id5" = "",
"max v1 - min v2 by id3" = "DF.groupby('id3', dropna=False, observed=True).agg({'v1':'max', 'v2':'min'}).assign(range_v1_v2=lambda x: x['v1']-x['v2'])[['range_v1_v2']].compute()",
"largest two v3 by id6" = "DF[~DF['v3'].isna()][['id6','v3']].groupby('id6', dropna=False, observed=True).apply(lambda x: x.nlargest(2, columns='v3'), meta={'id6':'Int64', 'v3':'float64'})[['v3']].compute()",
"regression v1 v2 by id2 id4" = "DF[['id2','id4','v1','v2']].groupby(['id2','id4'], dropna=False, observed=True).apply(lambda x: pd.Series({'r2': x.corr()['v1']['v2']**2}), meta={'r2':'float64'}).compute()",
"sum v3 count by id1:id6" = "DF.groupby(['id1','id2','id3','id4','id5','id6'], dropna=False, observed=True).agg({'v3':'sum', 'v1':'size'}).compute()"
)},
"spark" = {c(
"sum v1 by id1" = "SELECT id1, sum(v1) AS v1 FROM tbl GROUP BY id1",
"sum v1 by id1:id2" = "SELECT id1, id2, sum(v1) AS v1 FROM tbl GROUP BY id1, id2",
"sum v1 mean v3 by id3" = "SELECT id3, sum(v1) AS v1, mean(v3) AS v3 FROM tbl GROUP BY id3",
"mean v1:v3 by id4" = "SELECT id4, mean(v1) AS v1, mean(v2) AS v2, mean(v3) AS v3 FROM tbl GROUP BY id4",
"sum v1:v3 by id6" = "SELECT id6, sum(v1) AS v1, sum(v2) AS v2, sum(v3) AS v3 FROM tbl GROUP BY id6",
"median v3 sd v3 by id4 id5" = "",
"max v1 - min v2 by id3" = "SELECT id3, max(v1)-min(v2) AS range_v1_v2 FROM tbl GROUP BY id3",
"largest two v3 by id6" = "SELECT id6, largest2_v3 from (SELECT id6, v3 AS largest2_v3, row_number() OVER (PARTITION BY id6 ORDER BY v3 DESC) AS order_v3 FROM tbl WHERE v3 IS NOT NULL) sub_query WHERE order_v3 <= 2",
"regression v1 v2 by id2 id4" = "SELECT id2, id4, pow(corr(v1, v2), 2) AS r2 FROM tbl GROUP BY id2, id4",
"sum v3 count by id1:id6" = "SELECT id1, id2, id3, id4, id5, id6, sum(v3) AS v3, count(*) AS count FROM tbl GROUP BY id1, id2, id3, id4, id5, id6"
)},
"juliadf" = {c(
"sum v1 by id1" = "combine(groupby(DF, :id1), :v1 => sum∘skipmissing => :v1)",
"sum v1 by id1:id2" = "combine(groupby(DF, [:id1, :id2]), :v1 => sum∘skipmissing => :v1)",
"sum v1 mean v3 by id3" = "combine(groupby(DF, :id3), :v1 => sum∘skipmissing => :v1, :v3 => mean∘skipmissing => :v3)",
"mean v1:v3 by id4" = "combine(groupby(DF, :id4), :v1 => mean∘skipmissing => :v1, :v2 => mean∘skipmissing => :v2, :v3 => mean∘skipmissing => :v3)",
"sum v1:v3 by id6" = "combine(groupby(DF, :id6), :v1 => sum∘skipmissing => :v1, :v2 => sum∘skipmissing => :v2, :v3 => sum∘skipmissing => :v3)",
"median v3 sd v3 by id4 id5" = "combine(groupby(DF, [:id4, :id5]), :v3 => median∘skipmissing => :median_v3, :v3 => std∘skipmissing => :sd_v3)",
"max v1 - min v2 by id3" = "combine(groupby(DF, :id3), [:v1, :v2] => ((v1, v2) -> maximum(skipmissing(v1))-minimum(skipmissing(v2))) => :range_v1_v2)",
"largest two v3 by id6" = "combine(groupby(dropmissing(DF, :v3), :id6), :v3 => (x -> partialsort!(x, 1:min(2, length(x)), rev=true)) => :largest2_v3)",
"regression v1 v2 by id2 id4" = "combine(groupby(DF, [:id2, :id4]), [:v1, :v2] => ((v1,v2) -> cor(v1, v2)^2) => :r2)",
"sum v3 count by id1:id6" = "combine(groupby(DF, [:id1, :id2, :id3, :id4, :id5, :id6]), :v3 => sum∘skipmissing => :v3, :v3 => length => :count)"
)},
"cudf" = {c(
"sum v1 by id1" = "DF.groupby('id1', as_index=False, dropna=False).agg({'v1':'sum'}).compute()",
"sum v1 by id1:id2" = "DF.groupby(['id1','id2'], as_index=False, dropna=False).agg({'v1':'sum'}).compute()",
"sum v1 mean v3 by id3" = "DF.groupby('id3', as_index=False, dropna=False).agg({'v1':'sum', 'v3':'mean'}).compute()",
"mean v1:v3 by id4" = "DF.groupby('id4', as_index=False, dropna=False).agg({'v1':'mean', 'v2':'mean', 'v3':'mean'}).compute()",
"sum v1:v3 by id6" = "DF.groupby('id6', as_index=False, dropna=False).agg({'v1':'sum', 'v2':'sum', 'v3':'sum'}).compute()",
"median v3 sd v3 by id4 id5" = "DF.groupby(['id4','id5'], as_index=False, dropna=False).agg({'v3': ['median','std']}).compute()",
"max v1 - min v2 by id3" = "",
"largest two v3 by id6" = "",
"regression v1 v2 by id2 id4" = "",
"sum v3 count by id1:id6" = "DF.groupby(['id1','id2','id3','id4','id5','id6'], as_index=False, dropna=False).agg({'v3':'sum', 'v1':'size'}).compute()"
)},
"clickhouse" = {c(
"sum v1 by id1" = "SELECT id1, sum(v1) AS v1 FROM tbl GROUP BY id1",
"sum v1 by id1:id2" = "SELECT id1, id2, sum(v1) AS v1 FROM tbl GROUP BY id1, id2",
"sum v1 mean v3 by id3" = "SELECT id3, sum(v1) AS v1, avg(v3) AS v3 FROM tbl GROUP BY id3",
"mean v1:v3 by id4" = "SELECT id4, avg(v1) AS v1, avg(v2) AS v2, avg(v3) AS v3 FROM tbl GROUP BY id4",
"sum v1:v3 by id6" = "SELECT id6, sum(v1) AS v1, sum(v2) AS v2, sum(v3) AS v3 FROM tbl GROUP BY id6",
"median v3 sd v3 by id4 id5" = "SELECT id4, id5, medianExact(v3) AS median_v3, stddevPop(v3) AS sd_v3 FROM tbl GROUP BY id4, id5",
"max v1 - min v2 by id3" = "SELECT id3, max(v1) - min(v2) AS range_v1_v2 FROM tbl GROUP BY id3",
"largest two v3 by id6" = "SELECT id6, arrayJoin(arraySlice(arrayReverseSort(groupArray(v3)), 1, 2)) AS v3 FROM (SELECT id6, v3 FROM tbl WHERE v3 IS NOT NULL) AS subq GROUP BY id6",
"regression v1 v2 by id2 id4" = "SELECT id2, id4, pow(corr(v1, v2), 2) AS r2 FROM tbl GROUP BY id2, id4",
"sum v3 count by id1:id6" = "SELECT id1, id2, id3, id4, id5, id6, sum(v3) AS v3, count() AS cnt FROM tbl GROUP BY id1, id2, id3, id4, id5, id6"
)},
# NOTE(review): in the entry below, 'v2'' and 'v3'' (doubled closing quotes)
# look like typos for 'v2' and 'v3' -- not valid Polars syntax; confirm and
# fix in the upstream benchmark script.
"polars" = {c(
"sum v1 by id1" = "DF.groupby('id1').agg(pl.sum('v1')).collect()",
"sum v1 by id1:id2" = "DF.groupby(['id1','id2']).agg(pl.sum('v1')).collect()",
"sum v1 mean v3 by id3" = "DF.groupby('id3').agg([pl.sum('v1'), pl.mean('v3')]).collect()",
"mean v1:v3 by id4" = "DF.groupby('id4').agg([pl.mean('v1'), pl.mean('v2'), pl.mean('v3')]).collect()",
"sum v1:v3 by id6" = "DF.groupby('id6').agg([pl.sum('v1'), pl.sum('v2''), pl.sum('v3'')]).collect()",
"median v3 sd v3 by id4 id5" = "DF.groupby(['id4','id5']).agg([pl.median('v3').alias('v3_median'), pl.std('v3').alias('v3_std')]).collect()",
"max v1 - min v2 by id3" = "DF.groupby('id3').agg([(pl.max('v1') - pl.min('v2')).alias('range_v1_v2')]).collect()",
"largest two v3 by id6" = "DF.drop_nulls('v3').sort('v3', reverse=True).groupby('id6').agg(col('v3').head(2).alias('largest2_v3')).explode('largest2_v3').collect()",
"regression v1 v2 by id2 id4" = "DF.groupby(['id2','id4']).agg((pl.pearson_corr('v1','v2')**2).alias('r2')).collect()",
"sum v3 count by id1:id6" = "DF.groupby(['id1','id2','id3','id4','id5','id6']).agg([pl.sum('v3').alias('v3'), pl.count('v1').alias('count')]).collect()"
)},
"arrow" = {c(
"sum v1 by id1" = "AT %>% group_by(id1) %>% summarise(v1=sum(v1, na.rm=TRUE))",
"sum v1 by id1:id2" = "AT %>% group_by(id1, id2) %>% summarise(v1=sum(v1, na.rm=TRUE))",
"sum v1 mean v3 by id3" = "AT %>% group_by(id3) %>% summarise(v1=sum(v1, na.rm=TRUE), v3=mean(v3, na.rm=TRUE))",
"mean v1:v3 by id4" = "AT %>% group_by(id4) %>% summarise_at(.funs=\"mean\", .vars=c(\"v1\",\"v2\",\"v3\"), na.rm=TRUE)",
"sum v1:v3 by id6" = "AT %>% group_by(id6) %>% summarise_at(.funs=\"sum\", .vars=c(\"v1\",\"v2\",\"v3\"), na.rm=TRUE)",
"median v3 sd v3 by id4 id5" = "AT %>% group_by(id4, id5) %>% summarise(median_v3=median(v3, na.rm=TRUE), sd_v3=sd(v3, na.rm=TRUE))",
"max v1 - min v2 by id3" = "AT %>% group_by(id3) %>% summarise(range_v1_v2=max(v1, na.rm=TRUE)-min(v2, na.rm=TRUE))",
"largest two v3 by id6" = "AT %>% select(id6, largest2_v3=v3) %>% filter(!is.na(largest2_v3)) %>% arrange(desc(largest2_v3)) %>% group_by(id6) %>% filter(row_number() <= 2L)",
"regression v1 v2 by id2 id4" = "AT %>% group_by(id2, id4) %>% summarise(r2=cor(v1, v2, use=\"na.or.complete\")^2)",
"sum v3 count by id1:id6" = "AT %>% group_by(id1, id2, id3, id4, id5, id6) %>% summarise(v3=sum(v3, na.rm=TRUE), count=n())"
)},
"duckdb" = {c(
"sum v1 by id1" = "SELECT id1, sum(v1) AS v1 FROM tbl GROUP BY id1",
"sum v1 by id1:id2" = "SELECT id1, id2, sum(v1) AS v1 FROM tbl GROUP BY id1, id2",
"sum v1 mean v3 by id3" = "SELECT id3, sum(v1) AS v1, mean(v3) AS v3 FROM tbl GROUP BY id3",
"mean v1:v3 by id4" = "SELECT id4, mean(v1) AS v1, mean(v2) AS v2, mean(v3) AS v3 FROM tbl GROUP BY id4",
"sum v1:v3 by id6" = "SELECT id6, sum(v1) AS v1, sum(v2) AS v2, sum(v3) AS v3 FROM tbl GROUP BY id6",
"median v3 sd v3 by id4 id5" = "SELECT id4, id5, quantile_cont(v3, 0.5) AS median_v3, stddev(v3) AS sd_v3 FROM tbl GROUP BY id4, id5",
"max v1 - min v2 by id3" = "SELECT id3, max(v1)-min(v2) AS range_v1_v2 FROM tbl GROUP BY id3",
"largest two v3 by id6" = "SELECT id6, v3 AS largest2_v3 FROM (SELECT id6, v3, row_number() OVER (PARTITION BY id6 ORDER BY v3 DESC) AS order_v3 FROM x WHERE v3 IS NOT NULL) sub_query WHERE order_v3 <= 2",
"regression v1 v2 by id2 id4" = "SELECT id2, id4, pow(corr(v1, v2), 2) AS r2 FROM tbl GROUP BY id2, id4",
"sum v3 count by id1:id6" = "SELECT id1, id2, id3, id4, id5, id6, sum(v3) AS v3, count(*) AS count FROM tbl GROUP BY id1, id2, id3, id4, id5, id6"
)}
)}
# Query-level exceptions for the groupby task: per solution, an exception
# reason mapped to the query/queries that solution cannot run.
# NOTE(review): several string literals below are truncated mid-line
# (dask, cudf, duckdb entries lack their issue references and closing
# quotes) -- this block does not parse as-is and must be restored from the
# original benchmark source.
groupby.query.exceptions = {list(
"data.table" = list(),
"dplyr" = list(),
"pandas" = list(),
"pydatatable" = list(),
"spark" = list("not yet implemented: SPARK-26589" = "median v3 sd v3 by id4 id5"),
"dask" = list("not yet implemented: dask
"juliadf" = list(),
"cudf" = list("not yet implemented: cudf
"not yet implemented: cudf
"not yet implemented: cudf
"not yet implemented: cudf
"clickhouse" = list(),
"polars" = list(),
"arrow" = list(),
"duckdb" = list("inaccurate: duckdb
)}
# Data-level exceptions for the groupby task: per solution, an exception
# reason mapped to the data cases (encoded as G1_<nrow>_<k>_<na>_<sort>)
# that solution cannot complete.
# NOTE(review): several string literals below are truncated mid-line
# (pandas, pydatatable, dask, cudf, duckdb reasons lack their issue
# references and closing quotes) -- this block does not parse as-is and
# must be restored from the original benchmark source.
groupby.data.exceptions = {list(
"data.table" = {list(
"timeout" = c("G1_1e9_1e1_0_0",
"G1_1e9_2e0_0_0")
)},
"dplyr" = {list(
"timeout" = c("G1_1e8_2e0_0_0"),
"internal error" = c("G1_1e9_1e2_0_0","G1_1e9_1e2_0_1","G1_1e9_1e2_5_0",
"G1_1e9_1e1_0_0",
"G1_1e9_2e0_0_0")
)},
"pandas" = {list(
"not yet implemented: pandas
"out of memory" = c("G1_1e9_1e2_0_0","G1_1e9_1e1_0_0","G1_1e9_2e0_0_0","G1_1e9_1e2_0_1")
)},
"pydatatable" = {list(
"csv reader NAs bug: datatable
)},
"spark" = {list(
"timeout" = "G1_1e9_1e2_5_0"
)},
"dask" = {list(
"not yet implemented: dask
"internal error" = "G1_1e8_1e2_0_0",
"out of memory" = c("G1_1e7_1e2_0_0","G1_1e7_1e2_0_1",
"G1_1e8_1e2_0_1",
"G1_1e9_1e2_0_0","G1_1e9_1e2_0_1","G1_1e9_1e1_0_0","G1_1e9_2e0_0_0"),
"timeout" = c("G1_1e7_1e1_0_0",
"G1_1e7_2e0_0_0",
"G1_1e8_1e1_0_0",
"G1_1e8_2e0_0_0")
)},
"juliadf" = {list(
"timeout" = "G1_1e8_2e0_0_0",
"out of memory" = c("G1_1e9_1e2_0_0","G1_1e9_1e1_0_0","G1_1e9_2e0_0_0","G1_1e9_1e2_0_1","G1_1e9_1e2_5_0")
)},
"cudf" = {list(
"not yet implemented: cudf
"internal error: cudf
"out of memory" = c("G1_1e8_1e2_0_0","G1_1e8_1e1_0_0","G1_1e8_2e0_0_0","G1_1e8_1e2_0_1",
"G1_1e9_1e2_0_0","G1_1e9_1e1_0_0","G1_1e9_2e0_0_0","G1_1e9_1e2_0_1")
)},
"clickhouse" = {list(
)},
"polars" = {list(
"out of memory" = c("G1_1e9_1e2_0_0","G1_1e9_1e1_0_0","G1_1e9_2e0_0_0","G1_1e9_1e2_0_1","G1_1e9_1e2_5_0")
)},
"arrow" = {list(
"timeout" = "G1_1e8_2e0_0_0",
"internal error" = c("G1_1e9_1e2_0_0","G1_1e9_1e2_0_1","G1_1e9_1e2_5_0","G1_1e9_1e1_0_0",
"G1_1e9_2e0_0_0")
)},
"duckdb" = {list(
"out of memory" = c("G1_1e9_1e2_0_0","G1_1e9_1e1_0_0","G1_1e9_2e0_0_0","G1_1e9_1e2_0_1","G1_1e9_1e2_5_0"),
"incorrect: duckdb
)}
)}
# Validate and combine groupby query/data exceptions (stops on duplicates).
groupby.exceptions = task.exceptions(groupby.query.exceptions, groupby.data.exceptions)
# Build one plot title line per join benchmark question, e.g.
# 'Query 1: "small inner on int": result 9,999,999 x 10'.
# 'x' must hold exactly one row per question (checked).
join_q_title_fun = function(x) {
  stopifnot(c("question","iquestion","out_rows","out_cols","in_rows") %in% names(x),
            uniqueN(x, by="iquestion")==nrow(x))
  # renumber on a copy so questions display as 1..n in order
  y = copy(x)
  y[, "iquestion" := rev(seq_along(iquestion))]
  titles = y[, sprintf("Query %s: \"%s\": result %s x %s",
                       iquestion, as.character(question),
                       format_comma(out_rows), out_cols),
             by = "iquestion"]
  titles$V1
}
# Per-solution syntax shown in the join benchmark report: for every
# solution, a named character vector mapping each join question to the
# exact code string executed for it (x joined to the small/medium/big
# lookup tables on integer or factor keys).
join.syntax.dict = {list(
"dask" = {c(
"small inner on int" = "DF.merge(small, on='id1').compute()",
"medium inner on int" = "DF.merge(medium, on='id2').compute()",
"medium outer on int" = "DF.merge(medium, how='left', on='id2').compute()",
"medium inner on factor" = "DF.merge(medium, on='id5').compute()",
"big inner on int" = "DF.merge(big, on='id3').compute()"
)},
"data.table" = {c(
"small inner on int" = "DT[small, on='id1', nomatch=NULL]",
"medium inner on int" = "DT[medium, on='id2', nomatch=NULL]",
"medium outer on int" = "medium[DT, on='id2']",
"medium inner on factor" = "DT[medium, on='id5', nomatch=NULL]",
"big inner on int" = "DT[big, on='id3', nomatch=NULL]"
)},
"dplyr" = {c(
"small inner on int" = "inner_join(DF, small, by='id1')",
"medium inner on int" = "inner_join(DF, medium, by='id2')",
"medium outer on int" = "left_join(DF, medium, by='id2')",
"medium inner on factor" = "inner_join(DF, medium, by='id5')",
"big inner on int" = "inner_join(DF, big, by='id3')"
)},
"juliadf" = {c(
"small inner on int" = "innerjoin(DF, small, on = :id1, makeunique=true, matchmissing=:equal)",
"medium inner on int" = "innerjoin(DF, medium, on = :id2, makeunique=true, matchmissing=:equal)",
"medium outer on int" = "leftjoin(DF, medium, on = :id2, makeunique=true, matchmissing=:equal)",
"medium inner on factor" = "innerjoin(DF, medium, on = :id5, makeunique=true, matchmissing=:equal)",
"big inner on int" = "innerjoin(DF, big, on = :id3, makeunique=true, matchmissing=:equal)"
)},
"pandas" = {c(
"small inner on int" = "DF.merge(small, on='id1')",
"medium inner on int" = "DF.merge(medium, on='id2')",
"medium outer on int" = "DF.merge(medium, how='left', on='id2')",
"medium inner on factor" = "DF.merge(medium, on='id5')",
"big inner on int" = "DF.merge(big, on='id3')"
)},
"pydatatable" = {c(
"small inner on int" = "y.key = 'id1'; DT[:, :, join(y)][isfinite(f.v2), :]",
"medium inner on int" = "y.key = 'id2'; DT[:, :, join(y)][isfinite(f.v2), :]",
"medium outer on int" = "y.key = 'id2'; DT[:, :, join(y)]",
"medium inner on factor" = "y.key = 'id5'; DT[:, :, join(y)][isfinite(f.v2), :]",
"big inner on int" = "y.key = 'id3'; DT[:, :, join(y)][isfinite(f.v2), :]"
)},
"spark" = {c(
"small inner on int" = "select * from x join small using (id1)",
"medium inner on int" = "select * from x join medium using (id2)",
"medium outer on int" = "select * from x left join medium using (id2)",
"medium inner on factor" = "select * from x join medium using (id5)",
"big inner on int" = "select * from x join big using (id3)"
)},
"clickhouse" = {c(
"small inner on int" = "SELECT id1, x.id2, x.id3, x.id4, y.id4, x.id5, x.id6, x.v1, y.v2 FROM x INNER JOIN y USING (id1)",
"medium inner on int" = "SELECT x.id1, y.id1, id2, x.id3, x.id4, y.id4, x.id5, y.id5, x.id6, x.v1, y.v2 FROM x INNER JOIN y USING (id2)",
"medium outer on int" = "SELECT x.id1, y.id1, id2, x.id3, x.id4, y.id4, x.id5, y.id5, x.id6, x.v1, y.v2 FROM x LEFT JOIN y USING (id2)",
"medium inner on factor" = "SELECT x.id1, y.id1, x.id2, y.id2, x.id3, x.id4, y.id4, id5, x.id6, x.v1, y.v2 FROM x INNER JOIN y USING (id5)",
"big inner on int" = "SELECT x.id1, y.id1, x.id2, y.id2, id3, x.id4, y.id4, x.id5, y.id5, x.id6, y.id6, x.v1, y.v2 FROM x INNER JOIN y USING (id3)"
)},
"cudf" = {c(
"small inner on int" = "DF.merge(small, on='id1').compute()",
"medium inner on int" = "DF.merge(medium, on='id2').compute()",
"medium outer on int" = "DF.merge(medium, how='left', on='id2').compute()",
"medium inner on factor" = "DF.merge(medium, on='id5').compute()",
"big inner on int" = "DF.merge(big, on='id3').compute()"
)},
"polars" = {c(
"small inner on int" = "DF.merge(small, on='id1')",
"medium inner on int" = "DF.merge(medium, on='id2')",
"medium outer on int" = "DF.merge(medium, how='left', on='id2')",
"medium inner on factor" = "DF.merge(medium, on='id5')",
"big inner on int" = "DF.merge(big, on='id3')"
)},
"arrow" = {c(
"small inner on int" = "inner_join(DF, small, by='id1')",
"medium inner on int" = "inner_join(DF, medium, by='id2')",
"medium outer on int" = "left_join(DF, medium, by='id2')",
"medium inner on factor" = "inner_join(DF, medium, by='id5')",
"big inner on int" = "inner_join(DF, big, by='id3')"
)},
"duckdb" = {c(
"small inner on int" = "SELECT x.*, small.id4 AS small_id4, v2 FROM x JOIN small USING (id1)",
"medium inner on int" = "SELECT x.*, medium.id1 AS medium_id1, medium.id4 AS medium_id4, medium.id5 AS medium_id5, v2 FROM x JOIN medium USING (id2)",
"medium outer on int" = "SELECT x.*, medium.id1 AS medium_id1, medium.id4 AS medium_id4, medium.id5 AS medium_id5, v2 FROM x LEFT JOIN medium USING (id2)",
"medium inner on factor" = "SELECT x.*, medium.id1 AS medium_id1, medium.id2 AS medium_id2, medium.id4 AS medium_id4, v2 FROM x JOIN medium USING (id5)",
"big inner on int" = "SELECT x.*, big.id1 AS big_id1, big.id2 AS big_id2, big.id4 AS big_id4, big.id5 AS big_id5, big.id6 AS big_id6, v2 FROM x JOIN big USING (id3)"
)}
)}
# Query-level exceptions for the join task: per solution, an exception
# reason mapped to the query/queries that solution cannot run.
# NOTE(review): the cudf entry below is truncated mid string literal
# (missing issue reference, closing quote and value) -- this block does
# not parse as-is and must be restored from the original source.
join.query.exceptions = {list(
"data.table" = list(),
"dplyr" = list(),
"pandas" = list(),
"pydatatable" = list(),
"spark" = list(),
"dask" = list(),
"juliadf" = list(),
"cudf" = list("not yet implemented: cudf
"clickhouse" = list(),
"polars" = list(),
"arrow" = list(),
"duckdb" = list()
)}
# Data-level exceptions for the join task: per solution, an exception
# reason mapped to the data cases (encoded as J1_<nrow>_NA_<na>_<sort>)
# that solution cannot complete.
# NOTE(review): several string literals below are truncated mid-line
# (pydatatable, dask, cudf, arrow, duckdb reasons lack issue references
# and closing quotes) -- this block does not parse as-is and must be
# restored from the original benchmark source.
join.data.exceptions = {list(
"data.table" = {list(
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"dplyr" = {list(
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"pandas" = {list(
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"pydatatable" = {list(
"csv reader NAs bug: datatable
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_0_1")
)},
"spark" = {list(
"timeout" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"dask" = {list(
"internal error: dask
"J1_1e8_NA_0_0","J1_1e8_NA_5_0","J1_1e8_NA_0_1",
"J1_1e9_NA_5_0","J1_1e9_NA_0_1"),
"out of memory" = c("J1_1e9_NA_0_0")
)},
"juliadf" = {list(
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"cudf" = {list(
"internal error: cudf
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"clickhouse" = {list(
"out of memory" = c("J1_1e9_NA_0_0",
"J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"polars" = {list(
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)},
"arrow" = {list(
"not yet implemented:
)},
"duckdb" = {list(
"internal error: duckdb
"out of memory" = c("J1_1e9_NA_0_0","J1_1e9_NA_5_0","J1_1e9_NA_0_1")
)}
)}
# Validate and combine join query/data exceptions (stops on duplicates).
join.exceptions = task.exceptions(join.query.exceptions, join.data.exceptions)
# Per-solution syntax for the 2014-revisited groupby task (the original
# 5-question benchmark): only data.table, dplyr and pandas participate.
groupby2014.syntax.dict = {list(
"data.table" = {c(
"sum v1 by id1" = "DT[, sum(v1), keyby=id1]",
"sum v1 by id1:id2" = "DT[, sum(v1), keyby='id1,id2']",
"sum v1 mean v3 by id3" = "DT[, list(sum(v1), mean(v3)), keyby=id3]",
"mean v1:v3 by id4" = "DT[, lapply(.SD, mean), keyby=id4, .SDcols=7:9]",
"sum v1:v3 by id6" = "DT[, lapply(.SD, sum), keyby=id6, .SDcols=7:9]"
)},
"dplyr" = {c(
"sum v1 by id1" = "DF %>% group_by(id1) %>% summarise(sum(v1))",
"sum v1 by id1:id2" = "DF %>% group_by(id1,id2) %>% summarise(sum(v1))",
"sum v1 mean v3 by id3" = "DF %>% group_by(id3) %>% summarise(sum(v1), mean(v3))",
"mean v1:v3 by id4" = "DF %>% group_by(id4) %>% summarise(across(v1:v3, mean))",
"sum v1:v3 by id6" = "DF %>% group_by(id6) %>% summarise(across(v1:v3, sum))"
)},
"pandas" = {c(
"sum v1 by id1" = "DF.groupby(['id1']).agg({'v1':'sum'})",
"sum v1 by id1:id2" = "DF.groupby(['id1','id2']).agg({'v1':'sum'})",
"sum v1 mean v3 by id3" = "DF.groupby(['id3']).agg({'v1':'sum', 'v3':'mean'})",
"mean v1:v3 by id4" = "DF.groupby(['id4']).agg({'v1':'mean', 'v2':'mean', 'v3':'mean'})",
"sum v1:v3 by id6" = "DF.groupby(['id6']).agg({'v1':'sum', 'v2':'sum', 'v3':'sum'})"
)}
)}
# Query-level exceptions for the 2014-revisited groupby task: none of the
# three participating solutions skips any query, so every entry is empty.
groupby2014.query.exceptions = setNames(
  replicate(3L, list(), simplify = FALSE),
  c("data.table", "dplyr", "pandas")
)
# Data-level exceptions for the 2014-revisited groupby task: the largest
# case (1e9 rows, 1e2 groups) is excluded for dplyr and pandas.
groupby2014.data.exceptions = list(
  "data.table" = list(),
  "dplyr" = list("internal error" = "G0_1e9_1e2_0_0"),
  "pandas" = list("out of memory" = "G0_1e9_1e2_0_0")
)
# Validate and combine the 2014 groupby exceptions (stops on duplicates).
# Fix: removed a stray trailing "|" (data-dump record separator) that made
# this line a syntax error.
groupby2014.exceptions = task.exceptions(groupby2014.query.exceptions, groupby2014.data.exceptions)
# Pairwise beta diversity indices between the rows (sites) of a community
# matrix 'x'. Each index is kept as an unevaluated formula over the classic
# triangle quantities: a = species shared by the two sites, b = species
# unique to the first site, c = species unique to the second site.
#
# method: code of the index to evaluate (see help = TRUE for the list);
#         NA/NULL/FALSE returns the raw a, b, c components instead.
# order:  reorder sites by increasing species richness before computing.
# help:   print the available indices and return invisible(NULL).
# Value:  a 'dist' object (or a classed list(a, b, c) when no method).
#
# Fixes vs previous revision: removed a stray trailing "|" (data-dump
# artifact) after the closing brace; NULL is now tested before is.na()
# (is.na(NULL) is logical(0), which makes '||' error); an unknown method
# code now fails with a clear message instead of an obscure eval error.
`betadiver` <-
function(x, method = NA, order = FALSE, help = FALSE, ...)
{
    beta <- list("w"="(b+c)/(2*a+b+c)", "-1"="(b+c)/(2*a+b+c)", "c"="(b+c)/2",
                 "wb"="b+c", "r"="2*b*c/((a+b+c)^2-2*b*c)",
                 "I"="log(2*a+b+c) - 2*a*log(2)/(2*a+b+c) - ((a+b)*log(a+b) + (a+c)*log(a+c)) / (2*a+b+c)",
                 "e"="exp(log(2*a+b+c) - 2*a*log(2)/(2*a+b+c) - ((a+b)*log(a+b) + (a+c)*log(a+c)) / (2*a+b+c))-1",
                 "t"="(b+c)/(2*a+b+c)", "me"="(b+c)/(2*a+b+c)",
                 "j"="a/(a+b+c)", "sor"="2*a/(2*a+b+c)",
                 "m"="(2*a+b+c)*(b+c)/(a+b+c)",
                 "-2"="pmin(b,c)/(pmax(b,c)+a)",
                 "co"="(a*c+a*b+2*b*c)/(2*(a+b)*(a+c))",
                 "cc"="(b+c)/(a+b+c)", "g"="(b+c)/(a+b+c)",
                 "-3"="pmin(b,c)/(a+b+c)", "l"="(b+c)/2",
                 "19"="2*(b*c+1)/(a+b+c)/(a+b+c-1)",
                 "hk"="(b+c)/(2*a+b+c)", "rlb"="a/(a+c)",
                 "sim"="pmin(b,c)/(pmin(b,c)+a)",
                 "gl"="2*abs(b-c)/(2*a+b+c)",
                 "z"="(log(2)-log(2*a+b+c)+log(a+b+c))/log(2)"
                 )
    ## help = TRUE: list every index with its formula, nothing computed
    if (help) {
        for (i in seq_along(beta))
            writeLines(strwrap(paste(i, " \"", names(beta[i]),
                                     "\" = ", beta[[i]], "\n", sep="")))
        return(invisible(NULL))
    }
    ## indices are defined on presence/absence data only
    x <- ifelse(x > 0, 1, 0)
    if (order) {
        x <- x[order(rowSums(x)), ]
    }
    ## shared species per pair from the cross product; b and c follow from
    ## each site's richness S minus the shared count
    d <- tcrossprod(x)
    a <- as.dist(d)
    S <- diag(d)
    N <- length(S)
    b <- as.dist(matrix(rep(S, N), nrow = N)) - a
    c <- as.dist(matrix(rep(S, each = N), nrow = N)) - a
    ## no method requested: return the raw triangle components
    if (is.null(method) || isTRUE(is.na(method)) ||
        (is.logical(method) && !method)) {
        out <- list(a = a, b = b, c = c)
        class(out) <- "betadiver"
        return(out)
    }
    fla <- beta[[method]]
    if (is.null(fla))
        stop("unknown beta diversity 'method' ", sQuote(method), call. = FALSE)
    out <- as.dist(eval(parse(text = fla)))
    attr(out, "method") <- paste("beta", names(beta[method]), sep = ".")
    attr(out, "call") <- match.call()
    out
}
# Render one flow diagram per function of a package into an R Markdown file
# and knit it. With pkg = NULL ("pkgdown mode") the working directory is
# assumed to be the package root and exported names are read from NAMESPACE;
# otherwise the installed namespace of 'pkg' is used. Diagrams for exported
# and unexported functions are appended in separate sections, then the Rmd
# is rendered to 'out' (default "diagrams.md" in pkgdown mode, else a temp
# .html that is opened in the browser).
# NOTE(review): the two 'title = "' arguments below are truncated string
# literals (data-dump artifact) -- this block does not parse as-is and must
# be restored from the original {flow} source.
flow_doc <- function(
pkg = NULL,
prefix = NULL,
code = TRUE,
narrow = FALSE,
truncate = NULL,
swap = TRUE,
out = NULL,
engine = c("nomnoml", "plantuml")) {
engine <- match.arg(engine)
# helper that packs a list into `...`; appears unused in the visible body
# -- TODO(review): confirm whether it can be dropped
as_dots <- function(x) {
f <- function(...) environment()$...
do.call(f, as.list(x))
}
pkgdown <- is.null(pkg)
# remember whether we chose the output path ourselves (browse it at the end)
missing_out <- FALSE
if(is.null(out)) {
if(pkgdown) {
out <- "diagrams.md"
} else {
missing_out <- TRUE
out <- tempfile(fileext = ".html")
}
}
# file extension of the output; NOTE(review): lazy regex splits at the
# first dot anywhere in the path, not the last -- verify on dotted dirs
ext <- sub("^.*?\\.(.*?)", "\\1", out)
if(pkgdown) {
# pkgdown mode: package name from the directory, exports from NAMESPACE
pkg <- basename(getwd())
exported <- gsub("export\\((.*?)\\)", "\\1", grep("^export", readLines("NAMESPACE"), value = TRUE))
} else {
exported <- getNamespaceExports(pkg)
}
all_funs <- lsf.str(asNamespace(pkg))
exported_funs <- intersect(all_funs, exported)
unexported_funs <- setdiff(all_funs, exported_funs)
# bucket function names by first letter; non-letters grouped under "-"
f <- toupper(substr(exported_funs,1,1))
f[! f %in% LETTERS] <- "-"
exported_funs_split <-split(exported_funs, f)
f <- toupper(substr(unexported_funs,1,1))
f[! f %in% LETTERS] <- "-"
unexported_funs_split <- split(unexported_funs, f)
# scratch dir for the Rmd and the generated PNGs
path <- file.path(tempdir(), pkg)
dir.create(path, recursive = TRUE, showWarnings = FALSE)
rmd_output <- file.path(path, "test.Rmd")
ns <- asNamespace(pkg)
# markdown output needs no YAML header; html gets a floating-TOC header
if(ext == "md") {
rmd_header <- ''
} else {
rmd_header <- sprintf(paste(
'---',
'title: "%s"',
'output:',
' html_document:',
' toc: true',
' toc_float: true',
'---\n\n', sep="\n"), pkg)
}
cat(rmd_header, file = rmd_output)
append_function_diagrams(
title = "
progress_txt = "Building diagrams of exported functions\n",
funs_split = exported_funs_split,
out = rmd_output,
ns = ns,
path = path,
exp_unexp = "exp",
pkg = pkg,
prefix = prefix,
truncate = truncate,
swap = swap,
narrow = narrow,
code = code,
engine = engine)
append_function_diagrams(
title = "
progress_txt = "Building diagrams of unexported functions\n",
funs_split = unexported_funs_split,
out = rmd_output,
ns = ns,
path = path,
exp_unexp = "unexp",
pkg = pkg,
prefix = prefix,
truncate = truncate,
swap = swap,
narrow = narrow,
code = code,
engine = engine)
cat("knitting")
out <- suppressWarnings(normalizePath(out, winslash = "/"))
rmarkdown::render(rmd_output, output_file = out)
if(ext == "md") {
writeLines(readLines(out)[-1], out)
}
if(missing_out) {
browseURL(out)
}
invisible(NULL)
}
# Append to the Rmd at 'out' one section per initial letter and one
# subsection per function: a title, a PNG diagram produced by flow_view()
# (or a note when the function has no body), then the same treatment for
# any function definitions nested inside the body (found via find_funs()).
# Extra arguments in '...' are forwarded to flow_view(). A console progress
# bar tracks the total number of functions processed.
# NOTE(review): several sprintf() title templates below are truncated
# string literals, the fun_img/sub_fun_img sprintf calls appear to have
# lost their '![](...)' markdown templates, and the final line carries a
# stray trailing "|" (all data-dump artifacts) -- this block does not
# parse as-is and must be restored from the original {flow} source.
append_function_diagrams <- function(
title,
progress_txt,
funs_split,
out,
ns,
path,
exp_unexp,
pkg,
...) {
# nothing to document for this section
if(!length(funs_split)) return(invisible(NULL))
cat(title, file = out, append = TRUE)
# one progress-bar step per function across all letter buckets
pb = txtProgressBar(min = 0, max = length(unlist(funs_split)), initial = 0)
stepi = 0
cat(progress_txt)
# outer loop: one letter bucket at a time
for(L in names(funs_split)) {
letter_title <- sprintf("
cat(letter_title, file = out, append = TRUE)
for(fun_chr in funs_split[[L]]) {
stepi <- stepi + 1
setTxtProgressBar(pb,stepi)
fun_lng <- str2lang(sprintf("%s::`%s`", pkg, fun_chr))
fun_val <- get(fun_chr, envir = ns)
sub_funs <- find_funs(body(fun_val))
names(sub_funs) <- ifelse(
names(sub_funs) == "",
seq_along(sub_funs),
names(sub_funs)
)
has_subfuns <- length(sub_funs) > 0
if(has_subfuns) {
fun_title <- sprintf('
} else {
fun_title <- sprintf("
}
cat(fun_title, file = out, append = TRUE)
out_tmp <- file.path(path, paste0(exp_unexp, "_", stepi, ".png"))
if(!is.null(body(fun_val))) {
capture.output(suppressMessages(
flow_view(setNames(c(fun_val), fun_chr), ..., out = out_tmp)))
fun_img <- sprintf('\n\n', exp_unexp, stepi)
cat(fun_img, file = out, append = TRUE)
} else {
cat("`", fun_chr, "` doesn't have a body\n\n", sep="",
file = out, append = TRUE)
}
for(i in seq_along(sub_funs)) {
sub_fun_val <- eval(sub_funs[[i]])
sub_fun_chr <- names(sub_funs)[[i]]
sub_fun_title <- sprintf("
cat(sub_fun_title, file = out, append = TRUE)
if(grepl("^\\d+$", sub_fun_chr)) {
sub_fun_chr <- "function"
}
out_tmp <- file.path(path, paste0(exp_unexp, "_", stepi,"_", i, ".png"))
if(!is.null(body(sub_fun_val))) {
capture.output(suppressMessages(
flow_view(setNames(c(sub_fun_val), sub_fun_chr), ..., out = out_tmp)))
sub_fun_img <- sprintf('\n\n', exp_unexp, stepi, i)
cat(sub_fun_img, file = out, append = TRUE)
} else {
cat("`", sub_fun_chr, "` doesn't have a body\n\n", sep="",
file = out, append = TRUE)
}
}
}
}
close(pb)
} |
# Exponentially tilt a vector of triple-structured counts and summarise it.
#
# Args:
#   alpha:   tilting parameter; the tilt factor is exp(alpha).
#   results: numeric vector consumed in consecutive triples (A, B, C); its
#            length is expected to be a power of 3 (3^nn).
#   small:   numerical tolerance keeping the redistributed total strictly
#            inside (minf, maxf).
#
# Returns a numeric vector of length 2*nn + 1: alpha, followed by nn weighted
# means of level indicators, followed by nn weighted means of their
# cumulative sums (weights are the tilted counts).
tilt <- function( alpha, results, small = 1E-5 ) {
  Res <- results
  len <- length(results)
  # Number of reduction levels; len is assumed to equal 3^nn.
  nn <- round(log(len, base = 3))
  Ealpha <- exp(alpha)
  for ( j in seq_len(nn) ) {
    len <- length(Res)
    # Split Res into consecutive triples (A, B, C).
    s0 <- seq( 1, len, by = 3 )
    s1 <- s0 + 1
    s2 <- s0 + 2
    A <- Res[ s0 ]
    B <- Res[ s1 ]
    C <- Res[ s2 ]
    # Tilted total of A and B per triple (was named `T`, which masks TRUE).
    Tw <- A + Ealpha * B
    # Classify triples by which of A / B are zero while C is positive; each
    # class redistributes its C mass differently below.
    X0 <- (( A == 0 ) & ( B == 0 )) & ( C > 0 )
    X1 <- (( A > 0 ) & ( B > 0 )) & ( C > 0 )
    X2 <- (( A > 0 ) & ( B == 0 )) & ( C > 0 )
    X3 <- (( A == 0 ) & ( B > 0 )) & ( C > 0 )
    rt <- sum(B) / ( sum(A) + sum(B) )
    maxf <- sum(B) + sum(C)
    minf <- sum(B)
    # Clamp the tilted target rt1 strictly inside (minf, maxf).
    if ( abs(rt - minf) < small * sum(C)) rt1 <- minf+small
    else if ( abs(maxf - rt) < small * sum(C)) rt1 <- maxf-small
    else {
      emo <- ( maxf - rt ) / ( rt - minf )
      rt1 <- minf + (maxf - minf) / ( 1 + emo*exp( -alpha ))
      if ( is.nan(rt1) ) rt1 <- (minf + maxf)/2
      if ( rt1 < (minf+small) ) rt1 <- minf+small
      if ( rt1 > (maxf-small) ) rt1 <- maxf-small
    }
    TA <- sum(A)
    TB <- sum(B)
    if ( any(X1) ) {
      # A and B both positive: split C proportionally to the tilted masses.
      A[X1] <- A[X1] + A[X1] * C[X1] / Tw[X1]
      B[X1] <- B[X1] + B[X1] * Ealpha * C[X1] / Tw[X1]
      CA <- sum(A) - TA
      CB <- sum(B) - TB
      pA <- CA / ( CA + CB )
      pB <- CB / ( CA + CB )
    } else {
      pA <- 1 - rt1
      pB <- rt1
    }
    if ( any(X0) ) {
      # Both A and B zero: split C using the global proportions pA / pB.
      A[X0] <- A[X0] + pA * C[X0]
      B[X0] <- B[X0] + pB * C[X0]
    }
    if ( any(X2) ) {
      # B zero: impute a pseudo-B from the clamped ratio rt1, then split.
      # (A dead pre-assignment of TmB via TB/TA was removed; it was
      # unconditionally overwritten in the original code.)
      TmA <- A[X2]
      TmB <- A[X2] * ( rt1 - minf ) / ( maxf - rt1 )
      A[X2] <- A[X2] + TmA * C[X2] / (TmA + Ealpha * TmB)
      B[X2] <- B[X2] + TmB * Ealpha * C[X2] / (TmA + Ealpha * TmB)
    }
    if ( any(X3) ) {
      # A zero: impute a pseudo-A symmetrically (dead pre-assignment removed).
      TmA <- B[X3] * ( maxf - rt1 ) / ( rt1 - minf )
      TmB <- B[X3]
      A[X3] <- A[X3] + TmA * C[X3] / (TmA + Ealpha * TmB)
      B[X3] <- B[X3] + TmB * Ealpha * C[X3] / (TmA + Ealpha * TmB)
    }
    # The next level works on the updated (A, B) pairs only.
    Res <- c(A,B)
  }
  w <- Res
  len <- length(Res)
  sm <- rep(0,len)
  Results <- rep(NA,2*nn+1)
  Results[1] <- alpha
  for ( j in seq_len(nn) ) {
    # Level-j indicator: alternating blocks of 0s and 1s of width 2^(j-1).
    yj <- rep(c(rep( 0, 2^(j-1) ), rep( 1, 2^(j-1))), len / (2^j))
    Results[j+1] <- weighted.mean( yj, w = w )
    sm <- sm + yj
    Results[nn+1+j] <- weighted.mean( sm, w = w )
  }
  return(Results)
}
# Fit a group-lasso solution for a fixed penalty level.
#
# Args:
#   X:           n x p design matrix.
#   Y:           response vector of length n (may be NULL).
#   lbd:         non-negative penalty parameter.
#   weights:     positive per-group penalty weights, one per group.
#   group:       integer vector of length p mapping each column of X to a
#                group; labels must be the consecutive integers 1..max(group).
#   Gamma:       step-size constants; computed via groupMaxEigen() if missing.
#   eps:         convergence tolerance forwarded to grlasso().
#   returnGamma: if TRUE, also return the Gamma that was used.
#   initBeta:    initial coefficient vector for the optimizer.
#
# Returns a list with element `coef` (fitted coefficients from grlasso())
# and, when returnGamma is TRUE, element `Gamma`.
grlassoFit <- function(X, Y, lbd, weights = rep(1, max(group)), group = 1:p,
                       Gamma, eps=1e-5, returnGamma = FALSE, initBeta = rep(1, ncol(X)))
{
  n <- nrow(X)
  p <- ncol(X)

  # ---- input validation ------------------------------------------------
  if (lbd < 0) {
    stop("lbd should be non-negative.")
  }
  if (length(group) != p) {
    stop("group must have a same length with the number of X columns")
  }
  if (length(weights) != length(unique(group))) {
    stop("weights has to have a same length as the number of groups")
  }
  if (any(weights <= 0)) {
    stop("weights should be positive.")
  }
  if (any(!seq_len(max(group)) %in% group)) {
    stop("group index has to be a consecutive integer starting from 1.")
  }
  # `&&` short-circuits, so length(Y) is only examined when Y is non-NULL.
  if (!is.null(Y) && length(Y) != n) {
    stop("dimension of X and Y are not conformable.")
  }

  # Precompute the columnwise product used by the solver.
  XY <- X * c(Y)
  if (missing(Gamma)) {
    Gamma <- groupMaxEigen(X, group)
  }

  # Single call to the solver; the original duplicated this expression in
  # both branches below.
  fit <- c(grlasso(X = X, Y = Y, XY = XY, weights = weights, group = group,
                   lbd = lbd, Gamma = Gamma, initBeta = initBeta,
                   eps = eps))
  if (returnGamma) {
    return(list(coef = fit, Gamma = Gamma))
  }
  list(coef = fit)
}
# Regression-based tests for funnel plot asymmetry ("small-study effects").
#
# Args:
#   b:       vector of effect sizes.
#   b.se:    standard errors of b.
#   n.total: total sample size per study.
#   d.total: total number of events per study.
#   d1, d2:  event counts in the two groups.
#   method:  which asymmetry test to run; one of "E-UW", "E-FIV", "M-FIV",
#            "M-FPV", "P-FPV", "D-FIV", "D-FAV". Each branch below builds a
#            data frame `ds` with columns y (regressand), x (regressor) and,
#            for weighted methods, w (regression weights).
#
# Returns an object of class "fat" with the slope t-statistic (tval), its
# two-sided p-value (pval), degrees of freedom (df = nstudies - 2), the
# fitted regression model, and — when b.se was supplied — a fixed-effect
# meta-analysis (fema).
#
# NOTE(review): rma() is called unqualified; presumably it comes from the
# metafor package — confirm that the package is attached by the caller.
fat <- function(b, b.se, n.total, d.total, d1, d2, method="E-FIV")
{
  if (missing(b)) {
    stop ("No values given for 'b'")
  }
  # E-UW: regress effect size on its standard error, unweighted.
  if (method == "E-UW") {
    if (missing(b.se)) {
      stop ("No values given for 'b.se'")
    }
    if (length(b) != length(b.se)) {
      stop("Incompatible vector sizes for 'b' and 'b.se'!")
    }
    studies.complete <- c(!is.na(b) & !is.na(b.se))
    ds <- data.frame("y" = b,
                     "x" = b.se
    )
  # E-FIV: same regressor, weighted by inverse variance 1 / b.se^2.
  } else if (method== "E-FIV") {
    if (missing(b.se)) {
      stop ("No values given for 'b.se'")
    }
    if (length(b) != length(b.se)) {
      stop("Incompatible vector sizes for 'b' and 'b.se'!")
    }
    studies.complete <- c(!is.na(b) & !is.na(b.se))
    ds <- data.frame("y" = b,
                     "x" = b.se,
                     "w" = (1/(b.se**2))
    )
  # M-FIV: regress effect size on total sample size, inverse-variance weights.
  } else if (method == "M-FIV") {
    if (missing(b.se)) {
      stop ("No values given for 'b.se'")
    }
    if (missing(n.total)) {
      stop ("No values given for 'n.total'")
    }
    if (length(b) != length(b.se)) {
      stop("Incompatible vector sizes for 'b' and 'b.se'!")
    }
    if (length(b) != length(n.total)) {
      stop("Incompatible vector sizes for 'b' and 'n.total'!")
    }
    studies.complete <- c(!is.na(b) & !is.na(b.se) & !is.na(n.total))
    ds <- data.frame("y" = b,
                     "x" = n.total,
                     "w" = (1/(b.se**2))
    )
  # M-FPV: regressor n.total, weights d*(1 - d/n); studies with zero events
  # get a continuity correction (d set to 1, n increased by 2).
  } else if (method=="M-FPV") {
    if (missing(n.total)) {
      stop ("No values given for 'n.total'")
    }
    if (missing(d.total)) {
      stop ("No values given for 'd.total'")
    }
    if (length(b) != length(n.total)) {
      stop("Incompatible vector sizes for 'b' and 'n.total'!")
    }
    if (length(b) != length(d.total)) {
      stop("Incompatible vector sizes for 'b' and 'd.total'!")
    }
    studies.complete <- c(!is.na(b) & !is.na(d.total) & !is.na(n.total))
    d.total.cc <- d.total
    d.total.cc[d.total==0] <- 1
    n.total[d.total==0] <- n.total[d.total==0]+2
    ds <- as.data.frame(cbind(b, n.total, (d.total.cc*(1-d.total.cc/n.total))))
    colnames(ds) <- c("y","x","w")
  # P-FPV: regressor 1/n.total, same weights and continuity correction.
  } else if (method=="P-FPV") {
    if (missing(n.total)) {
      stop ("No values given for 'n.total'")
    }
    if (missing(d.total)) {
      stop ("No values given for 'd.total'")
    }
    if (length(b) != length(n.total)) {
      stop("Incompatible vector sizes for 'b' and 'n.total'!")
    }
    if (length(b) != length(d.total)) {
      stop("Incompatible vector sizes for 'b' and 'd.total'!")
    }
    studies.complete <- c(!is.na(b) & !is.na(d.total) & !is.na(n.total))
    d.total.cc <- d.total
    d.total.cc[d.total==0] <- 1
    n.total[d.total==0] <- n.total[d.total==0]+2
    ds <- data.frame("y" = b,
                     "x" = 1/n.total,
                     "w" = (d.total.cc*(1-d.total.cc/n.total))
    )
  # D-FIV: regressor 1/d.total (continuity-corrected), IV weights.
  } else if (method=="D-FIV") {
    if (missing(b.se)) {
      stop ("No values given for 'b.se'")
    }
    if (missing(d.total)) {
      stop ("No values given for 'd.total'")
    }
    if (length(b) != length(b.se)) {
      stop("Incompatible vector sizes for 'b' and 'b.se'!")
    }
    if (length(b) != length(d.total)) {
      stop("Incompatible vector sizes for 'b' and 'd.total'!")
    }
    studies.complete <- c(!is.na(b) & !is.na(b.se) & !is.na(d.total))
    d.total.cc <- d.total
    d.total.cc[d.total==0] <- 1
    ds <- data.frame("y" = b,
                     "x" = 1/d.total.cc,
                     "w" = (1/(b.se**2))
    )
  # D-FAV: regressor 1/(d1+d2), weights 1/(1/d1 + 1/d2); 0.5 continuity
  # correction added to both arms whenever either has zero events.
  } else if (method=="D-FAV") {
    if (missing(d1)) {
      stop ("No values given for 'd1'")
    }
    if (missing(d2)) {
      stop ("No values given for 'd2'")
    }
    if (length(b) != length(d1)) {
      stop("Incompatible vector sizes for 'b' and 'd1'!")
    }
    if (length(b) != length(d2)) {
      stop("Incompatible vector sizes for 'b' and 'd2'!")
    }
    # Consistency check only possible when d.total was also supplied.
    if (!missing(d.total)) {
      if (sum(d1+d2!=d.total) > 0)
        stop("Incompatible information between 'd.total', 'd1' and 'd2'")
    }
    studies.complete <- c(!is.na(b) & !is.na(d1) & !is.na(d2))
    d1.cc <- d1
    d2.cc <- d2
    d1.cc[(d1==0 | d2==0)] <- d1.cc[(d1==0 | d2==0)]+0.5
    d2.cc[(d1==0 | d2==0)] <- d2.cc[(d1==0 | d2==0)]+0.5
    ds <- data.frame("y" = b,
                     "x" = 1/(d1.cc+d2.cc),
                     "w" = 1/((1/d1.cc)+(1/d2.cc))
    )
  }
  else {
    stop("Method for testing funnel plot asymmetry not supported")
  }
  # Drop studies that are missing any variable required by the chosen method.
  nstudies <- sum(studies.complete)
  ds <- ds[studies.complete,]
  if (nstudies < length(studies.complete)) {
    warning("Some studies were removed due to missing data!")
  }
  # Fixed-effect summary estimate, only when standard errors are available.
  res <- NULL
  if (!missing(b.se)) {
    res <- rma(yi = b[studies.complete], sei = b.se[studies.complete], method = "FE")
  }
  # Fit the (weighted) asymmetry regression.
  if (method %in% c("E-FIV", "M-FIV", "M-FPV", "P-FPV", "D-FIV", "D-FAV")) {
    suppressWarnings(m.fat <- try(glm(y~x, weights=ds$w, data=ds), silent=T))
  } else if (method=="E-UW") {
    suppressWarnings(m.fat <- try(glm(y~x, data=ds), silent=T))
  } else {
    stop("Method for testing funnel plot asymmetry currently not implemented")
  }
  if ("try-error" %in% attr(m.fat,"class")) {
    warning("Estimation of the regression model unsuccessful, P-value omitted.")
    t.fat <- NA
    p.fat <- NA
  } else {
    # t-test on the slope of the regression; two-sided p-value below.
    t.fat <- coefficients(m.fat)[2]/sqrt(diag(vcov(m.fat))[2])
    p.fat <- 2*pt(-abs(t.fat),df=(nstudies-2))
  }
  out <- list()
  out$call <- match.call()
  out$method <- method
  out$tval <- t.fat
  out$pval <- p.fat
  out$fema <- res
  out$df <- nstudies-2
  out$model <- m.fat
  class(out) <- "fat"
  return(out)
}
# Print method for "fat" objects: shows the call, the fixed-effect summary
# estimate (when available) and the asymmetry t-test with its p-value.
#
# Args:
#   x:      object of class "fat".
#   digits: number of significant digits used for rounding.
#   ...:    ignored (kept for print() generic compatibility).
print.fat <- function(x, digits = max(3, getOption("digits") - 3), ...) {
  cat("Call: ")
  print(x$call)
  # Only shown when a fixed-effect meta-analysis was fitted.
  if (!is.null(x$fema)) {
    cat(c("\nFixed effect summary estimate: ", round(x$fema$b, digits = digits), " \n"))
  }
  cat("\n")
  tval <- round(x$tval, digits = digits)
  pval <- round(x$pval, digits = digits)
  cat(paste0("test for funnel plot asymmetry: t =", tval, ", df = ", x$df, ", "))
  cat(paste0("p = ", pval, "\n"))
}
# Funnel plot for a fitted "fat" (funnel plot asymmetry test) object.
#
# Args:
#   x:               object of class "fat" as returned by fat().
#   ref:             reference effect size drawn as a vertical line; defaults
#                    to the fixed-effect summary estimate x$fema$b.
#   confint:         draw the shaded confidence region?
#   confint.level:   significance level used for the region bounds.
#   confint.col:     fill colour of the region.
#   confint.alpha:   fill transparency in [0, 1].
#   confint.density: accepted but not referenced in the body below.
#   xlab:            x-axis label.
#   add.pval:        accepted but not referenced in the body below.
#   ...:             ignored.
#
# Returns a ggplot object.
#
# NOTE(review): in the stored regression data, column "y" is the effect size
# (plotted horizontally) and column "x" the precision measure (plotted
# vertically) — the axis roles are deliberately swapped below.
plot.fat <- function(x, ref, confint = TRUE, confint.level = 0.10, confint.col = "skyblue", confint.alpha = .50,
                     confint.density = NULL,
                     xlab = "Effect size", add.pval = TRUE, ...) {
  if (!inherits(x, "fat"))
    stop("Argument 'x' must be an object of class \"fat\".")
  if (confint.level < 0 | confint.level > 1) {
    stop("Argument 'confint.level' must be between 0 and 1.")
  }
  if (confint.alpha < 0 | confint.alpha > 1) {
    stop("Argument 'confint.alpha' must be between 0 and 1.")
  }
  # Presumably defined to avoid an R CMD check NOTE about `y` inside aes()
  # — TODO confirm.
  y <- NULL
  xval <- x$model$data[, "y"]
  # Choose axis label, orientation and (for reciprocal regressors) tick
  # positions depending on the asymmetry test used.
  if (x$method %in% c("E-UW", "E-FIV")) {
    ylab <- "Standard error"
    yval <- (x$model$data[, "x"])
    ylim <- rev(c(0, max(yval, na.rm = T)))
    xlim <- c(min(c(0, xval)), max(xval))
  } else if (x$method %in% c("M-FIV")) {
    ylab <- "Sample size"
    yval <- (x$model$data[, "x"])
    ylim <- (c(0, max(yval, na.rm = T)))
    xlim <- c(min(c(0, xval)), max(xval))
  } else if (x$method == "P-FPV") {
    # Regressor is 1/n; axis shows n, so breaks are computed on the
    # reciprocal scale and rounded to "nice" values.
    ylab <- "Sample size"
    yval <- (x$model$data[, "x"])
    ylim <- rev(c(0, max(yval, na.rm = T)))
    xlim <- c(min(c(0, xval)), max(xval))
    step <- ((max(yval) - min(yval))/5)
    yax <- c(plyr::round_any(1/min(yval), 10^(sapply(round(1/min(yval)), nchar) - 1)),
             plyr::round_any(1/seq(step, 4 * step, by = step), 10), plyr::round_any(1/max(yval), 10))
  } else if (x$method == "D-FIV") {
    ylab <- "Total events"
    yval <- (x$model$data[, "x"])
    ylim <- rev(c(0, max(yval, na.rm = T)))
    xlim <- c(min(c(0, xval)), max(xval))
    step <- ((max(yval) - min(yval))/4)
    yax <- c(plyr::round_any(1/min(yval), 10^(sapply(round(1/min(yval)), nchar) - 1)),
             plyr::round_any(1/seq(step, 4 * step, by = step), 10), plyr::round_any(1/max(yval), 10))
  } else if (x$method == "D-FAV") {
    ylab <- "Total events"
    yval <- (x$model$data[, "x"])
    ylim <- rev(c(0, max(yval, na.rm = T)))
    xlim <- c(min(c(0, xval)), max(xval))
    step <- ((max(yval) - min(yval))/4)
    yax <- c(plyr::round_any(1/min(yval),10^(sapply(round(1/min(yval)), nchar) - 1)),
             plyr::round_any(1/seq(step, 4 * step, by = step), 10), plyr::round_any(1/max(yval), 10))
  } else {
    stop("Plot not supported!")
  }
  # Dense grid over an extended regressor range for the fitted line and its
  # confidence band; predictions outside the plotting window are clamped.
  newdata <- sort(c(-max(x$model$data[, "x"]), x$model$data[,"x"], 2 * max(x$model$data[, "x"])))
  newdata <- as.data.frame(cbind(seq(min(newdata), max(newdata), length.out = 500), NA))
  colnames(newdata) <- c("x", "y")
  predy <- predict(x$model, newdata = newdata, se.fit = T)
  predy.mean <- predy$fit
  predy.lowerInt <- as.vector(predy$fit + qt(confint.level/2, df = x$df) * predy$se.fit)
  predy.upperInt <- as.vector(predy$fit + qt((1 - confint.level/2), df = x$df) * predy$se.fit)
  predy.upperInt[predy.upperInt < min(pretty(range(xlim)))] <- min(pretty(range(xlim)))
  predy.upperInt[predy.upperInt > max(pretty(range(xlim)))] <- max(pretty(range(xlim)))
  predy.lowerInt[predy.lowerInt < min(pretty(range(xlim)))] <- min(pretty(range(xlim)))
  predy.lowerInt[predy.lowerInt > max(pretty(range(xlim)))] <- max(pretty(range(xlim)))
  newdata[, "x"][newdata[, "x"] < min(pretty(range(ylim)))] <- min(pretty(range(ylim)))
  newdata[, "x"][newdata[, "x"] > max(pretty(range(ylim)))] <- max(pretty(range(ylim)))
  p <- ggplot2::ggplot(data = data.frame(x = xval, y = yval))
  # Shaded confidence region drawn as a closed polygon (upper bound followed
  # by the reversed lower bound).
  if (confint) {
    p <- p + ggplot2::geom_polygon(
      mapping = ggplot2::aes(
        x = x,
        y = y
      ),
      data = data.frame(
        x = c(
          predy.upperInt,
          rev(predy.lowerInt)),
        y = c(
          newdata[, "x"],
          rev(newdata[, "x"]))
      ),
      fill = confint.col,
      alpha = confint.alpha
    )
  }
  # Study points plus the fitted regression line (restricted to the window).
  p <- p +
    ggplot2::geom_point(
      mapping = ggplot2::aes(x = x, y = y),
      shape = 19
    ) +
    ggplot2::geom_line(
      mapping = ggplot2::aes(x = x, y = y),
      data = data.frame(
        x = predy.mean[newdata[, "x"] > min(pretty(range(ylim))) & newdata[, "x"] < max(pretty(range(ylim)))],
        y = newdata[, "x"][newdata[, "x"] > min(pretty(range(ylim))) & newdata[, "x"] < max(pretty(range(ylim)))]
      ),
      linetype = 2)
  # Vertical reference line at the summary estimate (or user-supplied ref).
  if (missing(ref)) {
    p <- p + ggplot2::geom_vline(xintercept = x$fema$b)
  } else {
    p <- p + ggplot2::geom_vline(xintercept = ref)
  }
  p <- p + ggplot2::scale_x_continuous(
    name = xlab,
    limits = range(pretty(range(xlim))),
    breaks = pretty(range(xlim)))
  # Reciprocal-regressor methods label the reversed axis with original units.
  if (x$method %in% c("P-FPV", "D-FAV", "D-FIV")){
    p <- p + ggplot2::scale_y_reverse(name = ylab, breaks = 1/yax, labels = yax, limits = rev(range(pretty(ylim))))
  } else if (x$method %in% c("E-UW", "E-FIV")){
    p <- p + ggplot2::scale_y_reverse(name = ylab, limits = rev(range(pretty(ylim))), breaks = pretty(range(ylim)))
  } else {
    p <- p + ggplot2::scale_y_continuous(name = ylab, limits = range(pretty(ylim)), breaks = pretty(range(ylim)))
  }
  return(p)
}
# Construct a fresh two-state continuous-observation HMM object.
#
# Args:
#   Observations: numeric vector of observed values.
#   Pi1, Pi2:     initial state probabilities.
#   A11..A22:     state transition probabilities.
#   Mu1, Mu2:     state-conditional means.
#   Var1, Var2:   state-conditional variances.
#
# Returns a list of class "ContObservHMM" holding the observations, a 1 x 10
# parameter matrix, a 1 x 6 NA results matrix (Pal, Pbe, Pxi, AIC, SBIC,
# HQIC), a zero-initialised Viterbi path column and an NA matrix B with one
# column per state.
hmmsetcont <-
function (Observations, Pi1=0.5, Pi2=0.5, A11=0.7, A12=0.3, A21=0.3, A22=0.7, Mu1=5, Mu2=(-5), Var1=10, Var2=10)
{
  # byrow = TRUE (not the reassignable shorthand T) lays the parameters out
  # as a single named row.
  Parameters <- matrix(c(Pi1, Pi2, A11, A12, A21, A22, Mu1, Mu2, Var1, Var2),
                       byrow = TRUE, ncol = 10, nrow = 1)
  colnames(Parameters) <- c("Pi1", "Pi2", "A11", "A12", "A21", "A22",
                            "Mu1", "Mu2", "Var1", "Var2")
  # Placeholder row of NA diagnostics — presumably filled by later fitting
  # steps; confirm against the package's training routines.
  Results <- matrix(NA, ncol = 6, nrow = 1)
  colnames(Results) <- c("Pal", "Pbe", "Pxi", "AIC", "SBIC", "HQIC")
  # One Viterbi entry and one row of B per observation.
  Viterbi <- matrix(0, nrow = length(Observations), ncol = 1)
  B <- matrix(NA, nrow = length(Observations), ncol = 2)
  hmm <- list(Observations = Observations, Parameters = Parameters,
              Results = Results, Viterbi = Viterbi, B = B)
  class(hmm) <- "ContObservHMM"
  hmm
}
# ---- testthat suite for BTD_cov (Bayesian test of deficit with covariates) --
# All tests build bivariate normal control data with MASS::mvrnorm and
# empirical = TRUE so sample moments match the requested ones exactly.

# Summary-statistics input must reproduce the raw-data result (same seed).
test_that("summary input yields same result as raw", {
  x <- MASS::mvrnorm(18, mu = c(100, 13),
                     Sigma = matrix(c(15^2, 0.65*15*3,
                                      0.65*15*3, 3^2),
                                    nrow = 2, byrow = T),
                     empirical = TRUE)
  set.seed(123456)
  sumstats <- BTD_cov(70, 13, c(100, 15), c(13, 3), use_sumstats = TRUE,
                      cor_mat = matrix(c(1, 0.65, 0.65, 1), nrow=2),
                      sample_size = 18)[["p.value"]]
  set.seed(123456)
  raw <- BTD_cov(70, 13, x[ , 1], x[ , 2])[["p.value"]]
  # NOTE(review): `tol` relies on partial argument matching; testthat's
  # expect_equal argument is spelled `tolerance`.
  expect_equal(sumstats, raw, tol = 0.01)
})
# control_covar should be accepted as either a data frame or a matrix.
test_that("input of control_covar can be both dataframe and matrix", {
  size_weight_illusion$MF01 <- as.numeric(size_weight_illusion$SEX == "Female")
  set.seed(123)
  df <- BTD_cov(case_task = size_weight_illusion[1, "V_SWI"],
                case_covar = unlist(size_weight_illusion[1, c("YRS", "MF01")]),
                control_task = size_weight_illusion[-1, "V_SWI"],
                control_covar = size_weight_illusion[-1, c("YRS", "MF01")], iter = 100)
  set.seed(123)
  mat <- BTD_cov(case_task = size_weight_illusion[1, "V_SWI"],
                 case_covar = unlist(size_weight_illusion[1, c("YRS", "MF01")]),
                 control_task = size_weight_illusion[-1, "V_SWI"],
                 control_covar = as.matrix(size_weight_illusion[-1, c("YRS", "MF01")]), iter = 100)
  expect_equal(df, mat)
})
# Reference values (cg_ot) — presumably from Crawford & Garthwaite's published
# example — should be matched to within 1%.
test_that("we get approx same results as C&G on BTD_cov", {
  x <- MASS::mvrnorm(18, mu = c(100, 13),
                     Sigma = matrix(c(15^2, 0.65*15*3,
                                      0.65*15*3, 3^2),
                                    nrow = 2, byrow = T),
                     empirical = TRUE)
  cg_ot <- c(0.04362 , -2.653, -1.071, 0.3987, 14.2189)
  set.seed(1234597)
  sc_ot <- BTD_cov(78, 13, x[ , 1], x[ , 2], iter = 10000)
  sc_ot <- c(sc_ot[["p.value"]],
             sc_ot[["interval"]][["Lower Z-CCC CI"]],
             sc_ot[["interval"]][["Upper Z-CCC CI"]],
             sc_ot[["interval"]][["Lower p CI"]],
             sc_ot[["interval"]][["Upper p CI"]])
  expect_equal(sc_ot, cg_ot, tolerance = 1e-2)
})
# One-sided p-values must land on the expected side of 0.5 for cases above
# (105) and below (78) the control mean.
test_that("alternative hypotheses direction", {
  x <- MASS::mvrnorm(18, mu = c(100, 13),
                     Sigma = matrix(c(15^2, 0.65*15*3,
                                      0.65*15*3, 3^2),
                                    nrow = 2, byrow = T),
                     empirical = TRUE)
  set.seed(123456234)
  pos_z <- BTD_cov(105, 13, x[ , 1], x[ , 2],
                   iter = 1000, alternative = "less")[["p.value"]]
  expect_equal(pos_z > 0.5, TRUE)
  set.seed(123456234)
  pos_z <- BTD_cov(105, 13, x[ , 1], x[ , 2],
                   iter = 1000, alternative = "greater")[["p.value"]]
  expect_equal(pos_z < 0.5, TRUE)
  set.seed(123456234)
  neg_z <- BTD_cov(78, 13, x[ , 1], x[ , 2],
                   iter = 1000, alternative = "less")[["p.value"]]
  expect_equal(neg_z < 0.5, TRUE)
  set.seed(123456234)
  neg_z <- BTD_cov(78, 13, x[ , 1], x[ , 2],
                   iter = 1000, alternative = "greater")[["p.value"]]
  expect_equal(neg_z > 0.5, TRUE)
})
# Input validation: each malformed call must raise its specific error message.
test_that("errors and warnings are occuring as they should for BTD", {
  expect_error(BTD_cov(1, 0, 0, 0, use_sumstats = TRUE, sample_size = NULL),
               "Please supply both correlation matrix and sample size")
  expect_error(BTD_cov(1, 0, 0, 0, use_sumstats = TRUE, sample_size = 20, cor_mat = NULL),
               "Please supply both correlation matrix and sample size")
  expect_error(BTD_cov(-2, 0, rnorm(15), rnorm(15), int_level = 1.1),
               "Interval level must be between 0 and 1")
  expect_error(BTD_cov(c(-2, 0), 0, rnorm(15), rnorm(15)),
               "case_task should be single value")
  expect_error(BTD_cov(-2, 0, c(0, 1), c(0, 1), use_sumstats = TRUE, sample_size = 20,
                       cor_mat = diag(c(-2, -2))),
               "cor_mat is not positive definite")
  expect_error(BTD_cov(-2, 0, c(0, 1), c(0, 1), use_sumstats = FALSE, sample_size = 20,
                       cor_mat = diag(2)),
               "If input is summary data, set use_sumstats = TRUE")
  expect_error(BTD_cov(-2, 0, c(0, 1), c(0, 1), use_sumstats = TRUE, sample_size = 20,
                       cor_mat = diag(3)),
               "Number of variables and number of correlations does not match")
})
# ---- COVID-19 case rate x deprivation analysis: data acquisition & prep ----
# NOTE(review): rm(list = ls()) wipes the user's workspace; tolerated only
# because this is a stand-alone analysis script.
rm(list=ls())
library(tidyverse)
library(curl)
library(readxl)
library(lubridate)
library(paletteer)
library(geofacet)
library(scales)
# MSOA-level rolling 7-day COVID-19 case rates from the UK dashboard API.
temp <- tempfile()
source <- ("https://api.coronavirus.data.gov.uk/v2/data?areaType=msoa&metric=newCasesBySpecimenDateRollingRate&format=csv")
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
casedata <- read_csv(temp)
# 2019 English Index of Multiple Deprivation (IMD) at LSOA level; keep the
# LSOA codes/names plus rank and decile columns.
temp <- tempfile()
source <- ("https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/833970/File_1_-_IMD2019_Index_of_Multiple_Deprivation.xlsx")
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
IMD <- read_excel(temp, sheet="IMD2019", range="A2:F32845", col_names=FALSE)[,c(1,2,5,6)]
colnames(IMD) <- c("LSOA11CD", "LSOA11NM", "IMDrank", "IMDdecile")
# LSOA -> MSOA / region lookup so LSOA-level IMD can be aggregated to MSOAs.
temp <- tempfile()
source <- ("https://opendata.arcgis.com/datasets/fe6c55f0924b4734adf1cf7104a0173e_0.csv")
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
lookup <- read.csv(temp) %>%
  select(LSOA11CD, MSOA11CD, RGN11NM) %>%
  unique()
IMD <- merge(IMD, lookup, by="LSOA11CD")
# Mid-2019 LSOA population estimates (zipped xlsx) used as weights.
temp <- tempfile()
temp2 <- tempfile()
source <- "https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2flowersuperoutputareamidyearpopulationestimatesnationalstatistics%2fmid2019sape22dt13/sape22dt13mid2019lsoabroadagesestimatesunformatted.zip"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
unzip(zipfile=temp, exdir=temp2)
pop <- read_excel(file.path(temp2, "SAPE22DT13-mid-2019-lsoa-Broad_ages-estimates-unformatted.xlsx"),
                  sheet="Mid-2019 Persons", range="A6:G34758", col_names=FALSE)[,c(1,7)]
colnames(pop) <- c("LSOA11CD", "pop")
IMD <- merge(IMD, pop)
# Population-weighted IMD rank per MSOA, joined onto the case data.
IMD_MSOA <- IMD %>%
  group_by(MSOA11CD) %>%
  summarise(IMDrank=weighted.mean(IMDrank, pop)) %>%
  ungroup() %>%
  merge(casedata, by.x="MSOA11CD", by.y="areaCode", all=TRUE) %>%
  rename(msoa11cd=MSOA11CD, caserate=newCasesBySpecimenDateRollingRate)
# One row per MSOA with the last four weekly rates spread to columns, plus
# absolute and relative 1/2/3-week changes. IMDrank is flipped so that a
# higher value means more deprived.
data <- IMD_MSOA %>%
  select(caserate, date, IMDrank, regionName) %>%
  filter(date>=max(date)-weeks(3)) %>%
  spread(date, caserate) %>%
  mutate(abs1wk=.[[ncol(.)]]-.[[ncol(.)-1]],
         abs2wk=.[[ncol(.)]]-.[[ncol(.)-2]],
         abs3wk=.[[ncol(.)]]-.[[ncol(.)-3]],
         rel1wk=abs1wk/.[[ncol(.)-1]],
         rel2wk=abs2wk/.[[ncol(.)-2]],
         rel3wk=abs3wk/.[[ncol(.)-3]],
         IMDrank=max(IMDrank)-IMDrank) %>%
  rename(oldcases=4)
# National correlations between deprivation and case levels / changes.
natrhocases <- cor(subset(data, !is.na(oldcases))$IMDrank, subset(data, !is.na(oldcases))$oldcases)
natrhoabs <- cor(subset(data, !is.na(abs2wk))$IMDrank, subset(data, !is.na(abs2wk))$abs2wk)
natrhorel <- cor(subset(data, !is.na(rel2wk))$IMDrank, subset(data, !is.na(rel2wk))$rel2wk)
# ---- National scatter plots: case rates / changes vs deprivation ----------
# Case-rate level two weeks ago vs deprivation, annotated with the
# national correlation coefficient.
tiff("Outputs/COVIDMSOACaseRatexIMDOld.tiff", units="in", width=9, height=7, res=500)
ggplot(data, aes(x=IMDrank, y=oldcases, colour=oldcases))+
  geom_point(show.legend=FALSE)+
  geom_smooth(method="lm", formula=y~x, colour="Red")+
  scale_x_continuous(name="Deprivation (higher = more deprived)")+
  scale_y_continuous(name="Change in cases per 100,000 in the past 2 weeks")+
  scale_colour_paletteer_c("scico::tokyo", direction=-1)+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        plot.title=element_text(face="bold", size=rel(1.2)))+
  labs(title="COVID-19 case rates were higher, on average, in more deprived areas",
       subtitle=paste0("7-day average rates of new COVID-19 cases for MSOAs in England in the week ending ",
                       max(IMD_MSOA$date)-weeks(2)),
       caption="Data from PHE, ONS & MHCLG | Plot by @VictimOfMaths")+
  annotate("text", x=33000, y=850, label=paste0("\u03C1", "=", round(natrhocases, 2)), colour="Red")
dev.off()
# Absolute two-week change in case rates vs deprivation.
tiff("Outputs/COVIDMSOACaseRatexIMDAbs.tiff", units="in", width=9, height=7, res=500)
ggplot(data, aes(x=IMDrank, y=abs2wk, colour=abs2wk))+
  geom_point()+
  geom_hline(yintercept=0, colour="Grey60")+
  geom_smooth(method="lm", formula=y~x, colour="Red")+
  scale_x_continuous(name="Deprivation (higher = more deprived)")+
  scale_y_continuous(name="Change in cases per 100,000 in the past 2 weeks")+
  scale_colour_paletteer_c("scico::roma", name="Change in\ncase rates",
                           limit=c(-1,1)*max(abs(data$abs2wk), na.rm=TRUE))+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        plot.title=element_text(face="bold", size=rel(1.2)))+
  labs(title="The absolute fall in COVID-19 cases is larger in more deprived areas",
       subtitle=paste0("Change in rolling 7-day rates of new COVID-19 cases for MSOAs in England between ",
                       max(IMD_MSOA$date)-weeks(2), " and ", max(IMD_MSOA$date)),
       caption="Data from PHE, ONS & MHCLG | Plot by @VictimOfMaths")+
  annotate("text", x=33000, y=-100, label=paste0("\u03C1", "=", round(natrhoabs, 2)), colour="Red")
dev.off()
# Relative two-week change in case rates vs deprivation.
tiff("Outputs/COVIDMSOACaseRatexIMDRel.tiff", units="in", width=9, height=7, res=500)
ggplot(data, aes(x=IMDrank, y=rel2wk, colour=rel2wk))+
  geom_point(show.legend=FALSE)+
  geom_hline(yintercept=0, colour="Grey60")+
  geom_smooth(method="lm", formula=y~x, colour="Red")+
  scale_x_continuous(name="Deprivation (higher = more deprived)")+
  scale_y_continuous(name="Change in cases per 100,000 in the past 2 weeks",
                     labels=scales::label_percent(accuracy=1))+
  scale_colour_paletteer_c("scico::roma",
                           limit=c(-1,1)*max(abs(subset(data, !is.na(rel2wk))$rel2wk)))+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        plot.title=element_text(face="bold", size=rel(1.2)))+
  labs(title="Relative reductions in COVID-19 cases are broadly equal across the deprivation spectrum",
       subtitle=paste0("Change in rolling 7-day rates of new COVID-19 cases for MSOAs in England between ",
                       max(IMD_MSOA$date)-weeks(2), " and ", max(IMD_MSOA$date)),
       caption="Data from PHE, ONS & MHCLG | Plot by @VictimOfMaths")+
  annotate("text", x=33000, y=-0.3, label=paste0("\u03C1", "=", round(natrhorel, 2)), colour="Red")
dev.off()
# ---- Regional layout and per-region correlations --------------------------
# Geographic facet layout approximating the positions of English regions.
mygrid <- data.frame(name=c("North East", "North West", "Yorkshire and The Humber",
                            "West Midlands", "East Midlands", "East of England",
                            "South West", "London", "South East"),
                     row=c(1,2,2,3,3,3,4,4,4), col=c(2,1,2,1,2,3,1,2,3),
                     code=c(1:9))
# Per-region correlation labels; the fixed IMDrank/value coordinates place
# the label text within each facet.
rhoold <- data %>%
  filter(!is.na(oldcases)) %>%
  group_by(regionName) %>%
  mutate(rho=cor(IMDrank, oldcases),
         IMDrank=27000, oldcases=2200) %>%
  ungroup() %>%
  select(regionName, rho, IMDrank, oldcases) %>%
  distinct() %>%
  mutate(label=paste0("\u03C1", "=", round(rho, 2)))
rhoabs <- data %>%
  filter(!is.na(abs2wk)) %>%
  group_by(regionName) %>%
  mutate(rho=cor(IMDrank, abs2wk),
         IMDrank=25000, abs2wk=1500) %>%
  ungroup() %>%
  select(regionName, rho, IMDrank, abs2wk) %>%
  distinct() %>%
  mutate(label=paste0("\u03C1", "=", round(rho, 2)))
rhorel <- data %>%
  filter(!is.na(rel2wk)) %>%
  group_by(regionName) %>%
  mutate(rho=cor(IMDrank, rel2wk),
         IMDrank=25000, rel2wk=3) %>%
  ungroup() %>%
  select(regionName, rho, IMDrank, rel2wk) %>%
  distinct() %>%
  mutate(label=paste0("\u03C1", "=", round(rho, 2)))
# ---- Regional faceted plots (geofacet) and deprivation-decile bars --------
# Case-rate level vs deprivation, one panel per region.
tiff("Outputs/COVIDMSOACaseRatexIMDOldxReg.tiff", units="in", width=10, height=10, res=500)
ggplot(data, aes(x=IMDrank, y=oldcases, colour=oldcases))+
  geom_point(show.legend=FALSE)+
  geom_hline(yintercept=0, colour="Grey60")+
  geom_smooth(method="lm", formula=y~x, colour="Red")+
  geom_text(data=rhoold, aes(label=label), colour="Red")+
  scale_x_continuous(name="Deprivation (higher = more deprived)")+
  scale_y_continuous(name="Change in cases per 100,000 in the past 2 weeks")+
  scale_colour_paletteer_c("scico::tokyo", direction=-1)+
  facet_geo(~regionName, grid=mygrid)+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        plot.title=element_text(face="bold", size=rel(1.2)))+
  labs(title="COVID-19 case rates were higher, on average, in more deprived areas",
       subtitle=paste0("7-day average rates of new COVID-19 cases for MSOAs in England in the week ending ",
                       max(IMD_MSOA$date)-weeks(2)),
       caption="Data from PHE, ONS & MHCLG | Plot by @VictimOfMaths")
dev.off()
# Absolute two-week change vs deprivation, per region.
tiff("Outputs/COVIDMSOACaseRatexIMDAbsxReg.tiff", units="in", width=10, height=10, res=500)
ggplot(data, aes(x=IMDrank, y=abs2wk, colour=abs2wk))+
  geom_point()+
  geom_hline(yintercept=0, colour="Grey60")+
  geom_smooth(method="lm", formula=y~x, colour="Red")+
  geom_text(data=rhoabs, aes(label=label), colour="Red")+
  scale_x_continuous(name="Deprivation (higher = more deprived)")+
  scale_y_continuous(name="Change in cases per 100,000 in the past 2 weeks")+
  scale_colour_paletteer_c("scico::roma", name="Change in\ncase rates",
                           limit=c(-1,1)*max(abs(data$abs2wk), na.rm=TRUE))+
  facet_geo(~regionName, grid=mygrid)+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        plot.title=element_text(face="bold", size=rel(1.2)))+
  labs(title="Absolute falls in COVID-19 deaths are bigger in more deprived parts of London and the South East",
       subtitle=paste0("Change in rolling 7-day rates of new COVID-19 cases for MSOAs in England between ",
                       max(IMD_MSOA$date)-weeks(2), " and ", max(IMD_MSOA$date)),
       caption="Data from PHE, ONS & MHCLG | Plot by @VictimOfMaths")
dev.off()
# Relative two-week change vs deprivation, per region.
tiff("Outputs/COVIDMSOACaseRatexIMDRelxReg.tiff", units="in", width=10, height=10, res=500)
ggplot(data, aes(x=IMDrank, y=rel2wk, colour=rel2wk))+
  geom_point(show.legend=FALSE)+
  geom_hline(yintercept=0, colour="Grey60")+
  geom_smooth(method="lm", formula=y~x, colour="Red")+
  geom_text(data=rhorel, aes(label=label), colour="Red")+
  scale_x_continuous(name="Deprivation (higher = more deprived)")+
  scale_y_continuous(name="Change in cases per 100,000 in the past 2 weeks",
                     labels=scales::label_percent(accuracy=1))+
  scale_colour_paletteer_c("scico::roma",
                           limit=c(-1,1)*max(abs(subset(data, !is.na(rel2wk))$rel2wk)))+
  facet_geo(~regionName, grid=mygrid)+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        plot.title=element_text(face="bold", size=rel(1.2)))+
  labs(title="The relative fall in COVID-19 cases is most unequal in Yorkshire and the North West",
       subtitle=paste0("Change in rolling 7-day rates of new COVID-19 cases for MSOAs in England between ",
                       max(IMD_MSOA$date)-weeks(2), " and ", max(IMD_MSOA$date)),
       caption="Data from PHE, ONS & MHCLG | Plot by @VictimOfMaths")
dev.off()
# Aggregate MSOAs into national deprivation deciles and compute the pooled
# relative change per decile within each region.
deciledata <- data %>%
  mutate(decile=ntile(IMDrank, 10)) %>%
  group_by(regionName, decile) %>%
  summarise(oldcases=sum(oldcases, na.rm=TRUE), abs2wk=sum(abs2wk, na.rm=TRUE),
            rel2wk=abs2wk/oldcases)
# Bar chart of relative change by deprivation decile, per region.
tiff("Outputs/COVIDMSOACaseRatexIMDRelxRegBar.tiff", units="in", width=10, height=10, res=500)
ggplot(deciledata)+
  geom_col(aes(x=decile, y=rel2wk, fill=rel2wk), show.legend=FALSE)+
  scale_x_continuous(name="Deprivation", breaks=c(1,10),
                     labels=c("Least\ndeprived", "Most\ndeprived"))+
  scale_y_continuous(name="Change in cases per 100,000 in the past 2 weeks",
                     labels=label_percent(accuracy=1))+
  scale_fill_paletteer_c("pals::ocean.tempo",
                         limit=c(NA,0), direction=-1)+
  facet_geo(~regionName, grid=mygrid)+
  theme_classic()+
  theme(strip.background=element_blank(), strip.text=element_text(face="bold", size=rel(1)),
        plot.title=element_text(face="bold", size=rel(1.5)))+
  labs(title="COVID-19 cases are falling across England, but at unequal rates",
       subtitle=paste0("Change in rolling 7-day rates of new COVID-19 cases for MSOAs in England between ",
                       max(IMD_MSOA$date)-weeks(2), " and ", max(IMD_MSOA$date)),
       caption="Data from Public Health England, Office for National Statistics\nand the Ministry of Housing, Communities & Local Government\n\nPlot and analysis by Colin Angus")
dev.off()
# Run the package spell check only when the spelling package is available
# AND we are on CI (the CI environment variable is truthy). `&&`
# short-circuits, so the environment variable is only consulted after the
# package check succeeds — identical order to the original nested ifs.
if (requireNamespace('spelling', quietly = TRUE) &&
    isTRUE(as.logical(Sys.getenv("CI")))) {
  spelling::spell_check_test(vignettes = TRUE, error = FALSE,
                             skip_on_cran = TRUE)
}
# ---- Regression tests: diffObj dispatch on S4 classes and user methods ----
# NOTE(review): rdsf(100)/rdsf(200) presumably load stored reference outputs
# via the helper sourced below — confirm in _helper/init.R.
NAME <- "methods"
source(file.path('_helper', 'init.R'))
# Everything runs inside a private environment so the S4 class and method
# registrations do not leak into the global environment.
par.env <- new.env()
local(
  envir=par.env, {
  suppressWarnings(
    setClass(
      "testdiffobj", slots=c(a="integer"), where=par.env
  ) )
  # Default diffObj output for two instances differing in slot `a`.
  print(
    all.equal(
      as.character(diffObj(new("testdiffobj", a=1L), new("testdiffobj", a=2L))),
      rdsf(100)
  ) )
  # Register a user method that forces mode="unified" unless the caller
  # explicitly passed a mode in `...`.
  setMethod("diffObj", c("testdiffobj", "testdiffobj"),
    function(target, current, ...) {
      dots <- match.call(expand.dots=FALSE)[["..."]]
      if("mode" %in% names(dots))
        callNextMethod()
      else
        callNextMethod(target=target, current=current, ..., mode="unified")
    },
    where=par.env
  )
  # Clean up the method registration when this block exits.
  on.exit(
    removeMethod("diffObj", c("testdiffobj", "testdiffobj"), where=par.env)
  )
  # With the method in place the default call should now produce the
  # unified-mode reference output.
  print(
    all.equal(
      as.character(diffObj(new("testdiffobj", a=1L), new("testdiffobj", a=2L))),
      rdsf(200)
  ) )
  # An explicit mode overrides the method's default.
  print(
    all.equal(
      as.character(
        diffObj(
          new("testdiffobj", a=1L), new("testdiffobj", a=2L), mode="sidebyside"
      ) ),
      rdsf(100)
  ) )
  # An invalid mode should raise an error (captured and printed by try()).
  try(
    diffObj(new("testdiffobj", a=1L), new("testdiffobj", a=2L), mode="hello")
  )
})
# Plot method for "marginalRelevance" objects.
#
# Args:
#   x:       object with components orderedData (observations x features,
#            columns sorted by relevance), rank, bestVars and score.
#   newdata: optional new data matrix/data frame whose columns are reordered
#            by x$rank before plotting; defaults to x$orderedData.
#   n.feat:  number of top-ranked features to show (default: all).
#   type:    "default" (score curve), "parallelcoord" or "pairs".
#   ...:     forwarded to matplot()/pairs().
#
# Returns nothing; called for its plotting side effect.
plot.marginalRelevance <- function (x, newdata = NULL, n.feat = NULL, type = "default", ...)
{
  NF <- dim(x$orderedData)[2]
  if (is.null(n.feat)) n.feat <- NF

  if (is.null(newdata)) {
    ordered <- x$orderedData
  } else {
    # Reorder newdata's columns by marginal-relevance rank so that column i
    # holds the i-th most relevant feature.
    ordered <- matrix(0, nrow(newdata), NF)
    for (i in seq_len(NF)) ordered[, i] <- newdata[, which(x$rank == i)]
  }

  if (type == "parallelcoord") {
    # drop = FALSE keeps dt a matrix even when n.feat == 1 (the original
    # collapsed it to a vector and apply() failed). An unused range()
    # computation was also removed here.
    dt <- ordered[, seq_len(n.feat), drop = FALSE]
    # Rescale each feature to [0, 1] so the coordinates are comparable.
    dt <- apply(dt, 2L, function(col) (col - min(col, na.rm = TRUE)) / (max(col, na.rm = TRUE) - min(col, na.rm = TRUE)))
    dt <- matrix(dt, ncol = n.feat)
    matplot(seq_len(ncol(dt)), t(dt), xaxt = "n", yaxt = "n", bty = "n",
            xlab = "Ordered by marginal relevance", ylab = "", type = 'l', lty = 1, ...)
    axis(1, at = seq_len(n.feat), labels = x$bestVars[seq_len(n.feat)], cex.axis = 0.7)
    # Vertical guide line for each coordinate axis.
    for (i in seq_len(ncol(dt))) lines(c(i, i), c(0, 1), col = "grey70")
  }
  else if (type == "pairs") {
    pairs(ordered[, seq_len(n.feat), drop = FALSE], labels = x$bestVars[seq_len(n.feat)], ...)
  }
  else if (type == "default") {
    # col = 0 hides the points; only the connecting score line is visible.
    plot(seq_len(NF), x$score, xlab = "", ylab = "MR score", col = 0)
    lines(seq_len(NF), x$score)
  }
  else stop("error: Plot type not supported for a marginalRelevance object")
}
# Keep only the elements shared across ALL levels of a metadata feature.
#
# A "spectral_count_object" is a 4-element list: [[1]] spectral count table,
# [[2]] sample metadata, [[3]] peptide/protein annotation, [[4]] the tag
# "spectral_count_object". Counts are summed per level of `metadata_feature`,
# elements (rows) present in every level are retained, and a new object of
# the same shape is returned. Invalid inputs stop with an error.
filter_shared <-
function(spectral_count_object, metadata_feature){
spectral_count_object <- spectral_count_object
# Validate the object shape and its type tag.
if(length(spectral_count_object) == 4){
if(spectral_count_object[[4]] == "spectral_count_object"){
type_sc_data <- names(spectral_count_object[1])
metadata <- spectral_count_object[[2]]
if(metadata_feature %in% colnames(metadata) == TRUE){
sc_data <- spectral_count_object[[1]]
# Relabel sample columns by their feature level, then sum the columns
# that share a level so there is one column per level.
colnames(sc_data) <- metadata[[metadata_feature]]
sc_data <- sapply(split.default(sc_data, names(sc_data)), rowSums)
sc_data <- as.data.frame(sc_data)
# Long format: one row per (element, feature level) pair.
decision_matrix <- stack(sc_data[, 1:length(colnames(sc_data))])
names(decision_matrix)[1] <- "spectra"
names(decision_matrix)[2] <- "feature"
decision_matrix <- cbind.data.frame(
decision_matrix,
element = rep(rownames(sc_data), length(colnames(sc_data)))
)
# Presence/absence per pair, then count the levels each element appears in.
decision_matrix$presence <- ifelse(decision_matrix$spectra == 0, 0, 1)
decision_matrix <- tapply(decision_matrix$presence,
list(decision_matrix$element, decision_matrix$feature),
FUN=sum)
decision_matrix <- as.data.frame(decision_matrix)
elements_count <- decision_matrix[, 1:length(colnames(sc_data))] > 0
elements_count <- apply(elements_count, 1, sum)
decision_matrix$elements <- as.factor(row.names(decision_matrix))
decision_matrix$counter <- elements_count
# Shared = present in every level of the feature.
shared_elements <- as.vector(decision_matrix$elements[decision_matrix$counter == length(colnames(sc_data))])
new_sc_data <- spectral_count_object[[1]]
new_sc_data <- new_sc_data[rownames(new_sc_data) %in% shared_elements, ]
peps_prots <- spectral_count_object[[3]]
# Choose which annotation column identifies the elements: the finest
# taxonomic rank present in the annotation, if any...
if("species" %in% colnames(peps_prots) | "genus" %in% colnames(peps_prots) |
"family" %in% colnames(peps_prots) | "order" %in% colnames(peps_prots) |
"class" %in% colnames(peps_prots) | "phylum" %in% colnames(peps_prots) |
"superkingdom" %in% colnames(peps_prots)){
if("species" %in% colnames(peps_prots)){
spectral_origin <- "species"
}
else if("genus" %in% colnames(peps_prots)){
spectral_origin <- "genus"
}
else if("family" %in% colnames(peps_prots)){
spectral_origin <- "family"
}
else if("order" %in% colnames(peps_prots)){
spectral_origin <- "order"
}
else if("class" %in% colnames(peps_prots)){
spectral_origin <- "class"
}
else if("phylum" %in% colnames(peps_prots)){
spectral_origin <- "phylum"
}
else{
spectral_origin <- "superkingdom"
}
new_peps_prots <- peps_prots[peps_prots[[spectral_origin]] %in% shared_elements, ]
}
# ...otherwise the identifier column matching the count-table type.
else{
if(type_sc_data == "SC_specific_peptides"){
new_peps_prots <- peps_prots[peps_prots$Peptide %in% shared_elements, ]
}
else if(type_sc_data == "SC_subgroups"){
new_peps_prots <- peps_prots[peps_prots$Subgroup %in% shared_elements, ]
}
else{
new_peps_prots <- peps_prots[peps_prots$Group %in% shared_elements, ]
}
}
# Summary of the filtered object, printed for the user.
print("========= Object information ============")
print(paste("Number of samples: ", length(unique(metadata$msrunfile)), sep = ""))
print(paste("Number of groups: ", length(unique(new_peps_prots$Group)), sep = ""))
print(paste("Number of subgroups or metaproteins: ", length(unique(new_peps_prots$Subgroup)), sep = ""))
print(paste("Number of peptides: ", length(unique(new_peps_prots$Peptide)), sep = ""))
print("=========================================")
# Reassemble a spectral_count_object with the same structure as the input.
output <- list(new_sc_data, metadata, new_peps_prots, "spectral_count_object")
names(output)[1] <- type_sc_data
names(output)[2] <- "metadata"
names(output)[3] <- "peptides_proteins"
names(output)[4] <- "type_object"
print("Spectral count object generated")
return(output)
}
else{
options_meta <- paste(colnames(metadata), collapse = "', '")
stop(paste(c("The second argument is invalid. This argument must be one these options: '", options_meta), collapse = " "))
}
}
else{
stop("Invalid spectral count object")
}
}
else{
stop("Invalid spectral count object")
}
} |
testthat::context("gs stress")
# Returns TRUE iff gsDesign() succeeds for every admissible (alpha, beta)
# pair on the supplied grids; any try() failure flips the flag to FALSE.
"alpha.beta.range.util" <- function(alpha, beta, type, sf) {
  ok <- TRUE
  for (a in alpha) {
    for (b in beta) {
      # Only pairs with beta < 1 - alpha - 0.1 are admissible designs.
      if (b >= 1 - a - 0.1) next
      fit <- try(gsDesign(test.type = type, alpha = a, beta = b, sfu = sf))
      if (inherits(fit, "try-error")) {
        ok <- FALSE
      }
    }
  }
  ok
}
# Returns TRUE iff gsDesign() succeeds for every value of the spending
# function parameter grid; any try() failure flips the flag to FALSE.
"param.range.util" <- function(param, type, sf) {
  ok <- TRUE
  for (p in param) {
    fit <- try(gsDesign(test.type = type, sfu = sf, sfupar = p))
    if (inherits(fit, "try-error")) {
      ok <- FALSE
    }
  }
  ok
}
# Parameter grids swept by the stress tests below.
a1    <- round(seq(0.05, 0.95, by = 0.05), digits = 2)  # alpha grid (most designs)
a2    <- round(seq(0.05, 0.45, by = 0.05), digits = 2)  # alpha grid (test.type = 2)
b     <- round(seq(0.05, 0.95, by = 0.05), digits = 2)  # beta grid
nu    <- round(seq(0.1, 1.5, by = 0.1), digits = 1)     # sfExponential parameter
rho   <- round(seq(1, 15, by = 1), digits = 0)          # sfPower parameter
gamma <- round(seq(-5, 5, by = 1), digits = 0)          # sfHSD parameter
# sfExponential across its parameter grid, for all six design types.
testthat::test_that("test.stress.sfExp.type1", {
  ok <- param.range.util(sf = sfExponential, type = 1, param = nu)
  testthat::expect_true(ok, info = "Type 1 sfExponential stress test")
})
testthat::test_that("test.stress.sfExp.type2", {
  ok <- param.range.util(sf = sfExponential, type = 2, param = nu)
  testthat::expect_true(ok, info = "Type 2 sfExponential stress test")
})
testthat::test_that("test.stress.sfExp.type3", {
  ok <- param.range.util(sf = sfExponential, type = 3, param = nu)
  testthat::expect_true(ok, info = "Type 3 sfExponential stress test")
})
testthat::test_that("test.stress.sfExp.type4", {
  ok <- param.range.util(sf = sfExponential, type = 4, param = nu)
  testthat::expect_true(ok, info = "Type 4 sfExponential stress test")
})
testthat::test_that("test.stress.sfExp.type5", {
  ok <- param.range.util(sf = sfExponential, type = 5, param = nu)
  testthat::expect_true(ok, info = "Type 5 sfExponential stress test")
})
testthat::test_that("test.stress.sfExp.type6", {
  ok <- param.range.util(sf = sfExponential, type = 6, param = nu)
  testthat::expect_true(ok, info = "Type 6 sfExponential stress test")
})
# sfHSD (Hwang-Shih-DeCani) across its gamma grid, for all six design types.
testthat::test_that("test.stress.sfHSD.type1", {
  ok <- param.range.util(sf = sfHSD, type = 1, param = gamma)
  testthat::expect_true(ok, info = "Type 1 sfHSD stress test")
})
testthat::test_that("test.stress.sfHSD.type2", {
  ok <- param.range.util(sf = sfHSD, type = 2, param = gamma)
  testthat::expect_true(ok, info = "Type 2 sfHSD stress test")
})
testthat::test_that("test.stress.sfHSD.type3", {
  ok <- param.range.util(sf = sfHSD, type = 3, param = gamma)
  testthat::expect_true(ok, info = "Type 3 sfHSD stress test")
})
testthat::test_that("test.stress.sfHSD.type4", {
  ok <- param.range.util(sf = sfHSD, type = 4, param = gamma)
  testthat::expect_true(ok, info = "Type 4 sfHSD stress test")
})
testthat::test_that("test.stress.sfHSD.type5", {
  ok <- param.range.util(sf = sfHSD, type = 5, param = gamma)
  testthat::expect_true(ok, info = "Type 5 sfHSD stress test")
})
testthat::test_that("test.stress.sfHSD.type6", {
  ok <- param.range.util(sf = sfHSD, type = 6, param = gamma)
  testthat::expect_true(ok, info = "Type 6 sfHSD stress test")
})
# Lan-DeMets O'Brien-Fleming across the (alpha, beta) grid, all six types.
# Type 2 (symmetric designs) uses the restricted alpha grid a2.
testthat::test_that("test.stress.sfLDOF.type1", {
  ok <- alpha.beta.range.util(sf = sfLDOF, type = 1, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 1 LDOF stress test")
})
testthat::test_that("test.stress.sfLDOF.type2", {
  ok <- alpha.beta.range.util(sf = sfLDOF, type = 2, alpha = a2, beta = b)
  testthat::expect_true(ok, info = "Type 2 LDOF stress test")
})
testthat::test_that("test.stress.sfLDOF.type3", {
  ok <- alpha.beta.range.util(sf = sfLDOF, type = 3, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 3 LDOF stress test")
})
testthat::test_that("test.stress.sfLDOF.type4", {
  ok <- alpha.beta.range.util(sf = sfLDOF, type = 4, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 4 LDOF stress test")
})
testthat::test_that("test.stress.sfLDOF.type5", {
  ok <- alpha.beta.range.util(sf = sfLDOF, type = 5, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 5 LDOF stress test")
})
testthat::test_that("test.stress.sfLDOF.type6", {
  ok <- alpha.beta.range.util(sf = sfLDOF, type = 6, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 6 LDOF stress test")
})
# Lan-DeMets Pocock across the same grids.
testthat::test_that("test.stress.sfLDPocock.type1", {
  ok <- alpha.beta.range.util(sf = sfLDPocock, type = 1, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 1 LDPocock stress test")
})
testthat::test_that("test.stress.sfLDPocock.type2", {
  ok <- alpha.beta.range.util(sf = sfLDPocock, type = 2, alpha = a2, beta = b)
  testthat::expect_true(ok, info = "Type 2 LDPocock stress test")
})
testthat::test_that("test.stress.sfLDPocock.type3", {
  ok <- alpha.beta.range.util(sf = sfLDPocock, type = 3, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 3 LDPocock stress test")
})
testthat::test_that("test.stress.sfLDPocock.type4", {
  ok <- alpha.beta.range.util(sf = sfLDPocock, type = 4, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 4 LDPocock stress test")
})
testthat::test_that("test.stress.sfLDPocock.type5", {
  ok <- alpha.beta.range.util(sf = sfLDPocock, type = 5, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 5 LDPocock stress test")
})
testthat::test_that("test.stress.sfLDPocock.type6", {
  ok <- alpha.beta.range.util(sf = sfLDPocock, type = 6, alpha = a1, beta = b)
  testthat::expect_true(ok, info = "Type 6 LDPocock stress test")
})
# sfPower across its rho grid, for all six design types.
testthat::test_that("test.stress.sfPower.type1", {
  ok <- param.range.util(sf = sfPower, type = 1, param = rho)
  testthat::expect_true(ok, info = "Type 1 sfPower stress test")
})
testthat::test_that("test.stress.sfPower.type2", {
  ok <- param.range.util(sf = sfPower, type = 2, param = rho)
  testthat::expect_true(ok, info = "Type 2 sfPower stress test")
})
testthat::test_that("test.stress.sfPower.type3", {
  ok <- param.range.util(sf = sfPower, type = 3, param = rho)
  testthat::expect_true(ok, info = "Type 3 sfPower stress test")
})
testthat::test_that("test.stress.sfPower.type4", {
  ok <- param.range.util(sf = sfPower, type = 4, param = rho)
  testthat::expect_true(ok, info = "Type 4 sfPower stress test")
})
testthat::test_that("test.stress.sfPower.type5", {
  ok <- param.range.util(sf = sfPower, type = 5, param = rho)
  testthat::expect_true(ok, info = "Type 5 sfPower stress test")
})
testthat::test_that("test.stress.sfPower.type6", {
  ok <- param.range.util(sf = sfPower, type = 6, param = rho)
  testthat::expect_true(ok, info = "Type 6 sfPower stress test")
})
# Leave-one-out q2 wrapper: delegates to mainfunc.q2() with as many folds
# as rows and group size 1 (i.e. each observation left out once).
# NOTE(review): fold semantics inferred from the name -- confirm against
# mainfunc.q2's documentation.
looq2 <-
function( modelData, formula = NULL, nu = 1, round = 4, extOut = FALSE, extOutFile = NULL ){
  n.obs <- nrow(modelData)
  mainfunc.q2(modelData, NULL, formula, n.obs, 1, nu, round, extOut, extOutFile, match.call())
}
# Negative binomial log-likelihood with a log link on the success
# probability (prob = exp(x %*% beta)); suitable as an objective for optim().
# y: one column of 0/1 outcomes (Bernoulli), or columns of (success, failure)
# counts whose row sums give the number of trials.
# NOTE(review): exp(eta) is not clamped, so prob > 1 yields NaN likelihoods
# for extreme beta -- presumably the optimizer stays in the valid region.
negll <- function(beta, x, y){
  successes <- y[, 1]
  trials <- if (ncol(y) == 1) 1 else rowSums(y)
  linpred <- x %*% as.matrix(beta)
  -sum(dbinom(successes, size = trials, prob = exp(linpred), log = TRUE))
}
#' Expand a frequency table into one row per individual observation.
#'
#' @param x A table (converted via `as.data.frame.table`) or a data frame
#'   containing a frequency column.
#' @param var.names Optional replacement names for the expanded columns;
#'   must match the number of non-frequency columns exactly.
#' @param freq Name of the frequency column (default "Freq").
#' @param ... Passed on to `type.convert()` (e.g. `as.is = TRUE`).
#' @return A data frame with each row of `x` repeated `freq` times and the
#'   frequency column removed; columns re-typed by `type.convert()`.
expand.dft <- function(x, var.names = NULL, freq = "Freq", ...){
  if(inherits(x, "table")){
    x <- as.data.frame.table(x, responseName = freq)
  }
  freq.col <- which(colnames(x) == freq)
  if (length(freq.col) == 0){
    stop(paste(sQuote("freq"), "not found in column names"))
  }
  # Replicate each row according to its frequency in one vectorized step
  # (replaces an O(n^2) per-row sapply/rbind); drop = FALSE keeps a data
  # frame even when only a single non-frequency column remains, which
  # previously collapsed to a vector and broke the re-typing loop below.
  DF <- x[rep(seq_len(nrow(x)), times = x[[freq.col]]), -freq.col, drop = FALSE]
  for (i in seq_len(ncol(DF))){
    DF[[i]] <- type.convert(as.character(DF[[i]]), ...)
  }
  rownames(DF) <- NULL
  if (!is.null(var.names)){
    if (length(var.names) < ncol(DF)){
      stop(paste("Too few", sQuote("var.names"), "given."))
    } else if (length(var.names) > ncol(DF)){
      stop(paste("Too many", sQuote("var.names"), "given."))
    } else {
      names(DF) <- var.names
    }
  }
  return(DF)
}
expand.table <- expand.dft
# Multinomial relative-risk (RR) estimation workhorse.
#
# Fits nnet::multinom() on `formula`, then contrasts predicted outcome
# probabilities at two levels of the first right-hand-side variable
# (`comparecov` vs `basecov`), holding other covariates at `fixcov` (or at
# their first level / minimum by default). Variances of the relative RRs are
# obtained by the delta method from the model Hessian.
# Returns list(fit, RRR, RR, delta.var, fix.cov), or a character message on
# invalid exposure input.
printmnRR <- function(formula, basecov, comparecov, fixcov = NULL, data){
fit <- multinom(formula, data = data, trace = FALSE, Hess = TRUE)
# Covariate names from the RHS of the formula; first one is the exposure.
tmp <- strsplit(as.character(formula)[3], "[+]")
varnames <- gsub(" ","", tmp[[1]])
# b = number of exposure levels.
b <- length(levels(as.factor(data[ ,names(data) == varnames[1]])))
baseind = compareind <- 1
# Default base/comparison levels: the first factor level.
if (is.null(basecov)) basecov <- levels(data[ ,names(data) == varnames[1]])[baseind]
if (is.null(comparecov)) comparecov <- levels(data[ ,names(data) == varnames[1]])[compareind]
if (class(data[ ,names(data) == varnames[1]]) == "factor"){
if (!as.character(basecov) %in% as.character(levels(as.factor(data[ ,names(data) == varnames[1]])))) return("Invalid baseline exposure.")
if (!as.character(comparecov) %in% as.character(levels(as.factor(data[ ,names(data) == varnames[1]])))) return("Invalid exposure variable.")
}
# Locate the coefficient columns matching the base/comparison levels.
for (j in 1:(b-1)){
if (as.character(gsub(varnames[1], "",colnames(coefficients(fit))[j+1])) == as.character(basecov)){
baseind <- j+1
}
if (as.character(gsub(varnames[1], "",colnames(coefficients(fit))[j+1])) == as.character(comparecov)){
compareind <- j+1
}
}
# p = number of confounders; build default fixed values for them
# (first level for factors, minimum for numeric).
p <- length(varnames)-1
if (p == 0) {
newfixcov = NULL
} else if (p > 0) {
newfixcov <- t(as.matrix(rep(0, p)))
subdat = as.data.frame( data[,which(names(data) %in% varnames[-1])])
tmp <- which(apply(subdat, 2, class)!="numeric")
for (q in 1:p) {
if(class(subdat[,q]) == "factor"){
newfixcov[q] <- levels(as.factor(subdat[,q]))[1]
}else{
newfixcov[q] <- min(subdat[,q])
}
}
newfixcov <- as.data.frame(newfixcov)
names(newfixcov) = names(data)[which(names(data) %in% varnames[-1])]
}
# Override the defaults with any user-supplied fixed covariate values.
if( sum(names(fixcov) %in% names(newfixcov)) > 0 ) {
tmpind <- which(names(newfixcov) %in% names(fixcov))
for(j in 1:length(tmpind)){
newfixcov[tmpind[j]] = eval(parse(text=paste0("fixcov$", names(newfixcov[tmpind])[j])))
}
}
fixcov = newfixcov
basecov <- levels(as.factor(data[ ,names(data) == varnames[1]]))[baseind]
comparecov <- levels(as.factor(data[ ,names(data) == varnames[1]]))[compareind]
# Covariate rows for the exposed (comparison) and unexposed (base) profiles.
expose.cov <- data.frame(as.factor(comparecov)); names(expose.cov) <- varnames[1]
unexpose.cov <- data.frame(as.factor(basecov)); names(unexpose.cov) <- varnames[1]
if (length(fixcov) > 0 & length(names(fixcov)) > 0 & length(fixcov) == length(varnames)-1) {
expose.cov <- cbind(expose.cov, fixcov)
unexpose.cov <- cbind(unexpose.cov, fixcov)
} else if (length(names(fixcov)) == 0 & length(fixcov) > 0) {
expose.cov <- cbind(expose.cov, fixcov); names(expose.cov)[2:length(expose.cov)] = varnames[2:length(varnames)]
unexpose.cov <- cbind(unexpose.cov, fixcov); names(unexpose.cov)[2:length(unexpose.cov)] = varnames[2:length(varnames)]
} else if (p > 0){
return("Invalid data frame for confounders")
}
# Numeric covariates must be numeric in the prediction frames.
for (i in 1:ncol(expose.cov)) {
if (class(data[ , names(data) == names(expose.cov)[i]]) != "factor") {
expose.cov[,i] <- as.numeric(expose.cov[,i]); unexpose.cov[,i] <- as.numeric(unexpose.cov[,i])
}
}
# Predicted outcome probabilities under both profiles; RR and RRR.
betas <- coefficients(fit)
exposed <- predict(fit, expose.cov, type = "probs")
unexposed <- predict(fit, unexpose.cov, type = "probs")
expose.set <- exposed / exposed[1]; expose.set <- expose.set[-1]
unexpose.set <- unexposed / unexposed[1]; unexpose.set <- unexpose.set[-1]
expose.sum <- sum(expose.set)
unexpose.sum <- sum(unexpose.set)
RR <- exposed / unexposed
RRR <- RR / RR[1]
# Delta method: B.vec holds the gradient of each RR w.r.t. the coefficients.
n.par <- length(betas)
K <- nrow(betas)
q <- ncol(betas)
B.vec <- matrix(0, K+1, n.par)
# Gradient of the reference-category RR (row 1).
B.vec[1, 1:K] = ((1+expose.sum)^(-2))*(expose.set*(1+unexpose.sum) - unexpose.set*(1+expose.sum))
for(j in 2:b){
for(l in 1:K){
B.vec[1, ((j-1)*K + l)] = ((1+expose.sum)^(-2))*((compareind == j)*expose.set[l]*(1+unexpose.sum) - (baseind == j)*unexpose.set[l]*(1+expose.sum))
}
}
# Contributions of the confounder coefficients (columns beyond the exposure).
if(q > b){
for(j in (b+1):q){
if (colnames(coefficients(fit))[j] %in% names(fixcov)) {
tmp <- which(names(fixcov) %in% colnames(coefficients(fit))[j])
B.vec[1, ((j-1)*K + 1):(j*K)] <- (1+expose.sum)^(-2)*(as.numeric(fixcov[tmp])*expose.set*(1+unexpose.sum) - as.numeric(fixcov[tmp])*unexpose.set*(1+expose.sum) )
} else if (sum(startsWith(colnames(coefficients(fit))[j], names(fixcov))) > 0) {
tmp <- which(startsWith(colnames(coefficients(fit))[j], names(fixcov)))
if (gsub(names(fixcov)[tmp], "",colnames(coefficients(fit))[j]) == as.character(fixcov[,tmp]) ) {
B.vec[1, ((j-1)*K + 1):(j*K)] <- (1+expose.sum)^(-2)*(1*expose.set*(1+unexpose.sum) - 1*unexpose.set*(1+expose.sum) )
} else {
B.vec[1, ((j-1)*K + 1):(j*K)] <- (1+unexpose.sum)^(-2)*(0*expose.set*(1+unexpose.sum) - 0*unexpose.set*(1+expose.sum) )
}
}
}
}
# Gradients of the non-reference-category RRs (rows 2..K+1).
for(k in 2:(K+1)){
coefpart = exp( betas[(k-1), compareind]*(compareind %in% 2:b) -
betas[(k-1), baseind]*(baseind %in% 2:b) )
B.vec[k, 1:K] = ((1+expose.sum)^(-2))*coefpart*(expose.set*(1+unexpose.sum) - unexpose.set*(1+expose.sum))
for(j in 2:b){
for(l in 1:K){
B.vec[k, ((j-1)*K + l)] = ((1+expose.sum)^(-2))*( (compareind == j)*coefpart*expose.set[l]*(1+unexpose.sum) - coefpart*(baseind == j)*unexpose.set[l]*(1+expose.sum))
if((k-1) == l){
# Extra term when the derivative is taken w.r.t. the RR's own coefficient.
doublepart = ((compareind == j) - (baseind == j))*coefpart*(1+unexpose.sum) + coefpart*(baseind == j)*unexpose.set[l]
B.vec[k, ((j-1)*K + l)] = ((1+expose.sum)^(-2))*( (compareind == j)*coefpart*expose.set[l]*(1+unexpose.sum) - doublepart*(1+expose.sum))
}
}
}
if(q > b){
for(j in (b+1):q){
if (colnames(coefficients(fit))[j] %in% names(fixcov)) {
tmp <- which(names(fixcov) %in% colnames(coefficients(fit))[j])
B.vec[k, ((j-1)*K + 1):(j*K)] <- (1+expose.sum)^(-2)*(as.numeric(as.character(fixcov[tmp]))*coefpart*expose.set*(1+unexpose.sum) - as.numeric(fixcov[tmp])*coefpart*unexpose.set*(1+expose.sum) )
} else if (sum(startsWith(colnames(coefficients(fit))[j], names(fixcov))) > 0) {
tmp <- which(startsWith(colnames(coefficients(fit))[j], names(fixcov)))
if (gsub(names(fixcov)[tmp], "",colnames(coefficients(fit))[j]) == as.character(fixcov[,tmp])) {
B.vec[k, ((j-1)*K + 1):(j*K)] <- (1+expose.sum)^(-2)*(1*coefpart*expose.set*(1+unexpose.sum) - 1*coefpart*unexpose.set*(1+expose.sum) )
} else {
B.vec[k, ((j-1)*K + 1):(j*K)] <- (1+expose.sum)^(-2)*(0*coefpart*expose.set*(1+unexpose.sum) - 0*coefpart*unexpose.set*(1+expose.sum) )
}
}
}
}
}
# Reorder the inverse Hessian so coefficients are grouped by column, to
# match the layout of B.vec, then apply the delta method.
cov.mat <- solve(fit$Hessian)
orders <- c()
for(j in 1:q){
orders <- c(orders, ((1:K)-1)*q + j)
}
cov.mat <- cov.mat[orders, orders]
deltavar <- rep(0, K+1)
for(k in 1:(K+1)){
for (i in 1:n.par) {
for (j in 1:n.par) {
deltavar[k] <- deltavar[k] + cov.mat[i,j]*B.vec[k,i]*B.vec[k,j]
}
}
}
return(list(fit = fit, RRR = RRR, RR = RR, delta.var = deltavar, fix.cov = fixcov))
}
#' Multinomial relative risks with optional bootstrap resampling.
#'
#' @param formula,basecov,comparecov,fixcov,data Passed to `printmnRR()`.
#' @param boot If TRUE, additionally return bootstrap replicates of RR, RRR
#'   and the delta-method variance.
#' @param n.boot Number of bootstrap resamples.
#' @return The `printmnRR()` result list; with `boot = TRUE`, augmented with
#'   matrices `boot.rr`, `boot.rrr`, `boot.var` (one row per resample). A
#'   character message from `printmnRR()` (invalid input) is passed through.
multinRR = function(formula, basecov, comparecov, fixcov = NULL, data, boot = FALSE,
                    n.boot = 100){
  results <- printmnRR(formula = formula, basecov = basecov, comparecov = comparecov, fixcov = fixcov,
                       data = data)
  # printmnRR() reports user-input problems as character messages.
  if (is.character(results)) return(results)
  if (!boot) return(results)
  # Nonparametric bootstrap: refit on rows resampled with replacement.
  # seq_len() keeps n.boot = 0 safe (returns empty replicate matrices).
  boot.rr = boot.rrr = boot.var <- matrix(0, n.boot, length(results$RR))
  for (r in seq_len(n.boot)) {
    newdat <- data[sample(nrow(data), replace = TRUE), ]
    boot.results <- printmnRR(formula = formula, basecov = basecov, comparecov = comparecov,
                              fixcov = fixcov, data = newdat)
    boot.rr[r, ] <- boot.results$RR
    boot.rrr[r, ] <- boot.results$RRR
    boot.var[r, ] <- boot.results$delta.var
  }
  return(list(fit = results$fit, RRR = results$RRR,
              RR = results$RR, delta.var = results$delta.var,
              boot.rr = boot.rr, boot.rrr = boot.rrr,
              boot.var = boot.var, fix.cov = results$fix.cov))
}
# Tests for the CETT (carriers / enzymes / targets / transporters) actions
# parsers, run against the bundled biotech DrugBank record.
context("test cett actions parsers")
library(dbparser)
library(testthat)
library(XML)
library(tibble)
library(purrr)
# Sample DrugBank record shipped with the package.
biotech <- "drugbank_record_biotech.xml"
# NOTE(review): dbConnect()/dbDisconnect() come from DBI, which is not
# attached here -- presumably loaded elsewhere in the test setup; confirm.
database_connection <- dbConnect(RSQLite::SQLite(), ":memory:")
# Loading the XML record must succeed before any parser test runs.
test_that(
desc = "Read database",
code = {
expect_true(read_drugbank_xml_db(
system.file("extdata", biotech, package = "dbparser")
))
}
)
# Carrier actions: absent in this record (0 rows) but still a tibble;
# calling with TRUE (save to DB without a connection) must error.
test_that(
desc = "Read drug carriers actions",
code = {
expect_equal(
nrow(carriers_actions()),
0
)
expect_true(is_tibble(carriers_actions()))
expect_error(
nrow(carriers_actions(TRUE))
)
expect_error(carriers_actions(TRUE))
}
)
# Enzyme actions: same pattern -- empty tibble, error on TRUE.
test_that(
desc = "Read drug enzymes actions attributes",
code = {
expect_equal(
nrow(enzymes_actions()),
0
)
expect_true(is_tibble(enzymes_actions()))
expect_error(enzymes_actions(TRUE))
}
)
# Target actions: this record carries one "inhibitor" action.
test_that(
desc = "Read drug targ actions attributes",
code = {
expect_match(
as.character(
targets_actions()[["action"]][[1]]
),
"inhibitor"
)
expect_true(is_tibble(targets_actions()))
expect_error(targets_actions(TRUE))
}
)
# Transporter actions: empty tibble, error on TRUE.
test_that(
desc = "Read drug transporters actions attributes",
code = {
expect_equal(
nrow(transporters_actions()),
0
)
expect_true(is_tibble(transporters_actions()))
expect_error(transporters_actions(TRUE))
}
)
dbDisconnect(database_connection) |
# Parse a product filename into its metadata fields.
# The name follows a fixed-width convention:
#   [1-3] product, [4] interval, [5] statistic, [6-17] timestamp
#   (YYYYmmddHHMM), [18-20] version, [21-22] grid, [23-27] source,
#   [28-29] processing level, [30-31] area.
# Codes are translated through the `naming_conventions` lookup tables,
# which are defined elsewhere in the package.
get_file_info <- function(filename) {
  bn <- basename(filename)  # every field is cut from the same basename
  product <- substr(bn, 1, 3)
  interval <- substr(bn, 4, 4)
  stats <- substr(bn, 5, 5)
  date_time <- substr(bn, 6, 17)
  version_number <- substr(bn, 18, 20)
  grid_code <- substr(bn, 21, 22)
  # "UD" (user-defined) is kept verbatim; numeric codes are normalised
  # so that e.g. "03" becomes "3" before the lookup.
  grd <- if (grid_code == "UD") grid_code else as.character(as.numeric(grid_code))
  source <- substr(bn, 23, 27)
  level <- substr(bn, 28, 29)
  ar <- substr(bn, 30, 31)
  id <- paste0(product, interval, stats, version_number, source, level)
  list(
    product_type = product,
    time_interval = naming_conventions$TimeInterval[[interval]],
    statistics = naming_conventions$Statistics[[stats]],
    date_time = as.POSIXct(date_time, format = "%Y%m%d%H%M"),
    version_number = version_number,
    grid = naming_conventions$Grid[[grd]],
    data_source = naming_conventions$Source[[source]],
    processing_level = level,
    area = naming_conventions$Area[[ar]],
    id = id
  )
}
# knitr chunk defaults for this vignette: collapse source and output, and
# prefix output lines with "#>" (restores the string literal truncated by
# extraction -- the standard knitr vignette template value).
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
# --- mcvis vignette demo code (multicollinearity visualisation) ---
# Example 1: p = 6 predictors where X1 is (almost) the sum of X2 and X3,
# creating one strong collinearity for mcvis to detect and plot.
set.seed(1)
p = 6
n = 100
X = matrix(rnorm(n*p), ncol = p)
X[,1] = X[,2] + X[,3] + rnorm(n, 0, 0.01)
y = rnorm(n)
summary(lm(y ~ X))
library(mcvis)
mcvis_result = mcvis(X = X)
mcvis_result
plot(mcvis_result)
plot(mcvis_result, type = "igraph")
# Example 2: the "artificialeg" construction -- nine predictors built from a
# chain of linear dependencies, with x8 a near-linear combination of the
# others (rounded to 1 d.p.), so pairwise correlations alone look mild.
n=50
set.seed(8)
x1 = rnorm(n,0.22,2)
x7 = 0.5*x1 + rnorm(n,0,sd=2)
x6 = -0.75*x1 + rnorm(n,0,3)
x3 = -0.5-0.5*x6 + rnorm(n,0,2)
x9 = rnorm(n,0.6,3.5)
x4 = 0.5*x9 + rnorm(n,0,sd=3)
x2 = -0.5 + 0.5*x9 + rnorm(n,0,sd=2)
x5 = -0.5*x2+0.5*x3+0.5*x6-0.5*x9+rnorm(n,0,1.5)
x8 = x1 + x2 -2*x3 - 0.3*x4 + x5 - 1.6*x6 - 1*x7 + x9 +rnorm(n,0,0.5)
y = 0.6*x8 + rnorm(n,0,2)
artificialeg = round(data.frame(x1,x2,x3,x4,x5,x6,x7,x8,x9,y),1)
X = artificialeg[,1:9]
round(cor(X), 2)
mcvis_result = mcvis(X)
mcvis_result
plot(mcvis_result)
class(mcvis_result)
# Record the package versions used to build the vignette.
sessionInfo() |
#' Expected gain of multi-stage truncation selection.
#'
#' @param corr Correlation matrix; the first row/column is the target trait,
#'   the remaining ones the selection-stage indices.
#' @param Q Truncation points, one per selection stage, i.e. exactly
#'   `dim(corr)[1] - 1` values.
#' @param alg Algorithm object passed to `mvtnorm::pmvnorm()` (default
#'   `GenzBretz()`).
#' @param parallel If TRUE, evaluate the per-dimension terms on a PSOCK
#'   cluster (one core is left free for the master process).
#' @param Vg Genetic variance; the standardized gain is scaled by `sqrt(Vg)`.
#' @return The expected gain (a scalar).
`multistagegain` <-
function(corr, Q, alg= GenzBretz(),parallel=FALSE, Vg=1)
{
  if (parallel)
  {
    no_cores <- detectCores() - 1
    cl <- makeCluster(no_cores)
  }
  partial <- FALSE
  stages <- partial  # always FALSE here; the per-stage branch below is kept for reference
  lim.y <- -200      # finite proxy for an unbounded lower limit on the response
  k <- c(lim.y, Q)
  sum.dim <- length(k)
  # alpha: probability of jointly exceeding all truncation points.
  alphaofx <- pmvnorm(lower=k, corr=corr, algorithm=alg)
  dim <- sum.dim
  # Q must provide one truncation point per stage.
  # (Fixed: the old message said "dim(corr)[1]+1", contradicting the check.)
  if (length(Q) != dim(corr)[1]-1)
  {
    stop("dimension of Q must be same as dim(corr)[1]-1")
  }
  # Replace infinite truncation points by the finite proxy.
  # NOTE(review): -lim.y is +200 regardless of the sign of the infinity --
  # kept as in the original; confirm intended for -Inf inputs.
  for (i in 1:dim)
  {
    if (is.infinite(k[i]))
    {
      k[i] <- -lim.y
    }
  }
  if (dim < 2)
  {
    stop("dimension of k must bigger than 1, otherwise this is not one-stage selection")
  } else if (dim == 2)
  {
    # Single selection stage: closed-form expression.
    gainresult <- corr[1,2]*dnorm(k[2])/alphaofx
  } else
  {
    # A[i, j]: standardized truncation point of dimension j given dimension i
    # is fixed at k[i].
    A <- array(0, c(dim, dim))
    for (i in 1:dim)
    {
      for (j in 1:dim)
      {
        if (i != j)
        {
          A[i,j] <- (k[j]-corr[i,j]*k[i])/ (1-corr[i,j]^2)^0.5
        }
      }
    }
    # part.corr[i, j, q]: partial correlation of dimensions i and j given q.
    part.corr <- array(1, c(dim, dim, dim))
    for (i in 1:dim)
    {
      for (j in 1:dim)
      {
        for (q in 1:dim)
        {
          if (i != j && q != j && i != q)
          {
            part.corr[i,j,q] <- (corr[i,j]-corr[i,q]*corr[j,q])/ ((1-corr[i,q]^2)^0.5 * (1-corr[j,q]^2)^0.5)
          }
        }
      }
    }
    # (dim-1)-variate orthant probability conditional on dimension q.
    j3q <- function(q, A, part.corr, dim, alg)
    {
      lower <- A[q,-q]
      corr <- part.corr[-q,-q,q]
      pmvnorm(lower = lower, upper = rep(Inf,c(dim-1)), mean = rep(0, length(lower)), corr = corr, sigma = NULL, algorithm = alg)
    }
    # One summand of the gain formula (worker for parSapply; reads `corr`
    # from the enclosing environment).
    loopi <- function(i, k, A, part.corr, dim, alg, alpha3)
    {
      corr[1,i]*dnorm(k[i])*j3q(i,A,part.corr,dim,alg)/alpha3
    }
    calculatx1 <- function(A, part.corr, dim, corr, k, alpha3, stages=FALSE)
    {
      if (stages)
      {
        # Per-stage contributions (unreached here: `stages` is always FALSE).
        outputarray <- rep(0, dim)
        if (parallel)
        {
          outputarray <- parSapply(cl=cl, 1:dim, FUN=loopi, k=k, A=A, part.corr=part.corr, dim=dim, alg=alg, alpha3=alpha3)
          outputarray[2:dim]
        } else
        {
          for (i in 1:dim)
          {
            outputarray[i] <- corr[1,i]*dnorm(k[i])*j3q(i,A,part.corr,dim,alg)/alpha3
          }
          outputarray[2:dim]
        }
      } else
      {
        # Total gain: sum over all dimensions.
        outputarray <- rep(0, dim)
        if (parallel)
        {
          outputarray <- parSapply(cl=cl, 1:dim, FUN=loopi, k=k, A=A, part.corr=part.corr, dim=dim, alg=alg, alpha3=alpha3)
          sum(outputarray)
        } else
        {
          for (i in 1:dim)
          {
            outputarray[i] <- corr[1,i]*dnorm(k[i])*j3q(i,A,part.corr,dim,alg)/alpha3
          }
          sum(outputarray)
        }
      }
    }
    gainresult <- calculatx1(A=A, part.corr=part.corr, dim=dim, corr=corr, k=k, alpha3=alphaofx, stages=stages)
  }
  if (parallel)
  {
    stopCluster(cl)
  }
  # Scale the standardized gain by the genetic standard deviation.
  if (stages)
  {
    gainresult*Vg^0.5
  } else
  {
    gainresult[[1]]*Vg^0.5
  }
}
# Custom population-scale estimator for FIA data.
#
# Computes design-based totals of user-defined tree- or condition-level
# variables in `x` and, when `y` is supplied, ratio estimates x/y with
# delta-method variances, following the rFIA workflow: summarize to plot,
# expand to estimation unit, then combine.
#
# db:       FIA.Database or Remote.FIA.Database (see readFIA/getFIA).
# x, y:     data frames keyed by PLT_CN, carrying EVAL_TYP (x only), exactly
#           one of TREE_BASIS/AREA_BASIS, and SUBP+TREE (tree-level) or
#           CONDID (condition-level) identifiers.
# xVars/yVars:  unquoted variable expressions to estimate.
# xGrpBy/yGrpBy: unquoted grouping columns; yGrpBy must be a subset of xGrpBy.
# method:   estimator ('TI' etc.); lambda: temporal weight for moving
#           averages.
# totals:   keep *_TOTAL columns; variance: report *_VAR instead of *_SE.
customPSE <- function(db,
x,
xVars,
xGrpBy = NULL,
y = NULL,
yVars = NULL,
yGrpBy = NULL,
method = 'TI',
lambda = .5,
totals = TRUE,
variance = TRUE) {
# --- input validation -------------------------------------------------------
# NOTE(review): class(db) can have length > 1; inherits() would be safer.
# Behavior kept as-is throughout.
if (!c(class(db) %in% c('FIA.Database', 'Remote.FIA.Database'))) {
stop('Must provide an `FIA.Database` or `Remote.FIA.Database`. See `readFIA` and/or `getFIA` to read and load your FIA data.')
}
if (!c('PLT_CN' %in% names(x))) {
stop('`PLT_CN` must be included in `x`. See our website for an example use case of `customPSE`.')
}
if (!is.null(y)) {
if (!c('PLT_CN' %in% names(y))) {
stop('`PLT_CN` must be included in `y`. See our website for an example use case of `customPSE`.')
}
}
if (!c('EVAL_TYP' %in% names(x))) {
stop('`EVAL_TYP` must be included in `x`. See our website for an example use case of `customPSE`.')
}
# Exactly one of TREE_BASIS / AREA_BASIS decides the adjustment factor used.
if (all(c('TREE_BASIS', 'AREA_BASIS') %in% names(x))) {
stop('Both `TREE_BASIS` and `AREA_BASIS` found in `x`, but only one is allowed. See our website for an example use case of `customPSE`.')
} else if (sum(c('TREE_BASIS', 'AREA_BASIS') %in% names(x)) != 1 ) {
stop('Neither `TREE_BASIS` or `AREA_BASIS` found in `x`, but one is required. See our website for an example use case of `customPSE`.')
}
if (!is.null(y)) {
if (all(c('TREE_BASIS', 'AREA_BASIS') %in% names(y))) {
stop('Both `TREE_BASIS` and `AREA_BASIS` found in `y`, but only one is allowed. See our website for an example use case of `customPSE`.')
} else if (sum(c('TREE_BASIS', 'AREA_BASIS') %in% names(y)) != 1) {
stop('Neither `TREE_BASIS` or `AREA_BASIS` found in `y`, but one is required. See our website for an example use case of `customPSE`.')
}
}
# Tree-level input needs SUBP+TREE ids; condition-level needs CONDID.
if ( 'TREE_BASIS' %in% names(x) & !all(c('SUBP', 'TREE') %in% names(x) )) {
stop('Both `SUBP` and `TREE` required for estimation of tree variables, but are missing from `x`. See our website for an example use case of `customPSE`.')
} else if ( 'AREA_BASIS' %in% names(x) & !c('CONDID' %in% names(x))) {
stop('`CONDID` required for estimation of condition variables, but is missing from `x`. See our website for an example use case of `customPSE`.')
}
if (!is.null(y)) {
if ( 'TREE_BASIS' %in% names(y) & !all(c('SUBP', 'TREE') %in% names(y) )) {
stop('Both `SUBP` and `TREE` required for estimation of tree variables, but are missing from `y`. See our website for an example use case of `customPSE`.')
} else if ( 'AREA_BASIS' %in% names(y) & !c('CONDID' %in% names(y))) {
stop('`CONDID` required for estimation of condition variables, but is missing from `y`. See our website for an example use case of `customPSE`.')
}
}
# --- load population tables and set up stratification -----------------------
req.tables <- c('PLOT', 'POP_EVAL', 'POP_EVAL_TYP', 'POP_ESTN_UNIT', 'POP_STRATUM', 'POP_PLOT_STRATUM_ASSGN')
remote <- ifelse(class(db) == 'Remote.FIA.Database', 1, 0)
db <- readRemoteHelper(db$states, db, remote, req.tables, nCores = 1)
# mr: whether db is a "most recent" subset (estimates combined per state later).
mr <- checkMR(db, remote = ifelse(class(db) == 'Remote.FIA.Database', 1, 0))
pops <- handlePops(db, evalType = x$EVAL_TYP[[1]], method, mr)
# --- reduce x (and y) to id columns, estimate variables and groups ----------
if ('TREE_BASIS' %in% names(x)) {
x.id.vars <- c('SUBP', 'TREE')
} else {
x.id.vars <- 'CONDID'
}
xVars <- rlang::enquos(xVars)
xGrpBy <- rlang::enquos(xGrpBy)
x <- dplyr::select(x, PLT_CN,
dplyr::all_of(x.id.vars),
dplyr::any_of(c('TREE_BASIS', 'AREA_BASIS')),
dplyr::any_of(c('ONEORTWO')),
!!!xVars, !!!xGrpBy) %>%
dplyr::distinct()
if (!is.null(y)) {
if ('TREE_BASIS' %in% names(y)) {
y.id.vars <- c('SUBP', 'TREE')
} else {
y.id.vars <- 'CONDID'
}
yVars <- rlang::enquo(yVars)
yGrpBy <- rlang::enquos(yGrpBy)
y <- dplyr::select(y, PLT_CN,
dplyr::all_of(y.id.vars),
dplyr::any_of(c('TREE_BASIS', 'AREA_BASIS')),
!!yVars, !!!yGrpBy) %>%
dplyr::distinct()
}
# Resolve grouping quosures to column names; y groups must nest in x groups.
xGrpBy <- names(dplyr::select(x, !!!xGrpBy))
if (!is.null(y)) {
yGrpBy <- names(dplyr::select(y, !!!yGrpBy))
if (!all(yGrpBy %in% xGrpBy)) {
stop('All grouping variables listed in `yGrpBy` must be included in `xGrpBy`. More specifically, `yGrpBy` must be equal to or a subset of `xGrpBy`. ')
}
}
# --- plot-level and estimation-unit-level sums ------------------------------
xPlt <- sumToPlot(x, pops, xGrpBy)
if (!is.null(y)) yPlt <- sumToPlot(y, pops, yGrpBy)
if (!is.null(y)) {
xGrpBy <- c('YEAR', xGrpBy)
yGrpBy <- c('YEAR', yGrpBy)
eu.sums <- sumToEU(db, xPlt, yPlt, pops, xGrpBy, yGrpBy, method = method)
xEst <- eu.sums$x
yEst <- eu.sums$y
} else {
xGrpBy <- c('YEAR', xGrpBy)
eu.sums <- sumToEU(db,
x = xPlt,
y = NULL,
pops = pops,
x.grpBy = xGrpBy,
y.grpBy = NULL,
method = method)
xEst <- eu.sums$x
}
# Combine "most recent" evaluations across states when applicable.
if (mr) {
xEst <- combineMR(xEst, xGrpBy)
if (!is.null(y)) yEst <- combineMR(yEst, yGrpBy)
}
# --- ratio-of-means estimation (y supplied) ---------------------------------
if (!is.null(y)) {
xGrpSyms <- dplyr::syms(xGrpBy)
yGrpSyms <- dplyr::syms(yGrpBy)
# Column symbol sets, keyed by the sumToEU naming scheme:
# *_mean totals, *_var variances, *_cv covariances of x with y.
xTotalSyms <- dplyr::syms(names(xEst)[stringr::str_sub(names(xEst), -5, -1) == '_mean'])
xVarSyms <- dplyr::syms(names(xEst)[stringr::str_sub(names(xEst), -4, -1) == '_var'])
xCovSyms <- dplyr::syms(names(xEst)[stringr::str_sub(names(xEst), -3, -1) == '_cv'])
yTotalSyms <- dplyr::sym(names(yEst)[stringr::str_sub(names(yEst), -5, -1) == '_mean'])
yVarSyms <- dplyr::sym(names(yEst)[stringr::str_sub(names(yEst), -4, -1) == '_var'])
ratioSyms <- dplyr::syms(stringr::str_c(names(xEst)[stringr::str_sub(names(xEst), -5, -1) == '_mean'], '_RATIO'))
ratioVarSyms <- dplyr::syms(stringr::str_c(names(xEst)[stringr::str_sub(names(xEst), -5, -1) == '_mean'], '_RATIO_VAR'))
# Collapse estimation units within groups.
xEst <- xEst %>%
dplyr::select(-c(ESTN_UNIT_CN, AREA_USED)) %>%
dplyr::group_by(!!!xGrpSyms) %>%
dplyr::summarize(dplyr::across(dplyr::everything(), sum, na.rm = TRUE))
yEst <- yEst %>%
dplyr::select(-c(ESTN_UNIT_CN, AREA_USED, P2PNTCNT_EU)) %>%
dplyr::group_by( !!!yGrpSyms) %>%
dplyr::summarize(dplyr::across(dplyr::everything(), sum, na.rm = TRUE))
# Ratio = x/y; its variance follows the standard ratio-estimator
# delta-method formula (var_x + R^2 var_y - 2 R cov_xy) / y^2,
# clamped at zero; *_SE columns are 100 * sqrt(var) / |estimate|.
out <- left_join(xEst, yEst, by = yGrpBy) %>%
dplyr::mutate(dplyr::across(c(!!!xTotalSyms),
.fns = ~ .x / !!yTotalSyms,
.names = "{.col}_RATIO")) %>%
dplyr::mutate(dplyr::across(c(!!!xTotalSyms),
.fns = ~ (1 / ((!!yTotalSyms)^2)) * (get(stringr::str_c(stringr::str_sub(dplyr::cur_column(), 1, -6), '_var')) + ((.x/!!yTotalSyms)^2 * !!yVarSyms) - (2 * (.x/!!yTotalSyms) * get(stringr::str_c(stringr::str_sub(dplyr::cur_column(), 1, -6), '_cv'))) ),
.names = "{.col}_RATIO_VAR")) %>%
dplyr::mutate(dplyr::across(c(!!!ratioVarSyms),
.fns = ~ case_when(.x < 0 ~ 0,
TRUE ~ .x))) %>%
dplyr::mutate(dplyr::across(c(!!!xTotalSyms),
.fns = ~ sqrt(get(stringr::str_c(stringr::str_sub(dplyr::cur_column(), 1, -6), '_var'))) / abs(.x) * 100,
.names = "{.col}_SE")) %>%
dplyr::mutate(dplyr::across(c(!!!ratioSyms),
.fns = ~ sqrt(get(stringr::str_c(dplyr::cur_column(), '_VAR'))) / abs(.x) * 100,
.names = "{.col}_SE"))
out <- formatNames(out, xGrpBy)
} else {
# --- totals only (no denominator) -----------------------------------------
xGrpSyms <- dplyr::syms(xGrpBy)
out <- xEst %>%
dplyr::select(-c(ESTN_UNIT_CN, AREA_USED)) %>%
dplyr::group_by(!!!xGrpSyms) %>%
dplyr::summarize(dplyr::across(dplyr::everything(), sum, na.rm = TRUE))
out <- formatNames(out, xGrpBy)
}
# --- column pruning and final formatting ------------------------------------
if (!totals) {
out <- out[,!stringr::str_detect(names(out), '_TOTAL')]
}
# Report either variances or standard errors, never both.
if (variance) {
out <- out[,!stringr::str_detect(names(out), '_SE')]
} else {
out <- out[,!stringr::str_detect(names(out), '_VAR')]
}
out <- out %>%
dplyr::ungroup() %>%
dplyr::mutate_if(is.factor, as.character) %>%
tidyr::drop_na(!!!xGrpSyms) %>%
dplyr::arrange(YEAR) %>%
as_tibble()
return(out)
}
# Rename estimator output columns to their user-facing names:
#   *_mean -> *_TOTAL, *_var -> *_TOTAL_VAR, *_mean_SE -> *_SE,
#   *_mean_RATIO -> *_RATIO, *_mean_RATIO_VAR -> *_RATIO_VAR,
#   *_mean_RATIO_SE -> *_RATIO_SE, P2PNTCNT_EU -> N, nPlots.x/.y -> nPlots_x/_y,
# then order columns: groups, ratios, totals, SEs, variances, plot counts, N.
# All suffix tests run against the ORIGINAL names, captured once up front.
formatNames <- function(x, grpBy) {
  nms <- names(x)

  mean.slots <- endsWith(nms, '_mean')
  names(x)[mean.slots] <- stringr::str_c(stringr::str_sub(nms[mean.slots], 1, -6), '_TOTAL')
  total.syms <- dplyr::syms(names(x)[mean.slots])

  var.slots <- endsWith(nms, '_var')
  names(x)[var.slots] <- stringr::str_c(stringr::str_sub(nms[var.slots], 1, -5), '_TOTAL_VAR')
  total.var.syms <- dplyr::syms(names(x)[var.slots])

  se.slots <- endsWith(nms, '_mean_SE')
  names(x)[se.slots] <- stringr::str_remove(nms[se.slots], '_mean')
  total.se.syms <- dplyr::syms(names(x)[se.slots])

  ratio.slots <- endsWith(nms, '_mean_RATIO')
  names(x)[ratio.slots] <- stringr::str_c(stringr::str_sub(nms[ratio.slots], 1, -12), '_RATIO')
  ratio.syms <- dplyr::syms(names(x)[ratio.slots])

  ratio.var.slots <- endsWith(nms, '_mean_RATIO_VAR')
  names(x)[ratio.var.slots] <- stringr::str_c(stringr::str_sub(nms[ratio.var.slots], 1, -16), '_RATIO_VAR')
  ratio.var.syms <- dplyr::syms(names(x)[ratio.var.slots])

  ratio.se.slots <- endsWith(nms, '_mean_RATIO_SE')
  names(x)[ratio.se.slots] <- stringr::str_remove(nms[ratio.se.slots], '_mean')
  ratio.se.syms <- dplyr::syms(names(x)[ratio.se.slots])

  names(x)[names(x) == 'P2PNTCNT_EU'] <- 'N'
  names(x)[names(x) == 'nPlots.x'] <- 'nPlots_x'
  names(x)[names(x) == 'nPlots.y'] <- 'nPlots_y'

  grp.syms <- dplyr::syms(grpBy)
  out <- dplyr::select(x, !!!grp.syms,
                       !!!ratio.syms,
                       !!!total.syms,
                       !!!ratio.se.syms,
                       !!!total.se.syms,
                       !!!ratio.var.syms,
                       !!!total.var.syms,
                       dplyr::any_of(c('nPlots_x', 'nPlots_y')),
                       N)
  return(out)
}
# Pipe that locates abbreviations in an Instance's text and, optionally,
# replaces them with their long forms. Abbreviation -> expansion pairs are
# read from a per-language JSON resource ("abbrev.<lang>.json") located under
# resourcesAbbreviationsPath. The instance's language property must have been
# set by an earlier pipe (see alwaysBeforeDeps).
AbbreviationPipe <- R6Class(
  "AbbreviationPipe",
  inherit = GenericPipe,
  public = list(
    # propertyName: property under which located abbreviations are stored.
    # propertyLanguageName: property holding the instance's language code.
    # replaceAbbreviations: if TRUE, expand located abbreviations in the text.
    # resourcesAbbreviationsPath: resource folder; when NULL, falls back to
    #   bdpar.Options "resources.abbreviations.path".
    initialize = function(propertyName = "abbreviation",
                          propertyLanguageName = "language",
                          alwaysBeforeDeps = list("GuessLanguagePipe"),
                          notAfterDeps = list(),
                          replaceAbbreviations = TRUE,
                          resourcesAbbreviationsPath = NULL) {
      # Argument type checks; bdpar.log(level = "FATAL") aborts execution.
      if (!"character" %in% class(propertyName)) {
        bdpar.log(message = paste0("Checking the type of the 'propertyName' variable: ",
                                   class(propertyName)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "initialize")
      }
      if (!"character" %in% class(propertyLanguageName)) {
        bdpar.log(message = paste0("Checking the type of the 'propertyLanguageName' variable: ",
                                   class(propertyLanguageName)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "initialize")
      }
      if (!"list" %in% class(alwaysBeforeDeps)) {
        bdpar.log(message = paste0("Checking the type of the 'alwaysBeforeDeps' variable: ",
                                   class(alwaysBeforeDeps)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "initialize")
      }
      if (!"list" %in% class(notAfterDeps)) {
        bdpar.log(message = paste0("Checking the type of the 'notAfterDeps' variable: ",
                                   class(notAfterDeps)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "initialize")
      }
      if (!"logical" %in% class(replaceAbbreviations)) {
        bdpar.log(message = paste0("Checking the type of the 'replaceAbbreviations' variable: ",
                                   class(replaceAbbreviations)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "initialize")
      }
      super$initialize(propertyName, alwaysBeforeDeps, notAfterDeps)
      private$propertyLanguageName <- propertyLanguageName
      if (is.null(resourcesAbbreviationsPath)) {
        # Fall back to the globally configured resources path.
        if (!bdpar.Options$isSpecificOption("resources.abbreviations.path") ||
            is.null(bdpar.Options$get("resources.abbreviations.path"))) {
          bdpar.log(message = paste0("Path of abbreviations resources is ",
                                     "neither defined in initialize or in ",
                                     "bdpar.Options"),
                    level = "FATAL",
                    className = class(self)[1],
                    methodName = "initialize")
        } else {
          resourcesAbbreviationsPath <- bdpar.Options$get("resources.abbreviations.path")
        }
      }
      if (!"character" %in% class(resourcesAbbreviationsPath)) {
        bdpar.log(message = paste0("Checking the type of the 'resourcesAbbreviationsPath' variable: ",
                                   class(resourcesAbbreviationsPath)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "initialize")
      }
      private$resourcesAbbreviationsPath <- resourcesAbbreviationsPath
      private$replaceAbbreviations <- replaceAbbreviations
    },
    # Process one Instance: detect (and optionally expand) abbreviations,
    # store them under the pipe's property, and invalidate the instance when
    # its text ends up empty.
    pipe = function(instance) {
      if (!"Instance" %in% class(instance)) {
        bdpar.log(message = paste0("Checking the type of the 'instance' variable: ",
                                   class(instance)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "pipe")
      }
      languageInstance <- instance$getSpecificProperty(self$getPropertyLanguageName())
      # FIX: test is.null() first and use anyNA(); the original evaluated
      # is.na(NULL) (zero-length), which makes `||` raise an error.
      if (is.null(languageInstance) ||
          anyNA(languageInstance) ||
          "Unknown" %in% languageInstance) {
        instance$addProperties(list(), super$getPropertyName())
        bdpar.log(message = paste0("The file: ", instance$getPath(),
                                   " has not language property"),
                  level = "WARN",
                  className = class(self)[1],
                  methodName = "pipe")
        return(instance)
      }
      JsonFile <- paste0(self$getResourcesAbbreviationsPath(),
                         "/abbrev.",
                         languageInstance,
                         ".json")
      jsonData <- Bdpar[["private_methods"]][["resourceHandler"]]()$isLoadResource(JsonFile)
      if (is.null(jsonData)) {
        # No abbreviation resource for this language: store an empty result.
        instance$addProperties(list(), super$getPropertyName())
        bdpar.log(message = paste0("The file: ", instance$getPath(),
                                   " has not an abbreviationsJsonFile ",
                                   "to apply to the language ->",
                                   languageInstance),
                  level = "WARN",
                  className = class(self)[1],
                  methodName = "pipe")
        return(instance)
      }
      abbreviationsLocated <- list()
      for (abbreviation in names(jsonData)) {
        if (self$findAbbreviation(instance$getData(), abbreviation)) {
          abbreviationsLocated <- list.append(abbreviationsLocated,
                                              abbreviation)
        }
        if (private$replaceAbbreviations &&
            abbreviation %in% abbreviationsLocated) {
          instance$setData(
            trimws(x = self$replaceAbbreviation(abbreviation,
                                                as.character(jsonData[abbreviation]),
                                                instance$getData())))
        }
      }
      instance$addProperties(paste(abbreviationsLocated), super$getPropertyName())
      # FIX: check is.null()/length first; the original called
      # `is.na(instance$getData()) || ...`, which errors when the data is
      # NULL (zero-length) or has more than one element.
      finalData <- instance$getData()
      if (is.null(finalData) ||
          length(finalData) == 0 ||
          all(is.na(finalData) | finalData == "")) {
        message <- paste0("The file: ", instance$getPath(),
                          " has data empty on pipe Abbreviation")
        instance$addProperties(message, "reasonToInvalidate")
        bdpar.log(message = message,
                  level = "WARN",
                  className = class(self)[1],
                  methodName = "pipe")
        instance$invalidate()
        return(instance)
      }
      instance
    },
    # TRUE when `abbreviation` occurs in `data` as a standalone token,
    # i.e. bounded by whitespace/punctuation or string boundaries.
    findAbbreviation = function(data, abbreviation) {
      if (!"character" %in% class(data)) {
        bdpar.log(message = paste0("Checking the type of the 'data' variable: ",
                                   class(data)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "findAbbreviation")
      }
      if (!"character" %in% class(abbreviation)) {
        bdpar.log(message = paste0("Checking the type of the 'abbreviation' variable: ",
                                   class(abbreviation)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "findAbbreviation")
      }
      grepl(pattern = rex::regex(private$abbreviationRegex(abbreviation)),
            x = data,
            perl = TRUE)
    },
    # Replace standalone occurrences of `abbreviation` in `data` with
    # `extendedAbbreviation`, padded with spaces (callers trimws() afterwards).
    replaceAbbreviation = function(abbreviation, extendedAbbreviation, data) {
      if (!"character" %in% class(abbreviation)) {
        bdpar.log(message = paste0("Checking the type of the 'abbreviation' variable: ",
                                   class(abbreviation)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "replaceAbbreviation")
      }
      if (!"character" %in% class(extendedAbbreviation)) {
        bdpar.log(message = paste0("Checking the type of the 'extendedAbbreviation' variable: ",
                                   class(extendedAbbreviation)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "replaceAbbreviation")
      }
      if (!"character" %in% class(data)) {
        bdpar.log(message = paste0("Checking the type of the 'data' variable: ",
                                   class(data)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "replaceAbbreviation")
      }
      gsub(rex::regex(private$abbreviationRegex(abbreviation)),
           paste(" ", extendedAbbreviation, " ", sep = ""),
           data,
           perl = TRUE)
    },
    getPropertyLanguageName = function() {
      private$propertyLanguageName
    },
    getResourcesAbbreviationsPath = function() {
      private$resourcesAbbreviationsPath
    },
    setResourcesAbbreviationsPath = function(path) {
      if (!"character" %in% class(path)) {
        bdpar.log(message = paste0("Checking the type of the 'path' variable: ",
                                   class(path)),
                  level = "FATAL",
                  className = class(self)[1],
                  methodName = "setResourcesAbbreviationsPath")
      }
      private$resourcesAbbreviationsPath <- path
    }
  ),
  private = list(
    propertyLanguageName = "",
    resourcesAbbreviationsPath = "",
    replaceAbbreviations = TRUE,
    # Build the PCRE shared by findAbbreviation()/replaceAbbreviation(): the
    # abbreviation must be delimited by whitespace, punctuation, or a string
    # boundary. (Deduplicated from the two methods; also drops a bogus
    # `sep = ""` argument that paste0() was treating as one more string.)
    abbreviationRegex = function(abbreviation) {
      paste0("(?:[[:space:]]|[\"><\u00A1?\u00BF!;:,.'-]|^)(",
             rex::escape(abbreviation),
             ")[;:?\"!,.'>-]?(?=(?:[[:space:]]|$|>))")
    }
  )
)
# Base-R stand-in for stringr::str_extract_all(): collect every match of
# `pattern` in each element of `x` and bind the per-element match vectors
# into a matrix, one row per input string.
str_extract_base <- function(x, pattern) {
  all_matches <- regmatches(x, gregexpr(pattern, x))
  do.call("rbind", all_matches)
}
# Base-R stand-in for stringr::str_split_fixed(): split each element of `x`
# on `pattern` into at most `n` pieces; everything past the first n - 1
# pieces is glued back together with a single space. Returns a matrix with
# one row per input string.
str_split_fixed_base <- function(x, pattern, n) {
  pieces <- strsplit(x, pattern)
  capped <- lapply(pieces, function(parts) {
    leading <- utils::head(parts, n - 1)
    remainder <- paste0(utils::tail(parts, -(n - 1)), collapse = " ")
    c(leading, remainder)
  })
  do.call("rbind", capped)
}
# db-benchmark launcher: joins the control tables (data cases, timeouts,
# solutions) into one run matrix and hands it to launch().
library("data.table")
if (!packageVersion("data.table") >= "1.13.0")
  stop("db-benchmark launcher script depends on recent data.table features, install at least 1.13.0.")
source("./_launcher/launcher.R")
.nodename = Sys.info()[["nodename"]]
# MOCKUP=true makes launch() dry-run instead of executing solutions.
mockup = as.logical(Sys.getenv("MOCKUP", "false"))
# getenv() (from launcher.R) reads a list-valued environment variable.
run_tasks = getenv("RUN_TASKS")
if (!length(run_tasks)) {
  cat("No benchmark tasks to run\n")
  q("no")
}
run_solutions = getenv("RUN_SOLUTIONS")
if (!length(run_solutions)) {
  cat("No benchmark solutions to run\n")
  q("no")
}
data = fread("./_control/data.csv", logical01=TRUE, colClasses=c("character","character","character","character","character","character","logical"))
if (anyDuplicated(data[["data"]]))
  stop("_control/data.csv contains duplicated data cases")
# Keep active cases, restrict to requested tasks (NA marks missing tasks),
# and drop the bookkeeping 'active' column.
data[active==TRUE,
  ][run_tasks, on="task", nomatch=NA
  ][, c("active") := NULL
  ][] -> data
if (any(is.na(data$data))) stop("missing entries in ./_control/data.csv for some tasks")
timeout = fread("./_control/timeout.csv", colClasses=c("character","character","numeric"))
timeout[run_tasks, on="task", nomatch=NA
  ] -> timeout
if (any(is.na(timeout$minutes))) stop("missing entries in ./_control/timeout.csv for some tasks")
solution = fread("./_control/solutions.csv")
solution[run_solutions, on="solution", nomatch=NA
  ] -> solution
if (any(is.na(solution$task))) stop("missing entries in ./_control/solutions.csv for some solutions")
# Cartesian join: every requested solution x every data case of its task.
dt = solution[data, on="task", allow.cartesian=TRUE, nomatch=NULL]
dt[, "nodename" := .nodename]
# Data-case names encode the row count as the 2nd '_'-separated field.
dt[, "in_rows" := sapply(strsplit(data, split="_", fixed=TRUE), `[[`, 2L)]
stopifnot(dt$in_rows == dt$nrow)
dt[timeout, "timeout_s" := i.minutes*60, on=c("task","in_rows")]
if (any(is.na(dt$timeout_s))) stop("missing entries in ./_control/timeout.csv for some tasks, detected after joining to solutions and data to run")
lookup_run_batch(dt)
cat("Benchmark solutions to run: ", dt[is.na(run_batch), paste(unique(solution),collapse=", ")], "\n", sep="")
# Honor a stop request, pause if requested, then re-check the stop flag
# after the pause before launching (presumably deliberate — confirm).
is.stop()
is.pause()
is.stop()
launch(dt, mockup=mockup)
q("no")
context('Test that patch labelling handles corner cases')
test_that("Patch counting works and handles weird matrices", {
  # label() only accepts logical matrices: numeric matrices and vectors error.
  expect_error(label(diag(10)))
  expect_error(label(seq.int(10)))
  # Diagonal plus one corner cell: with wrap = FALSE the corner is its own
  # patch; with wrap = TRUE it joins the diagonal patch across the edge.
  a <- diag(5) > 0; a[5,1] <- TRUE
  t <- label(a, wrap = FALSE)
  expect_true((t[1,1] != t[5,1]) != t[5,5])
  t <- label(a, wrap = TRUE)
  expect_true((t[1,1] == t[5,1]) == t[5,5])
  # An 8-neighbor mask (minus center) connects the whole diagonal: one patch.
  nbm <- label(diag(5) > 0, nbmask = matrix(c(1,1,1,1,0,1,1,1,1),
                                            ncol = 3, nrow = 3))
  expect_true(unique(na.omit(as.vector(nbm))) == 1)
  # All-FALSE -> all NA; all-TRUE -> a single patch labelled 1.
  expect_true( all(is.na(label(diag(10) == 2))) )
  expect_true( unique(as.vector(label(diag(10) < 2))) == 1 )
  # patchsizes() on a merged list of logical matrices; errors on numeric.
  testlist <- list(diag(10) > 0, diag(10) > 0 )
  expect_true( all(patchsizes(testlist, merge = TRUE) == 1) )
  expect_error( patchsizes(diag(10)) )
  # Single-column (and transposed single-row) matrices keep psd and dims.
  ex <- matrix(seq.int(5) > 2, ncol = 1)
  expect_true(attr(label(ex), "psd") == 3)
  expect_true(attr(label(t(ex)), "psd") == 3)
  expect_true(all(dim(label(ex)) == dim(ex)))
  # Horizontal wrapping: cells on opposite vertical edges connect.
  ex <- matrix(c(1, 0, 0, 1,
                 0, 0, 0, 1,
                 0, 0, 0, 1), byrow = TRUE, ncol = 4) > 0
  test <- label(ex, wrap = TRUE)
  expect_true(test[1,1] == test[2, 4])
  test <- label(ex, wrap = FALSE)
  expect_true(test[1,1] != test[2, 4])
  expect_true(test[1,4] == test[2, 4])
  # Vertical wrapping: cells on opposite horizontal edges connect.
  ex <- matrix(c(1, 0, 0,
                 1, 0, 0,
                 1, 0, 0,
                 1, 0, 1), byrow = TRUE, ncol = 3) > 0
  test <- label(ex, wrap = TRUE)
  expect_true(test[1,1] == test[4, 3])
  test <- label(ex, wrap = FALSE)
  expect_true(test[1,1] != test[4, 3])
  expect_true(test[4,1] == test[3, 1])
  # A patch spanning the full width sets the "percolation" attribute.
  ex <- matrix(c(1, 1, 1, 1,
                 0, 0, 0, 1,
                 0, 0, 0, 1), byrow = TRUE, ncol = 4) > 0
  test <- label(ex)
  expect_true(attr(test, "percolation"))
})
plotccda.q95 <-
# Plot the randomized null distribution (RCDP) of LDA percentages for one
# grouping of a ccda.main() result, with vertical markers for the observed
# ratio (red) and its 95% quantile (blue).
#
# x:  result of ccda.main(..., return.RCDP = TRUE).
# pl: "max" to plot the grouping with the largest difference, or the index
#     of a specific grouping in x$nameslist.
function (x, pl = "max")
{
  if (pl != "max") {
    # FIX: `all(1:length(x$nameslist) != pl)` misbehaves on an empty
    # nameslist (1:0 is c(1, 0)); %in% over seq_along() is exact.
    if (!pl %in% seq_along(x$nameslist)) {
      stop("pl is not a valid grouping number")
    }
  }
  if (is.null(x$RCDP)) {
    stop("Missing RCDP. Run and save ccda.main with the option return.RCDP=TRUE !")
  }
  k = which(x$difference == max(x$difference))
  # FIX: scalar condition, so && (short-circuit) instead of elementwise &.
  if (length(k) > 1 && pl == "max") {
    stop("There are multiple maxima. Please specify which one you mean by entering a number.")
  }
  par(mfrow = c(1, 1))
  # A numeric pl overrides the automatically selected maximum.
  if (pl != "max") {
    k = pl
  }
  plot(density(x$RCDP[k, ] * 100), xlim = range(x$RCDP[k, ] *
       100, x$ratio[k] * 100), lwd = 2, xlab = "LDA-percentages (%)",
       main = "")
  abline(v = x$ratio[k] * 100, col = "red", lwd = 2)
  abline(v = x$q95[k] * 100, col = "blue", lwd = 2)
  # Legend text is scaled so "difference=xx%" always fits in the plot region.
  legend("topright", c("ratio", "q95", paste("difference=",
         round(x$difference[k] * 100, digits = 2), "%", sep = "")),
         col = c("red", "blue", "white"), lty = 1, lwd = 2,
         cex = min(1, min(0.4*par('pin')[1]/strwidth("difference=xx%","inches"),
                          par('pin')[2]*(0.08)/strheight("difference=xx%","inches")))
  )
}
# Bounding operation on a permutation `v` of 1:(m + n): discard the first
# `left` and last `right` rank positions, map the surviving ranks through an
# indicator vector (1 for the first m slots, 0 for the last n), then prepend
# `left` ones / append `right` zeros and return the cumulative sum.
boundingOperation <- function(v,
                              left,
                              right,
                              m,
                              n) {
  total <- length(v)
  kept <- v
  if (left != 0) {
    kept <- setdiff(kept, seq_len(left))
  }
  if (right != 0) {
    kept <- setdiff(kept, seq.int(total - right + 1, total))
  }
  indicator <- c(rep(1, m), rep(0, n))
  cumsum(c(rep(1, left), indicator[kept], rep(0, right)))
}
# Simulate empirical bounding functions: for each of `nsim` random
# permutations of the m + n ranks, evaluate boundingOperation() at every
# threshold in `tv.seq` (cut points given by binomial quantiles at level
# 1 - alpha/2), then reduce across simulations with the elementwise maximum.
# Returns a list (one element per entry of tv.seq) of pointwise maxima.
empiricalBF <- function(tv.seq,
                        nsim = 1000,
                        m = 100,
                        n = 100,
                        alpha = 0.05) {
  # NOTE: uses the current RNG state; callers wanting reproducibility must
  # set.seed() themselves.
  l <- lapply(1:nsim, function(i) {v <- sample(1:(m+n)); lapply(tv.seq, function(tv) boundingOperation(v = v,
                                  left = stats::qbinom(prob = tv, size = m, p = 1-alpha/2),
                                  right = stats::qbinom(prob = tv, size = n, p = 1-alpha/2),
                                  m = m, n = n))})
  # Elementwise (per-threshold, per-position) maximum over all simulations.
  ll <- Reduce(x = l, f = function(x, y) mapply(x, y, FUN = function(xx,yy) pmax(xx,yy), SIMPLIFY = FALSE))
  return(ll)
}
# Pairwise intersection of two geometry vectors: inputs are sanitized and
# recycled to a common length; the result carries the combined output CRS.
geos_intersection <- function(geom1, geom2) {
  recycled <- recycle_common(list(sanitize_geos_geometry(geom1), sanitize_geos_geometry(geom2)))
  new_geos_geometry(
    .Call(geos_c_intersection, recycled[[1]], recycled[[2]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Pairwise difference (geom1 minus geom2) of two geometry vectors.
geos_difference <- function(geom1, geom2) {
  recycled <- recycle_common(list(sanitize_geos_geometry(geom1), sanitize_geos_geometry(geom2)))
  new_geos_geometry(
    .Call(geos_c_difference, recycled[[1]], recycled[[2]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Pairwise symmetric difference of two geometry vectors.
geos_sym_difference <- function(geom1, geom2) {
  recycled <- recycle_common(list(sanitize_geos_geometry(geom1), sanitize_geos_geometry(geom2)))
  new_geos_geometry(
    .Call(geos_c_sym_difference, recycled[[1]], recycled[[2]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Pairwise union of two geometry vectors.
geos_union <- function(geom1, geom2) {
  recycled <- recycle_common(list(sanitize_geos_geometry(geom1), sanitize_geos_geometry(geom2)))
  new_geos_geometry(
    .Call(geos_c_union, recycled[[1]], recycled[[2]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Fixed-precision variant of geos_intersection(): coordinates are snapped to
# a grid of size `grid_size` (recycled along with the geometries).
geos_intersection_prec <- function(geom1, geom2, grid_size) {
  recycled <- recycle_common(
    list(
      sanitize_geos_geometry(geom1),
      sanitize_geos_geometry(geom2),
      sanitize_double(grid_size)
    )
  )
  new_geos_geometry(
    .Call(geos_c_intersection_prec, recycled[[1]], recycled[[2]], recycled[[3]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Fixed-precision variant of geos_difference() (see geos_intersection_prec).
geos_difference_prec <- function(geom1, geom2, grid_size) {
  recycled <- recycle_common(
    list(
      sanitize_geos_geometry(geom1),
      sanitize_geos_geometry(geom2),
      sanitize_double(grid_size)
    )
  )
  new_geos_geometry(
    .Call(geos_c_difference_prec, recycled[[1]], recycled[[2]], recycled[[3]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Fixed-precision variant of geos_sym_difference().
geos_sym_difference_prec <- function(geom1, geom2, grid_size) {
  recycled <- recycle_common(
    list(
      sanitize_geos_geometry(geom1),
      sanitize_geos_geometry(geom2),
      sanitize_double(grid_size)
    )
  )
  new_geos_geometry(
    .Call(geos_c_sym_difference_prec, recycled[[1]], recycled[[2]], recycled[[3]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Fixed-precision variant of geos_union().
geos_union_prec <- function(geom1, geom2, grid_size) {
  recycled <- recycle_common(
    list(
      sanitize_geos_geometry(geom1),
      sanitize_geos_geometry(geom2),
      sanitize_double(grid_size)
    )
  )
  new_geos_geometry(
    .Call(geos_c_union_prec, recycled[[1]], recycled[[2]], recycled[[3]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Paths shared between each pair of (lineal) geometries.
geos_shared_paths <- function(geom1, geom2) {
  recycled <- recycle_common(list(sanitize_geos_geometry(geom1), sanitize_geos_geometry(geom2)))
  new_geos_geometry(
    .Call(geos_c_shared_paths, recycled[[1]], recycled[[2]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Snap the vertices of geom1 to geom2 within `tolerance` (default: a tiny
# machine-epsilon-based value); tolerance is recycled like the geometries.
geos_snap <- function(geom1, geom2, tolerance = .Machine$double.eps ^ 2) {
  recycled <- recycle_common(
    list(
      sanitize_geos_geometry(geom1),
      sanitize_geos_geometry(geom2),
      sanitize_double(tolerance)
    )
  )
  new_geos_geometry(
    .Call(geos_c_snap, recycled[[1]], recycled[[2]], recycled[[3]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Shortest clearance line between each pair of geometries; `prepare = TRUE`
# asks the C layer to use a prepared geometry (scalar flag, not recycled).
geos_clearance_line_between <- function(geom1, geom2, prepare = FALSE) {
  recycled <- recycle_common(
    list(
      sanitize_geos_geometry(geom1),
      sanitize_geos_geometry(geom2)
    )
  )
  prepare <- sanitize_logical_scalar(prepare)
  new_geos_geometry(
    .Call(geos_c_clearance_line_between, recycled[[1]], recycled[[2]], prepare),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Largest-empty-circle spec: a geometry describing the largest circle that
# avoids `geom` while staying within `boundary`, computed to `tolerance`.
geos_largest_empty_circle_spec <- function(geom, boundary, tolerance) {
  recycled <- recycle_common(
    list(
      sanitize_geos_geometry(geom),
      sanitize_geos_geometry(boundary),
      sanitize_double(tolerance)
    )
  )
  new_geos_geometry(
    .Call(geos_c_largest_empty_circle, recycled[[1]], recycled[[2]], recycled[[3]]),
    crs = wk_crs_output(recycled[[1]], recycled[[2]])
  )
}
# Convenience wrapper around geos_largest_empty_circle_spec() returning a
# wk::crc record (center + radius) instead of the raw spec geometry.
geos_largest_empty_crc <- function(geom, boundary, tolerance) {
  spec <- geos_largest_empty_circle_spec(geom, boundary, tolerance)
  # Center = start point of the spec; radius = length of the spec segment
  # (presumably center-to-nearest-obstacle — confirm against the C layer).
  xy <- unclass(as_xy(geos_point_start(spec)))
  wk::crc(
    xy$x, xy$y,
    geos_length(spec),
    crs = attr(spec, "crs", exact = TRUE)
  )
}
# Bridge suit-split probabilities: how `missingCards` outstanding cards are
# likely to divide between West (holding cards_W unknown cards) and East
# (cards_E). Plots a bar chart and returns the probability table.
suitSplit <- function(missingCards = 5, cards_W = 13, cards_E = 13) {
  outTable <- tibble(
    "Cards held by West" = 0:missingCards,
    "Cards held by East" = missingCards:0,
    Probability = 0
  )
  for (i in outTable$"Cards held by West") {
    unknown_W <- i
    unknown_E <- missingCards - i
    # Hypergeometric-style probability of an (unknown_W, unknown_E) split;
    # when the hands are the same size, asymmetric splits are doubled
    # because i-j and j-i are later collapsed into one "either hand" row.
    temp <- factorial(unknown_W + unknown_E) / (factorial(unknown_W) * factorial(unknown_E)) *
      (factorial(cards_W) * factorial(cards_E) * factorial(cards_W + cards_E - unknown_W - unknown_E)) /
      (factorial(cards_W + cards_E) * factorial(cards_W - unknown_W) * factorial(cards_E - unknown_E)) *
      ifelse(cards_W == cards_E, (2 - abs(unknown_W == unknown_E)), 1)
    outTable[i + 1, "Probability"] <- round(temp, 2)
  }
  if (cards_W == cards_E) {
    # Symmetric case: keep only the top half (i-j and j-i are equivalent).
    outTable <- slice(outTable, 0:ceiling(nrow(outTable) / 2))
    colnames(outTable) <- c("Cards in one hand", "Cards in other hand", "Probability")
  }
  xaxis <- outTable[, 1] %>%
    unname() %>%
    unlist()
  if (cards_W == cards_E) {
    subtitleText <- glue::glue("Symmetrical probabilities reflecting {cards_W} unknown cards held by both West and East")
  } else {
    subtitleText <- glue::glue("Asymmetrical probabilities reflecting {cards_W} unknown cards in West and {cards_E} in East")
  }
  graph <- ggplot(outTable) +
    geom_col(aes(x = xaxis, y = Probability, fill = factor(xaxis + 1))) +
    geom_label(aes(
      x = xaxis, y = Probability, label = scales::percent(Probability, accuracy = 0.1),
      vjust = ifelse(Probability > 0.1, 1.6, -0.4)
    ), fill = "white", fontface = "bold", label.size = NA) +
    scale_y_continuous(labels = scales::percent) +
    scale_x_continuous(breaks = xaxis) +
    # NOTE(review): the first palette entry below looks truncated in this
    # copy of the source (dangling quote) — confirm against the original.
    scale_fill_manual(values = c(
      "
      "purple", "darkred", "lightblue", "darkgrey", "darkgreen", "lightgrey"
    )) +
    labs(
      title = glue::glue("Probable distribution of {missingCards} cards between two hands"),
      subtitle = subtitleText,
      x = colnames(outTable[1]), y = NULL
    ) +
    guides(fill = "none") +
    theme_minimal() +
    theme(
      title = element_text(size = rel(1.2)),
      axis.text = element_text(size = rel(1))
    )
  plot(graph)
  # Return probabilities formatted as percentage strings.
  outTable$Probability <- scales::percent(outTable$Probability, accuracy = 0.1)
  return(outTable)
}
# Backtracking line search along `stepDirection`: halve the trial step `to`
# until the likelihood gain Hto = L(psi + to*d) - L(psi) is non-negative,
# then refine the step using the directional derivative at psi.
# Returns the chosen step length as a plain numeric scalar.
StepSize <- function(W, psi, dX, stepDirection){
  to <- 1
  Hto <- LikFunk(W, psi + to * stepDirection, dX) - LikFunk(W, psi, dX)
  # NOTE(review): if no halved step ever improves the likelihood (e.g. d is
  # not an ascent direction), this loop never terminates — confirm callers
  # always pass an ascent direction.
  while (Hto < 0){
    to <- to / 2
    Hto <- LikFunk(W, psi + to * stepDirection, dX) - LikFunk(W, psi, dX)
  }
  # Directional derivative of the likelihood at psi along d, scaled by to.
  dH0 <- t(GradientL(W, psi, dX)) %*% stepDirection
  dH0to <- dH0 * to
  # Accept `to` when the gain is at least half the linear prediction;
  # otherwise take the minimizer of a quadratic fit through the two points.
  if (Hto >= dH0to / 2){t <- to} else {t <- to * dH0to / (2 * (dH0to + Hto))}
  return(as.numeric(t))
}
# Tests for clipr's write_clip()/read_clip() round-tripping. Most tests
# compare against platform-specific line endings (\r\n on Windows).
context("Clipr read and write")
test_that("single NA vectors don't cause error", {
  skip_if_not(is_clipr_available, skip_msg)
  expect_equivalent(write_clip(NA_character_), NA_character_)
  expect_equivalent(write_clip(NA_character_, return_new = TRUE), "NA")
  # Non-character NAs are coerced and warn.
  expect_warning(write_clip(NA))
  expect_warning(write_clip(NA_integer_))
  expect_warning(write_clip(NA_real_))
  expect_warning(write_clip(NA_complex_))
})
test_that("empty character in write_clip() causes no erroneous warning", {
  skip_if_not(is_clipr_available, skip_msg)
  expect_equivalent(write_clip(""), "")
  # NULL and zero-length non-character vectors warn and write "".
  expect_warning(null_res <- write_clip(NULL))
  expect_equivalent(null_res, NULL)
  expect_warning(null_new_res <- write_clip(NULL, return_new = TRUE))
  expect_equivalent(null_new_res, "")
  expect_equivalent(write_clip(character(0)), character(0))
  expect_equivalent(write_clip(character(0), return_new = TRUE), "")
  expect_warning(empty_res <- write_clip(integer(0)))
  expect_equivalent(empty_res, integer(0))
  expect_warning(empty_new_res <- write_clip(integer(0), return_new = TRUE))
  expect_equivalent(empty_new_res, "")
  expect_silent(clear_clip())
})
test_that("Render character vectors", {
  skip_if_not(is_clipr_available, skip_msg)
  single <- "hello, world!"
  expect_equivalent(write_clip(single), single)
})
test_that("Render default multiline vectors", {
  skip_if_not(is_clipr_available, skip_msg)
  multiline <- c("hello", "world!")
  inv_out <- write_clip(multiline, return_new = TRUE)
  # Default break is the platform newline.
  if (sys_type() == "Windows") {
    expect_equivalent(inv_out, "hello\r\nworld!")
  } else {
    expect_equivalent(inv_out, "hello\nworld!")
  }
  expect_equivalent(read_clip(), multiline)
})
test_that("Render custom multiline vectors", {
  skip_if_not(is_clipr_available, skip_msg)
  multiline <- c("hello", "world!")
  inv_out <- write_clip(multiline, breaks = ", ", return_new = TRUE)
  expect_equivalent(inv_out, "hello, world!")
  expect_equivalent(read_clip(), inv_out)
})
test_that("Render default data.frames", {
  skip_if_not(is_clipr_available, skip_msg)
  tbl <- data.frame(a = c(1,2,3), b = c(4,5,6))
  inv_out <- write_clip(tbl, return_new = TRUE)
  # Default column separator for tables is a tab.
  if (sys_type() == "Windows") {
    expect_equivalent(inv_out, "a\tb\r\n1\t4\r\n2\t5\r\n3\t6")
  } else {
    expect_equivalent(inv_out, "a\tb\n1\t4\n2\t5\n3\t6")
  }
  expect_equal(read_clip_tbl(), tbl)
})
test_that("Probable rownames are read", {
  skip_if_not(is_clipr_available, skip_msg)
  write_clip(mtcars)
  expect_equal(read_clip_tbl(), mtcars)
})
test_that("Render custom data.frames", {
  skip_if_not(is_clipr_available, skip_msg)
  tbl <- data.frame(a = c(1,2,3), b = c(4,5,6))
  inv_out <- write_clip(tbl, sep = ",", return_new = TRUE)
  if (sys_type() == "Windows") {
    expect_equivalent(inv_out, "a,b\r\n1,4\r\n2,5\r\n3,6")
  } else {
    expect_equivalent(inv_out, "a,b\n1,4\n2,5\n3,6")
  }
  expect_equivalent(read_clip(), c("a,b", "1,4", "2,5", "3,6"))
})
test_that("Render matricies", {
  skip_if_not(is_clipr_available, skip_msg)
  tbl <- matrix(c(1, 2, 3, 4, 5, 6), nrow = 3, ncol = 2)
  inv_out <- write_clip(tbl, return_new = TRUE)
  # Matrices render without a header row.
  if (sys_type() == "Windows") {
    expect_equivalent(inv_out, "1\t4\r\n2\t5\r\n3\t6")
  } else {
    expect_equivalent(inv_out, "1\t4\n2\t5\n3\t6")
  }
  expect_equivalent(read_clip(), c("1\t4", "2\t5", "3\t6"))
})
test_that("Render custom matricies", {
  skip_if_not(is_clipr_available, skip_msg)
  tbl <- matrix(c(1, 2, 3, 4, 5, 6), nrow = 3, ncol = 2)
  inv_out <- write_clip(tbl, sep = ",", return_new = TRUE)
  if (sys_type() == "Windows") {
    expect_equivalent(inv_out, "1,4\r\n2,5\r\n3,6")
  } else {
    expect_equivalent(inv_out, "1,4\n2,5\n3,6")
  }
  expect_equivalent(read_clip(), c("1,4", "2,5", "3,6"))
})
test_that("Render tables read from clipboard as data.frames", {
  skip_if_not(is_clipr_available, skip_msg)
  inv_out <- write_clip(iris[1:2, 1:4], return_new = TRUE)
  expect_equivalent(read_clip_tbl(), iris[1:2, 1:4])
})
test_that("Tables written with rownames add extra space for column names", {
  skip_if_not(is_clipr_available, skip_msg)
  d <- matrix(1:4, 2)
  rownames(d) <- c('a','b')
  colnames(d) <- c('c','d')
  df <- data.frame(c = c(1, 2), d = c(3, 4))
  rownames(df) <- c('a', 'b')
  # Rownames only: rows are prefixed with the row name.
  mat_rnames_out <- write_clip(d, row.names = TRUE, col.names = FALSE, return_new = TRUE)
  df_rnames_out <- write_clip(df, row.names = TRUE, col.names = FALSE, return_new = TRUE)
  if (sys_type() == "Windows") {
    expect_equivalent(mat_rnames_out, "a\t1\t3\r\nb\t2\t4")
    expect_equivalent(df_rnames_out, "a\t1\t3\r\nb\t2\t4")
  } else {
    expect_equivalent(mat_rnames_out, "a\t1\t3\nb\t2\t4")
    expect_equivalent(df_rnames_out, "a\t1\t3\nb\t2\t4")
  }
  # Both names: the header row gains a leading empty cell over the rownames.
  mat_bnames_out <- write_clip(d, row.names = TRUE, col.names = TRUE, return_new = TRUE)
  df_bnames_out <- write_clip(df, row.names = TRUE, col.names = TRUE, return_new = TRUE)
  if (sys_type() == "Windows") {
    expect_equivalent(mat_bnames_out, "\tc\td\r\na\t1\t3\r\nb\t2\t4")
    expect_equivalent(df_bnames_out, "\tc\td\r\na\t1\t3\r\nb\t2\t4")
  } else {
    expect_equivalent(mat_bnames_out, "\tc\td\na\t1\t3\nb\t2\t4")
    expect_equivalent(df_bnames_out, "\tc\td\na\t1\t3\nb\t2\t4")
  }
  # No names at all: plain cell grid.
  mat_nonames_out <- write_clip(d, row.names = FALSE, col.names = FALSE, return_new = TRUE)
  df_nonames_out <- write_clip(df, row.names = FALSE, col.names = FALSE, return_new = TRUE)
  if (sys_type() == "Windows") {
    expect_equivalent(mat_nonames_out, "1\t3\r\n2\t4")
    expect_equivalent(df_nonames_out, "1\t3\r\n2\t4")
  } else {
    expect_equivalent(mat_nonames_out, "1\t3\n2\t4")
    expect_equivalent(df_nonames_out, "1\t3\n2\t4")
  }
})
# Largest-remainder rounding: round each entry of `x` to `digits` decimal
# places while preserving the (rounded) total. The units with the largest
# fractional remainders are bumped up until the totals match. Warns when the
# result does not sum to one (callers pass probability vectors).
.round_preserve_sum <- function(x, digits = 0) {
  scale <- 10 ^ digits
  scaled <- x * scale
  floored <- floor(scaled)
  # How many units must be distributed to recover the rounded total.
  deficit <- round(sum(scaled)) - sum(floored)
  bump <- tail(order(scaled - floored), deficit)
  floored[bump] <- floored[bump] + 1
  result <- floored / scale
  if (zapsmall(sum(result)) != 1)
    warning("The rounded vector is not stochastic.")
  result
}
# Round a stochastic vector, or each row of a stochastic matrix, to `digits`
# decimals while keeping the (row) sums at one (largest-remainder method).
round_stochastic <- function(x, digits = 3) {
  if (!is.matrix(x)) {
    return(.round_preserve_sum(x, digits = digits))
  }
  # apply() returns columns per input row, so transpose back.
  t(apply(x, MARGIN = 1, .round_preserve_sum, digits = digits))
}
# Richards growth curve: alpha / (1 + beta * exp(-k * t))^(1 / m).
# Vectorized over t.
richard <- function(t, alpha, beta, k, m) {
  denominator <- (1 + beta * exp(-k * t))^(1 / m)
  alpha / denominator
}
# Inverse of richard(): the time t at which the curve reaches value x.
richard.inverse <- function(x, alpha, beta, k, m) {
  ratio <- ((alpha / x)^m - 1) / beta
  -log(ratio) / k
}
# Generalised Richards curve with lower asymptote A, upper asymptote U,
# and time offset t0; delegates to richard() on the shifted time axis.
generalisedRichard <- function(t, A, U, k, m, beta, t0 = 0) {
  A + richard(t - t0, U - A, beta, k, m)
}
# Inverse of generalisedRichard(): time at which the shifted curve reaches x.
generalisedRichard.inverse <- function(x, A, U, k, m, beta, t0 = 0) {
  richard.inverse(x - A, U - A, beta, k, m) + t0
}
# Name each element of a list of models after its response (dependent)
# variable, as detected by insight::find_response().
#
# models: a single model or a list of models (a bare model is wrapped).
# number: if TRUE, append " (i)" so duplicate response names stay unique.
# fill:   name used when a model has no detectable response.
# Returns `models` with names set.
dvnames <- function(models, number = FALSE, fill = 'Model') {
  if (class(models)[1] != 'list') {
    models <- list(models)
  }
  dvs <- sapply(models, insight::find_response)
  # FIX: the original `ifelse(is.null(x), fill, x)` relied on scalar ifelse()
  # silently truncating to x[1]; make that explicit with if/else.
  dvs <- sapply(dvs, function(x) if (is.null(x)) fill else x[1])
  if (number) {
    # FIX: seq_along() instead of 1:length() (safe when dvs is empty).
    dvs <- paste0(dvs, ' (', seq_along(dvs), ')')
  }
  names(models) <- dvs
  models
}
# Cast `value` to an integer per Table Schema rules. Returns the integer on
# success or the package-configured ERROR sentinel on failure.
#
# format:  unused here; kept for signature parity with the other casters.
# value:   the raw value (integer passes through; character is parsed).
# options: list; bareNumber = FALSE strips non-digit prefixes/suffixes
#          before parsing.
types.castInteger <- function(format, value, options={}) {
  if (!is_integer(value)) {
    # Only strings are parseable; everything else is an immediate error.
    if (!is.character(value)) return(config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r")))
    if ("bareNumber" %in% names(options)) {
      bareNumber <- options[["bareNumber"]]
      if (bareNumber == FALSE) {
        # Strip leading/trailing non-digit characters (e.g. "$5" or "5%").
        value <- stringr::str_replace_all(string = value, pattern = "(^\\D*)|(\\D*$)", replacement = "")
      }
    }
    # NOTE: the return() calls inside the tryCatch expression exit
    # types.castInteger itself (the expression is evaluated in this frame);
    # the round-trip check rejects lossy parses such as "1.5".
    value <- tryCatch({
      result <- as.integer(value)
      if (is.nan(result) || as.character(result) != value) return(config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r")))
      value <- result
    },
    warning = function(w) {
      # as.integer() warns (NAs introduced by coercion) on unparseable input.
      return(config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r")))
    },
    error = function(e) {
      return(config::get("ERROR", file = system.file("config/config.yml", package = "tableschema.r")))
    },
    finally = {})
  }
  return(value)
}
# Auto-generated regression test: .Internal(pmatch(...)) should partially
# match "month" against the difftime unit names and return index 6L
# ("months"); duplicates.ok = FALSE, no-match value = NA_integer_.
expected <- eval(parse(text="6L"));
test(id=0, code={
argv <- eval(parse(text="list(\"month\", c(\"secs\", \"mins\", \"hours\", \"days\", \"weeks\", \"months\", \"years\", \"DSTdays\"), NA_integer_, FALSE)"));
.Internal(`pmatch`(argv[[1]], argv[[2]], argv[[3]], argv[[4]]));
}, o=expected);
# Tests for species_list(); both hit the remote FishBase API, so they are
# gated on needs_api().
context("species list")
test_that("We can pass a species_list based on taxanomic group", {
  needs_api()
  fish <- species_list(Genus = "Labroides")
  expect_is(fish, "character")
  expect_gt(length(fish), 1)
})
test_that("Look up species by SpecCode", {
  needs_api()
  fish <- species_list(SpecCode = "5537")
  expect_is(fish, "character")
  expect_equal(fish, "Bolbometopon muricatum")
})
# Impute ordered categorical data by the proportional-odds model
# (MASS::polr), falling back to a multinomial model (nnet::multinom) when
# polr fails to fit. Returns imputed factor levels for the rows flagged in
# `wy` (defaults to the missing rows, !ry).
mice.impute.polr <- function(y, ry, x, wy = NULL, nnet.maxit = 100,
                             nnet.trace = FALSE, nnet.MaxNWts = 1500,
                             polr.to.loggedEvents = FALSE, ...) {
  if (is.null(wy)) wy <- !ry
  x <- as.matrix(x)
  # Data augmentation against separation; augment() also supplies weights w.
  aug <- augment(y, ry, x, wy)
  x <- aug$x
  y <- aug$y
  ry <- aug$ry
  wy <- aug$wy
  w <- aug$w
  xy <- cbind.data.frame(y = y, x = x)
  # polr can fail (e.g. non-convergence); errors are caught and trigger the
  # multinom fallback below.
  fit <- try(suppressWarnings(MASS::polr(formula(xy),
    data = xy[ry, , drop = FALSE],
    weights = w[ry],
    control = list(...)
  )),
  silent = TRUE
  )
  if (inherits(fit, "try-error")) {
    if (polr.to.loggedEvents) {
      updateLog(out = "polr falls back to multinom", frame = 6)
    }
    fit <- nnet::multinom(formula(xy),
      data = xy[ry, , drop = FALSE],
      weights = w[ry],
      maxit = nnet.maxit, trace = nnet.trace,
      MaxNWts = nnet.MaxNWts, ...
    )
  }
  # Predicted class probabilities for the rows to impute.
  post <- predict(fit, xy[wy, , drop = FALSE], type = "probs")
  # With a single row to impute, predict() drops to a vector: restore matrix.
  if (sum(wy) == 1) {
    post <- matrix(post, nrow = 1, ncol = length(post))
  }
  fy <- as.factor(y)
  nc <- length(levels(fy))
  un <- rep(runif(sum(wy)), each = nc)
  # Two-level responses also come back as a vector of P(level 2).
  if (is.vector(post)) {
    post <- matrix(c(1 - post, post), ncol = 2)
  }
  # Inverse-CDF sampling: draw a level from each row's probabilities.
  draws <- un > apply(post, 1, cumsum)
  idx <- 1 + apply(draws, 2, sum)
  levels(fy)[idx]
}
# Bootstrap standard errors for the latent-class (LCA) learning estimates:
# resample respondents with replacement `nsamps` times, re-estimate via
# guesstimate(), and summarize the spread of the estimates.
#
# pre_test, pst_test: data frames of pre/post responses (same columns).
# nsamps: number of bootstrap resamples.
# seed:   RNG seed (set internally for reproducibility).
# force9: passed through to multi_transmat().
# Returns a list: stnderrs.lca.params, avg.effects, stnderrs.effects.
guess_stnderr <- function(pre_test=NULL, pst_test=NULL, nsamps=100, seed = 31415, force9=FALSE)
{
  df <- data.frame(cbind(pre_test, pst_test))
  nitems <- ncol(df)/2
  transmatrix <- multi_transmat(pre_test, pst_test, force9=force9)
  # 4 LCA parameters for a 4-column transition matrix, otherwise 8.
  # (Scalar if/else instead of ifelse(); same result, clearer intent.)
  nparams <- if (ncol(transmatrix) == 4) 4 else 8
  resamps.results <- list()
  resamps.lca.eff <- matrix(ncol=nitems+1, nrow=nsamps)
  resamps.agg <- matrix(ncol=ncol(df), nrow=nsamps)
  stnderrs.lca.params <- matrix(ncol=nitems, nrow=nparams)
  stnderrs.effects <- avg.effects <- matrix(ncol=nitems+1, nrow=1)
  resamps.lca.params <- rep(list(matrix(nrow = nsamps, ncol = nparams)), nitems)
  set.seed(seed)
  # Bootstrap: resample whole respondents (rows) with replacement.
  resamples <- lapply(seq_len(nsamps), function(i) df[sample(seq_len(nrow(df)), replace = TRUE), ])
  for (i in seq_along(resamples)) {
    print(i)  # progress indicator for long bootstrap runs
    transmatrix_i <- multi_transmat(resamples[[i]][, 1:nitems], resamples[[i]][, (nitems+1):(2*nitems)], force9=force9)
    resamps.results[[i]] <- guesstimate(transmatrix_i)
    resamps.lca.eff[i, ] <- resamps.results[[i]]$est.learning
    # NOTE(review): row `nitems` of the transition matrix is stored here —
    # confirm this is the intended aggregate row.
    resamps.agg[i, ] <- transmatrix_i[nitems, ]
    for (j in seq_len(nitems)) {
      resamps.lca.params[[j]][i, ] <- resamps.results[[i]]$param.lca[, j]
    }
  }
  # Bootstrap SE = SD of the resampled estimates; also keep their mean.
  stnderrs.effects[1, ] <- sapply(as.data.frame(resamps.lca.eff), sd, na.rm = TRUE)
  avg.effects[1, ] <- sapply(as.data.frame(resamps.lca.eff), mean, na.rm = TRUE)
  for (j in seq_len(nitems)) {
    stnderrs.lca.params[, j] <- sapply(as.data.frame(resamps.lca.params[[j]]), sd, na.rm = TRUE)
  }
  row.names(avg.effects) <- row.names(stnderrs.effects) <- c("lca")
  if (nrow(stnderrs.lca.params) == 8) {
    row.names(stnderrs.lca.params) <- c("lgg", "lgk", "lgc", "lkk", "lcg", "lck", "lcc", "gamma")
  } else {
    row.names(stnderrs.lca.params) <- c("lgg", "lgk", "lkk", "gamma")
  }
  res <- list(stnderrs.lca.params, avg.effects, stnderrs.effects)
  names(res) <- c("stnderrs.lca.params", "avg.effects", "stnderrs.effects")
  res
}
# Annotate a phrase summary table `a` with per-phrase metadata pulled from a
# list of textreg results `dat` (first occurrence of each phrase wins).
# Returns a data.frame: the original table plus the annotation columns.
annotate.table = function( a, dat ) {
  words = do.call( rbind, sapply( dat, function(d) { reformat.textreg.model( d ) }, simplify=FALSE ) )
  words = words[ !duplicated( words$phrase ), ]
  # (A stray no-op `words` statement was removed here.)
  # Drop raw coefficient columns; only the descriptive columns are merged in.
  words$beta = words$Z = NULL
  a = as.data.frame(a)
  a$phrase = rownames(a)
  a = merge( a, words, by="phrase", all.x=TRUE, sort=FALSE )
  a
}
# Flatten a textreg result into a data.frame of phrases and their
# normalized weights (beta scaled by Z), tagged with a run identifier.
textreg.to.df = function( textreg.res, RunID = 1 ) {
  model = textreg.res$model
  data.frame( RunID = RunID,
              word = model$ngram,
              weight = model$beta / model$Z )
}
# Build a phrase-by-model summary table from a list of textreg results (or
# pre-flattened data.frames). Cell contents depend on `method`:
#   "rank"   - signed within-model rank of |weight|,
#   "weight" - sum of |weight|,
#   "count"  - number of occurrences,
#   "word"   - the phrase itself where it appears, "" elsewhere.
# Rows are sorted by decreasing cell magnitude; the intercept row is dropped.
# With annotate=TRUE, phrase metadata from the textreg results is merged in.
make.list.table = function (result.list, model.names = names(result.list), M = 100,
                            topic = "Summary Collection", method = c("rank", "weight", "count", "word"),
                            annotate=TRUE)
{
  # A single textreg result (itself a list) must not be mistaken for a list
  # of results.
  if (!is.list(result.list) || is.textreg.result(result.list) ) {
    stop("Need a list of result objects")
  }
  stopifnot(length(result.list) > 1)
  # Normalize every element to a data.frame with a RunID column.
  df.list = result.list
  for ( i in seq_along(df.list) ) {
    if ( is.textreg.result( df.list[[i]] ) ) {
      df.list[[i]] = textreg.to.df( df.list[[i]] )
    }
    stopifnot( nrow(df.list[[i]] ) > 0 )
    df.list[[i]]$RunID = i
  }
  if (!requireNamespace("plyr", quietly = TRUE)) {
    stop( "need to install plyr package to run make.list.table or other list generation methods" )
  }
  dat = plyr::rbind.fill( df.list )
  Xs = unique(dat$word)
  # First run in which each word appears (used for the sanity check below).
  # NOTE(review): this line is duplicated — harmless but redundant.
  first = tapply(dat$RunID, dat$word, min)
  first = tapply(dat$RunID, dat$word, min)
  method = match.arg(method)
  if (method == "rank") {
    # Rank |weight| within each run, then restore the sign of the weight.
    dat$rnk = dat$weight
    for (i in unique(dat$RunID)) {
      runi = dat$RunID == i
      dat$rnk[runi] = rank(abs(dat$weight[runi]))
    }
    a = tapply(dat$rnk * sign(dat$weight), list(dat$word,
                                                dat$RunID), sum)
  }
  else if (method == "weight" || method == "word") {
    a = tapply(abs(dat$weight), list(dat$word, dat$RunID),
               sum)
  }
  else if (method == "count") {
    a = tapply(sign(dat$weight), list(dat$word, dat$RunID),
               length)
  }
  else {
    stop(gettextf("Method %s not understood as valid mode in make.list.table",
                  method))
  }
  stopifnot(all(names(first) == rownames(a)))
  keeps = rownames(a) != "*intercept*"
  if ( sum( keeps ) == 1 ) {
    # Single-row edge case: subsetting would drop to a vector, so rebuild
    # the one-row matrix by hand.
    names = rownames(a)
    b = matrix( a[keeps,], nrow=1 )
    rownames(b) = names[keeps]
  } else {
    a = a[keeps, ]
    # Sort rows by decreasing |cell| across columns; zeros become NA so
    # absent phrases print blank.
    a[is.na(a)] = 0
    b2 = as.list(data.frame(abs(a)))
    ord = do.call(order, c(b2, decreasing = TRUE))
    b = a[ord, ]
    b[b == 0] = NA
  }
  if (method == "word") {
    # Replace magnitudes with the phrase text where present, "" elsewhere.
    words = rownames(b)
    b = matrix(as.character(b), nrow = nrow(b))
    for (i in 1:ncol(b)) {
      b[, i] = ifelse(is.na(b[, i]), "", words)
    }
    rownames(b) = words
  }
  colnames(b) = model.names
  if ( annotate ) {
    # Merge phrase annotations from the original textreg results only.
    ngo = sapply( result.list, is.textreg.result )
    if ( sum( ngo ) > 0 ) {
      b = annotate.table( b, result.list[ngo] )
    }
    attr(b, "num.models" ) = length( result.list )
  }
  attr(b, "topic" ) = topic
  attr(b, "method" ) = method
  b
}
# Legacy version of make.list.table() kept for reference.
#
# Same contract as make.list.table(), but works directly on textreg result
# objects (no data-frame inputs) and always annotates against the whole
# result list.
#
# @param result.list List (length > 1) of textreg result objects.
# @param model.names Column labels for the models.
# @param M Unused; kept for interface compatibility.
# @param topic,method,annotate As in make.list.table().
# @return Matrix (or annotated data frame) of phrases by models.
old.make.list.table = function( result.list, model.names = names(result.list), M = 100, topic="Summary Collection",
                                method=c("rank","weight","count","word"),
                                annotate=TRUE ) {
  if ( !is.list( result.list ) ) {
    stop( "Need a list of result objects" )
  }
  stopifnot( length( result.list ) > 1 )
  # NOTE: this branch is unreachable given the stopifnot above; retained
  # verbatim from the original.
  if ( length( result.list ) == 1 ) {
    runids = rep( 1, nrow(result.list[[1]]$model) )
    words = result.list[[1]]$model$ngram
    weights = result.list[[1]]$model$beta / result.list[[1]]$model$Z
  } else {
    runids = do.call( c, sapply( 1:length(result.list), function( d ) { rep( d, nrow(result.list[[d]]$model) ) } ) )
    words = do.call( c, sapply( result.list, function( d ) { d$model$ngram } ) )
    weights = do.call( c, sapply( result.list, function( d ) { d$model$beta / d$model$Z } ) )
  }
  dat = data.frame( RunID = runids, word=words, weight=weights )
  Xs = unique( dat$word )
  # First model each word appears in (alignment check below).
  # Fix: this was computed twice on consecutive lines.
  first = tapply( dat$RunID, dat$word, min )
  method = match.arg(method)
  if ( method=="rank" ) {
    dat$rnk = dat$weight
    for ( i in unique(dat$RunID) ) {
      runi = dat$RunID==i
      dat$rnk[runi] = rank(abs(dat$weight[runi]))
    }
    a = tapply( dat$rnk*sign(dat$weight), list(dat$word, dat$RunID), sum )
  } else if ( method == "weight" || method=="word" ) {
    a = tapply( abs(dat$weight), list(dat$word, dat$RunID), sum )
  } else if ( method == "count" ) {
    a = tapply( sign(dat$weight), list(dat$word, dat$RunID), length )
  } else {
    stop( gettextf( "Method %s not understood as valid mode in make.list.table", method ) )
  }
  stopifnot( all( names(first) == rownames(a) ) )
  a[is.na(a)] = 0
  a = a[ rownames(a) != "*intercept*", ]
  # Order rows by magnitude across models (largest first).
  b2 = as.list( data.frame( abs(a) ) )
  ord = do.call( order, c( b2, decreasing=TRUE ) )
  b = a[ ord, ]
  b[ b == 0 ] = NA
  if ( method=="word" ) {
    words = rownames(b)
    b = matrix( as.character(b), nrow=nrow(b) )
    for ( i in 1:ncol(b) ) {
      b[,i] = ifelse( is.na( b[,i] ), "", words )
    }
    rownames(b) = words
  }
  colnames(b) = model.names
  if ( annotate ) {
    b = annotate.table( b, result.list )
    attr(b, "num.models" ) = length( result.list )
  }
  attr(b, "topic" ) = topic
  attr(b, "method" ) = method
  b
}
# Heat-map style chart of a word-by-model table from make.list.table().
#
# Rows are phrases, columns are models; cell color encodes the (signed)
# cell value on a red-to-green ramp.  At most M rows are shown, chosen by
# the larger of the max- and mean-magnitude ranks.
#
# @param model.list Matrix from make.list.table(), or the annotated data
#   frame version (detected via is.data.frame and attr "num.models").
# @param M Maximum number of rows (phrases) to display.
# @param linespace Spacing (in rows) between horizontal guide lines.
# @param ytick Unused; kept for interface compatibility.
# @param dates Optional labels for the column boundaries (secondary axis).
# @param main,xlab,mar Usual graphics parameters.
# @param xaxt "y" to draw the model-name x axis, anything else to skip it.
# @param color.breaks,color.ramp Optional explicit color scale; derived
#   symmetrically around 0 when NULL.
# @param ... Passed on to image().
# @return Invisibly, the plotted matrix.
list.table.chart = function( model.list, M=100, linespace=4, ytick=NULL,
                             dates=NULL,
                             main = paste( "Word Appearance for ", attr(model.list,"topic"), "\n(Method: ", attr(model.list,"method"),")", sep=""),
                             xlab="Model",
                             mar = c(3,5,2.5,0.1),
                             xaxt="y",
                             color.breaks = NULL,
                             color.ramp = NULL,
                             ... ) {
  # Annotated data frame: pull the numeric model columns back into a matrix.
  if ( is.data.frame(model.list) ) {
    nms = model.list$phrase
    b = model.list[2:(1+attr(model.list,"num.models"))]
    b = as.matrix(b)
    rownames(b) = nms
  } else {
    b = model.list
  }
  # Flip so the most important rows plot at the top of the image.
  b = b[ nrow(b):1, ]
  if ( nrow( b ) > M ) {
    # Keep the rows ranking highest by either max or mean magnitude.
    bm = rank( apply(abs(b), 1, max, na.rm=TRUE ), na.last=FALSE )
    bm2 = rank( apply(abs(b), 1, mean, na.rm=TRUE ), na.last=FALSE )
    bmm = pmax( bm, bm2 )
    b[b==0] = NA
    b = b[ bmm >= quantile( bmm, 1-M/length(bmm)), ]
  }
  # NOTE: `max`/`min` shadow the base functions within this scope.
  max = max(abs(b), na.rm=TRUE)
  if ( min( b, na.rm=TRUE ) < 0 ) {
    min = -max
  } else {
    min = 0
  }
  # Default scale: symmetric around zero so sign maps to color family.
  if ( is.null( color.breaks ) ) {
    if ( is.null( color.ramp ) ) {
      color.breaks <- seq(-max, max, length.out=11 )
    } else {
      color.breaks <- seq(-max, max, length.out=length(color.ramp) + 1)
    }
  } else {
    if ( !any( 0 == color.breaks ) ) {
      warning( "No cut-point at 0 for color.breaks. Hard to seperate negative and positive words." )
    }
  }
  if ( is.null( color.ramp ) ) {
    # Red shades below zero, green shades above, split at the break
    # closest to zero.
    sw = which.min( abs( color.breaks ) )
    Ln = length( color.breaks ) - 1
    reds = greens = rep( 0, Ln )
    if ( sw > 1 ) {
      reds[1:(sw-1)] = seq( 1.0, 0.5, length=(sw-1) )
    }
    if ( sw < Ln ) {
      greens[sw:Ln] = seq( 0.5, 1.0, length=1+Ln-sw )
    }
    color.ramp <- rgb( reds, greens, 0 )
  }
  par(mar = mar)
  image( x=1:ncol(b),
         y = 1:nrow(b), zlim=c(min, max),
         t(b),
         xlab=xlab, ylab="",
         xaxt="n", yaxt="n", bty="n",
         col=color.ramp,
         breaks=color.breaks,
         main=main, ... )
  # Horizontal guide lines every `linespace` rows.
  for ( i in seq(1,nrow(b), by=linespace ) ) {
    abline( h=i, lty=3 )
  }
  if ( xaxt=="y" ) {
    ticks = seq(1,ncol(b),by=1)
    axis(BELOW<-1, at=ticks, labels=colnames(b),
         cex.axis=0.7, las = 2)
    if ( !is.null( dates ) ) {
      # Secondary labels at the cell boundaries (half-step offset).
      ticks = seq(1,ncol(b)+1,by=1)
      axis(BELOW<-1, at=ticks - 0.5, labels=dates, col="grey",
           cex.axis=0.5, las = 2, tick=FALSE)
    }
  }
  axis(LEFT <-2, at=1:nrow(b), labels=rownames(b),
       las= HORIZONTAL<-1,
       cex.axis=0.5)
  invisible( b )
}
# Association-rule analysis of the cjb exam-score data set.
# Reads the CSV (CP936-encoded), explores the sx-by-wlfk distribution,
# discretizes subject scores into five ordered grades, and mines apriori
# rules whose right-hand side is the wlfk (stream) label.
library(readr)
cjb_url <- "data/cjb.csv"
cjb <- read_csv(cjb_url,
                locale = locale(encoding = "CP936"))
View(cjb)
library(tidyverse)
# Boxplot of sx by wlfk group (presumably score by academic stream —
# confirm with the data dictionary).
cjb %>%
  dplyr::select(sx, wlfk) %>%
  ggplot(aes(x = wlfk,
             y = sx,
             fill = wlfk)) +
  geom_boxplot(width = 0.5)
# Map a 0-100 score onto five ordered grade labels (kept in Chinese:
# fail / pass / medium / good / excellent); left-closed intervals.
as_five_grade_scores <- function(x) {
  cut(
    x,
    breaks = c(0, seq(60, 100, by = 10)),
    include.lowest = TRUE,
    right = FALSE,
    ordered_result = TRUE,
    labels = c("不及格", "及格", "中", "良", "优")
  )
}
cjb <- cjb %>%
  # Total score over columns 4:12, dropping all-zero records.
  mutate(zcj = rowSums(.[4:12])) %>%
  filter(zcj != 0) %>%
  mutate_at(vars(xb, wlfk), factor) %>%
  mutate_at(vars(yw:sw), as_five_grade_scores)
View(cjb)
library(arulesViz)
# Mine rules predicting the stream label with min support 0.06 and
# min confidence 0.8.
my_model <- cjb %>%
  select(xb:wlfk) %>%
  apriori(parameter = list(supp = 0.06, conf = 0.8),
          appearance = list(rhs = paste0("wlfk=", c("文科", "理科"))))
inspectDT(my_model)
plot(my_model, method = "graph")
plot(my_model,
     method = "graph",
     engine = "htmlwidget")
# rstanarm beta-regression vignette: chunks only evaluate when the
# NOT_CRAN environment variable is "true".
stopifnot(require(knitr))
opts_chunk$set(
  comment=NA,
  message = FALSE,
  warning = FALSE,
  eval = identical(Sys.getenv("NOT_CRAN"), "true"),
  dev = "png",
  dpi = 150,
  fig.asp = 0.618,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(ggplot2)
library(bayesplot)
theme_set(bayesplot::theme_default())
SEED <- 1234
set.seed(SEED)
# Simulate beta-distributed outcomes: mean mu depends on x via a logit
# link, precision phi depends on z via a log link.
eta <- c(1, -0.2)
gamma <- c(1.8, 0.4)
N <- 200
x <- rnorm(N, 2, 2)
z <- rnorm(N, 0, 2)
mu <- binomial(link = logit)$linkinv(eta[1] + eta[2]*x)
phi <- binomial(link = log)$linkinv(gamma[1] + gamma[2]*z)
y <- rbeta(N, mu * phi, (1 - mu) * phi)
dat <- data.frame(cbind(y, x, z))
hist(dat$y, col = "darkgrey", border = F, main = "Distribution of Outcome Variable", xlab = "y", breaks = 20, freq = F)
library(rstanarm)
# fit1 matches the data-generating process; fit2 is deliberately
# misspecified (no intercept, no precision model).
fit1 <- stan_betareg(y ~ x | z, data = dat, link = "logit", link.phi = "log",
                     cores = 2, seed = 12345)
fit2 <- stan_betareg(y ~ -1 + x , data = dat, link = "logit", link.phi = "log",
                     cores = 2, seed = 12345)
round(coef(fit1), 2)
round(coef(fit2), 2)
# NOTE(review): the two coef() calls are repeated verbatim — possibly
# duplicated vignette chunks; confirm intent before removing.
round(coef(fit1), 2)
round(coef(fit2), 2)
prior_summary(fit1)
library(ggplot2)
library(bayesplot)
# Posterior predictive checks side by side for the true and false models.
bayesplot_grid(
  pp_check(fit1), pp_check(fit2),
  xlim = c(0,1),
  ylim = c(0,4),
  titles = c("True Model: y ~ x | z", "False Model: y ~ x - 1"),
  grid_args = list(ncol = 2)
)
loo1 <- loo(fit1)
loo2 <- loo(fit2)
loo_compare(loo1, loo2)
library(rstanarm)
# Real-data example: GasolineYield with and without a precision model.
data("GasolineYield", package = "betareg")
gas_fit1 <- stan_betareg(yield ~ temp + batch, data = GasolineYield, link = "logit",
                         seed = 12345)
gas_fit2 <- stan_betareg(yield ~ temp + batch | pressure,
                         data = GasolineYield, link = "logit",
                         seed = 12345)
round(coef(gas_fit1), 2)
round(coef(gas_fit2), 2)
# NOTE(review): repeated coef() calls again — see note above.
round(coef(gas_fit1), 2)
round(coef(gas_fit2), 2)
library(ggplot2)
bayesplot_grid(
  pp_check(gas_fit1), pp_check(gas_fit2),
  xlim = c(0,1),
  ylim = c(0,5),
  titles = c("gas_fit1", "gas_fit2"),
  grid_args = list(ncol = 2)
)
gas_loo1 <- loo(gas_fit1)
gas_loo2 <- loo(gas_fit2)
loo_compare(gas_loo1, gas_loo2)
# Accumulate RssThisRadius() contributions over radii 1..radius.
#
# @param xadj Pre-processed input forwarded with prepped=TRUE.
# @param v1,v2 Identifiers forwarded to RssThisRadius().
# @param radius Maximum radius to include.
# @return Sum of all per-radius RSS terms.
RssCell <- function(xadj, v1, v2, radius){
  # NOTE(review): 1:radius assumes radius >= 1 (seq_len would guard 0).
  terms <- lapply(1:radius, function(this.r) {
    RssThisRadius(xadj, v1, v2, this.r, prepped = TRUE)
  })
  sum(unlist(terms))
}
# Plot a light_effects object: response/predicted/pd/ale profiles over the
# values of the profiled variable, optionally with a quartile crossbar for
# the response.
#
# @param x A light_effects object.
# @param use Which statistics to draw; "all" expands to all four.
# @param zero_counts Keep x-values with zero observations?
# @param size_factor Scales line and point sizes.
# @param facet_scales,facet_nrow Passed to facet_wrap().
# @param rotate_x Rotate x-axis labels by 45 degrees?
# @param show_points Draw points on top of the lines?
# @param ... Passed to the geoms.
# @return A ggplot object.
plot.light_effects <- function(x, use = c("response", "predicted", "pd"),
                               zero_counts = TRUE, size_factor = 1,
                               facet_scales = "free_x", facet_nrow = 1L,
                               rotate_x = TRUE, show_points = TRUE, ...) {
  stopifnot(length(use) >= 1L)
  if ("all" %in% use) {
    use <- c("response", "predicted", "pd", "ale")
  }
  value_name <- getOption("flashlight.value_name")
  label_name <- getOption("flashlight.label_name")
  q1_name <- getOption("flashlight.q1_name")
  q3_name <- getOption("flashlight.q3_name")
  type_name <- getOption("flashlight.type_name")
  nby <- length(x$by)
  multi <- is.light_effects_multi(x)
  if (nby + multi > 1L) {
    stop("Plot method unavailable for multiple 'by' variables or a multiflashlight and a 'by' variable.")
  }
  # With quartile stats the response is drawn as a crossbar, not a line.
  data <- bind_rows(x[setdiff(use, if (x$stats == "quartiles") "response")])
  n <- nrow(data)
  if (!zero_counts && n) {
    data <- semi_join(data, x$response, by = c(label_name, x$by, x$v))
  }
  crossbar_required <- x$stats == "quartiles" && "response" %in% use
  if (crossbar_required) {
    crossbar <- geom_crossbar(
      data = x$response, aes_string(ymin = q1_name, ymax = q3_name),
      width = 0.3, fill = "darkblue", colour = "black", alpha = 0.1, ...
    )
  }
  if (n) {
    tp <- type_name
    p <- ggplot(data, aes_string(y = value_name, x = x$v)) +
      geom_line(aes_string(color = tp, group = tp),
                size = size_factor / 3, ...)
    if (show_points) {
      p <- p + geom_point(aes_string(color = tp, group = tp),
                          size = size_factor, ...)
    }
    if (crossbar_required) {
      p <- p + crossbar
    }
  } else {
    # Bug fix: `crossbar` only exists when crossbar_required is TRUE.
    # The original added it unconditionally here, raising
    # "object 'crossbar' not found" when there was no line data and no
    # quartile response either.
    p <- ggplot(x$response, aes_string(y = value_name, x = x$v))
    if (crossbar_required) {
      p <- p + crossbar
    }
  }
  if (multi || nby) {
    p <- p + facet_wrap(reformulate(if (multi) label_name else x$by[1]),
                        scales = facet_scales, nrow = facet_nrow)
  }
  p <- p +
    theme_bw() +
    theme(legend.position = "bottom", legend.title = element_blank())
  if (rotate_x) {
    p <- p +
      theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
  }
  p
}
# Tests for pin_coordn_correct(): day fields offset by 60 (coordination
# numbers) are corrected back to the real day; ordinary pins pass through.
context("pin_coordn_correct")
test_that(desc="control number",{
  # Unchanged when the day field is a normal calendar day.
  expect_equal(pin_coordn_correct(pin = "198112189876"), expected = "198112189876")
  # Day 78 -> 18, 68 -> 08, 88 -> 28: the offset of 60 is removed.
  expect_equal(pin_coordn_correct(pin = "198112789876"), expected = "198112189876")
  expect_equal(pin_coordn_correct(pin = "198112680000"), expected = "198112080000")
  expect_equal(pin_coordn_correct(pin = "198112880000"), expected = "198112280000")
  # Result stays a character string.
  expect_is(pin_coordn_correct(pin = "198112880000"), "character")
})
# Tests of the isomorphic scaling relations of the ecd distribution:
# scaling sigma by lambda^3 should scale alpha by lambda^6 and gamma by
# lambda^4, preserve the j-invariant, and scale solutions y(x) predictably.
context("Test on Isomorphic Mapping")
eps <- 0.001
# Reference distribution at R = 1, theta = pi/4.
theta <- 0.25*pi
d0 <- ecd.polar(R=1,theta=theta)
a0 <- d0@alpha
r0 <- d0@gamma
x <- 0.5
y0 <- solve(d0, x)
# Scaled counterparts: lambda-scaling and the equivalent sigma-scaling.
lambda <- 2
s1 <- d0@sigma*lambda^3
a1 <- a0*lambda^6
r1 <- r0*lambda^4
d1 <- ecd(a1,r1)
d1s <- ecd(a1,r1,1/s1)
a2 <- a0*s1^2
r2 <- r0*s1^(4/3)
test_that("test a1 = a2",{
  expect_true(abs(a1-a2) < eps)
})
test_that("test r1 = r2",{
  expect_true(abs(r1-r2) < eps)
})
test_that("test j-inv of d0 and d1",{
  expect_true(abs(jinv(d0)-jinv(d1)) < eps)
})
# NOTE(review): this test repeats the previous j-inv assertion verbatim;
# it presumably was meant to check something about s1 — confirm intent.
test_that("test s1",{
  expect_true(abs(jinv(d0)-jinv(d1)) < eps)
})
test_that("test y(x) scaled by lambda",{
  y1 <- solve(d0, x)*lambda^2
  y2 <- solve(d1, x*lambda^3)
  y3 <- solve(d1s, x)
  y4 <- solve(d1, x*s1)
  delta <- abs(y2-y1) + abs(y3-y1) + abs(y4-y1)
  expect_true(delta < eps)
})
test_that("test y(x) scaled by sigma and R",{
  y1 <- solve(d1, x)
  y2 <- solve(d1s, x/s1)
  y3 <- solve(d0, x/s1)*s1^(2/3)
  y4 <- solve(d0, x/sqrt(d1@R))*d1@R^(1/3)
  delta <- abs(y2-y1) + abs(y3-y1) + abs(y4-y1)
  expect_true(delta < eps)
})
test_that("test y(0) scaling",{
  # y(0) should scale as R^(1/3).
  R <- 30^3
  t <- 45/180*pi
  y1 <- solve(ecd.polar(R=1, theta=t, bare.bone=TRUE), 0)
  y2 <- solve(ecd.polar(R=R, theta=t, bare.bone=TRUE), 0)
  expect_true(abs(y2/y1-R^(1/3)) < eps)
})
# Sweep a grid of radii and angles: the closed-form ecd.y0_isomorphic()
# must agree with the numeric solve() at x = 0.
for (R in c(1, 8, 27)) {
  for (degree in c(0, 35, 90, 180, 210, 270, 310, 315)) {
    test_that(paste("test y(0) isomorphism at R=",R,"deg=",degree),{
      theta <- degree/180*pi
      d <- ecd.polar(R,theta, bare.bone=TRUE)
      y0 <- solve(d,0)
      y1 <- ecd.y0_isomorphic(theta, d@R)
      y2 <- ecd.y0_isomorphic(object=d)
      expect_true(abs(y1-y0) + abs(y2-y0) < eps)
    })
  }
}
test_that("test NaN of y(0) isomorphism",{
  # The closed form must be defined over the whole angular range.
  degree <- seq(0, 315, by=0.01)
  y0 <- ecd.y0_isomorphic(degree/180*pi)
  expect_true(all(!is.na(y0)))
})
test_that("test j-inv, r_adj>0",{
  t <- 30/180*pi
  j1 <- jinv(ecd.polar(R=5, theta=t))
  j2 <- 1728*sin(t)^2
  expect_true(abs(j2-j1) < eps)
})
test_that("test j-inv, r_adj<0",{
  t <- (180+30)/180*pi
  j1 <- jinv(ecd.polar(R=5, theta=t))
  j2 <- 1728/(1-tan(t)^(-2))
  expect_true(abs(j2-j1) < eps)
})
# Build the template table of all candidate cell-cell interactions.
#
# Cross-joins emitter cell type x receiver cell type x LRI, attaches the
# LRI annotation columns, then cell counts (and per-condition sample
# counts when the analysis is sample-based).
#
# @param analysis_inputs List with cell_types, the LRI table, condition
#   information, and metadata.
# @return data.table with one row per (emitter, receiver, LRI) triple.
create_cci_template <- function(
    analysis_inputs
    ) {
  cell_types <- analysis_inputs$cell_types
  # All emitter/receiver/LRI combinations.
  out <- CJ(
    EMITTER_CELLTYPE = cell_types,
    RECEIVER_CELLTYPE = cell_types,
    LRI = analysis_inputs$LRI$LRI
  )
  # Attach the LRI annotation columns.
  out <- merge.data.table(
    x = out,
    y = analysis_inputs$LRI,
    by.x = "LRI",
    by.y = "LRI",
    all.x = TRUE,
    sort = FALSE
  )
  out <- add_cell_number(
    template_table = out,
    condition_inputs = analysis_inputs$condition,
    metadata = analysis_inputs$metadata
  )
  if (analysis_inputs$condition$is_samp) {
    out <- add_sample_number(
      template_table = out,
      condition_inputs = analysis_inputs$condition,
      metadata = analysis_inputs$metadata
    )
  }
  out
}
# Attach per-condition sample counts for emitter and receiver cell types.
#
# Counts distinct (cell_type, sample_id) pairs per condition, spreads the
# counts to one column per condition, joins them onto the template twice
# (emitter side then receiver side, disambiguated by "_L"/"_R" suffixes),
# renames the columns to EMITTER/RECEIVER_NSAMPLES_<condition>, and
# replaces missing counts with 0.
#
# @param template_table CCI template data.table (modified by reference).
# @param condition_inputs List carrying cond1/cond2 condition names.
# @param metadata data.table with cell_type, sample_id, condition columns.
# @return The template with the four NSAMPLES columns added.
add_sample_number <- function(
    template_table,
    condition_inputs,
    metadata
    ) {
  # Distinct samples per (cell_type, condition), one column per condition.
  sample_counts <- unique(
    metadata[, c("cell_type", "sample_id", "condition")]
  )[, .N, by = c("cell_type", "condition")]
  sample_counts <- dcast.data.table(
    data = sample_counts,
    formula = cell_type ~ condition,
    value.var = "N"
  )
  # Emitter-side join first; the receiver-side join collides on the
  # condition columns, producing the _L/_R suffixes renamed below.
  template_table <- merge.data.table(
    x = template_table,
    y = sample_counts,
    by.x = "EMITTER_CELLTYPE",
    by.y = "cell_type",
    all.x = TRUE,
    sort = FALSE
  )
  template_table <- merge.data.table(
    x = template_table,
    y = sample_counts,
    by.x = "RECEIVER_CELLTYPE",
    by.y = "cell_type",
    all.x = TRUE,
    sort = FALSE,
    suffixes = c("_L", "_R")
  )
  conds <- c(condition_inputs$cond1, condition_inputs$cond2)
  # Order: cond1_L, cond2_L, cond1_R, cond2_R (matching merge output).
  old_cols <- paste0(rep(conds, times = 2), rep(c("_L", "_R"), each = 2))
  new_cols <- paste0(
    rep(c("EMITTER_NSAMPLES_", "RECEIVER_NSAMPLES_"), each = 2),
    rep(conds, times = 2)
  )
  setnames(
    x = template_table,
    old = old_cols,
    new = new_cols
  )
  # Cell types absent from a condition get a count of 0 instead of NA.
  for (col in new_cols) {
    set(
      template_table,
      i = which(is.na(template_table[[col]])),
      j = col,
      value = 0
    )
  }
  return(template_table)
}
# Attach cell counts for emitter and receiver cell types.
#
# Without conditions, one count per cell type (NCELLS_EMITTER/RECEIVER);
# with conditions, one count per (cell type, condition), yielding
# NCELLS_EMITTER/RECEIVER_<condition> columns.  Missing counts become 0.
# Correctness of the renames depends on the exact merge order and the
# "_L"/"_R" suffixes produced when the second join collides.
#
# @param template_table CCI template data.table (modified by reference).
# @param condition_inputs List with is_cond flag and cond1/cond2 names.
# @param metadata data.table with cell_type (and condition) columns.
# @return The template with the NCELLS columns added.
add_cell_number <- function(
    template_table,
    condition_inputs,
    metadata
    ) {
  if (!condition_inputs$is_cond) {
    # No conditions: one count per cell type.
    dt_NCELLS <- metadata[, .N, by = "cell_type"]
    template_table <- merge.data.table(
      x = template_table,
      y = dt_NCELLS,
      by.x = "EMITTER_CELLTYPE",
      by.y = "cell_type",
      all.x = TRUE,
      sort = FALSE
    )
    # Second join collides on column N, producing N_L / N_R.
    template_table <- merge.data.table(
      x = template_table,
      y = dt_NCELLS,
      by.x = "RECEIVER_CELLTYPE",
      by.y = "cell_type",
      all.x = TRUE,
      sort = FALSE,
      suffixes = c("_L", "_R")
    )
    new_cols <- c("NCELLS_EMITTER", "NCELLS_RECEIVER")
    setnames(
      x = template_table,
      old = c("N_L", "N_R"),
      new = new_cols
    )
  } else {
    # With conditions: counts per (cell type, condition), spread to one
    # column per condition before joining.
    dt_NCELLS <- metadata[, .N, by = c("cell_type", "condition")]
    dt_NCELLS <- dcast.data.table(
      data = dt_NCELLS,
      formula = cell_type ~ condition,
      value.var = "N"
    )
    template_table <- merge.data.table(
      x = template_table,
      y = dt_NCELLS,
      by.x = "EMITTER_CELLTYPE",
      by.y = "cell_type",
      all.x = TRUE,
      sort = FALSE
    )
    template_table <- merge.data.table(
      x = template_table,
      y = dt_NCELLS,
      by.x = "RECEIVER_CELLTYPE",
      by.y = "cell_type",
      all.x = TRUE,
      sort = FALSE,
      suffixes = c("_L", "_R")
    )
    new_cols <- c(
      paste0("NCELLS_EMITTER_", condition_inputs$cond1),
      paste0("NCELLS_EMITTER_", condition_inputs$cond2),
      paste0("NCELLS_RECEIVER_", condition_inputs$cond1),
      paste0("NCELLS_RECEIVER_", condition_inputs$cond2)
    )
    # Rename suffixed condition columns: _L = emitter side, _R = receiver.
    setnames(
      x = template_table,
      old = c(
        paste0(condition_inputs$cond1, "_L"),
        paste0(condition_inputs$cond2, "_L"),
        paste0(condition_inputs$cond1, "_R"),
        paste0(condition_inputs$cond2, "_R")
      ),
      new = new_cols
    )
  }
  # Cell types with no cells in a group get 0 instead of NA.
  for (j in new_cols) {
    set(
      template_table,
      i = which(is.na(template_table[[j]])),
      j = j,
      value = 0
    )
  }
  return(template_table)
}
# Score all candidate cell-cell interactions on the template.
#
# Computes per-cell-type average expression, builds CCI scores, and
# (unless compute_fast) also detection rates and, with conditions, the
# log fold change between cond2 and cond1.
#
# @param analysis_inputs List with data_tr, metadata, condition info,
#   LRI table and max_nL/max_nR subunit counts.
# @param cci_template Template from create_cci_template().
# @param log_scale If TRUE, inputs are treated as already log-scaled, so
#   LOGFC is a difference of scores; otherwise log of the score ratio.
# @param score_type "geometric_mean" or "arithmetic_mean".
# @param threshold_min_cells,threshold_pct Detection thresholds.
# @param compute_fast If TRUE, return only the score vector(s), skipping
#   detection rates and fold changes.
# @return data.table of scored CCIs, or (compute_fast) score vector(s).
run_simple_cci_analysis <- function(
    analysis_inputs,
    cci_template,
    log_scale,
    score_type,
    threshold_min_cells,
    threshold_pct,
    compute_fast
    ) {
  # Silence R CMD check notes for data.table NSE column names.
  LOGFC <- LOGFC_ABS <- NULL
  # Mean expression per cell type (or condition_celltype group).
  averaged_expr <- aggregate_cells(
    data_tr = analysis_inputs$data_tr,
    metadata = analysis_inputs$metadata,
    is_cond = analysis_inputs$condition$is_cond
  )
  cci_dt <- build_cci_or_drate(
    averaged_expr = averaged_expr,
    cci_template = cci_template,
    max_nL = analysis_inputs$max_nL,
    max_nR = analysis_inputs$max_nR,
    condition_inputs = analysis_inputs$condition,
    threshold_min_cells = threshold_min_cells,
    threshold_pct = threshold_pct,
    cci_or_drate = "cci",
    score_type = score_type
  )
  # Fast path: only the score column(s) are needed.
  if (compute_fast) {
    if (!analysis_inputs$condition$is_cond) {
      return(cci_dt[["CCI_SCORE"]])
    } else {
      return(list(
        cond1 = cci_dt[[paste0(
          "CCI_SCORE_",
          analysis_inputs$condition$cond1
        )
        ]],
        cond2 = cci_dt[[paste0(
          "CCI_SCORE_",
          analysis_inputs$condition$cond2
        )]]
      ))
    }
  }
  # Detection rate = fraction of cells with nonzero expression.
  detection_rate <- aggregate_cells(
    data_tr = 1 * (analysis_inputs$data_tr > 0),
    metadata = analysis_inputs$metadata,
    is_cond = analysis_inputs$condition$is_cond
  )
  drate_dt <- build_cci_or_drate(
    averaged_expr = detection_rate,
    cci_template = cci_template,
    max_nL = analysis_inputs$max_nL,
    max_nR = analysis_inputs$max_nR,
    condition_inputs = analysis_inputs$condition,
    threshold_pct = threshold_pct,
    threshold_min_cells = threshold_min_cells,
    cci_or_drate = "drate",
    score_type = score_type
  )
  # Join scores and detection flags on all shared columns.
  dt <- merge.data.table(
    x = cci_dt,
    y = drate_dt,
    by = intersect(names(cci_dt), names(drate_dt)),
    sort = FALSE
  )
  if (analysis_inputs$condition$is_cond) {
    if (log_scale) {
      # Already log-scaled: difference of scores is the log fold change.
      dt[
        ,
        LOGFC := get(
          paste0(
            "CCI_SCORE_",
            analysis_inputs$condition$cond2
          )
        ) -
          get(
            paste0(
              "CCI_SCORE_",
              analysis_inputs$condition$cond1
            )
          )
      ]
    } else {
      # Linear scale: log of the score ratio; 0/0 (NaN) mapped to 0.
      dt[
        ,
        LOGFC := log(
          get(
            paste0(
              "CCI_SCORE_",
              analysis_inputs$condition$cond2
            )
          ) /
            get(
              paste0(
                "CCI_SCORE_",
                analysis_inputs$condition$cond1
              )
            )
        )
      ]
      dt[
        ,
        LOGFC := ifelse(
          is.nan(LOGFC),
          0,
          LOGFC
        )
      ]
    }
    dt[, LOGFC_ABS := abs(LOGFC)]
  }
  return(dt)
}
# Average a cells-by-genes matrix within cell-type groups.
#
# @param data_tr Transposed expression matrix (cells in rows).
# @param metadata Per-cell metadata with cell_type (and condition).
# @param is_cond If TRUE, group by condition_celltype instead of cell type.
# @return Matrix of group means (groups in rows).
aggregate_cells <- function(
    data_tr,
    metadata,
    is_cond
    ) {
  # Grouping key: cell type alone, or "<condition>_<cell type>".
  group <- if (is_cond) {
    paste(
      metadata[["condition"]],
      metadata[["cell_type"]],
      sep = "_"
    )
  } else {
    metadata[["cell_type"]]
  }
  # Sum per group, then divide by the group sizes to get means.
  group_sums <- DelayedArray::rowsum(
    x = data_tr,
    group = group,
    reorder = TRUE
  )
  group_sizes <- as.vector(table(group))
  group_sums / group_sizes
}
# Fill the CCI template with either expression-based scores ("cci") or
# detection-rate flags ("drate").
#
# Melts the per-group averaged matrix to long form, joins each ligand and
# receptor subunit's value onto the template (emitter side for ligands,
# receiver side for receptors), takes the pmin over subunits, and combines
# the ligand/receptor sides into a score (geometric or arithmetic mean)
# or a detection flag.  With conditions, every step is done per condition
# via the *_<cond> column-name suffixes.
#
# @param averaged_expr Groups-by-genes matrix from aggregate_cells().
# @param cci_template Template from create_cci_template() (copied).
# @param max_nL,max_nR Maximum ligand/receptor subunit counts.
# @param condition_inputs List with is_cond and cond1/cond2 names.
# @param threshold_min_cells,threshold_pct Detection thresholds (drate only).
# @param cci_or_drate "cci" or "drate".
# @param score_type "geometric_mean" or "arithmetic_mean" (cci only).
# @return The filled template data.table.
build_cci_or_drate <- function(
    averaged_expr,
    cci_template,
    max_nL,
    max_nR,
    condition_inputs,
    threshold_min_cells,
    threshold_pct,
    cci_or_drate,
    score_type
    ) {
  # Silence R CMD check notes for data.table NSE symbols.
  CONDITION_CELLTYPE <- CELLTYPE <- patterns <- NULL
  # Work on a copy so the shared template is not mutated.
  full_dt <- copy(cci_template)
  if (cci_or_drate == "cci") {
    name_tag <- "EXPRESSION"
  } else if (cci_or_drate == "drate") {
    name_tag <- "DETECTION_RATE"
  }
  # Column-name scaffolding: without conditions a single set of columns;
  # with conditions everything is duplicated with _<cond1>/_<cond2>
  # suffixes (pmin_id) and n_id drives per-condition iteration.
  # NOTE(review): row_id/vars_id are set but not referenced below.
  if (!condition_inputs$is_cond) {
    row_id <- "CELLTYPE"
    vars_id <- "CELLTYPE"
    cond1_id <- NULL
    cond2_id <- NULL
    merge_id <- name_tag
    score_id <- "CCI_SCORE"
    dr_id <- "IS_CCI_EXPRESSED"
    n_id <- 1
    pmin_id <- NULL
  } else {
    row_id <- "CONDITION_CELLTYPE"
    vars_id <- c("CELLTYPE", "CONDITION")
    cond1_id <- paste0("_", condition_inputs$cond1)
    cond2_id <- paste0("_", condition_inputs$cond2)
    merge_id <- c(condition_inputs$cond1, condition_inputs$cond2)
    score_id <- paste0(
      "CCI_SCORE_",
      c(condition_inputs$cond1, condition_inputs$cond2)
    )
    dr_id <- paste0(
      "IS_CCI_EXPRESSED_",
      c(condition_inputs$cond1, condition_inputs$cond2)
    )
    n_id <- 2
    pmin_id <- c(cond1_id, cond2_id)
  }
  # Long form: one row per (gene, group).
  dt <- as.data.table(
    x = t(averaged_expr),
    keep.rownames = "GENE",
    sorted = FALSE
  )
  if (condition_inputs$is_cond) {
    # Group columns are named "<condition>_<cell type>"; split them and
    # check the cell-type ordering is identical for both conditions so
    # the two measure patterns below stay aligned.
    ct_temp <- strsplit(colnames(dt)[-1], "_")
    lct_temp <- length(ct_temp)
    if(lct_temp %% 2 !=0 ) stop("Internal error in `build_cci_or_drate`")
    ct_temp_keep <- unlist(
      lapply(
        ct_temp,
        function(i) i[[2]]
      )
    )[1:(lct_temp/2)]
    ct_temp_check <- unlist(
      lapply(
        ct_temp,
        function(i) i[[2]]
      )
    )[(lct_temp/2+1):lct_temp]
    if(!identical(ct_temp_check, ct_temp_keep)) {
      stop("Internal error in `build_cci_or_drate`")
    }
    # Melt the two condition column sets in parallel into one value
    # column per condition.
    dt <- melt.data.table(
      data = dt,
      id.vars = "GENE",
      measure.vars = patterns(
        paste0("^", condition_inputs$cond1, "_"),
        paste0("^", condition_inputs$cond2, "_")),
      value.factor = FALSE,
      variable.factor = TRUE,
      value.name = c(condition_inputs$cond1, condition_inputs$cond2),
      variable.name = "CELLTYPE"
    )
    # Map the factor index back to the actual cell-type name.
    dt[, CELLTYPE := ct_temp_keep[CELLTYPE]]
  } else {
    dt <- melt.data.table(
      data = dt,
      id.vars = "GENE",
      value.factor = FALSE,
      variable.factor = FALSE,
      value.name = name_tag ,
      variable.name = "CELLTYPE"
    )
  }
  # Genes absent from a group contribute 0, not NA.
  dt[is.na(dt)] <- 0
  # One output column per ligand/receptor subunit (and per condition).
  out_names <- c(
    sapply(
      1:max_nL,
      function(i) {
        paste0("L", i, "_", name_tag, pmin_id)
      }
    ),
    sapply(
      1:max_nR,
      function(i) {
        paste0("R", i, "_", name_tag, pmin_id)
      }
    )
  )
  # Join each subunit's value onto the template: ligands match the
  # emitter cell type, receptors the receiver cell type.
  full_dt[
    ,
    c(out_names) :=
      c(
        sapply(
          1:max_nL,
          function(i) {
            as.list(
              dt[.SD,
                 on = c(paste0(
                   "GENE==LIGAND_", i),
                   "CELLTYPE==EMITTER_CELLTYPE"
                 ),
                 mget(paste0("x.", merge_id))
              ]
            )
          }
        ),
        sapply(
          1:max_nR,
          function(i) {
            as.list(
              dt[.SD,
                 on = c(
                   paste0("GENE==RECEPTOR_", i),
                   "CELLTYPE==RECEIVER_CELLTYPE"
                 ),
                 mget(paste0("x.", merge_id))
              ]
            )
          }
        )
      )
  ]
  if (cci_or_drate == "cci") {
    # Score = combination of the weakest ligand subunit and the weakest
    # receptor subunit (pmin over subunits, NA = missing subunit slot).
    if (score_type == "geometric_mean") {
      full_dt[
        ,
        (score_id) :=
          lapply(
            1:n_id,
            function(x) {
              sqrt(
                do.call(
                  pmin,
                  c(
                    lapply(
                      1:max_nL,
                      function(i) {
                        get(
                          paste0(
                            "L",
                            i,
                            "_",
                            name_tag,
                            pmin_id[x]
                          )
                        )
                      }
                    ),
                    na.rm = TRUE
                  )
                )
                *
                  do.call(
                    pmin,
                    c(
                      lapply(
                        1:max_nR,
                        function(i) {
                          get(
                            paste0(
                              "R",
                              i,
                              "_",
                              name_tag,
                              pmin_id[x]
                            )
                          )
                        }
                      ),
                      na.rm = TRUE
                    )
                  )
              )
            }
          )
      ]
    }
    if (score_type == "arithmetic_mean") {
      full_dt[
        ,
        (score_id) :=
          lapply(
            1:n_id,
            function(x) {
              (
                do.call(
                  pmin,
                  c(
                    lapply(
                      1:max_nL,
                      function(i) {
                        get(
                          paste0(
                            "L",
                            i,
                            "_",
                            name_tag,
                            pmin_id[x]
                          )
                        )
                      }
                    ),
                    na.rm = TRUE
                  )
                )
                +
                  do.call(
                    pmin,
                    c(
                      lapply(
                        1:max_nR,
                        function(i) {
                          get(
                            paste0(
                              "R",
                              i,
                              "_",
                              name_tag,
                              pmin_id[x]
                            )
                          )
                        }
                      ),
                      na.rm = TRUE
                    )
                  )
              ) / 2
            }
          )
      ]
    }
  } else if (cci_or_drate == "drate") {
    # Detection flag: the weakest subunit detection rate on each side,
    # together with the side's cell count, must clear the thresholds.
    full_dt[
      ,
      (dr_id) :=
        lapply(
          1:n_id,
          function(x) {
            is_detected_full(
              x_dr = do.call(
                pmin,
                c(
                  lapply(
                    1:max_nL,
                    function(i) {
                      get(
                        paste0(
                          "L",
                          i,
                          "_",
                          name_tag,
                          pmin_id[x]
                        )
                      )
                    }
                  ),
                  na.rm = TRUE
                )
              ),
              x_ncells = get(
                paste0(
                  "NCELLS_EMITTER",
                  pmin_id[x]
                )
              ),
              y_dr = do.call(
                pmin,
                c(
                  lapply(
                    1:max_nR,
                    function(i) {
                      get(
                        paste0(
                          "R",
                          i,
                          "_",
                          name_tag,
                          pmin_id[x]
                        )
                      )
                    }
                  ),
                  na.rm = TRUE
                )
              ),
              y_ncells = get(
                paste0(
                  "NCELLS_RECEIVER",
                  pmin_id[x]
                )
              ),
              dr_thr = threshold_pct,
              threshold_min_cells = threshold_min_cells
            )
          }
        )
    ]
  }
  return(full_dt)
}
# Vectorized detection test for a ligand-receptor interaction.
#
# An interaction is "detected" when, on BOTH the emitter (x) and receiver
# (y) side, the detection rate reaches `dr_thr` AND the implied number of
# expressing cells (rate * cell count) reaches `threshold_min_cells`.
#
# @param x_dr,y_dr Detection rates on the emitter/receiver side.
# @param x_ncells,y_ncells Cell counts on the emitter/receiver side.
# @param dr_thr Minimum detection rate.
# @param threshold_min_cells Minimum number of expressing cells.
# @return Logical vector, one element per input combination.
is_detected_full <- Vectorize(
  function(
    x_dr,
    x_ncells,
    y_dr,
    y_ncells,
    dr_thr,
    threshold_min_cells
  ) {
    # Inside Vectorize() each argument is scalar, so use the
    # short-circuiting && (the original used the vector operator `&`
    # inside a scalar `if`, which also errored on NA input) and return
    # the condition directly instead of if/return(TRUE)/return(FALSE).
    x_dr >= dr_thr &&
      x_dr * x_ncells >= threshold_min_cells &&
      y_dr >= dr_thr &&
      y_dr * y_ncells >= threshold_min_cells
  }
)
# Jaccard and/or simple-matching similarity coefficients for binary data.
#
# Encodes each pair of binary values as 3*x - y so the four co-occurrence
# cases map to distinct codes (1/1 -> 2, 1/0 -> 3, 0/1 -> -1, 0/0 -> 0),
# then counts them per column in compiled code.
#
# @param x Binary matrix.
# @param y Optional second binary matrix; when NULL, the rows of `x` with
#   ina == 1 are compared against those with ina == 2.
# @param ina Group indicator, used only when y is NULL.
# @param type "jacc", "smc", or anything else for both coefficients.
# @return One- or two-column matrix of coefficients.
match.coefs <- function (x, y = NULL, ina, type = "jacc") {
  if (is.null(y)) {
    difa <- 3 * x[ina == 1, ] - x[ina == 2, ]
  } else difa <- 3 * x - y
  # NOTE(review): the helper's row order is assumed to be
  # (0/0, 0/1, 1/1, 1/0) from the assignments below — confirm against
  # Rfast's C source.
  f <- .Call(Rfast_odds_helper, difa)
  f10 <- f[4, ]
  f01 <- f[2, ]
  f11 <- f[3, ]
  if (type == "jacc") {
    # Jaccard: agreements on 1 over all pairs involving at least one 1.
    mt <- f11 / (f11 + f10 + f01)
    mt <- matrix(mt)
    colnames(mt) <- "jacc"
  } else if (type == "smc") {
    # Simple matching: all agreements over all pairs.
    f00 <- f[1, ]
    mt <- (f11 + f00) / colsums(f)
    mt <- matrix(mt)
    colnames(mt) <- "smc"
  } else {
    # Both coefficients side by side.
    f00 <- f[1, ]
    jmc <- f11 / (f11 + f10 + f01)
    smc <- (f11 + f00) / colsums(f)
    mt <- cbind(jmc, smc)
    colnames(mt) <- c("jacc", "smc")
  }
  mt
}
# Tests for stopwords_getlanguages(): the language codes reported for each
# source must match the names of the corresponding bundled data sets, and
# unknown sources must raise an error.
context("stopwords_getlanguages")
test_that("returns languages for source: Snowball", {
  expect_equal(names(stopwords::data_stopwords_snowball),
               stopwords_getlanguages("snowball"))
})
test_that("returns languages for source: Stopwords-ISO", {
  expect_equal(names(stopwords::data_stopwords_stopwordsiso),
               stopwords_getlanguages("stopwords-iso"))
})
test_that("returns languages for source: Misc", {
  expect_equal(names(stopwords::data_stopwords_misc),
               stopwords_getlanguages("misc"))
})
test_that("returns languages for source: Smart", {
  expect_equal(names(stopwords::data_stopwords_smart),
               stopwords_getlanguages("smart"))
})
test_that("wrong source throws error", {
  expect_error(stopwords_getlanguages("not_existing_test_source"))
})
# Shortest-path approximation of a Steiner tree.
#
# Takes the union of all pairwise shortest paths between terminals as the
# candidate vertex set; optionally prunes degree-1 non-terminal leaves
# from its minimum spanning tree.
#
# @param optimize If TRUE, prune degree-1 non-terminal leaves from the MST.
# @param terminals Terminal vertex identifiers.
# @param glist List whose first element is the igraph graph.
# @param color If TRUE, also return the input graph with Steiner vertices
#   colored green.
# @return List: optionally the colored input graph, then the Steiner tree.
asp_steiner <- function (optimize, terminals, glist, color) {
  g <- glist[[1]]
  # All shortest paths between every pair of terminals.
  paths <- lapply(terminals, function (x) get.all.shortest.paths(g, x, terminals)$res)
  nodes <- unique(names(unlist(paths)))
  if (optimize) {
    steinert <- minimum.spanning.tree(induced_subgraph(graph = g, vids = nodes))
    # Prune leaves that are not terminals: "yellow" appears to mark
    # non-terminal vertices (set elsewhere — confirm), and match(b, "1")
    # relies on numeric->character coercion to flag degree-1 vertices.
    a <- V(steinert)$color
    b <- degree(steinert, v = V(steinert), mode = c("all"))
    a1 <- match(a, "yellow")
    b1 <- match(b, "1")
    # opt is 1 exactly when both matches hit (non-terminal AND degree 1).
    opt <- sapply(1:length(a1), function (r) a1[r] * b1[r])
    new_g <- delete.vertices(steinert, grep(1, opt))
    steinert <- new_g
  } else
    steinert <- induced_subgraph(graph = g, vids = nodes)
  glst <- c()
  if (color) {
    V(g)[setdiff(x = nodes, y = terminals)]$color <- "green"
    glst[[length(glst) + 1]] <- g
  }
  glst[[length(glst) + 1]] <- steinert
  return(glst)
}
# Randomized local-search Steiner tree heuristic.
#
# Starts from the union of terminal-to-terminal shortest paths, then for
# `repeattimes` iterations randomly adds a nearby seed vertex's shortest
# paths or removes a random vertex, keeping any change that shrinks the
# spanning tree (removal also requires connectivity to be preserved).
#
# @param repeattimes Number of local-search iterations.
# @param optimize If TRUE, prune degree-1 non-terminal leaves from the MST.
# @param terminals Terminal vertex identifiers.
# @param glist List whose first element is the igraph graph.
# @param color If TRUE, also return the input graph with Steiner vertices
#   colored green.
# @return List: optionally the colored input graph, then the Steiner tree.
appr_steiner <- function (repeattimes, optimize, terminals, glist, color) {
  set <- c()
  g <- glist[[1]]
  paths <- lapply(terminals, function (x) get.all.shortest.paths(g, x, terminals)$res)
  r <- 1:length(paths)
  # Longest shortest-path length bounds the neighborhood to sample from.
  t1 <- lapply(r, function (r) length(paths[[r]]))
  distances <- lapply(r, function (r) lapply(1:t1[[r]], function(x, y) length(paths[[y]][[x]]), y = r))
  neighbour_distance <- max(unlist(distances))
  paths <- unique(names(unlist(paths)))
  set <- V(g)[paths]$name
  # Current solution size = edge count of the MST over terminals + set.
  size <- length(E(minimum.spanning.tree(induced_subgraph(g, union(terminals, set)))))
  i <- 1
  while (i <= repeattimes) {
    # Grow step: try adding a random non-terminal near the terminals.
    seed_list <- names(unlist(neighborhood(graph = g, order = neighbour_distance, nodes = terminals, mode = "all")))
    seed_list <- seed_list[!(seed_list %in% terminals)]
    seed <- sample(seed_list, 1)
    paths2 <- get.all.shortest.paths(g, seed, terminals)
    paths2 <- paths2$res
    seedpaths <- unique(names(unlist(paths2)))
    set2 <- union(set, V(g)[seedpaths]$name)
    size2 <- length(E(minimum.spanning.tree(induced_subgraph(g, union(terminals, set2)))))
    if (size2 < size) {
      size <- size2
      set <- set2
    }
    # Shrink step: try removing a random vertex from the solution,
    # accepted only if smaller AND still connected.
    seed <- sample(set, 1, prob = NULL)
    set2 <- V(g)[setdiff(set, seed)]$name
    size2 <- length(E(minimum.spanning.tree(induced_subgraph(g, union(terminals, set2)))))
    if (size2 < size && is.connected(minimum.spanning.tree(induced_subgraph(g, union(terminals, set2))))) {
      size <- size2
      set <- set2
    }
    i <- i + 1
  }
  if (optimize) {
    steinert <- minimum.spanning.tree(induced_subgraph(g, union(terminals, set)))
    # Prune degree-1 non-terminal ("yellow") leaves; relies on
    # numeric->character coercion in match(b, "1").
    a <- V(steinert)$color
    b <- degree(steinert, v = V(steinert), mode = c("all"))
    a1 <- match(a, "yellow")
    b1 <- match(b, "1")
    opt <- sapply(1:length(a1), function(r) a1[r] * b1[r])
    new_g <- delete.vertices(steinert, grep(1, opt))
    steinert <- new_g
  } else
    steinert <- induced_subgraph(g, union(terminals, set))
  glst <- c()
  if (color) {
    V(g)[setdiff(set, terminals)]$color <- "green"
    glst[[length(glst) + 1]] <- g
  }
  glst[[length(glst) + 1]] <- steinert
  return(glst)
}
# Shortest-path heuristic (SPH) Steiner tree.
#
# Grows a subtree from one random terminal, repeatedly attaching the
# nearest not-yet-covered terminal via its shortest path, until every
# terminal is in the subtree.
#
# @param optimize If TRUE, prune degree-1 non-terminal leaves from the MST.
# @param terminals Terminal vertex identifiers.
# @param glist List whose first element is the igraph graph.
# @param color If TRUE, also return the input graph with the subtree
#   colored green and terminals red.
# @return List: optionally the colored input graph, then the Steiner tree.
steinertree2 <- function (optimize, terminals, glist, color) {
  g <- glist[[1]]
  # Random starting terminal.
  prob <- sample(1:length(terminals), 1)
  subtree <- terminals[[prob]]
  nsubtree <- setdiff(terminals, subtree)
  while ( !all(is.element(terminals, intersect(subtree, terminals))) ) {
    # Shortest paths from every subtree vertex to the remaining terminals;
    # the result shape (list vs matrix) depends on igraph's simplification.
    paths <- lapply(subtree, function (x) get.all.shortest.paths(g, x, nsubtree))
    r <- 1:length(paths)
    t <- sapply(r, function (r) sapply(paths[[r]]$res, length))
    if ("list" %in% class(t) || "integer" %in% class(t)) {
      r <- 1:length(t)
      t2 <- sapply(r, function (r) min(t[[r]]))
    }
    if ("matrix" %in% class(t)) {
      r <- 1:dim(t)[2]
      t2 <- sapply(r, function (r) min(t[, r]))
    }
    # Source vertex (and then path) with the overall shortest connection.
    t3 <- which(t2 == min(t2))
    if (length(paths) > 1) {
      if ("list" %in% class(t) || "integer" %in% class(t))
        t4 <- which(t[[t3[1]]] == min(t[[t3[1]]]))
      if ("matrix" %in% class(t))
        t4 <- which( t[ , t3[1]] == min(t[ , t3[1]]) )
      found <- names(unlist(paths[[t3[1]]][t4][1]$res))
    } else {
      found <- names(unlist(paths[[1]][t3][1]$res))
    }
    # Absorb the chosen path into the subtree.
    subtree <- union(subtree, V(g)[unique(found)]$name)
    nsubtree <- setdiff(nsubtree, V(g)[unique(found)]$name)
  }
  if (optimize) {
    steinert <- minimum.spanning.tree(induced_subgraph(g, subtree))
    # Prune degree-1 non-terminal ("yellow") leaves; relies on
    # numeric->character coercion in match(b, "1").
    a <- V(steinert)$color
    b <- degree(steinert, v = V(steinert), mode = c("all"))
    a1 <- match(a, "yellow")
    b1 <- match(b, "1")
    opt <- sapply(1:length(a1), function (r) a1[r] * b1[r])
    new_g <- delete.vertices(steinert, grep(1, opt))
    steinert <- new_g
  } else
    steinert <- induced_subgraph(g, subtree)
  glst <- c()
  if (color) {
    V(g)[subtree]$color <- "green"
    V(g)[terminals]$color <- "red"
    glst[[length(glst) + 1]] <- g
  }
  glst[[length(glst) + 1]] <- steinert
  return(glst)
}
# Kruskal-style Steiner tree heuristic: merging subtrees.
#
# Starts with one subtree per terminal; in each round connects the pair of
# subtrees with the shortest connecting path, merges overlapping subtrees,
# and repeats until a single subtree remains.
#
# @param optimize If TRUE, prune degree-1 non-terminal leaves from the MST.
# @param terminals Terminal vertex identifiers.
# @param glist List whose first element is the igraph graph.
# @param color If TRUE, also return the input graph with the subtree
#   colored green and terminals red.
# @return List: optionally the colored input graph, then the Steiner tree.
steinertree3 <- function (optimize, terminals, glist, color) {
  # Extend subtree x with the chosen connecting path, if x was selected
  # this round (t3/found are read from the enclosing environment).
  makesubtrees <- function (x) {
    if ( !is.na(any(match(t3, x))) )
      return(union(subtrees[[x]],
                   names(found[[grep(1, match(t3, x))]][[1]])))
    else return(subtrees[[x]])
  }
  subtreenum <- c()
  x <- c()
  g <- glist[[1]]
  r <- 1:length(terminals)
  # One single-vertex subtree per terminal.
  subtrees <- lapply(r, function (r) terminals[[r]])
  terminals <- subtrees
  nsubtrees <- lapply(r, function (r) setdiff(terminals, subtrees[r]))
  while (length(subtrees) > 1) {
    r <- 1:length(subtrees)
    # Shortest paths from each subtree to all other subtrees' vertices.
    paths <- lapply(r, function (r) lapply(subtrees[[r]],
                                           function (x, y) get.all.shortest.paths(g, x, y)$res,
                                           y = unlist(nsubtrees[[r]])))
    r <- 1:length(paths)
    t <- sapply(r, function (r) sapply(paths[[r]][[1]], length))
    # Result shape (list vs matrix) depends on igraph's simplification.
    if ("list" %in% class(t) | "integer" %in% class(t)) {
      r <- 1:length(t)
      t2 <- sapply(r, function (x) min(t[[x]]))
    }
    if ("matrix" %in% class(t)) {
      r <- 1:dim(t)[2]
      t2 <- sapply(r, function (r) min(t[, r]))
    }
    # Subtrees achieving the overall shortest connection this round.
    t3 <- which(t2 == min(t2))
    t3len <- 1:length(t3)
    if (length(paths) > 1) {
      if ("list" %in% class(t) || "integer" %in% class(t))
        t4 <- lapply(t3len, function (x) which(t[[t3[x]]] == min(t[[t3[x]]])))
      if ("matrix" %in% class(t))
        t4 <- lapply(t3len, function (x) which((t[ , t3[x]]) == min(t[ , t3[x]])))
      found <- lapply( t3len, function (x) paths[t3[x]][[1]][[1]][t4[[x]][1]] )
    } else {
      # NOTE(review): this branch looks like leftover debugging — `x` is
      # empty here and the intersect() result is discarded; confirm it is
      # unreachable when length(subtrees) > 1.
      intersect(subtrees[[x]], V(g)[unlist(terminals)])
      print("Error")
    }
    subtrees <- lapply(1:length(subtrees), function (x) makesubtrees(x))
    # Merge any subtrees that now share vertices.
    i <- 1
    j <- 2
    while (i <= (length(subtrees) - 1)) {
      j <- i + 1
      while (j <= length(subtrees)) {
        if (length(intersect(subtrees[[i]], subtrees[[j]])) > 0) {
          subtrees[[i]] <- union(subtrees[[i]], subtrees[[j]])
          subtrees <- subtrees[-j]
          j <- j - 1
        }
        j <- j + 1
      }
      i <- i + 1
    }
    nsubtrees <- lapply(1:length(subtrees), function (x) setdiff(terminals, subtrees[[x]]))
  }
  if (optimize) {
    steinert <- minimum.spanning.tree(induced_subgraph(g, subtrees[[1]]))
    # Prune degree-1 non-terminal ("yellow") leaves; relies on
    # numeric->character coercion in match(b, "1").
    a <- V(steinert)$color
    b <- degree(steinert, v = V(steinert), mode = c("all"))
    a1 <- match(a, "yellow")
    b1 <- match(b, "1")
    opt <- sapply(1:length(a1), function (r) a1[r] * b1[r] )
    new_g <- delete.vertices(steinert, grep(1, opt))
    steinert <- new_g
  } else
    steinert <- induced_subgraph(g, subtrees[[1]])
  glst <- c()
  if (color) {
    V(g)[subtrees[[1]]]$color <- "green"
    V(g)[unlist(terminals)]$color <- "red"
    glst[[length(glst) + 1]] <- g
  }
  glst[[length(glst) + 1]] <- steinert
  return(glst)
}
# Breadth-first enumeration Steiner tree heuristic with multiple solutions.
#
# Explores partial vertex sets in a work queue, each extended by shortest
# paths toward uncovered terminals; complete sets covering all terminals
# are collected (deduplicated), and all minimum-size ones are returned.
#
# @param optimize If TRUE, prune degree-1 non-terminal leaves from each MST.
# @param terminals Terminal vertex identifiers.
# @param glist List whose first element is the igraph graph.
# @param color If TRUE, also return the input graph with solution vertices
#   colored green and terminals red.
# @return List of Steiner trees (optionally preceded by the colored graph).
steinertree8 <- function (optimize, terminals, glist, color) {
  g <- glist[[1]]
  queue <- c()
  results_queue <- c()
  edgeslist <- c()
  # Seed the queue with shortest paths out of one random terminal.
  prob <- sample(1:length(terminals), 1)
  subtree <- terminals[[prob]]
  nsubtree <- setdiff(terminals, subtree)
  startpoint <- subtree
  paths <- get.all.shortest.paths(g, subtree, nsubtree)
  paths <- paths$res
  t <- sapply(paths, length)
  t2 <- which(t == min(t))
  for (i in 1:length(t2))
    queue[[length(queue) + 1]] <- names(unlist(paths[t2[i]]))
  index <- length(t2)
  while (index > 0) {
    # Pop the front of the queue.
    edgeslist <- queue[1]
    queue[1] <- NULL
    index <- index - 1
    if (length(intersect(unlist(terminals), unlist(edgeslist))) == length(terminals)) {
      # Candidate covers all terminals: keep it unless an already-stored
      # solution contains the same vertex set.
      graph_is_new <- TRUE
      if (length(results_queue) == 0)
        results_queue[length(results_queue) + 1] <- edgeslist
      for (count_path in 1:length(results_queue)) {
        t1 <- unlist(edgeslist[[1]])
        t2 <- unlist(results_queue[[count_path]])
        if (length(union(t1, t2)) == length(t1))
          if (all(union(t1, t2) %in% t2))
            graph_is_new <- FALSE
      }
      if (graph_is_new == TRUE)
        results_queue[length(results_queue) + 1] <- edgeslist
    } else {
      # Extend toward the remaining terminals via shortest paths from the
      # most recently covered terminal.
      subtree <- intersect(unlist(terminals), unlist(edgeslist))
      nsubtree <- setdiff(terminals, subtree)
      paths <- get.all.shortest.paths(g, subtree[length(subtree)], nsubtree)
      paths <- paths$res
      t <- sapply(paths, length)
      t2 <- which(t == min(t))
      for (i in 1:length(t2))
        queue[[index + i]] <- union(unlist(edgeslist), names(unlist(paths[t2[i]])))
      index <- index + length(t2)
    }
  }
  # Keep only the minimum-size vertex sets.
  paths <- results_queue
  t <- sapply(paths, length)
  t2 <- which(t == min(t))
  queue <- paths[t2]
  steinert_list <- c()
  glst <- c()
  for (i in 1:length(t2)) {
    steinert = minimum.spanning.tree(induced_subgraph(g, queue[[i]]))
    if (optimize) {
      # Prune degree-1 non-terminal ("yellow") leaves; relies on
      # numeric->character coercion in match(b, "1").
      a <- V(steinert)$color
      b <- degree(steinert, v = V(steinert), mode = c("all"))
      a1 <- match(a, "yellow")
      b1 <- match(b, "1")
      opt <- sapply(1:length(a1), function (r) a1[r] * b1[r])
      new_g <- delete.vertices(steinert, grep(1, opt))
      steinert <- new_g
    }
    if (color)
      V(g)[queue[[i]]]$color <- "green"
    steinert_list[[length(steinert_list) + 1]] <- steinert
  }
  if (color) {
    V(g)[terminals]$color <- "red"
    glst[[length(glst) + 1]] <- g
    glst[[length(glst) + 1]] <- steinert_list
  } else
    glst <- steinert_list
  return (glst)
}
# Exact Steiner tree by exhaustive enumeration (exponential!).
#
# For decreasing subset sizes, enumerates every vertex subset containing
# the graph's vertices, builds the MST of each induced subgraph, and stops
# at the first size where some MST is connected and spans all terminals;
# all minimum-edge-count solutions at that size are returned.
#
# @param terminals Terminal vertex identifiers.
# @param glist List whose first element is the igraph graph.
# @param color If TRUE, also return the input graph with solution vertices
#   colored green and terminals red.
# @return List of optimal trees (optionally preceded by the colored graph).
steinerexact <- function (terminals, glist, color) {
  # One enumeration round for subsets of size length(V(g)) - lim.
  # Communicates with the driver loop through environment `en`
  # (runloop / sol_place / allmst / edgmst).
  rwhile <- function (lim) {
    if (get("runloop", envir = en)) {
      r <- length(V(g)) - lim
      allcom <- combn(t[1:length(V(g))], r)
      allmst <- lapply(1:dim(allcom)[2],
                       function (x) minimum.spanning.tree(induced_subgraph(g, allcom[ , x])))
      assign("allmst", allmst, envir = en)
      edgmst <- lapply(1:dim(allcom)[2],
                       function (x) get.edgelist(allmst[[x]], names = TRUE))
      assign("edgmst", edgmst, envir = en)
      # A subset is a solution when its MST is connected and spans
      # every terminal.
      connectedlist <- lapply(1:dim(allcom)[2], function (x) is.connected(allmst[[x]]))
      withterminals <- lapply(1:dim(allcom)[2], function (x) all(is.element(terminals, V(allmst[[x]])$name)))
      smst <- lapply(1:dim(allcom)[2], function (x) connectedlist[[x]] && withterminals[[x]])
      assign("runloop", !is.element(TRUE, unlist(smst)), envir = en)
      assign("sol_place", get("sol_place", envir = en) + 1, envir = en)
    }
    # NOTE(review): when runloop is already FALSE, `smst` here is the
    # closure's stale value — only sol[[sol_place]] is read below, which
    # corresponds to the last active round.
    return(smst)
  }
  g <- glist[[1]]
  t <- V(g)$name
  lim <- length(V(g)) - length(terminals)
  en <- new.env(hash = TRUE, parent = emptyenv(), size = NA)
  assign("runloop", TRUE, envir = en)
  assign("sol_place", 0, envir = en)
  smst <- c()
  # Try subsets from smallest (all terminals only) upward in size.
  res <- lim:1
  sol <- sapply(res, function (x) rwhile(x))
  sol_place <- get("sol_place", envir = en)
  allmst <- get("allmst", envir = en)
  edgmst <- get("edgmst", envir = en)
  # Among the solutions of the successful round, keep those with the
  # fewest edges (edgelist has 2 columns, hence the /2).
  iter <- length(sol[[sol_place]])
  size <- lapply(1:iter, function (x) length(edgmst[[x]]) / 2)
  midresult <- lapply(1:iter, function (x) size[[x]] * as.integer(sol[[sol_place]][[x]]))
  min_len <- min(unlist(midresult)[unlist(midresult) > 0])
  poslist <- which(unlist(midresult) == min_len)
  stgraphlist <- allmst[poslist]
  stgraphlist2 <- c()
  if (color) {
    green_guys <- lapply(stgraphlist, function (x) V(x)$name)
    green_guys <- unique(unlist(green_guys))
    V(g)[green_guys]$color <- "green"
    V(g)[terminals]$color <- "red"
    stgraphlist2[[length(stgraphlist2) + 1]] <- g
    stgraphlist2[[length(stgraphlist2) + 1]] <- stgraphlist
    stgraphlist <- stgraphlist2
  }
  return(stgraphlist)
}
merge_steiner <- function (treelist) {
  # Collapse a list of Steiner tree solutions into a single merged one.
  # Several solutions are combined by pairwise union; a lone solution is
  # returned unchanged (with a console notice).
  merged <- treelist[[1]]
  if (length(treelist) <= 1) {
    print("Nothing to merge. Only one solution was found")
  } else {
    for (k in 2:length(treelist)) {
      merged <- union(merged, treelist[[k]])
    }
  }
  # Keep the list-wrapped return shape expected by callers.
  glist <- c()
  glist[[1]] <- merged
  return(glist)
}
check_input <- function (type, terminals, glist) {
  # Validate the inputs of steinertree() and normalize them:
  # - the graph is made undirected,
  # - vertices get sequential character names (originals kept in `realname`),
  # - terminals are converted to those internal names,
  # - vertices are colored yellow, terminals red.
  # Returns list(graph, terminals, attr_flag) where attr_flag records
  # whether original vertex names must be restored later.
  g <- glist[[1]]
  # Check for NULL before touching the graph (previously as.undirected(g)
  # ran first and would error with a less helpful message).
  if (is.null(g))
    stop("Error: The graph object is Null.")
  g <- as.undirected(g)
  # Scalar conditions: use short-circuiting || instead of elementwise |.
  if (is.null(terminals) || any(is.na(terminals)) || (length(terminals) == 0))
    stop("Error: Terminals not found")
  if (length(V(g)) == 0)
    stop("Error: The graph doesn't contain vertices.")
  if (is.null(V(g)$name)) {
    V(g)$name <- as.character(1:length(V(g)))
    attr_flag <- FALSE
  } else {
    V(g)$realname <- V(g)$name
    V(g)$name <- as.character(1:length(V(g)))
    attr_flag <- TRUE
  }
  # Use is.character()/is.numeric() rather than comparing class() strings;
  # is.numeric() covers both "numeric" and "integer".
  if (is.character(terminals)) {
    if (sum(terminals %in% V(g)$realname) != length(terminals)) {
      stop("Error: vertices names do not contain terminal names")
    } else {
      terminals <- V(g)$name[match(terminals, V(g)$realname)]
    }
  } else if (is.numeric(terminals)) {
    terminals <- V(g)$name[terminals]
  } else {
    # Previously only print()ed and fell through with unusable terminals.
    stop("Error: invalid type of terminals")
  }
  V(g)$color <- "yellow"
  V(g)[terminals]$color <- "red"
  # "ASP" was accepted here but missing from the old error message.
  if (!(type %in% c("SPM", "EXA", "SP", "RSP", "KB", "ASP")))
    stop("Error: the input type is not correct. Choose one from SPM, EXA, SP, RSP, KB or ASP.")
  varlist <- c()
  varlist[[1]] <- g
  varlist[[2]] <- terminals
  varlist[[3]] <- attr_flag
  return(varlist)
}
restore_name_attribute <- function (attr_flag, type, result, color) {
  # Undo the vertex renaming performed by check_input(): copy each graph's
  # `realname` attribute back into `name` and drop `realname`.
  # A FALSE attr_flag means the input graph had no names to restore.
  if (!attr_flag) {
    return(result)
  }
  restore <- function (gr) {
    # Restore original names on a single igraph object.
    V(gr)$name <- V(gr)$realname
    delete_vertex_attr(gr, "realname")
  }
  if (color) {
    # First element is the recolored full graph.
    result[[1]] <- restore(result[[1]])
  }
  last <- length(result)
  if (type == "EXA" || type == "SPM") {
    # These types return a list of solution trees in the last slot.
    for (k in seq_along(result[[last]])) {
      result[[last]][[k]] <- restore(result[[last]][[k]])
    }
  } else {
    result[[last]] <- restore(result[[last]])
  }
  return(result)
}
steinertree <- function (type, repeattimes = 70, optimize = TRUE, terminals, graph, color = TRUE, merge = FALSE) {
  # Front end for the Steiner tree algorithms.
  #
  # type:        one of "SP", "KB", "RSP", "EXA", "SPM", "ASP"
  #              (validated by check_input()).
  # repeattimes: iterations for the "RSP" heuristic.
  # optimize:    prune degree-1 non-terminal vertices where supported.
  # terminals:   terminal vertices (names or indices).
  # graph:       igraph object.
  # color:       also return the input graph with solution vertices colored.
  # merge:       for "EXA"/"SPM", merge multiple solutions into one.
  glist <- c()
  glist[[1]] <- graph
  varlist <- check_input(type = type, terminals = terminals, glist = glist)
  glist[[1]] <- varlist[[1]]
  terminals <- varlist[[2]]
  attr_flag <- varlist[[3]]
  # Single switch dispatch instead of a chain of independent ifs.
  result <- switch(type,
    "SP"  = steinertree2(optimize = optimize, terminals = terminals, glist = glist, color = color),
    "KB"  = steinertree3(optimize = optimize, terminals = terminals, glist = glist, color = color),
    "RSP" = appr_steiner(repeattimes = repeattimes, optimize = optimize, terminals = terminals,
                         glist = glist, color = color),
    "EXA" = steinerexact(terminals = terminals, glist = glist, color = color),
    "SPM" = steinertree8(optimize = optimize, terminals = terminals, glist = glist, color = color),
    "ASP" = asp_steiner(optimize = optimize, terminals = terminals, glist = glist, color = color)
  )
  result <- restore_name_attribute(attr_flag, type, result, color)
  # Scalar conditions: && / || instead of elementwise & / |.
  if (merge && (type == "EXA" || type == "SPM")) {
    if (color) {
      result[[2]] <- merge_steiner(treelist = result[[2]])
    } else {
      result <- merge_steiner(treelist = result)
    }
  }
  return(result)
}
dbicop <- function(u, family, rotation, parameters, var_types = c("c", "c")) {
  # Density of a bivariate copula evaluated at u.
  dist <- args2bicop(family, rotation, parameters, var_types)
  u_mat <- if_vec_to_matrix(u)
  bicop_pdf_cpp(u_mat, dist)
}
pbicop <- function(u, family, rotation, parameters, var_types = c("c", "c")) {
  # Distribution function (CDF) of a bivariate copula evaluated at u.
  dist <- args2bicop(family, rotation, parameters, var_types)
  u_mat <- if_vec_to_matrix(u)
  bicop_cdf_cpp(u_mat, dist)
}
rbicop <- function(n, family, rotation, parameters, qrng = FALSE) {
  # Simulate n observations from a bivariate copula.
  # Mirrors stats::rnorm: a vector `n` stands for its length.
  if (length(n) > 1) {
    n <- length(n)
  }
  # Convenience overload rbicop(n, bicop_dist_object, qrng): when `family`
  # already is a bicop_dist, the next positional argument is `qrng`.
  # Scalar condition: short-circuiting && instead of elementwise &.
  if (inherits(family, "bicop_dist") && !missing(rotation)) {
    qrng <- rotation
  }
  assert_that(is.flag(qrng))
  bicop <- args2bicop(family, rotation, parameters)
  U <- bicop_sim_cpp(bicop, n, qrng, get_seeds())
  if (!is.null(bicop$names)) {
    colnames(U) <- bicop$names
  }
  U
}
hbicop <- function(u, cond_var, family, rotation, parameters, inverse = FALSE,
                   var_types = c("c", "c")) {
  # H-function (conditional CDF) of a bivariate copula, or its inverse.
  # cond_var selects which margin is conditioned on (1 or 2).
  assert_that(in_set(cond_var, 1:2), is.flag(inverse))
  bicop <- args2bicop(family, rotation, parameters, var_types)
  dat <- if_vec_to_matrix(u)
  # Pick the matching backend routine instead of nesting four returns.
  eval_fun <- if (inverse) {
    if (cond_var == 1) bicop_hinv1_cpp else bicop_hinv2_cpp
  } else {
    if (cond_var == 1) bicop_hfunc1_cpp else bicop_hfunc2_cpp
  }
  eval_fun(dat, bicop)
}
par_to_ktau <- function(family, rotation, parameters) {
  # Kendall's tau implied by the given copula specification.
  spec <- args2bicop(family, rotation, parameters)
  bicop_par_to_tau_cpp(spec)
}
ktau_to_par <- function(family, tau) {
  # Copula parameter corresponding to a given Kendall's tau.
  #
  # family: copula family (name or bicop_dist).
  # tau:    a single non-NA Kendall's tau value.
  assert_that(is.number(tau), !is.na(tau))
  bicop <- args2bicop(family)
  # Rotatable families reach positive dependence at 0 degrees and negative
  # dependence at 90 degrees; pick the rotation matching the sign of tau.
  # Scalar condition: plain if/else instead of ifelse() (which is meant for
  # vectors and evaluates both branches' vectors).
  if (!(bicop$family %in% family_set_rotationless)) {
    bicop$rotation <- if (tau > 0) 0 else 90
  }
  bicop_tau_to_par_cpp(bicop, tau)
}
predict.bicop_dist <- function(object, newdata, what = "pdf", ...) {
  # Evaluate a bivariate copula object on new data.
  # `what` selects the quantity: pdf, cdf, h-functions or their inverses.
  assert_that(in_set(what, what_allowed))
  u <- if_vec_to_matrix(newdata)
  # Resolve the backend routine once, then call it.
  eval_fun <- switch(what,
    "pdf" = bicop_pdf_cpp,
    "cdf" = bicop_cdf_cpp,
    "hfunc1" = bicop_hfunc1_cpp,
    "hfunc2" = bicop_hfunc2_cpp,
    "hinv1" = bicop_hinv1_cpp,
    "hinv2" = bicop_hinv2_cpp
  )
  eval_fun(u, object)
}
# Quantities that predict()/fitted() can compute for a bivariate copula.
what_allowed <- c("pdf", "cdf", "hfunc1", "hfunc2", "hinv1", "hinv2")
fitted.bicop <- function(object, what = "pdf", ...) {
  # Evaluate a fitted copula on its stored training data.
  # Requires the model to have been fit with keep_data = TRUE.
  if (is.null(object$data)) {
    stop("data have not been stored, use keep_data = TRUE when fitting.")
  }
  assert_that(in_set(what, what_allowed))
  # Resolve the backend routine once, then call it on the stored data.
  eval_fun <- switch(what,
    "pdf" = bicop_pdf_cpp,
    "cdf" = bicop_cdf_cpp,
    "hfunc1" = bicop_hfunc1_cpp,
    "hfunc2" = bicop_hfunc2_cpp,
    "hinv1" = bicop_hinv1_cpp,
    "hinv2" = bicop_hinv2_cpp
  )
  eval_fun(object$data, object)
}
logLik.bicop <- function(object, ...) {
  # Log-likelihood of the fit, with the parameter count attached as "df"
  # so that AIC()/BIC() work on the result.
  ll <- object$loglik
  attr(ll, "df") <- object$npars
  ll
}
print.bicop_dist <- function(x, ...) {
  # Print a one-line summary of a bivariate copula distribution.
  # Returns the (unmodified) input invisibly.
  x0 <- x
  if (x$family %in% setdiff(family_set_nonparametric, "indep")) {
    # Nonparametric families have no parameter vector; report the
    # effective degrees of freedom instead.
    x$parameters <- paste0(round(x$npars, 2), sep = " d.f.")
  }
  # Scalar condition: if/else instead of ifelse(), which targets vectors
  # and strips attributes. Mirrors the pars_formatted idiom of print.bicop.
  pars_formatted <- if (length(x$parameters) > 1) {
    paste(round(x$parameters, 2), collapse = ", ")
  } else {
    x$parameters
  }
  cat("Bivariate copula ('bicop_dist'): ",
    "family = ", x$family,
    ", rotation = ", x$rotation,
    ", parameters = ", pars_formatted,
    ", var_types = ", paste(x$var_types, collapse = ","),
    sep = ""
  )
  cat("\n")
  invisible(x0)
}
summary.bicop_dist <- function(object, ...) {
  # Summarizing a bicop_dist is identical to printing it.
  print.bicop_dist(object, ...)
}
print.bicop <- function(x, ...) {
  # Print a one-line summary of a fitted bivariate copula.
  # Returns the (unmodified) input invisibly.
  input <- x
  nonpar_families <- setdiff(family_set_nonparametric, "indep")
  if (x$family %in% nonpar_families) {
    # Nonparametric fits report effective degrees of freedom instead
    # of a parameter vector.
    pars_formatted <- paste0(round(x$npars, 2), sep = " d.f.")
  } else {
    pars_formatted <- paste(round(x$parameters, 2), collapse = ", ")
  }
  line <- paste0(
    "Bivariate copula fit ('bicop'): ",
    "family = ", x$family,
    ", rotation = ", x$rotation,
    ", parameters = ", pars_formatted,
    ", var_types = ", paste(x$var_types, collapse = ","),
    "\n"
  )
  cat(line)
  invisible(input)
}
summary.bicop <- function(object, ...) {
  # Print the fit summary followed by sample size, log-likelihood and
  # information criteria; attaches the stats as attr "info" on the
  # invisibly returned object.
  print.bicop(object, ...)
  cat("nobs =", object$nobs, " ")
  info <- bicop_fit_info(object)
  for (stat in c("logLik", "npars", "AIC", "BIC")) {
    cat(stat, "=", round(info[[stat]], 2), " ")
  }
  attr(object, "info") <- info
  cat("\n")
  invisible(object)
}
coef.bicop_dist <- function(object, ...) {
  # Extract the copula parameter vector.
  object[["parameters"]]
}
bicop_fit_info <- function(bc) {
  # Collect sample size, log-likelihood, parameter count and information
  # criteria of a fitted bicop into a named list.
  ll <- logLik(bc)
  loglik <- ll[1]
  df <- attr(ll, "df")
  list(
    nobs = bc$nobs,
    logLik = loglik,
    npars = df,
    AIC = 2 * df - 2 * loglik,
    BIC = log(bc$nobs) * df - 2 * loglik
  )
}
dm_examine_constraints <- function(dm, progress = NA) {
  # Check all primary and foreign key constraints of a dm object and
  # return the results with the pretty-printing class attached.
  check_not_zoomed(dm)
  res <- dm_examine_constraints_impl(
    dm,
    progress = progress,
    top_level_fun = "dm_examine_constraints"
  )
  res <- rename(res, columns = column)
  res <- mutate(res, columns = new_keys(columns))
  new_dm_examine_constraints(res)
}
dm_examine_constraints_impl <- function(dm, progress = NA, top_level_fun = NULL) {
  # Run the PK and FK checks and combine them into one tibble, ordered so
  # that violated constraints and PKs come first.
  combined <- bind_rows(
    check_pk_constraints(dm, progress, top_level_fun = top_level_fun),
    check_fk_constraints(dm, progress, top_level_fun = top_level_fun)
  )
  arrange(combined, is_key, desc(kind), table)
}
new_dm_examine_constraints <- function(x) {
  # Prepend the S3 class used for pretty-printing constraint results.
  structure(x, class = c("dm_examine_constraints", class(x)))
}
print.dm_examine_constraints <- function(x, ...) {
  # Pretty-print constraint check results:
  # - no constraints defined -> info message,
  # - all satisfied          -> info message,
  # - otherwise              -> one red bullet per violated constraint.
  # Returns the input invisibly.
  key_df <-
    x %>%
    as_tibble()
  problem_df <-
    key_df %>%
    filter(problem != "")
  if (nrow(key_df) == 0) {
    cli::cli_alert_info("No constraints defined.")
  } else if (nrow(problem_df) == 0) {
    cli::cli_alert_info("All constraints satisfied.")
  } else {
    cli::cli_alert_warning("Unsatisfied constraints:")
    problem_df %>%
      mutate(
        # Only FK problems reference a second (parent) table.
        into = if_else(kind == "FK", paste0(" into table ", tick(ref_table)), "")
      ) %>%
      mutate(text = paste0(
        "Table ", tick(table), ": ",
        kind_to_long(kind), " ", format(map(problem_df$columns, tick)),
        into,
        ": ", problem
      )) %>%
      pull(text) %>%
      cli::cat_bullet(bullet_col = "red")
  }
  invisible(x)
}
kind_to_long <- function(kind) {
  # Expand the "PK"/"FK" codes to human-readable names (vectorized).
  if_else(kind == "PK", "primary key", "foreign key")
}
check_pk_constraints <- function(dm, progress = NA, top_level_fun = NULL) {
  # Check every primary key of the dm for uniqueness/completeness.
  # Returns a tibble with one row per PK: table, kind ("PK"), column,
  # ref_table (NA for PKs), is_key flag and a problem description.
  pks <- dm_get_all_pks_impl(dm)
  if (nrow(pks) == 0) {
    # No PKs defined: return an empty, correctly-typed result frame.
    return(tibble(
      table = character(),
      kind = character(),
      column = new_keys(),
      ref_table = character(),
      is_key = logical(),
      problem = character()
    ))
  }
  table_names <- pks$table
  columns <- pks$pk_col
  # Ticker wraps the per-table check with optional progress reporting.
  ticker <- new_ticker(
    "checking pk constraints",
    n = length(table_names),
    progress = progress,
    top_level_fun = top_level_fun
  )
  # For each table, test whether its declared PK columns are a valid key.
  candidates <- map2(set_names(table_names), columns, ticker(~ {
    tbl <- tbl_impl(dm, .x)
    enum_pk_candidates_impl(tbl, list(.y))
  }))
  tbl_is_pk <-
    tibble(table = table_names, candidate = candidates) %>%
    unnest_df("candidate", tibble(column = new_keys(), candidate = logical(), why = character())) %>%
    rename(is_key = candidate, problem = why)
  tibble(
    table = table_names,
    kind = "PK",
    column = pks$pk_col,
    ref_table = NA_character_
  ) %>%
    left_join(tbl_is_pk, by = c("table", "column"))
}
check_fk_constraints <- function(dm, progress = NA, top_level_fun = NULL) {
  # Check every foreign key of the dm for referential integrity.
  # Returns a tibble with one row per FK: table (child), kind ("FK"),
  # column, ref_table (parent), is_key flag and a problem description.
  #
  # BUG FIX: the default was `top_level_fun = top_level_fun`, a
  # self-referential promise that errors ("promise already under
  # evaluation") whenever the argument is not supplied; NULL matches
  # the sibling check_pk_constraints().
  fks <- dm_get_all_fks_impl(dm)
  pts <- map(fks$parent_table, tbl_impl, dm = dm)
  cts <- map(fks$child_table, tbl_impl, dm = dm)
  # One row per FK with child/parent tables and the involved columns.
  fks_tibble <-
    mutate(fks, t1 = cts, t2 = pts) %>%
    select(t1, t1_name = child_table, colname = child_fk_cols, t2, t2_name = parent_table, pk = parent_key_cols)
  ticker <- new_ticker(
    "checking fk constraints",
    n = nrow(fks_tibble),
    progress = progress,
    top_level_fun = top_level_fun
  )
  fks_tibble %>%
    mutate(
      problem = pmap_chr(fks_tibble, ticker(check_fk)),
      is_key = (problem == ""),
      kind = "FK"
    ) %>%
    select(table = t1_name, kind, column = colname, ref_table = t2_name, is_key, problem)
}
# Shared fixtures for the multivariance test suite.
set.seed(123412)
N = 50
n = 3
x = matrix(rnorm(N*n),nrow = N)
y = matrix(rnorm(N*4),nrow = N)
vec = c(1,1,2,3)
context("centered distance matrices")
test_that("cdms",{
  # Constant columns must trigger a warning.
  expect_warning(cdm(matrix(1,nrow= N,ncol = n)),"constant")
  # Default psi is the Euclidean distance.
  expect_equal(cdm(x),cdm(x,psi = function(x,y) sqrt(sum((x-y)^2))))
  # fastdist agrees with stats::dist for vectors and matrices.
  expect_equivalent(fastdist(as.matrix(x[,1])),as.matrix(dist(x[,1])))
  expect_equivalent(fastdist(as.matrix(x)),as.matrix(dist(x)))
  # NOTE: this `x` is local to the test_that environment.
  x = rnorm(100)
  # cdm is double-centering of the fast distance matrix.
  expect_equal(
    double.center(fastdist(as.matrix(x)),normalize = TRUE),
    cdm(x)
  )
  expect_equal(
    double.center(fastdist(as.matrix(x)),normalize = FALSE),
    cdm(x,normalize = FALSE)
  )
})
context("definition of multivariances")
test_that("multivariance, total.multivariance, m.multivariance", {
  expect_warning(multivariance(matrix(1,nrow= N,ncol = n)),"constant")
  # Invariance under column permutation.
  expect_equal(multivariance(x), multivariance(x[,c(2,3,1)]))
  # For 3 columns, multivariance equals 3-multivariance.
  expect_equal(multivariance(x), m.multivariance(x,m=3))
  expect_equal(multivariance(x[,c(1,2)]), m.multivariance(x[,c(1,2)],m=2))
  # 2-multivariance is the average over all pairs.
  expect_equal((multivariance(x[,c(1,2)]) + multivariance(x[,c(1,3)]) + multivariance(x[,c(3,2)]))/3, m.multivariance(x,m=2))
  expect_equal(multivariance(x[,c(1,2)]), total.multivariance(x[,c(1,2)]))
  # Total multivariance is the weighted combination of m = 2 and m = 3 parts.
  expect_equal(total.multivariance(x), (m.multivariance(x,m=3)+m.multivariance(x,m=2)*3)/4)
  # multivariances.all bundles all four statistics.
  expect_equivalent(multivariances.all(x),c(multivariance(x),total.multivariance(x),m.multivariance(x,m=2),m.multivariance(x,m=3)))
  expect_equivalent(multivariances.all(y,vec),c(multivariance(y,vec),total.multivariance(y,vec),m.multivariance(y,vec,m=2),m.multivariance(y,vec,m=3)))
})
context("resampling")
# Reference quantile/p-value computed with the same seed as the calls
# under test below (300 = default number of resamples).
set.seed(1)
quant = quantile(resample.multivariance(x)$resampled,0.95)
pval = sum(resample.multivariance(x)$resampled>=multivariance(x))/300
set.seed(1)
test_that("resampling p-values and quantiles",{
  expect_equal(resample.rejection.level(0.05,x), quant)
  expect_equal(resample.pvalue(multivariance(x),x), pval)
})
set.seed(1)
# Each row of `mat` is a grouping vector for the columns of x.
mat = matrix( c(1,2,3,1,1,2,1,2,1,2,1,1),nrow = 4,byrow = TRUE)
for (i in 1:4) {
  for (type in c("multi","total","m.multi.2","m.multi.3")) {
    # The "original" statistic reported by resample.multivariance must
    # match the directly computed one for every type.
    ma = multivariances.all(x,vec = mat[i,])
    expect_equal(
      unname(ma[type]),
      as.numeric(resample.multivariance(x,vec = mat[i,],type = type,times = 2)$original)
    )
  }
  expect_equal(
    ma,
    resample.multivariance(x,vec = mat[i,],type = "all",times = 2)$original
  )
}
set.seed(1)
x = matrix(rnorm(10*10),10)
vec = c(1:5,1:5)
# Resampling the cdms directly must equal computing cdms of resampled columns.
for (re in c(FALSE))
  for (inc in c(TRUE,FALSE)) {
    set.seed(1234)
    a = sample.cdms(cdms(x,vec),replace = re,incl.first = inc)
    set.seed(1234)
    b = cdms(sample.cols(x,vec ,replace = re,incl.first = inc),vec)
    expect_equal(a,b)
  }
# Smoke tests: multivariance.test must run for every combination of
# statistic type and p-value method, for small and moderate dimension.
for (n in c(2,5)) {
  context(paste0("function arguments, n = ",n))
  set.seed(123412)
  N = 5
  x = matrix(rnorm(N*n),nrow = N)
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("distribution_free","resample","pearson_approx","pearson_unif"))
      multivariance.test(x,type=ty,p.value.type = pvt)
  if (n == 5) {
    # Grouping vectors (with and without all groups represented).
    for (ty in c("total","m.multi.2","m.multi.3","multi"))
      for (pvt in c("distribution_free","resample","pearson_approx"))
        multivariance.test(x,vec= c(1,2,2,1,3),type=ty,p.value.type = pvt)
    for (ty in c("total","m.multi.2","m.multi.3","multi"))
      for (pvt in c("distribution_free","resample","pearson_approx"))
        multivariance.test(x,vec= c(1,2,2,1,2),type=ty,p.value.type = pvt)
  }
  # Custom distance functions, isotropic variants and parameters.
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("distribution_free","resample","pearson_approx"))
      multivariance.test(x,type=ty,p.value.type = pvt, psi = function(x,y) sum(abs(x-y)))
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("distribution_free","resample","pearson_approx"))
      multivariance.test(x,type=ty,p.value.type = pvt, psi = function(x) abs(x), isotropic = TRUE)
  # Out-of-range p must warn, p in (1,2) must work.
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("distribution_free","resample","pearson_approx"))
      expect_warning(multivariance.test(x,type=ty,p.value.type = pvt, p = 0.5),"p is not in")
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("distribution_free","resample","pearson_approx"))
      multivariance.test(x,type=ty,p.value.type = pvt, p = 1.5)
  for (ty in c("total"))
    for (pvt in c("distribution_free","resample","pearson_approx"))
      multivariance.test(x,type=ty,p.value.type = pvt, lambda = 2)
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("resample"))
      multivariance.test(x,type=ty,p.value.type = pvt, times = 10)
  # Bootstrap resampling is only approximate and must warn.
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("resample"))
      expect_warning(multivariance.test(x,type=ty,p.value.type = pvt, resample.type = "bootstrap"),"bootstrap")
  context(paste0("equality of distances, n = ",n))
  set.seed(123412)
  N = 50
  x = matrix(rnorm(N*n),nrow = N)
  # Equivalent specifications of the Euclidean distance must give
  # identical test results; results must also be scale/shift invariant.
  for (ty in c("total","m.multi.2","m.multi.3","multi"))
    for (pvt in c("distribution_free","pearson_approx")) {
      expect_equal( multivariance.test(x,type=ty,p.value.type = pvt),
                    multivariance.test(x,type=ty,p.value.type = pvt,psi = function(x,y)sum(abs(x-y)) ))
      expect_equal( multivariance.test(x,type=ty,p.value.type = pvt),
                    multivariance.test(x,type=ty,p.value.type = pvt,external.dm.fun = fastdist) )
      expect_equal( multivariance.test(x,type=ty,p.value.type = pvt),
                    multivariance.test(x,type=ty,p.value.type = pvt,psi = function(x) abs(x),isotropic = TRUE) )
      expect_equal( multivariance.test(x,type=ty,p.value.type = pvt)[c("statistic","p.value")],
                    multivariance.test(apply(x,2,function(y) (abs(rnorm(1))+1)*y+rnorm(1)),type=ty,p.value.type = pvt)[c("statistic","p.value")])
    }
}
context("pearsons approximation")
# pearson.pvalue must give the same result whether called on raw data
# or on precomputed cdms/moments (cdms.mu.bcd), for full data and for
# column subsets.
set.seed(123)
x = coins(20)
cmb = multivariance:::cdms.mu.bcd(x)
expect_equal(
  pearson.pvalue(x),
  pearson.pvalue(cmb)
)
expect_equal(
  pearson.pvalue(x[,1:2]),
  pearson.pvalue(cmb,1:2)
)
expect_equal(
  pearson.pvalue(x[,c(1,3)]),
  pearson.pvalue(cmb,c(1,3))
)
expect_equal(
  pearson.pvalue(x[,c(2,3)]),
  pearson.pvalue(cmb,c(2,3))
)
set.seed(123)
x = coins(20,3)
cmb = multivariance:::cdms.mu.bcd(x)
expect_equal(
  pearson.pvalue(cmb,type = "all"),
  pearson.pvalue(x,type = "all")
)
# type = "all" is the concatenation of the four individual types.
expect_equal(
  pearson.pvalue(x,type = "all"),
  c(multi=pearson.pvalue(x,type = "multi"),
    total=pearson.pvalue(x,type = "total"),
    m.multi.2=pearson.pvalue(x,type = "m.multi.2"),
    m.multi.3=pearson.pvalue(x,type = "m.multi.3"))
)
context("multicorrelation")
set.seed(1213)
y = rnorm(10)
# Perfectly (affinely) dependent columns must yield multicorrelation 1.
expect_equivalent(multicorrelation(cbind(y,2*y,1-y,y*5-pi,y+1),type="pairwise",multicorrelation.type = "unnormalized",estimator.type = "biased"),1)
expect_equivalent(multicorrelation(cbind(y,2*y,1-y,y*5-pi,y+1),type = "m.multi.3",multicorrelation.type = "unnormalized",estimator.type = "biased"),1)
expect_equivalent(multicorrelation(cbind(y,2*y,1-y,y*5-pi,y+1),type = "multi",multicorrelation.type = "unnormalized",estimator.type = "biased"),1)
x = matrix(rnorm(10*4),10)
# For the biased estimator, normalized and unnormalized variants agree.
expect_equivalent(multicorrelation(x,type = "multi",multicorrelation.type = "unnormalized",estimator.type = "biased"),
  multicorrelation(x,type = "multi",multicorrelation.type = "normalized",estimator.type = "biased"))
expect_equivalent(multicorrelation(x,type = "m.multi.2",multicorrelation.type = "unnormalized",estimator.type = "biased"),
  multicorrelation(x,type = "m.multi.2",multicorrelation.type = "normalized",estimator.type = "biased"))
# Constant data: value 0 and a "Constant" warning.
suppressWarnings(
  expect_equivalent(
    multicorrelation(matrix(rep(1,10*3),ncol = 3),type = "total.lower",estimator.type = "biased"),
    0)
)
expect_warning(
  multicorrelation(matrix(rep(1,10*3),ncol = 3),type = "total.lower",estimator.type = "biased"),"Constant")
suppressWarnings( expect_equivalent(
  multicorrelation(matrix(rep(1,10*3),ncol = 3)),
  c(0,0)))
expect_equivalent(multicorrelation(cbind(y,2*y,1-y,y*5-pi,y+1),type="pairwise",multicorrelation.type = "normalized",estimator.type = "bias.corrected"),1)
expect_equivalent(multicorrelation(cbind(y,2*y,1-y,y*5-pi,y+1),type="all",multicorrelation.type = "normalized",estimator.type = "bias.corrected")["unnormalized"],1)
context("identities with other measures")
# With psi(x) = x^2 (isotropic, unscaled, unnormalized) multivariance
# reduces to classical (co)variance identities.
set.seed(1234)
n = 2
N = 100
x = matrix(rnorm(N*n),N)
xx = cbind(x[,1],x[,1])
expect_equal(multivariance(xx,psi = function(x) x^2, isotropic = TRUE, Nscale = FALSE,normalize = FALSE),
  (2*(N-1)/N*var(x[,1]))^2)
expect_equal(multivariance(x,psi = function(x) x^2, isotropic = TRUE, Nscale = FALSE,normalize = FALSE),
  (2*(N-1)/N*cov(x[,1],x[,2]))^2)
# Normalized version equals squared Pearson correlation.
expect_equal(
  multivariance(x,psi = function(x) x^2, isotropic = TRUE, Nscale = FALSE),
  (cor(x[,1],x[,2]))^2
)
expect_equivalent(
  (cor(x[,1],x[,2]))^2,
  multicorrelation(x, type = "multi",estimator.type = "biased",psi = function(x) x^2, isotropic = TRUE,squared = TRUE)
)
expect_equal(
  multicorrelation(x, type = "multi",estimator.type = "biased",psi = function(x) x^2, isotropic = TRUE),
  multicorrelation(x, type = "multi",estimator.type = "biased", multicorrelation.type = "normalized", psi = function(x) x^2, isotropic = TRUE)
)
# Vector-valued margins: identity with the squared cross-covariance matrix.
set.seed(1234)
n = 2
N = 100
x = matrix(rnorm(N*n),N)
y = matrix(rnorm(N*n),N)
expect_equal(
  sum((2*(N-1)/N*cov(x,y))^2),
  multivariance(cbind(x,y),vec = c(rep(1,n),rep(2,n)),psi = function(x) x^2, isotropic = TRUE, Nscale = FALSE,normalize = FALSE)
)
expect_equal(
  sum((2*(N-1)/N*cov(x,x))^2),
  multivariance(cbind(x,x),vec = c(rep(1,n),rep(2,n)),psi = function(x) x^2, isotropic = TRUE, Nscale = FALSE,normalize = FALSE)
)
# RV-coefficient-style normalization identity.
expect_equivalent(
  sum((2*(N-1)/N*cov(x,y))^2) /sqrt( sum((2*(N-1)/N*cov(x,x))^2)*sum((2*(N-1)/N*cov(y,y))^2) ),
  multicorrelation(cbind(x,y),vec = c(rep(1,n),rep(2,n)), type = "multi",estimator.type = "biased",psi = function(x) x^2, isotropic = TRUE)
)
expect_equal(
  multicorrelation(cbind(x,y),vec = c(rep(1,n),rep(2,n)), type = "multi",estimator.type = "biased",psi = function(x) x^2, isotropic = TRUE),
  multicorrelation(cbind(x,y),vec = c(rep(1,n),rep(2,n)), type = "multi",estimator.type = "biased",psi = function(x) x^2, isotropic = TRUE,multicorrelation.type = "normalized")
)
context("dependence structures")
# Smoke tests: dependence.structure must run for every combination of
# detection type and structure type, with and without grouping vectors.
set.seed(1023)
x = coins(10,5)
vec = 1:ncol(x)
verbose.output = FALSE
for (sty in c("clustered","full"))
  for (ty in c("conservative","resample","pearson_approx","consistent"))
    dependence.structure(x,vec,type = ty, structure.type = sty,list.cdm = NULL, alpha = 0.05,stop.too.many = 100000,verbose = verbose.output)
for (sty in c("clustered"))
  for (ty in c("conservative","resample","pearson_approx","consistent"))
    dependence.structure(x,vec,type = ty, structure.type = sty,list.cdm = NULL, alpha = 0.05,stop.too.many = 100000,verbose = verbose.output)
# Grouping vector that merges columns 1 and 4.
vec = c(1:3,1)
for (sty in c("clustered","full"))
  for (ty in c("conservative","resample","pearson_approx","consistent"))
    dependence.structure(x,vec,type = ty, structure.type = sty,list.cdm = NULL, alpha = 0.05,stop.too.many = 100000,verbose = verbose.output)
# Disabled scratch code kept for interactive experimentation.
if (FALSE) {
  x = 1:10
  y = rnorm(10)
  multicorrelation(cbind(x,y))
  multicorrelation(data.frame(x,y))
  multivariance(cbind(x,y))
  multivariance(data.frame(x,y))
}
computeWeightedMeans <- function(data_table, variables, weight, by) {
  # Group-wise (weighted) means of selected columns of a data.table.
  #
  # data_table: a data.table.
  # variables:  character vector of column names to average.
  # weight:     name of the weight column, or NULL for unweighted means.
  # by:         grouping specification passed to data.table's `by`.
  # Returns a data.table with one row per group.
  if (is.null(weight)) {
    res_dt <- data_table[, lapply(.SD, mean, na.rm = TRUE), .SDcols = variables, by = by]
  } else {
    # BUG FIX: weighted.mean()'s formal is `w`, not `weight`. The former
    # `weight = ...` did not (partially) match `w`, was swallowed by `...`
    # and ignored, so plain unweighted means were silently computed.
    res_dt <- data_table[, lapply(.SD, weighted.mean, w = eval(as.name(weight)), na.rm = TRUE),
                         .SDcols = variables, by = by]
  }
  res_dt
}
between_trial_change <- function(Dataframe, TrialRange1, TrialRange2, Time.period = c(min(Dataframe[1]), max(Dataframe[1]))){
  # Mean signal change between two sets of trials within a time window.
  # Column 1 of Dataframe holds time; trial k lives in column k + 1.
  # Returns mean(TrialRange2) - mean(TrialRange1) over the window.
  cols1 <- TrialRange1 + 1
  cols2 <- TrialRange2 + 1
  in_window <- Dataframe[1] >= Time.period[1] & Dataframe[1] <= Time.period[2]
  windowed <- Dataframe[in_window, ]
  baseline <- mean(as.matrix(windowed[, cols1]))
  comparison <- mean(as.matrix(windowed[, cols2]))
  return(comparison - baseline)
}
additional_data_fields <- function() {
list(
aa1 = list(
long_names = c(
"aa1_liq_precip_period_quantity",
"aa1_liq_precip_depth_dimension",
"aa1_liq_precip_condition_code",
"aa1_liq_precip_quality_code"
),
field_lengths = c(2, 4, 1, 1),
scale_factors = c(1, 10, NA_real_, NA_real_),
data_types = "nncc"
),
ab1 = list(
long_names = c(
"ab1_liq_precip_monthly_depth_dimension",
"ab1_liq_precip_monthly_condition_code",
"ab1_liq_precip_monthly_quality_code"
),
field_lengths = c(5, 1, 1),
scale_factors = c(10, NA_real_, NA_real_),
data_types = "ncc"
),
ac1 = list(
long_names = c(
"ac1_precip_obs_history_duration_code",
"ac1_precip_obs_history_characteristic_code",
"ac1_precip_obs_history_quality_code"
),
field_lengths = c(1, 1, 1),
scale_factors = c(NA_real_, NA_real_, NA_real_),
data_types = "ccc"
),
ad1 = list(
long_names = c(
"ad1_liq_precip_greatest_amt_24h_month_depth_dimension",
"ad1_liq_precip_greatest_amt_24h_month_condition_code",
"ad1_liq_precip_greatest_amt_24h_month_dates",
"ad1_liq_precip_greatest_amt_24h_month_quality_code"
),
field_lengths = c(5, 1, 4, 4, 4, 1),
scale_factors = c(10, NA, NA, NA, NA, NA),
data_types = "nccccc"
),
ae1 = list(
long_names = c(
"ae1_liq_precip_number_days_amt_month__01inch",
"ae1_liq_precip_number_days_amt_month__01inch_quality_code",
"ae1_liq_precip_number_days_amt_month__10inch",
"ae1_liq_precip_number_days_amt_month__10inch_quality_code",
"ae1_liq_precip_number_days_amt_month__50inch",
"ae1_liq_precip_number_days_amt_month__50inch_quality_code",
"ae1_liq_precip_number_days_amt_month_1_00inch",
"ae1_liq_precip_number_days_amt_month_1_00inch_quality_code"
),
field_lengths = c(2, 1, 2, 1, 2, 1, 2, 1),
scale_factors = rep(NA_real_, 8),
data_types = "cccccccc"
),
ag1 = list(
long_names = c(
"ag1_precip_est_obs_discrepancy_code",
"ag1_precip_est_obs_est_water_depth_dimension"
),
field_lengths = c(1, 3),
scale_factors = c(NA_real_, 1),
data_types = "cn"
),
ah1 = list(
long_names = c(
"ah1_liq_precip_max_short_dur_month_period_quantity",
"ah1_liq_precip_max_short_dur_month_depth_dimension",
"ah1_liq_precip_max_short_dur_month_condition_code",
"ah1_liq_precip_max_short_dur_month_end_date_time",
"ah1_liq_precip_max_short_dur_month_quality_code"
),
field_lengths = c(3, 4, 1, 6, 1),
scale_factors = c(1, 10, NA_real_, NA_real_, NA_real_),
data_types = "nnccc"
),
ai1 = list(
long_names = c(
"ai1_liq_precip_max_short_dur_month_period_quantity",
"ai1_liq_precip_max_short_dur_month_depth_dimension",
"ai1_liq_precip_max_short_dur_month_condition_code",
"ai1_liq_precip_max_short_dur_month_end_date_time",
"ai1_liq_precip_max_short_dur_month_quality_code"
),
field_lengths = c(4, 1, 6, 1),
scale_factors = c(10, NA_real_, NA_real_, NA_real_),
data_types = "nccc"
),
aj1 = list(
long_names = c(
"aj1_snow_depth_dimension",
"aj1_snow_depth_condition_code",
"aj1_snow_depth_quality_code",
"aj1_snow_depth_equiv_water_depth_dimension",
"aj1_snow_depth_equiv_water_condition_code",
"aj1_snow_depth_equiv_water_quality_code"
),
field_lengths = c(4, 1, 1, 6, 1, 1),
scale_factors = c(1, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nccncc"
),
ak1 = list(
long_names = c(
"ak1_snow_depth_greatest_depth_month_depth_dimension",
"ak1_snow_depth_greatest_depth_month_condition_code",
"ak1_snow_depth_greatest_depth_month_dates_occurrence",
"ak1_snow_depth_greatest_depth_month_quality_code"
),
field_lengths = c(4, 1, 6, 1),
scale_factors = c(1, NA_real_, NA_real_, NA_real_),
data_types = "nccc"
),
al1 = list(
long_names = c(
"al1_snow_accumulation_period_quantity",
"al1_snow_accumulation_depth_dimension",
"al1_snow_accumulation_condition_code",
"al1_snow_accumulation_quality_code"
),
field_lengths = c(2, 3, 1, 1),
scale_factors = c(1, 1, NA_real_, NA_real_),
data_types = "nncc"
),
am1 = list(
long_names = c(
"am1_snow_accumulation_greatest_amt_24h_month_depth_dimension",
"am1_snow_accumulation_greatest_amt_24h_month_condition_code",
"am1_snow_accumulation_greatest_amt_24h_month_dates_occurrence_1",
"am1_snow_accumulation_greatest_amt_24h_month_dates_occurrence_2",
"am1_snow_accumulation_greatest_amt_24h_month_dates_occurrence_3",
"am1_snow_accumulation_greatest_amt_24h_month_quality_code"
),
field_lengths = c(4, 1, 4, 4, 4, 1),
scale_factors = c(10, NA, NA, NA, NA, NA),
data_types = "nccccc"
),
an1 = list(
long_names = c(
"an1_snow_accumulation_month_period_quantity",
"an1_snow_accumulation_month_depth_dimension",
"an1_snow_accumulation_month_condition_code",
"an1_snow_accumulation_month_quality_code"
),
field_lengths = c(3, 4, 1, 1),
scale_factors = c(1, 10, NA_real_, NA_real_),
data_types = "nncc"
),
ao1 = list(
long_names = c(
"ao1_liq_precip_period_quantity_minutes",
"ao1_liq_precip_depth_dimension",
"ao1_liq_precip_condition_code",
"ao1_liq_precip_quality_code"
),
field_lengths = c(2, 4, 1, 1),
scale_factors = c(1, 10, NA_real_, NA_real_),
data_types = "nncc"
),
ap1 = list(
long_names = c(
"ap1_15_min_liq_precip_hpd_gauge_value_45_min_prior",
"ap1_15_min_liq_precip_hpd_gauge_value_30_min_prior",
"ap1_15_min_liq_precip_hpd_gauge_value_15_min_prior",
"ap1_15_min_liq_precip_hpd_gauge_value_at_obs_time"
),
field_lengths = c(4, 1, 1),
scale_factors = c(10, NA_real_, NA_real_),
data_types = "ncc"
),
au1 = list(
long_names = c(
"au1_present_weather_obs_intensity_code",
"au1_present_weather_obs_descriptor_code",
"au1_present_weather_obs_precipitation_code",
"au1_present_weather_obs_obscuration_code",
"au1_present_weather_obs_other_weather_phenomena_code",
"au1_present_weather_obs_combination_indicator_code",
"au1_present_weather_obs_quality_code"
),
field_lengths = c(1, 1, 2, 1, 1, 1, 1),
scale_factors = rep(NA_real_, 7),
data_types = "ccccccc"
),
aw1 = list(
long_names = c(
"aw1_present_weather_obs_aut_weather_report_1",
"aw1_present_weather_obs_aut_weather_report_2",
"aw1_present_weather_obs_aut_weather_report_3",
"aw1_present_weather_obs_aut_weather_report_4"
),
field_lengths = c(2, 1),
scale_factors = c(NA_real_, NA_real_),
data_types = "cc"
),
ax1 = list(
long_names = c(
"ax1_past_weather_obs_atmos_condition_code",
"ax1_past_weather_obs_quality_manual_atmos_condition_code",
"ax1_past_weather_obs_period_quantity",
"ax1_past_weather_obs_period_quality_code"
),
field_lengths = c(2, 1, 2, 1),
scale_factors = c(NA_real_, NA_real_, 1, NA_real_),
data_types = "ccnc"
),
ay1 = list(
long_names = c(
"ay1_past_weather_obs_manual_occurrence_identifier",
"ay1_past_weather_obs_quality_manual_atmos_condition_code",
"ay1_past_weather_obs_period_quantity",
"ay1_past_weather_obs_period_quality_code"
),
field_lengths = c(1, 1, 2, 1),
scale_factors = c(NA_real_, NA_real_, 1, NA_real_),
data_types = "ccnc"
),
az1 = list(
long_names = c(
"az1_past_weather_obs_aut_occurrence_identifier",
"az1_past_weather_obs_quality_aut_atmos_condition_code",
"az1_past_weather_obs_period_quantity",
"az1_past_weather_obs_period_quality_code"
),
field_lengths = c(1, 1, 2, 1),
scale_factors = c(NA_real_, NA_real_, 1, NA_real_),
data_types = "ccnc"
),
cb1 = list(
long_names = c(
"cb1_subhrly_obs_liq_precip_2_sensor_period_quantity",
"cb1_subhrly_obs_liq_precip_2_sensor_precip_liq_depth",
"cb1_subhrly_obs_liq_precip_2_sensor_qc_quality_code",
"cb1_subhrly_obs_liq_precip_2_sensor_flag_quality_code"
),
field_lengths = c(2, 6, 1, 1),
scale_factors = c(1, 10, NA_real_, NA_real_),
data_types = "nncc"
),
cf1 = list(
long_names = c(
"cf1_hrly_fan_speed_rate",
"cf1_hrly_fan_qc_quality_code",
"cf1_hrly_fan_flag_quality_code"
),
field_lengths = c(4, 1, 1),
scale_factors = c(10, NA_real_, NA_real_),
data_types = "ncc"
),
cg1 = list(
long_names = c(
"cg1_subhrly_obs_liq_precip_1_sensor_precip_liq_depth",
"cg1_subhrly_obs_liq_precip_1_sensor_qc_quality_code",
"cg1_subhrly_obs_liq_precip_1_sensor_flag_quality_code"
),
field_lengths = c(6, 1, 1),
scale_factors = c(10, NA_real_, NA_real_),
data_types = "ncc"
),
ch1 = list(
long_names = c(
"ch1_hrly_subhrly_rh_temp_period_quantity",
"ch1_hrly_subhrly_temp_avg_air_temp",
"ch1_hrly_subhrly_temp_qc_quality_code",
"ch1_hrly_subhrly_temp_flag_quality_code",
"ch1_hrly_subhrly_rh_avg_rh",
"ch1_hrly_subhrly_rh_qc_quality_code",
"ch1_hrly_subhrly_rh_flag_quality_code"
),
field_lengths = c(2, 5, 1, 1, 4, 1, 1),
scale_factors = c(1, 10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nnccncc"
),
ci1 = list(
long_names = c(
"ci1_hrly_rh_temp_min_hrly_temp",
"ci1_hrly_rh_temp_min_hrly_temp_qc_quality_code",
"ci1_hrly_rh_temp_min_hrly_temp_flag_quality_code",
"ci1_hrly_rh_temp_max_hrly_temp",
"ci1_hrly_rh_temp_max_hrly_temp_qc_quality_code",
"ci1_hrly_rh_temp_max_hrly_temp_flag_quality_code",
"ci1_hrly_rh_temp_std_dev_hrly_temp",
"ci1_hrly_rh_temp_std_dev_hrly_temp_qc_quality_code",
"ci1_hrly_rh_temp_std_dev_hrly_temp_flag_quality_code",
"ci1_hrly_rh_temp_std_dev_hrly_rh",
"ci1_hrly_rh_temp_std_dev_hrly_rh_qc_quality_code",
"ci1_hrly_rh_temp_std_dev_hrly_rh_flag_quality_code"
),
field_lengths = c(5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1),
scale_factors = rep(c(10, NA_real_, NA_real_), 4),
data_types = "nccnccnccncc"
),
cn1 = list(
long_names = c(
"cn1_hrly_batvol_sensors_transm_avg_voltage",
"cn1_hrly_batvol_sensors_transm_avg_voltage_qc_quality_code",
"cn1_hrly_batvol_sensors_transm_avg_voltage_flag_quality_code",
"cn1_hrly_batvol_full_load_avg_voltage",
"cn1_hrly_batvol_full_load_avg_voltage_qc_quality_code",
"cn1_hrly_batvol_full_load_avg_voltage_flag_quality_code",
"cn1_hrly_batvol_datalogger_avg_voltage",
"cn1_hrly_batvol_datalogger_avg_voltage_qc_quality_code",
"cn1_hrly_batvol_datalogger_avg_voltage_flag_quality_code"
),
field_lengths = c(4, 1, 1, 4, 1, 1, 4, 1, 1),
scale_factors = rep(c(10, NA_real_, NA_real_), 3),
data_types = "nccnccncc"
),
cn2 = list(
long_names = c(
"cn2_hrly_diagnostic_equipment_temp",
"cn2_hrly_diagnostic_equipment_temp_qc_quality_code",
"cn2_hrly_diagnostic_equipment_temp_flag_quality_code",
"cn2_hrly_diagnostic_geonor_inlet_temp",
"cn2_hrly_diagnostic_geonor_inlet_temp_qc_quality_code",
"cn2_hrly_diagnostic_geonor_inlet_temp_flag_quality_code",
"cn2_hrly_diagnostic_datalogger_opendoor_time",
"cn2_hrly_diagnostic_datalogger_opendoor_time_qc_quality_code",
"cn2_hrly_diagnostic_datalogger_opendoor_time_flag_quality_code"
),
field_lengths = c(5, 1, 1, 5, 1, 1, 2, 1, 1),
scale_factors = c(rep(c(10, NA_real_, NA_real_), 2), 1, NA_real_, NA_real_),
data_types = "nccnccncc"
),
cn3 = list(
long_names = c(
"cn3_hrly_diagnostic_reference_resistor_avg_resistance",
"cn3_hrly_diagnostic_reference_resistor_avg_resistance_qc_quality_code",
"cn3_hrly_diagnostic_reference_resistor_avg_resistance_flag_quality_code",
"cn3_hrly_diagnostic_datalogger_signature_id",
"cn3_hrly_diagnostic_datalogger_signature_id_qc_quality_code",
"cn3_hrly_diagnostic_datalogger_signature_id_flag_quality_code"
),
field_lengths = c(6, 1, 1, 6, 1, 1),
scale_factors = c(10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nccncc"
),
cn4 = list(
long_names = c(
"cn4_hrly_diagnostic_liq_precip_gauge_flag_bit",
"cn4_hrly_diagnostic_liq_precip_gauge_flag_bit_qc_quality_code",
"cn4_hrly_diagnostic_liq_precip_gauge_flag_bit_flag_quality_code",
"cn4_hrly_diagnostic_doorflag_field",
"cn4_hrly_diagnostic_doorflag_field_qc_quality_code",
"cn4_hrly_diagnostic_doorflag_field_flag_quality_code",
"cn4_hrly_diagnostic_forward_transmitter_rf_power",
"cn4_hrly_diagnostic_forward_transmitter_rf_power_qc_quality_code",
"cn4_hrly_diagnostic_forward_transmitter_rf_power_flag_quality_code",
"cn4_hrly_diagnostic_reflected_transmitter_rf_power",
"cn4_hrly_diagnostic_reflected_transmitter_rf_power_qc_quality_code",
"cn4_hrly_diagnostic_reflected_transmitter_rf_power_flag_quality_code"
),
field_lengths = c(1, 1, 1, 1, 1, 1, 3, 1, 1, 3, 1, 1),
scale_factors = c(rep(NA_real_, 6), 10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "ccccccnccncc"
),
cr1 = list(
long_names = c(
"cr1_control_section_datalogger_version_number",
"cr1_control_section_datalogger_version_number_qc_quality_code",
"cr1_control_section_datalogger_version_number_flag_quality_code"
),
field_lengths = c(5, 1, 1),
scale_factors = c(1000, NA_real_, NA_real_),
data_types = "ncc"
),
ct1 = list(
long_names = c(
"ct1_subhrly_temp_avg_air_temp",
"ct1_subhrly_temp_avg_air_temp_qc_quality_code",
"ct1_subhrly_temp_avg_air_temp_flag_quality_code"
),
field_lengths = c(5, 1, 1),
scale_factors = c(10, NA_real_, NA_real_),
data_types = "ncc"
),
cu1 = list(
long_names = c(
"cu1_hrly_temp_avg_air_temp",
"cu1_hrly_temp_avg_air_temp_qc_quality_code",
"cu1_hrly_temp_avg_air_temp_flag_quality_code",
"cu1_hrly_temp_avg_air_temp_st_dev",
"cu1_hrly_temp_avg_air_temp_st_dev_qc_quality_code",
"cu1_hrly_temp_avg_air_temp_st_dev_flag_quality_code"
),
field_lengths = c(5, 1, 1, 4, 1, 1),
scale_factors = c(10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nccncc"
),
cv1 = list(
long_names = c(
"cv1_hrly_temp_min_air_temp",
"cv1_hrly_temp_min_air_temp_qc_quality_code",
"cv1_hrly_temp_min_air_temp_flag_quality_code",
"cv1_hrly_temp_min_air_temp_time",
"cv1_hrly_temp_min_air_temp_time_qc_quality_code",
"cv1_hrly_temp_min_air_temp_time_flag_quality_code",
"cv1_hrly_temp_max_air_temp",
"cv1_hrly_temp_max_air_temp_qc_quality_code",
"cv1_hrly_temp_max_air_temp_flag_quality_code",
"cv1_hrly_temp_max_air_temp_time",
"cv1_hrly_temp_max_air_temp_time_qc_quality_code",
"cv1_hrly_temp_max_air_temp_time_flag_quality_code"
),
field_lengths = c(5, 1, 1, 4, 1, 1, 5, 1, 1, 4, 1, 1),
scale_factors = c(10, rep(NA_real_, 5), 10, rep(NA_real_, 5)),
data_types = "ncccccnccccc"
),
cw1 = list(
long_names = c(
"cw1_subhrly_wetness_wet1_indicator",
"cw1_subhrly_wetness_wet1_indicator_qc_quality_code",
"cw1_subhrly_wetness_wet1_indicator_flag_quality_code",
"cw1_subhrly_wetness_wet2_indicator",
"cw1_subhrly_wetness_wet2_indicator_qc_quality_code",
"cw1_subhrly_wetness_wet2_indicator_flag_quality_code"
),
field_lengths = c(5, 1, 1, 5, 1, 1),
scale_factors = c(10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nccncc"
),
cx1 = list(
long_names = c(
"cx1_hourly_geonor_vib_wire_total_precip",
"cx1_hourly_geonor_vib_wire_total_precip_qc_quality_code",
"cx1_hourly_geonor_vib_wire_total_precip_flag_quality_code",
"cx1_hourly_geonor_vib_wire_freq_avg_precip",
"cx1_hourly_geonor_vib_wire_freq_avg_precip_qc_quality_code",
"cx1_hourly_geonor_vib_wire_freq_avg_precip_flag_quality_code",
"cx1_hourly_geonor_vib_wire_freq_min_precip",
"cx1_hourly_geonor_vib_wire_freq_min_precip_qc_quality_code",
"cx1_hourly_geonor_vib_wire_freq_min_precip_flag_quality_code",
"cx1_hourly_geonor_vib_wire_freq_max_precip",
"cx1_hourly_geonor_vib_wire_freq_max_precip_qc_quality_code",
"cx1_hourly_geonor_vib_wire_freq_max_precip_flag_quality_code"
),
field_lengths = c(6, 1, 1, 4, 1, 1, 4, 1, 1, 4, 1, 1),
scale_factors = c(10, NA_real_, NA_real_, rep(c(1, NA_real_, NA_real_), 3)),
data_types = "nccnccnccncc"
),
co1 = list(
long_names = c(
"co1_network_metadata_climate_division_number",
"co1_network_metadata_utc_lst_time_conversion"
),
field_lengths = c(2, 3),
scale_factors = c(1, 1),
data_types = "nn"
),
co2 = list(
long_names = c(
"co2_us_network_cooperative_element_id",
"co2_us_network_cooperative_time_offset"
),
field_lengths = c(3, 5),
scale_factors = c(NA_real_, 10),
data_types = "cn"
),
ed1 = list(
long_names = c(
"ed1_runway_vis_range_obs_direction_angle",
"ed1_runway_vis_range_obs_runway_designator_code",
"ed1_runway_vis_range_obs_vis_dimension",
"ed1_runway_vis_range_obs_quality_code"
),
field_lengths = c(2, 1, 4, 1),
scale_factors = c(0.1, NA_real_, 1, NA_real_),
data_types = "ncnc"
),
ga1 = list(
long_names = c(
"ga1_sky_cover_layer_coverage_code",
"ga1_sky_cover_layer_coverage_quality_code",
"ga1_sky_cover_layer_base_height",
"ga1_sky_cover_layer_base_height_quality_code",
"ga1_sky_cover_layer_cloud_type",
"ga1_sky_cover_layer_cloud_type_quality_code"
),
field_lengths = c(2, 1, 6, 1, 2, 1),
scale_factors = c(NA_real_, NA_real_, 1, NA_real_, NA_real_, NA_real_),
data_types = "ccnccc"
),
gd1 = list(
long_names = c(
"gd1_sky_cover_summation_state_coverage_1",
"gd1_sky_cover_summation_state_coverage_2",
"gd1_sky_cover_summation_state_coverage_quality_code",
"gd1_sky_cover_summation_state_height",
"gd1_sky_cover_summation_state_height_quality_code",
"gd1_sky_cover_summation_state_characteristic_code"
),
field_lengths = c(1, 2, 1, 6, 1, 1),
scale_factors = c(NA_real_, NA_real_, NA_real_, 1, NA_real_, NA_real_),
data_types = "cccncc"
),
gf1 = list(
long_names = c(
"gf1_sky_condition_obs_total_coverage",
"gf1_sky_condition_obs_total_opaque_coverage",
"gf1_sky_condition_obs_total_coverage_quality_code",
"gf1_sky_condition_obs_total_lowest_cloud_cover",
"gf1_sky_condition_obs_total_lowest_cloud_cover_quality_code",
"gf1_sky_condition_obs_low_cloud_genus",
"gf1_sky_condition_obs_low_cloud_genus_quality_code",
"gf1_sky_condition_obs_lowest_cloud_base_height",
"gf1_sky_condition_obs_lowest_cloud_base_height_quality_code",
"gf1_sky_condition_obs_mid_cloud_genus",
"gf1_sky_condition_obs_mid_cloud_genus_quality_code",
"gf1_sky_condition_obs_high_cloud_genus",
"gf1_sky_condition_obs_high_cloud_genus_quality_code"
),
field_lengths = c(2, 2, 1, 2, 1, 2, 1, 5, 1, 2, 1, 2, 1),
scale_factors = c(rep(NA_real_, 7), 1, rep(NA_real_, 5)),
data_types = "cccccccnccccc"
),
gg1 = list(
long_names = c(
"gg1_below_stn_cloud_layer_coverage",
"gg1_below_stn_cloud_layer_coverage_quality_code",
"gg1_below_stn_cloud_layer_top_height",
"gg1_below_stn_cloud_layer_top_height_quality_code",
"gg1_below_stn_cloud_layer_type",
"gg1_below_stn_cloud_layer_type_quality_code",
"gg1_below_stn_cloud_layer_top",
"gg1_below_stn_cloud_layer_top_quality_code"
),
field_lengths = c(2, 1, 5, 1, 2, 1, 2, 1),
scale_factors = c(NA_real_, NA_real_, 1, rep(NA_real_, 5)),
data_types = "ccnccccc"
),
gh1 = list(
long_names = c(
"gh1_hrly_solar_rad_hrly_avg_solarad",
"gh1_hrly_solar_rad_hrly_avg_solarad_qc_quality_code",
"gh1_hrly_solar_rad_hrly_avg_solarad_flag_quality_code",
"gh1_hrly_solar_rad_min_solarad",
"gh1_hrly_solar_rad_min_solarad_qc_quality_code",
"gh1_hrly_solar_rad_min_solarad_flag_quality_code",
"gh1_hrly_solar_rad_max_solarad",
"gh1_hrly_solar_rad_max_solarad_qc_quality_code",
"gh1_hrly_solar_rad_max_solarad_flag_quality_code",
"gh1_hrly_solar_rad_std_dev_solarad",
"gh1_hrly_solar_rad_std_dev_solarad_qc_quality_code",
"gh1_hrly_solar_rad_std_dev_solarad_flag_quality_code"
),
field_lengths = c(5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1),
scale_factors = rep(c(10, NA_real_, NA_real_), 4),
data_types = "nccnccnccncc"
),
gj1 = list(
long_names = c(
"gj1_sunshine_obs_duration",
"gj1_sunshine_obs_duration_quality_code"
),
field_lengths = c(4, 1),
scale_factors = c(1, NA_real_),
data_types = "nc"
),
gk1 = list(
long_names = c(
"gk1_sunshine_obs_pct_possible_sunshine",
"gk1_sunshine_obs_pct_possible_quality_code"
),
field_lengths = c(3, 1),
scale_factors = c(1, NA_real_),
data_types = "nc"
),
gl1 = list(
long_names = c(
"gl1_sunshine_obs_duration",
"gl1_sunshine_obs_duration_quality_code"
),
field_lengths = c(5, 1),
scale_factors = c(1, NA_real_),
data_types = "nc"
),
gm1 = list(
long_names = c(
"gm1_solar_irradiance_time_period",
"gm1_solar_irradiance_global_irradiance",
"gm1_solar_irradiance_global_irradiance_data_flag",
"gm1_solar_irradiance_global_irradiance_quality_code",
"gm1_solar_irradiance_direct_beam_irradiance",
"gm1_solar_irradiance_direct_beam_irradiance_data_flag",
"gm1_solar_irradiance_direct_beam_irradiance_quality_code",
"gm1_solar_irradiance_diffuse_irradiance",
"gm1_solar_irradiance_diffuse_irradiance_data_flag",
"gm1_solar_irradiance_diffuse_irradiance_quality_code",
"gm1_solar_irradiance_uvb_global_irradiance",
"gm1_solar_irradiance_uvb_global_irradiance_data_flag",
"gm1_solar_irradiance_uvb_global_irradiance_quality_code"
),
field_lengths = c(4, 4, 2, 1, 4, 2, 1, 4, 2, 1, 4, 1),
scale_factors = c(1, 1, NA_real_, NA_real_, 1, NA_real_, NA_real_, 1, NA_real_, NA_real_, 1, NA_real_),
data_types = "nnccnccnccnc"
),
gn1 = list(
long_names = c(
"gn1_solar_rad_time_period",
"gn1_solar_rad_upwelling_global_solar_rad",
"gn1_solar_rad_upwelling_global_solar_rad_quality_code",
"gn1_solar_rad_downwelling_thermal_ir_rad",
"gn1_solar_rad_downwelling_thermal_ir_rad_quality_code",
"gn1_solar_rad_upwelling_thermal_ir_rad",
"gn1_solar_rad_upwelling_thermal_ir_rad_quality_code",
"gn1_solar_rad_par",
"gn1_solar_rad_par_quality_code",
"gn1_solar_rad_solar_zenith_angle",
"gn1_solar_rad_solar_zenith_angle_quality_code"
),
field_lengths = c(4, 4, 2, 1, 4, 2, 1, 4, 2, 1, 4, 1),
scale_factors = c(1, 1, NA_real_, NA_real_, 1, NA_real_, NA_real_, 1, NA_real_, NA_real_, 1, NA_real_),
data_types = "nnccnccnccnc"
),
go1 = list(
long_names = c(
"go1_net_solar_rad_time_period",
"go1_net_solar_rad_net_solar_radiation",
"go1_net_solar_rad_net_solar_radiation_quality_code",
"go1_net_solar_rad_net_ir_radiation",
"go1_net_solar_rad_net_ir_radiation_quality_code",
"go1_net_solar_rad_net_radiation",
"go1_net_solar_rad_net_radiation_quality_code"
),
field_lengths = c(4, 4, 1, 4, 1, 4, 1),
scale_factors = c(1, 1, NA_real_, 1, NA_real_, 1, NA_real_),
data_types = "nncncnc"
),
gp1 = list(
long_names = c(
"gp1_modeled_solar_irradiance_data_time_period",
"gp1_modeled_solar_irradiance_global_horizontal",
"gp1_modeled_solar_irradiance_global_horizontal_src_flag",
"gp1_modeled_solar_irradiance_global_horizontal_uncertainty",
"gp1_modeled_solar_irradiance_direct_normal",
"gp1_modeled_solar_irradiance_direct_normal_src_flag",
"gp1_modeled_solar_irradiance_direct_normal_uncertainty",
"gp1_modeled_solar_irradiance_diffuse_normal",
"gp1_modeled_solar_irradiance_diffuse_normal_src_flag",
"gp1_modeled_solar_irradiance_diffuse_normal_uncertainty",
"gp1_modeled_solar_irradiance_diffuse_horizontal",
"gp1_modeled_solar_irradiance_diffuse_horizontal_src_flag",
"gp1_modeled_solar_irradiance_diffuse_horizontal_uncertainty"
),
field_lengths = c(4, 4, 2, 3, 4, 2, 3, 4, 2, 3),
scale_factors = c(1, 1, NA_real_, 1, 1, NA_real_, 1, 1, NA_real_, 1),
data_types = "nncnncnncn"
),
gq1 = list(
long_names = c(
"gq1_hrly_solar_angle_time_period",
"gq1_hrly_solar_angle_mean_zenith_angle",
"gq1_hrly_solar_angle_mean_zenith_angle_quality_code",
"gq1_hrly_solar_angle_mean_azimuth_angle",
"gq1_hrly_solar_angle_mean_azimuth_angle_quality_code"
),
field_lengths = c(4, 4, 1, 4, 1),
scale_factors = c(1, 10, NA_real_, 10, NA_real_),
data_types = "nncnc"
),
gr1 = list(
long_names = c(
"gr1_hrly_extraterrestrial_rad_time_period",
"gr1_hrly_extraterrestrial_rad_horizontal",
"gr1_hrly_extraterrestrial_rad_horizontal_quality_code",
"gr1_hrly_extraterrestrial_rad_normal",
"gr1_hrly_extraterrestrial_rad_normal_quality_code"
),
field_lengths = c(4, 4, 1, 4, 1),
scale_factors = c(1, 1, NA_real_, 1, NA_real_),
data_types = "nncnc"
),
hl1 = list(
long_names = c(
"hl1_hail_size",
"hl1_hail_size_quality_code"
),
field_lengths = c(3, 1),
scale_factors = c(10, NA),
data_types = "nc"
),
ia1 = list(
long_names = c(
"ia1_ground_surface_obs_code",
"ia1_ground_surface_obs_code_quality_code"
),
field_lengths = c(2, 1),
scale_factors = c(NA_real_, NA_real_),
data_types = "cc"
),
ia2 = list(
long_names = c(
"ia2_ground_surface_obs_min_temp_time_period",
"ia2_ground_surface_obs_min_temp",
"ia2_ground_surface_obs_min_temp_quality_code"
),
field_lengths = c(3, 5, 1),
scale_factors = c(10, 10, NA_real_),
data_types = "nnc"
),
ib1 = list(
long_names = c(
"ib1_hrly_surface_temp",
"ib1_hrly_surface_temp_qc_quality_code",
"ib1_hrly_surface_temp_flag_quality_code",
"ib1_hrly_surface_min_temp",
"ib1_hrly_surface_min_temp_qc_quality_code",
"ib1_hrly_surface_min_temp_flag_quality_code",
"ib1_hrly_surface_max_temp",
"ib1_hrly_surface_max_temp_qc_quality_code",
"ib1_hrly_surface_max_temp_flag_quality_code",
"ib1_hrly_surface_std_temp",
"ib1_hrly_surface_std_temp_qc_quality_code",
"ib1_hrly_surface_std_temp_flag_quality_code"
),
field_lengths = c(5, 1, 1, 5, 1, 1, 5, 1, 1, 4, 1, 1),
scale_factors = rep(c(10, NA_real_, NA_real_), 4),
data_types = "nccnccnccncc"
),
ib2 = list(
long_names = c(
"ib2_hrly_surface_temp_sb",
"ib2_hrly_surface_temp_sb_qc_quality_code",
"ib2_hrly_surface_temp_sb_flag_quality_code",
"ib2_hrly_surface_temp_sb_std",
"ib2_hrly_surface_temp_sb_std_qc_quality_code",
"ib2_hrly_surface_temp_sb_std_flag_quality_code"
),
field_lengths = c(5, 1, 1, 4, 1, 1),
scale_factors = c(10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nccncc"
),
ic1 = list(
long_names = c(
"ic1_grnd_surface_obs_pan_evap_time_period",
"ic1_grnd_surface_obs_pan_evap_wind",
"ic1_grnd_surface_obs_pan_evap_wind_condition_code",
"ic1_grnd_surface_obs_pan_evap_wind_quality_code",
"ic1_grnd_surface_obs_pan_evap_data",
"ic1_grnd_surface_obs_pan_evap_data_condition_code",
"ic1_grnd_surface_obs_pan_evap_data_quality_code",
"ic1_grnd_surface_obs_pan_max_water_data",
"ic1_grnd_surface_obs_pan_max_water_data_condition_code",
"ic1_grnd_surface_obs_pan_max_water_data_quality_code",
"ic1_grnd_surface_obs_pan_min_water_data",
"ic1_grnd_surface_obs_pan_min_water_data_condition_code",
"ic1_grnd_surface_obs_pan_min_water_data_quality_code"
),
field_lengths = c(2, 4, 1, 1, 3, 1, 1, 4, 1, 1, 4, 1, 1),
scale_factors = c(1, 1, NA_real_, NA_real_, 100, NA_real_, NA_real_, 10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nnccnccnccncc"
),
ka1 = list(
long_names = c(
"ka1_extreme_air_temp_time_period",
"ka1_extreme_air_temp_code",
"ka1_extreme_air_temp_high_or_low",
"ka1_extreme_air_temp_high_or_low_quality_code"
),
field_lengths = c(3, 1, 5, 1),
scale_factors = c(10, NA_real_, 10, NA_real_),
data_types = "ncnc"
),
kb1 = list(
long_names = c(
"kb1_avg_air_temp_time_period",
"kb1_avg_air_temp_code",
"kb1_avg_air_temp_air_temp",
"kb1_avg_air_temp_air_temp_quality_code"
),
field_lengths = c(3, 1, 5, 1),
scale_factors = c(10, NA_real_, 10, NA_real_),
data_types = "ncnc"
),
kc1 = list(
long_names = c(
"kc1_extreme_air_temp_monthly_code",
"kc1_extreme_air_temp_monthly_condition_code",
"kc1_extreme_air_temp_monthly_temp",
"kc1_extreme_air_temp_monthly_date",
"kc1_extreme_air_temp_monthly_temp_quality_code"
),
field_lengths = c(1, 1, 5, 6, 1),
scale_factors = c(NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "ccncc"
),
kd1 = list(
long_names = c(
"kd1_heat_cool_deg_days_time_period",
"kd1_heat_cool_deg_days_code",
"kd1_heat_cool_deg_days_value",
"kd1_heat_cool_deg_days_quality_code"
),
field_lengths = c(3, 1, 4, 1),
scale_factors = c(1, NA_real_, 1, NA_real_),
data_types = "ncnc"
),
ke1 = list(
long_names = c(
"ke1_extreme_temp_number_days_max_32f_or_lower",
"ke1_extreme_temp_number_days_max_32f_or_lower_quality_code",
"ke1_extreme_temp_number_days_max_90f_or_higher",
"ke1_extreme_temp_number_days_max_90f_or_higher_quality_code",
"ke1_extreme_temp_number_days_min_32f_or_lower",
"ke1_extreme_temp_number_days_min_32f_or_lower_quality_code",
"ke1_extreme_temp_number_days_min_0f_or_lower",
"ke1_extreme_temp_number_days_min_0f_or_lower_quality_code"
),
field_lengths = c(2, 1, 2, 1, 2, 1, 2, 1),
scale_factors = c(1, NA_real_, 1, NA_real_, 1, NA_real_, 1, NA_real_),
data_types = "ncncncnc"
),
kf1 = list(
long_names = c(
"kf1_hrly_calc_temp",
"kf1_hrly_calc_temp_quality_code"
),
field_lengths = c(5, 1),
scale_factors = c(10, NA_real_),
data_types = "nc"
),
kg1 = list(
long_names = c(
"kg1_avg_dp_wb_temp_time_period",
"kg1_avg_dp_wb_temp_code",
"kg1_avg_dp_wb_temp",
"kg1_avg_dp_wb_temp_derived_code",
"kg1_avg_dp_wb_temp_quality_code"
),
field_lengths = c(3, 1, 5, 1, 1),
scale_factors = c(1, NA_real_, 100, NA_real_, NA_real_),
data_types = "ncncc"
),
ma1 = list(
long_names = c(
"ma1_atmos_p_obs_altimeter_setting_rate",
"ma1_atmos_p_obs_altimeter_quality_code",
"ma1_atmos_p_obs_stn_pressure_rate",
"ma1_atmos_p_obs_stn_pressure_rate_quality_code"
),
field_lengths = c(5, 1, 5, 1),
scale_factors = c(10, NA_real_, 10, NA_real_),
data_types = "ncnc"
),
md1 = list(
long_names = c(
"md1_atmos_p_change_tendency_code",
"md1_atmos_p_change_tendency_code_quality_code",
"md1_atmos_p_change_3_hr_quantity",
"md1_atmos_p_change_3_hr_quantity_quality_code",
"md1_atmos_p_change_24_hr_quantity",
"md1_atmos_p_change_24_hr_quantity_quality_code"
),
field_lengths = c(1, 1, 3, 1, 4, 1),
scale_factors = c(NA_real_, NA_real_, 10, NA_real_, 10, NA_real_),
data_types = "ccncnc"
),
me1 = list(
long_names = c(
"me1_geopotential_hgt_isobaric_lvl_code",
"me1_geopotential_hgt_isobaric_lvl_height",
"me1_geopotential_hgt_isobaric_lvl_height_quality_code"
),
field_lengths = c(1, 4, 1),
scale_factors = c(NA_real_, 1, NA_real_),
data_types = "cnc"
),
mf1 = list(
long_names = c(
"mf1_atmos_p_obs_stp_avg_stn_pressure_day",
"mf1_atmos_p_obs_stp_avg_stn_pressure_day_quality_code",
"mf1_atmos_p_obs_stp_avg_sea_lvl_pressure_day",
"mf1_atmos_p_obs_stp_avg_sea_lvl_pressure_day_quality_code"
),
field_lengths = c(5, 1, 5, 1),
scale_factors = c(10, NA_real_, 10, NA_real_),
data_types = "ncnc"
),
mg1 = list(
long_names = c(
"mg1_atmos_p_obs_avg_stn_pressure_day",
"mg1_atmos_p_obs_avg_stn_pressure_day_quality_code",
"mg1_atmos_p_obs_avg_sea_lvl_pressure_day",
"mg1_atmos_p_obs_avg_sea_lvl_pressure_day_quality_code"
),
field_lengths = c(5, 1, 5, 1),
scale_factors = c(10, NA_real_, 10, NA_real_),
data_types = "ncnc"
),
mh1 = list(
long_names = c(
"mh1_atmos_p_obs_avg_stn_pressure_month",
"mh1_atmos_p_obs_avg_stn_pressure_month_quality_code",
"mh1_atmos_p_obs_avg_sea_lvl_pressure_month",
"mh1_atmos_p_obs_avg_sea_lvl_pressure_month_quality_code"
),
field_lengths = c(5, 1, 5, 1),
scale_factors = c(10, NA_real_, 10, NA_real_),
data_types = "ncnc"
),
mk1 = list(
long_names = c(
"mk1_atmos_p_obs_max_sea_lvl_pressure_month",
"mk1_atmos_p_obs_max_sea_lvl_pressure_date_time",
"mk1_atmos_p_obs_max_sea_lvl_pressure_quality_code",
"mk1_atmos_p_obs_min_sea_lvl_pressure_month",
"mk1_atmos_p_obs_min_sea_lvl_pressure_date_time",
"mk1_atmos_p_obs_min_sea_lvl_pressure_quality_code"
),
field_lengths = c(5, 6, 1, 5, 6, 1),
scale_factors = c(10, NA_real_, NA_real_, 10, NA_real_, NA_real_),
data_types = "nccncc"
),
mv1 = list(
long_names = c(
"mv1_present_weather_obs_condition_code",
"mv1_present_weather_obs_condition_code_quality_code"
),
field_lengths = c(2, 1),
scale_factors = c(NA_real_, NA_real_),
data_types = "cc"
),
mw1 = list(
long_names = c(
"mw1_present_weather_obs_manual_occurrence_condition_code",
"mw1_present_weather_obs_manual_occurrence_condition_code_quality_code"
),
field_lengths = c(2, 1),
scale_factors = c(NA_real_, NA_real_),
data_types = "cc"
),
oa1 = list(
long_names = c(
"oa1_suppl_wind_obs_type",
"oa1_suppl_wind_obs_time_period",
"oa1_suppl_wind_obs_speed_rate",
"oa1_suppl_wind_obs_speed_rate_quality_code"
),
field_lengths = c(1, 2, 4, 1),
scale_factors = c(NA_real_, 1, 10, NA_real_),
data_types = "cnnc"
),
ob1 = list(
long_names = c(
"ob1_hly_subhrly_wind_avg_time_period",
"ob1_hly_subhrly_wind_max_gust",
"ob1_hly_subhrly_wind_max_gust_quality_code",
"ob1_hly_subhrly_wind_max_gust_flag",
"ob1_hly_subhrly_wind_max_dir",
"ob1_hly_subhrly_wind_max_dir_quality_code",
"ob1_hly_subhrly_wind_max_dir_flag",
"ob1_hly_subhrly_wind_max_stdev",
"ob1_hly_subhrly_wind_max_stdev_quality_code",
"ob1_hly_subhrly_wind_max_stdev_flag",
"ob1_hly_subhrly_wind_max_dir_stdev",
"ob1_hly_subhrly_wind_max_dir_stdev_quality_code",
"ob1_hly_subhrly_wind_max_dir_stdev_flag"
),
field_lengths = c(3, 4, 1, 1, 3, 1, 1, 5, 1, 1, 5, 1, 1),
scale_factors = c(1, 10, NA_real_, NA_real_, 1, NA_real_, NA_real_, 100, NA_real_, NA_real_, 100, NA_real_, NA_real_),
data_types = "nnccnccnccncc"
),
oc1 = list(
long_names = c(
"oc1_wind_gust_obs_speed_rate",
"oc1_wind_gust_obs_speed_rate_quality_code"
),
field_lengths = c(4, 1),
scale_factors = c(10, NA_real_),
data_types = "nc"
),
oe1 = list(
long_names = c(
"oe1_summary_of_day_wind_obs_type",
"oe1_summary_of_day_wind_obs_time_period",
"oe1_summary_of_day_wind_obs_speed_rate",
"oe1_summary_of_day_wind_obs_dir",
"oe1_summary_of_day_wind_obs_time_occurrence",
"oe1_summary_of_day_wind_obs_quality_code"
),
field_lengths = c(1, 2, 5, 3, 4, 1),
scale_factors = c(NA_real_, 1, 100, 1, 10, NA_real_),
data_types = "cnnnnc"
),
rh1 = list(
long_names = c(
"rh1_relative_humidity_time_period",
"rh1_relative_humidity_code",
"rh1_relative_humidity_percentage",
"rh1_relative_humidity_derived_code",
"rh1_relative_humidity_quality_code"
),
field_lengths = c(3, 1, 3, 1, 1),
scale_factors = c(1, NA_real_, 1, NA_real_, NA_real_),
data_types = "ncncc"
),
sa1 = list(
long_names = c(
"sa1_sea_surf_temp",
"sa1_sea_surf_temp_quality_code"
),
field_lengths = c(4, 1),
scale_factors = c(10, NA_real_),
data_types = "nc"
),
st1 = list(
long_names = c(
"st1_soil_temp_type",
"st1_soil_temp_soil_temp",
"st1_soil_temp_soil_temp_quality_code",
"st1_soil_temp_depth",
"st1_soil_temp_depth_quality_code",
"st1_soil_temp_soil_cover",
"st1_soil_temp_soil_cover_quality_code",
"st1_soil_temp_sub_plot",
"st1_soil_temp_sub_plot_quality_code"
),
field_lengths = c(1, 5, 1, 4, 1, 2, 1, 1, 1),
scale_factors = c(NA_real_, 10, NA_real_, 10, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_),
data_types = "cncnccccc"
),
ua1 = list(
long_names = c(
"ua1_wave_meas_method_code",
"ua1_wave_meas_wave_period_quantity",
"ua1_wave_meas_wave_height_dimension",
"ua1_wave_meas_quality_code",
"ua1_wave_meas_sea_state_code",
"ua1_wave_meas_sea_state_code_quality_code"
),
field_lengths = c(1, 2, 3, 1, 2, 1),
scale_factors = c(NA_real_, 1, 10, NA_real_, NA_real_, NA_real_),
data_types = "cnnccc"
),
ug1 = list(
long_names = c(
"ug1_wave_meas_primary_swell_time_period",
"ug1_wave_meas_primary_swell_height_dimension",
"ug1_wave_meas_primary_swell_dir_angle",
"ug1_wave_meas_primary_swell_quality_code"
),
field_lengths = c(2, 3, 3, 1),
scale_factors = c(1, 10, 1, NA_real_),
data_types = "nnnc"
),
ug2 = list(
long_names = c(
"ug2_wave_meas_secondary_swell_time_period",
"ug2_wave_meas_secondary_swell_height_dimension",
"ug2_wave_meas_secondary_swell_dir_angle",
"ug2_wave_meas_secondary_swell_quality_code"
),
field_lengths = c(2, 3, 3, 1),
scale_factors = c(1, 10, 1, NA_real_),
data_types = "nnnc"
),
wa1 = list(
long_names = c(
"wa1_platform_ice_accr_source_code",
"wa1_platform_ice_accr_thickness_dimension",
"wa1_platform_ice_accr_tendency_code",
"wa1_platform_ice_accr_quality_code"
),
field_lengths = c(1, 3, 1, 1),
scale_factors = c(NA_real_, 10, NA_real_, NA_real_),
data_types = "cncc"
),
wd1 = list(
long_names = c(
"wd1_water_surf_ice_obs_edge_bearing_code",
"wd1_water_surf_ice_obs_uniform_conc_rate",
"wd1_water_surf_ice_obs_non_uniform_conc_rate",
"wd1_water_surf_ice_obs_ship_rel_pos_code",
"wd1_water_surf_ice_obs_ship_penetrability_code",
"wd1_water_surf_ice_obs_ice_trend_code",
"wd1_water_surf_ice_obs_development_code",
"wd1_water_surf_ice_obs_growler_bergy_bit_pres_code",
"wd1_water_surf_ice_obs_growler_bergy_bit_quantity",
"wd1_water_surf_ice_obs_iceberg_quantity",
"wd1_water_surf_ice_obs_quality_code"
),
field_lengths = c(2, 3, 2, 1, 1, 1, 2, 1, 3, 3, 1),
scale_factors = c(NA_real_, 1, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, 1, 1, NA_real_),
data_types = "cnccccccnnc"
),
wg1 = list(
long_names = c(
"wg1_water_surf_ice_hist_obs_edge_distance",
"wg1_water_surf_ice_hist_obs_edge_orient_code",
"wg1_water_surf_ice_hist_obs_form_type_code",
"wg1_water_surf_ice_hist_obs_nav_effect_code",
"wg1_water_surf_ice_hist_obs_quality_code"
),
field_lengths = c(2, 2, 2, 2, 2, 1),
scale_factors = c(NA_real_, 1, NA_real_, NA_real_, NA_real_, NA_real_),
data_types = "cncccc"
)
)
}
field_categories <- function() {
  # Return the lowercased identifiers of every ISD "additional data"
  # category that this package knows how to parse. The canonical IDs are
  # uppercase in the ISD documentation, so they are written that way here
  # and lowercased once on the way out.
  category_ids <- c(
    "AA1", "AB1", "AC1", "AD1", "AE1", "AG1", "AH1", "AI1", "AJ1",
    "AK1", "AL1", "AM1", "AN1", "AO1", "AP1", "AU1", "AW1", "AX1",
    "AY1", "AZ1", "CB1", "CF1", "CG1", "CH1", "CI1", "CN1", "CN2",
    "CN3", "CN4", "CR1", "CT1", "CU1", "CV1", "CW1", "CX1", "CO1",
    "CO2", "ED1", "GA1", "GD1", "GF1", "GG1", "GH1", "GJ1", "GK1",
    "GL1", "GM1", "GN1", "GO1", "GP1", "GQ1", "GR1", "HL1", "IA1",
    "IA2", "IB1", "IB2", "IC1", "KA1", "KB1", "KC1", "KD1", "KE1",
    "KF1", "KG1", "MA1", "MD1", "ME1", "MF1", "MG1", "MH1", "MK1",
    "MV1", "MW1", "OA1", "OB1", "OC1", "OE1", "RH1", "SA1", "ST1",
    "UA1", "UG1", "UG2", "WA1", "WD1", "WG1"
  )
  tolower(category_ids)
}
get_df_from_category <- function(category_key,
                                 field_lengths,
                                 scale_factor,
                                 data_types,
                                 add_data) {
  # Parse one additional-data category (e.g. "AA1") out of raw ISD record
  # strings into a tibble of typed columns.
  #
  # @param category_key  Uppercase 3-character category identifier ("AA1").
  # @param field_lengths Integer widths of the fixed-width fields that
  #                      follow the 3-character category key.
  # @param scale_factor  Divisor applied to each numeric field (NA for
  #                      character fields).
  # @param data_types    One letter per field: "n" -> numeric, anything
  #                      else -> character.
  # @param add_data      Character vector of raw additional-data strings,
  #                      one element per observation.
  # @return A tibble with one column per field, named "<key>_1", "<key>_2", ...
  pb <-
    progress::progress_bar$new(
      format = "  processing :what [:bar] :percent",
      total = nchar(data_types)
    )
  # seq_along() instead of seq(): seq(x) on a length-1 numeric expands to
  # 1:x, which would have produced the wrong number of columns (and, on a
  # length-1 character vector below, an error) for single-element inputs.
  column_names <-
    paste0(tolower(category_key), "_", seq_along(field_lengths))
  # Keep the substring of each record from the category key onward; records
  # that do not contain this category become NA and propagate as NA below.
  data_strings <-
    add_data %>%
    stringr::str_extract(paste0(category_key, ".*"))
  # Fixed-width field boundaries; positions 1-3 hold the category key itself.
  field_ends <- 3L + cumsum(field_lengths)
  field_starts <- field_ends - field_lengths + 1L
  # Preallocate instead of growing the list with append() on each iteration.
  res_list <- vector("list", length(field_lengths))
  for (i in seq_along(field_lengths)) {
    # substr() and as.numeric() are vectorized and return NA for NA input,
    # so missing records fall out as NA without a per-element loop.
    raw_vals <- substr(data_strings, field_starts[i], field_ends[i])
    if (substr(data_types, i, i) == "n") {
      res_list[[i]] <- as.numeric(raw_vals) / scale_factor[i]
    } else {
      res_list[[i]] <- raw_vals
    }
    pb$tick(tokens = list(what = category_key))
  }
  names(res_list) <- column_names
  dplyr::as_tibble(res_list)
}
bind_additional_data <- function(data,
                                 add_data,
                                 category_key) {
  # Attach the parsed columns for one additional-data category to `data`.
  # Looks up the field specification for `category_key`, parses the raw
  # strings in `add_data` into typed columns, and column-binds the result.
  key_lower <- tolower(category_key)
  params <- additional_data_fields()[[key_lower]]
  parsed_cols <-
    get_df_from_category(
      category_key = toupper(key_lower),
      field_lengths = params$field_lengths,
      scale_factor = params$scale_factors,
      data_types = params$data_types,
      add_data = add_data
    )
  dplyr::bind_cols(data, parsed_cols)
}
# Load shared helper utilities for the nimble test suite.
source(system.file(file.path('tests', 'testthat', 'test_utils.R'), package = 'nimble'))
# Save the current warning level so it can be restored after the tests,
# then make warnings print immediately as they occur.
RwarnLevel <- options('warn')$warn
options(warn = 1)
# Likewise save nimble's verbosity setting and silence model-building output.
nimbleVerboseSetting <- nimbleOptions('verbose')
nimbleOptions(verbose = FALSE)
context("Testing of expandNodeNames")
# expandNodeNames() should resolve variable names and index expressions to
# the concrete node names declared in the model, clipping index ranges that
# extend beyond a variable's extent and dropping references that fall
# entirely outside it.
test_that("expandNodeNames works for various cases, including going beyond extent of variable", {
    code <- nimbleCode({
        for(i in 1:4)
            mu[i] ~ dnorm(0,1)
        for(i in 1:3)
            for(j in 1:3)
                theta[i,j] ~ dnorm(0,1)
        p[1:4] ~ ddirch(alpha[1:4])
    })
    m <- nimbleModel(code, inits = list(alpha = rep(1, 4)))
    # Bare variable name expands to all scalar nodes; out-of-range indices
    # are clipped (3:5 -> 3:4) or dropped entirely (5:7 -> none).
    expect_equal(m$expandNodeNames("mu"), c("mu[1]","mu[2]","mu[3]","mu[4]"))
    expect_equal(m$expandNodeNames("mu[3:5]"), c("mu[3]","mu[4]"))
    expect_equal(m$expandNodeNames("mu[5:7]"), character(0))
    # Matrix variable: expansion is column-major, and clipping applies
    # independently to each dimension.
    expect_equal(m$expandNodeNames("theta"), c("theta[1, 1]","theta[2, 1]","theta[3, 1]","theta[1, 2]","theta[2, 2]","theta[3, 2]","theta[1, 3]","theta[2, 3]","theta[3, 3]"))
    expect_equal(m$expandNodeNames("theta[3:5,1:2]"), c("theta[3, 1]","theta[3, 2]"))
    expect_equal(m$expandNodeNames("theta[1:2,3:5]"), c("theta[1, 3]","theta[2, 3]"))
    expect_equal(m$expandNodeNames("theta[4:6,5]"), character(0))
    # Mixed vectors: invalid elements are dropped while valid ones expand.
    expect_equal(m$expandNodeNames(c("theta[1, 7]", "mu")), c("mu[1]","mu[2]","mu[3]","mu[4]"))
    expect_equal(m$expandNodeNames(c("theta[1, 3:5]", "mu[3:5]")), c("theta[1, 3]", "mu[3]", "mu[4]"))
    expect_equal(m$expandNodeNames(c("theta[1, 7]", "mu[5]")), character(0))
    # "mu[1, 1]" has the wrong number of dimensions for mu, so only the
    # valid theta node survives.
    expect_equal(m$expandNodeNames(c("mu[1, 1]", "theta[1, 1]")), "theta[1, 1]")
    # With unique = FALSE, duplicates from overlapping requests are kept.
    expect_equal(m$expandNodeNames(c("mu[3:5]", "mu[3:9]"), unique = FALSE),
                 c("mu[3]","mu[4]","mu[3]","mu[4]"))
    expect_equal(m$expandNodeNames(c("theta[1:3, 3:5]", "theta[3:5, 1:3]"), unique = FALSE),
                 c("theta[1, 3]","theta[2, 3]","theta[3, 3]","theta[3, 1]","theta[3, 2]","theta[3, 3]"))
    # Symbolic indices ("mu[a]") should no longer raise the old error about
    # variables in indexing; double-bracket forms are also accepted.
    expect_failure(expect_error(m$expandNodeNames("mu[a]"), "variable was found in the indexing"))
    expect_failure(expect_error(m$expandNodeNames("mu[[a]]"), "variable was found in the indexing"))
    expect_identical(m$expandNodeNames("mu[[2]]"), "mu[2]")
    # p[1:4] is a single multivariate node: any overlapping index request
    # resolves to the whole node, and fully-outside requests to nothing.
    expect_equal(m$expandNodeNames("p[1:4]"), "p[1:4]")
    expect_equal(m$expandNodeNames("p[1:2]"), "p[1:4]")
    expect_equal(m$expandNodeNames("p[1:5]"), "p[1:4]")
    expect_equal(m$expandNodeNames("p[5:7]"), character(0))
    expect_equal(m$expandNodeNames(c("p[1:5]", "mu[3:5]")), c("p[1:4]", "mu[3]", "mu[4]"))
    expect_equal(m$expandNodeNames(c("p[1:5]", "p"), unique = FALSE), c("p[1:4]", "p[1:4]"))
})
# Large node indices must round-trip exactly: R's default formatting prints
# 100000 as "1e+05", so both the plain and scientific spellings of the node
# name must resolve to "x[100000]" -- in uncompiled and compiled models, and
# through nimbleFunction member access, values(), calculate(), and nimCopy().
test_that("Node index 100000 is not handled as 'x[1e+5]'", {
  set.seed(1)
  mc <- nimbleCode({
    for(i in 99998:100002) {
      x[i] ~ dnorm(0,1)
    }
  })
  m <- nimbleModel(mc, inits = list(x = 1:100002), calculate = FALSE)
  expect_identical(m$getNodeNames()[3], "x[100000]")
  expect_identical(m$expandNodeNames("x[1e+5]"), "x[100000]")
  Cm <- compileNimble(m)
  # nimbleFunction that touches the node through a setup variable, a
  # scientific-notation setup variable, and an inline scientific literal
  nf <- nimbleFunction(
    setup = function(model) {
      node <- 'x[100000]'
      nodeSci <- 'x[1e+5]'
      mv <- modelValues(model)
    },
    run = function() {},
    methods = list(
      getVal1 = function() {return(model[[node]]); returnType(double())},
      getVal2 = function() {return(model[[nodeSci]]); returnType(double())},
      getVal3 = function() {return(model[['x[1e+5]']]); returnType(double())},
      getVal4 = function() {v <- values(model, node); return(v[1]); returnType(double())},
      calc1 = function() {return(model$calculate(node)); returnType(double())},
      calc2 = function() {return(model$calculate(nodeSci)); returnType(double())},
      calc3 = function() {return(model$calculate('x[1e+5]')); returnType(double())},
      calc4 = function() {return(model$calculate(node)); returnType(double())},
      copy1 = function() {nimCopy(from = model, to = mv, rowTo = 1, nodes = node, logProb = TRUE)}
    )
  )
  nf1 <- nf(m)
  Cnf1 <- compileNimble(nf1, project = m)
  expect_identical(m$getDependencies("x[1e+5]"), 'x[100000]')
  expect_identical(m$expandNodeNames("x[1e+5]"), 'x[100000]')
  m$x[100000] <- 1
  Cm$x[100000] <- 2
  # run the same checks against the uncompiled (ver 1) and compiled (ver 2)
  # model/function pair
  for(ver in 1:2) {
    if(ver == 1) {
      mod <- m
      fun <- nf1
    } else {
      mod <- Cm
      fun = Cnf1
    }
    i <- 0
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$getVal1(), i)
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$getVal2(), i)
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$getVal3(), i)
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$getVal4(), i)
    i <- 0
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$calc1(), dnorm(i, 0, 1, log = TRUE))
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$calc2(), dnorm(i, 0, 1, log = TRUE))
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$calc3(), dnorm(i, 0, 1, log = TRUE))
    i <- i+1; mod$x[100000] <- i; expect_identical(fun$calc4(), dnorm(i, 0, 1, log = TRUE))
    i <- 0
    i <- i+1; mod$x[100000] <- i; mod$calculate(); fun$copy1(); expect_identical(i, fun$mv['x', 1][100000])
  }
})
# Restore the user's warning level and nimble verbosity. RwarnLevel is
# presumably saved near the top of the full test file (not visible in this
# chunk) -- confirm against the package source.
options(warn = RwarnLevel)
nimbleOptions(verbose = nimbleVerboseSetting)
|
"quacau" <-
function(f, para, paracheck=TRUE) {
    # Quantile function of the Cauchy distribution for nonexceedance
    # probabilities 'f' given a parameter object 'para' (para$para holds
    # location and scale). Returns NULL invisibly on invalid input.
    if(! check.fs(f)) return()
    # parameter validation can be skipped by the caller for speed
    if(paracheck == TRUE && ! are.parcau.valid(para)) return()
    p <- para$para
    names(p) <- NULL   # strip names so they do not propagate to the result
    return(qcauchy(f, location=p[1], scale=p[2]))
}
qphat <- function(p, n, mu=0, sigma=1, type="known", LSL=-3, USL=3, nodes=30) {
  # Quantile function of the phat statistic (probability of nonconformance),
  # evaluated by the compiled routine "phat_qf" from package 'spc'.
  #
  # p      probabilities; entries outside (0, 1) yield NA.
  # n      sample size (>= 1).
  # mu, sigma   process mean and standard deviation.
  # type   "known" or "estimated" sigma (partial matching allowed).
  # LSL, USL    lower/upper specification limits, LSL < USL.
  # nodes  number of quadrature nodes (>= 2) for the C routine.
  # Returns a numeric vector of quantiles (first element named "qf").
  if ( n < 1 )
    stop("n must be >= 1")
  if ( sigma < 1e-10 )
    stop("sigma much too small")
  # map the sigma mode to the 0-based code expected by the C routine
  ctyp <- -1 + pmatch(type, c("known", "estimated"))
  if ( is.na(ctyp) )
    stop("invalid sigma mode")
  if ( LSL >= USL )
    stop("wrong relationship between lower and upper specification limits (LSL must be smaller than USL)")
  if ( nodes < 2 )
    stop("far too few nodes")
  # preallocate as numeric NA (was logical NA); seq_along() is safe where
  # 1:length(p) would iterate c(1, 0) for empty input
  qf <- rep(NA_real_, length(p))
  for ( i in seq_along(p) ) {
    if ( 0 < p[i] && p[i] < 1 )
      qf[i] <- .C("phat_qf",
                  as.double(p[i]), as.integer(n), as.double(mu), as.double(sigma), as.integer(ctyp),
                  as.double(LSL), as.double(USL), as.integer(nodes),
                  ans=double(length=1), PACKAGE="spc")$ans
  }
  # labels only the first element (remaining names are NA) -- kept for
  # backward compatibility with the original scalar-oriented interface
  names(qf) <- "qf"
  qf
}
# Boxplot of a single gene's expression across the contrast groups of a
# 'polar' object, annotated with significance statistics.
#
# polar          polar S4 object carrying @sampledata, @expression, @pvalues,
#                @contrast and (optionally) @multi_group_test.
# value          the gene/row of 'expression' to plot (length 1).
# box_colours    one colour per entry of levels_order.
# test           statistic source: precomputed polar p-values ("polar_pvalue",
#                "polar_padj", "polar_multi_*") or a test computed here via
#                ggpubr::compare_means ("t.test", "wilcox.test", "anova",
#                "kruskal.test").
# levels_order   order of the contrast levels on the x axis.
# my_comparisons list of pairwise comparisons; defaults to all pairs.
# plot_method    "ggplot" (ggpubr) or "plotly".
# ...            passed on to stat_pvalue_manual (ggplot branch only).
# Returns a ggplot or plotly object.
boxplot_trio <- function(polar,
                         value,
                         box_colours = c('green3', 'blue', 'red'),
                         test = "polar_pvalue",
                         levels_order = NULL,
                         my_comparisons = NULL,
                         text_size = 10,
                         stat_colour = "black",
                         stat_size = 3,
                         step_increase = 0.05,
                         plot_method="ggplot",
                         ...){
  sampledata <- polar@sampledata
  expression <- polar@expression
  pvalues <- polar@pvalues
  # ---- input validation --------------------------------------------------
  if(! test %in% c("polar_pvalue", "polar_padj", "polar_multi_pvalue",
                   "polar_multi_padj", "t.test", "wilcox.test", "anova",
                   "kruskal.test")) {
    stop(paste("expression must be a data frame or c('polar_pvalues',",
               "'polar_padj', 'polar_multi_pvalue', 'polar_multi_padj',",
               "'t.test', 'wilcox.test', 'anova', 'kruskal.test')"))
  }
  if(is.null(levels_order)) {
    levels_order <- levels(sampledata[, polar@contrast])
  }
  if(! class(levels_order) %in% c("character")) {
    stop("levels_order must be a character vector")
  }
  if(! all(levels_order %in% levels(sampledata[, polar@contrast]))){
    stop(paste('levels_order must be a character vector defining the order',
               'of levels in sampledata[, contrast]'))
  }
  if(length(box_colours) != length(levels_order)){
    stop(paste0('The length of box_colours must match teh length of',
                'levels_order'))
  }
  # Normalise colours to hex form via col2rgb/rgb; invalid names error out.
  # NOTE(review): the grepl() patterns on the two lines below appear
  # truncated in this copy of the source (presumably hex-"#" checks) --
  # confirm against the package repository.
  box_colours <- unlist(lapply(box_colours, function(x) {
    if(! grepl("
       class(try(col2rgb(x), silent = TRUE))[1] == "try-error") {
      stop(paste(x, 'is not a valid colour'))
    } else if (! grepl("
      y <- col2rgb(x)[, 1]
      x <- rgb(y[1], y[2], y[3], maxColorValue=255)
    }
    return(x)
  }))
  if(! class(sampledata) %in% c("data.frame")) {
    stop("sampledata must be a data frame")
  }
  if(! class(expression)[1] %in% c("data.frame", "matrix")) {
    stop("expression must be a data frame or matrix")
  }
  if(! class(value) %in% c("character", "numeric")) {
    stop("value must be a character")
  }
  if(length(value) > 1) stop("value must be of length 1")
  if(! value %in% rownames(expression)) {
    stop("value/gene is not in rownames(expression)")
  }
  if(! identical(colnames(expression), as.character(sampledata$ID))) {
    stop("expression and sampledata misalligned")
  }
  if(grepl("multi", test) & is.null(polar@multi_group_test)){
    stop(paste("A multi-group test parameter is required in pvalues to use",
               test))
  }
  if(! plot_method %in% c('plotly', 'ggplot')){
    stop("plot_method must be either plotly or ggplot")
  }
  # ---- assemble per-sample data frame ------------------------------------
  colour_map <- setNames(box_colours, levels_order)
  sampledata$comp <- sampledata[, polar@contrast]
  expression <- expression[, match(as.character(sampledata$ID),
                                   colnames(expression))]
  # NOTE(review): 'index' is only defined when value is a character, but it
  # is used unconditionally for the ggplot y-axis label below -- confirm
  # numeric 'value' is supported upstream.
  if(class(value) == "character") {
    index <- which(rownames(expression) == value)
  }
  # default comparisons: every pair of levels
  if(is.null(my_comparisons)) {
    comps <- levels_order
    my_comparisons <- lapply(seq_len(ncol(combn(comps, 2))), function(i) {
      as.character(combn(comps, 2)[, i])
    })
  }
  df <- data.frame("ID" = sampledata$ID,
                   "group" = sampledata$comp,
                   "row" = as.numeric(as.character(expression[value, ])))
  df <- df[! is.na(df$row), ]
  df <- df[df$group %in% levels_order, ]
  df$group <- factor(df$group, levels_order)
  df$col <- factor(df$group, labels=colour_map[match(levels(df$group),
                                                     names(colour_map))])
  # numeric x position of each group level (for annotation placement)
  map_pos <- setNames(seq_along(levels(df$group)), levels(df$group))
  # ---- compute the statistics to annotate --------------------------------
  if(test %in% c("t.test", "wilcox.test", "anova", "kruskal.test")){
    # run the requested test here via ggpubr::compare_means
    pvals <- compare_means(formula = row ~ group, data = df,
                           comparisons = my_comparisons,
                           method = test,
                           step.increase = step_increase,
                           size=stat_size)
    pvals$x.position <- map_pos[pvals$group1] +
      (map_pos[pvals$group2] - map_pos[pvals$group1])/2
    pvals$y.position <- max(df$row, na.rm=TRUE)*
      (1.01 + step_increase*c(seq_len(nrow(pvals))-1))
    pvals$new_p_label <- pvals$p.format
  } else if (! grepl("multi", test)){
    # pairwise polar p-values pulled from polar@pvalues columns
    if(! any(grepl(gsub("polar_", "", test), colnames(pvalues)))){
      stop(paste(test, "tests must have", gsub(".*_", "", test),
                 "columns in polar@pvalues"))
    }
    pvals <- pvalues[value, ]
    pvals <- pvals[, grepl(gsub("polar_", "", test), colnames(pvals))]
    if(! is.null(polar@multi_group_test)){
      # drop the multi-group columns; only pairwise ones are wanted here
      pvals <- pvals[, ! grepl(polar@multi_group_test, colnames(pvals))]
    }
    colnames(pvals) <- gsub(paste0("_", gsub("polar_", "", test)), "",
                            colnames(pvals))
    rownames(pvals) <- "p"
    # reshape to one row per comparison with group1/group2 parsed from the
    # "<group1>_<group2>" column names
    pvals <- data.frame(t(pvals))
    pvals$group1 <- gsub("_.*", "", rownames(pvals))
    pvals$group2 <- gsub(".*_", "", rownames(pvals))
    pvals$p.format <- format(pvals$p, digits=2)
    pvals$method <- test
    pvals$y.position <- max(df$row, na.rm=TRUE)
    pvals$comp <- paste0(pvals$group1, "_", pvals$group2)
    pvals$x.position <- map_pos[pvals$group1] +
      (map_pos[pvals$group2] - map_pos[pvals$group1])/2
    pvals$y.position <- pvals$y.position[1]*
      (1.01 + step_increase*(seq_len(nrow(pvals))-1))
    # keep only the requested comparisons (either orientation)
    comp_use <- unlist(lapply(my_comparisons, function(x) {
      c(paste0(x[1], "_", x[2]), paste0(x[2], "_", x[1]))
    }))
    pvals <- pvals[pvals$comp %in% comp_use, ]
  } else{
    # multi-group polar p-value: a single number annotated on the plot
    if(! any(grepl(gsub("polar_multi_", "", test), colnames(pvalues)))){
      stop(paste(test, "tests must have", gsub(".*_", "", test),
                 "columns in polar@pvalues"))
    }
    pvals <- pvalues[value, ]
    pvals <- pvals[, grepl(gsub("polar_multi_", "", test), colnames(pvals))]
    pvals <- pvals[, grepl(polar@multi_group_test, colnames(pvals))]
  }
  # ---- build the plot ----------------------------------------------------
  if(plot_method == 'ggplot'){
    p <- ggboxplot(data = df,
                   x = "group",
                   y = "row",
                   xlab = "",
                   ylab = paste(polar@pvalues$label[index], "Expression"),
                   fill = "group",
                   color = "group",
                   palette = box_colours,
                   outlier.shape = NA,   # outliers hidden; jitter shows them
                   alpha = 0.3) +
      geom_jitter(data=df, height = 0, width = 0.30,
                  aes_string(color="group")) +
      theme(legend.position = "none",
            text = element_text(size = text_size),
            plot.background = element_rect(fill="transparent", color=NA),
            panel.background = element_rect(fill="transparent", colour=NA),
            legend.background = element_rect(fill="transparent", colour=NA))
    if(! grepl("multi", test)){
      # pairwise brackets with p-value labels
      p <- p + stat_pvalue_manual(
        data = pvals, label = "p.format",
        xmin = "group1", xmax = "group2",
        step.increase = step_increase,
        y.position = "y.position", color = stat_colour,
        size=stat_size, ...)
    } else{
      # single multi-group p-value centred at the top of the plot
      p <- p + annotate("text", x = 0.5 + length(unique(df$group))/2,
                        y = Inf, vjust = 2, hjust = 0.5, color = stat_colour,
                        label = paste("p =", format(pvals, digits = 2)))
    }
  } else{
    # plotly: boxes (invisible markers) plus jittered sample points
    p <- df %>%
      plot_ly() %>%
      add_trace(x = ~as.numeric(group), y = ~row,
                type = "box",
                colors = levels(df$col), color = ~col,
                opacity=0.5, marker = list(opacity = 0),
                hoverinfo="none", showlegend = FALSE) %>%
      add_markers(x = ~jitter(as.numeric(group)), y = ~row,
                  marker = list(size = 6, color=~col),
                  hoverinfo = "text",
                  text = ~paste0(ID,
                                 "<br>Group: ", group,
                                 "<br>Expression: ", row),
                  showlegend = FALSE) %>%
      layout(legend = list(orientation = "h",
                           x =0.5, xanchor = "center",
                           y = 1, yanchor = "bottom"
      ),
      xaxis = list(title = polar@contrast, tickvals = 1:3,
                   ticktext = levels(df$group)),
      yaxis = list(title = paste(value, "Expression")))
    lines <- list()
    if(! grepl("multi", test)){
      # horizontal bracket line per comparison, then p-value annotations
      for (i in seq_len(nrow(pvals))) {
        line <- list(line=list(color = stat_colour))
        line[["x0"]] <- map_pos[pvals$group1][i]
        line[["x1"]] <- map_pos[pvals$group2][i]
        line[c("y0", "y1")] <- pvals$y.position[i]
        lines <- c(lines, list(line))
      }
      a <- list(
        x = as.numeric(pvals$x.position),
        y = pvals$y.position,
        text = format(pvals$p.format, digits=3),
        xref = "x",
        yref = "y",
        yanchor = "bottom",
        font = list(color = stat_colour),
        showarrow = FALSE
      )
      p <- p %>% layout(annotations = a, shapes=lines)
    } else{
      # single multi-group annotation above the data
      a <- list(
        x = 2,
        y = step_increase + max(df$row, na.rm=TRUE),
        text = format(pvals, digits=3),
        xref = "x",
        yref = "y",
        font = list(color = stat_colour),
        showarrow = FALSE
      )
      p <- p %>% layout(annotations = a)
    }
  }
  return(p)
}
# Sarle's bimodality coefficient of a numeric vector, based on sample
# skewness and excess kurtosis (psych type-2 estimators).
#
# x      numeric vector.
# na.rm  drop NA values before computing?
# Returns the coefficient, or NaN for an empty vector.
bimodality_coefficient <- function(x, na.rm=FALSE) {
  if (na.rm) x <- x[!is.na(x)]
  n <- length(x)
  if (n == 0) return(NaN)
  skewness <- psych::skew(x, type=2)
  kurtosis <- psych::kurtosi(x, type=2)
  # small-sample correction term in the denominator
  correction <- 3 * ((n - 1)^2 / ((n - 2) * (n - 3)))
  (skewness^2 + 1) / (kurtosis + correction)
}
# Assess bimodality of mouse-tracking measures, per method, via mt_reshape
# aggregation. Supported methods: "BC" (bimodality coefficient), "HDS"
# (Hartigan's dip test p-value), "HDS_sim" (dip test with simulated p-value,
# B bootstrap replicates).
#
# Returns a named list with one reshaped result per requested method.
mt_check_bimodality <- function(data,
                                use="measures", use_variables=NULL,
                                methods=c("BC", "HDS"), B=2000,
                                grouping_variables=NULL, ...) {
  results <- list()
  for (method in methods) {
    if (!method %in% c("BC", "HDS", "HDS_sim")) {
      stop("Argument for methods may only contain BC, HDS, or HDS_sim")
    }
    # label under which this method's results are stored
    method_label <- switch(method,
      BC = "BC",
      HDS = "HDS_p_value",
      HDS_sim = "HDS_simulated_p_value"
    )
    # per-variable aggregation function implementing the method
    aggregation_function <- switch(method,
      BC = bimodality_coefficient,
      HDS = function(x) diptest::dip.test(x)$p,
      HDS_sim = function(x) diptest::dip.test(x, simulate.p.value=TRUE, B=B)$p
    )
    results[[method_label]] <- mt_reshape(
      data=data,
      use=use, use_variables=use_variables,
      use2_variables=grouping_variables,
      aggregate=TRUE,
      .funs=aggregation_function,
      ...
    )
  }
  return(results)
}
# Scrape Damodaran's industry WACC table and return it as a tidy data frame
# with numeric columns (percent signs stripped).
wacc <- function(){
  url <- "http://people.stern.nyu.edu/adamodar/New_Home_Page/datafile/wacc.htm"
  # first HTML table on the page holds the data
  page <- xml2::read_html(url)
  raw <- rvest::html_table(rvest::html_nodes(page, "table"))[[1]]
  res <- dplyr::as_data_frame(raw)
  res <- magrittr::set_colnames(res, value = c("Industry", "Number_Firms", "Beta",
                                               "Cost_Equity", "Equity_Debt", "Std_Dev_Stock",
                                               "Cost_Debt", "Tax_Rate", "AfterTax_Cost_Debt",
                                               "Debt_Equity", "Cost_Capital"))
  # drop the header row and the trailing summary rows
  res <- dplyr::slice(res, -1)
  res <- dplyr::slice(res, -95:-96)
  res <- dplyr::mutate(res, Industry = gsub("\\r", "", Industry))
  # strip percent signs, then convert all value columns to numeric
  res <- dplyr::mutate_if(res, is.character, gsub, pattern = "%", replacement = "")
  res <- dplyr::mutate_at(res, dplyr::vars(Number_Firms:Cost_Capital), as.numeric)
  return(res)
}
# Scrape Damodaran's industry total-beta table and return it as a tidy data
# frame with numeric columns (percent signs stripped).
betas <- function(){
  url <- "http://people.stern.nyu.edu/adamodar/New_Home_Page/datafile/totalbeta.html"
  # first HTML table on the page holds the data
  page <- xml2::read_html(url)
  raw <- rvest::html_table(rvest::html_nodes(page, "table"))[[1]]
  res <- dplyr::as_data_frame(raw)
  res <- magrittr::set_colnames(res, value = c("Industry", "Number_Firms", "Av_Unlevered_Beta",
                                               "Av_Levered_Beta", "Av_Corr_Market",
                                               "Total_Unlevered_Beta", "Total_Levered_Beta"))
  # drop the header row and the trailing summary rows
  res <- dplyr::slice(res, -1)
  res <- dplyr::slice(res, -96:-97)
  res <- dplyr::mutate(res, Industry = gsub("\\r", "", Industry))
  # strip percent signs, then convert all value columns to numeric
  res <- dplyr::mutate_if(res, is.character, gsub, pattern = "%", replacement = "")
  res <- dplyr::mutate_at(res, dplyr::vars(Number_Firms:Total_Levered_Beta), as.numeric)
  return(res)
}
# Parse a Monaco treatment-planning-system DVH export (one file as a
# character vector of lines 'x') into a DVHLst object: per-structure DVH
# matrices plus patient/plan metadata.
#
# x          lines of the exported file; line 1 is a "|"-separated header.
# planInfo   "doseRx" to extract the prescribed dose from the plan name.
# courseAsID currently unused here.
# ...        extra options; "volume_from_dvh"=TRUE derives structure volume
#            from the DVH itself.
parseMonaco <- function(x, planInfo=FALSE, courseAsID=FALSE, ...) {
  planInfo <- as.character(planInfo)
  dots <- list(...)
  # header fields: patient, plan, bin width, dose units, volume units
  header <- unlist(strsplit(x[1], " [|] "))
  patName <- trimWS(sub("^Patient ID: (.+)[~].+$", "\\1", header[1]))
  patID <- trimWS(sub("^Patient ID: .+[~](.+)$", "\\1", header[1]))
  plan <- trimWS(sub("^Plan Name: (.+)$", "\\1", header[2]))
  doseUnit <- toupper(trimWS(sub("^Dose Units: (.+)$", "\\1", header[5])))
  if(doseUnit == "%") {
    # relative dose: the absolute unit is hidden in the bin-width field
    isDoseRel <- TRUE
    doseUnit <- toupper(trimWS(sub("^Bin Width: [.[:digit:]]+\\((.+)\\)$", "\\1", header[4])))
  } else {
    isDoseRel <- FALSE
  }
  if(!grepl("^(GY|CGY)$", doseUnit)) {
    warning("Could not determine dose measurement unit")
    doseUnit <- NA_character_
  }
  volumeUnit <- toupper(trimWS(sub("^Volume Units: (.+)$", "\\1", header[6])))
  volumeUnit <- if(grepl("^CM.+", volumeUnit)) {
    isVolRel <- FALSE
    "CC"
  } else if(grepl("^%", volumeUnit)) {
    isVolRel <- TRUE
    "PERCENT"
  } else {
    isVolRel <- FALSE
    warning("Could not determine volume measurement unit")
    NA_character_
  }
  # Monaco files carry no iso-dose percentage; assume 100 when dose is
  # requested from the plan name
  isoDoseRx <- if(tolower(planInfo) == "doserx") {
    warning("Iso-dose-Rx is assumed to be 100")
    100
  } else {
    warning("No info on % for dose")
    NA_real_
  }
  doseRx <- if(tolower(planInfo) == "doserx") {
    # prescribed dose encoded in the plan name as e.g. "..._50GY_..."
    drx <- sub("^[[:alnum:]]+_([.[:digit:]]+)(GY|CGY)_[[:alnum:]]*", "\\1",
               plan, perl=TRUE, ignore.case=TRUE)
    as.numeric(drx)
  } else {
    warning("No info on prescribed dose")
    NA_real_
  }
  # body: structure name, dose, volume per line; last line is the date
  DVHdate <- x[length(x)]
  DVHspan <- x[4:(length(x)-2)]
  DVHlen <- length(DVHspan)
  pat <- "^(.+?)[[:blank:]]+([.[:digit:]]+)[[:blank:]]+([.[:digit:]]+)$"
  structs <- sub(pat, "\\1", DVHspan)[-DVHlen]
  doses <- as.numeric(sub(pat, "\\2", DVHspan)[-DVHlen])
  volumes <- as.numeric(sub(pat, "\\3", DVHspan)[-DVHlen])
  DVHall <- data.frame(structure=structs, dose=doses, volume=volumes,
                       stringsAsFactors=FALSE)
  # column names reflect whether dose/volume are relative or absolute
  names(DVHall) <- if(isDoseRel) {
    if(isVolRel) {
      c("structure", "doseRel", "volumeRel")
    } else {
      c("structure", "doseRel", "volume")
    }
  } else {
    if(isVolRel) {
      c("structure", "dose", "volumeRel")
    } else {
      c("structure", "dose", "volume")
    }
  }
  structList <- split(DVHall, DVHall$structure)
  # build one DVHs object for a single structure's rows
  getDVH <- function(strct, info) {
    structure <- strct$structure[1]
    dvh <- data.matrix(strct[ , 2:3])
    haveVars <- colnames(dvh)
    # fill in whichever of the four dose/volume columns are missing
    if(!("volume" %in% haveVars)) {
      isVolRel <- TRUE
      dvh <- cbind(dvh, volume=NA_real_)
    }
    # structure volume: optionally the maximum of the absolute DVH volumes
    structVol <- if(hasName(dots, "volume_from_dvh")) {
      if((dots[["volume_from_dvh"]] == TRUE) && ("volume" %in% haveVars)) {
        max(dvh[ , "volume"])
      }
    } else {
      NA_real_
    }
    if(!("volumeRel" %in% haveVars)) {
      isVolRel <- FALSE
      volRel <- 100*(dvh[ , "volume"] / structVol)
      dvh <- cbind(dvh, volumeRel=volRel)
    }
    if(!("dose" %in% haveVars)) {
      dvh <- cbind(dvh, dose=NA_real_)
    }
    if(!("doseRel" %in% haveVars)) {
      doseRel <- if(!is.null(info$doseRx) && !is.na(info$doseRx)) {
        100*(dvh[ , "dose"] / info$doseRx)
      } else { NA_real_ }
      dvh <- cbind(dvh, doseRel=doseRel)
    }
    stopifnot(isIncreasing(dvh))
    DVHtype <- dvhType(dvh)
    DVH <- list(dvh=dvh,
                patName=info$patName,
                patID=info$patID,
                date=info$date,
                DVHtype=DVHtype,
                plan=info$plan,
                structure=structure,
                structVol=structVol,
                doseUnit=info$doseUnit,
                volumeUnit=info$volumeUnit,
                doseRx=info$doseRx,
                isoDoseRx=info$isoDoseRx,
                doseMin=NA_real_,
                doseMax=NA_real_,
                doseAvg=NA_real_,
                doseMed=NA_real_,
                doseMode=NA_real_,
                doseSD=NA_real_)
    # differential DVHs are stored alongside a cumulative conversion
    if(DVHtype == "differential") {
      DVH$dvh <- convertDVH(dvh, toType="cumulative",
                            toDoseUnit="asis", perDose=FALSE)
      DVH$dvhDiff <- dvh
    }
    class(DVH) <- "DVHs"
    return(DVH)
  }
  info <- list(patID=patID, patName=patName, date=DVHdate,
               plan=plan, doseRx=doseRx, isoDoseRx=isoDoseRx,
               doseUnit=doseUnit, volumeUnit=volumeUnit)
  dvhL <- lapply(structList, getDVH, info=info)
  dvhL <- Filter(Negate(is.null), dvhL)
  names(dvhL) <- sapply(dvhL, function(y) y$structure)
  if(length(unique(names(dvhL))) < length(dvhL)) {
    warning("Some structures have the same name - this can lead to problems")
  }
  class(dvhL) <- "DVHLst"
  attr(dvhL, which="byPat") <- TRUE
  return(dvhL)
}
# Build the settings object for the (deprecated) torch CNN model: a
# hyper-parameter grid over nbfilters x epochs, split into a list of
# single-row data frames as expected by fitCNNTorch.
setCNNTorch <- function(nbfilters=c(16, 32), epochs=c(20, 50), seed=0, class_weight = 0, type = 'CNN'){
  ParallelLogger::logWarn('This model has broken - please use setCNN() or setCNN2() instead ')
  # draw a random seed when none was supplied
  if (is.null(seed[1])) {
    seed <- as.integer(sample(100000000, 1))
  }
  hyper_grid <- expand.grid(nbfilters = nbfilters,
                            epochs = epochs,
                            seed = seed[1],
                            class_weight = class_weight,
                            type = type)
  result <- list(model = 'fitCNNTorch',
                 param = split(hyper_grid, 1:(length(nbfilters)*length(epochs))),
                 name = 'CNN Torch')
  class(result) <- 'modelSettings'
  return(result)
}
# Fit the torch CNN across the hyper-parameter grid in 'param', select the
# best setting by cross-validated AUC, refit on all data, and return a
# plpModel object.
#
# population  data frame of subjects; last column should be 'indexes'
#             (CV fold assignment), otherwise all rows get fold 1.
# plpData     plpData object with covariateData.
# param       list of hyper-parameter settings (from setCNNTorch).
# outcomeId, cohortId  identifiers carried through to the result.
fitCNNTorch <- function(population, plpData, param, search='grid', quiet=FALSE,
                        outcomeId, cohortId, ...){
  if (!FeatureExtraction::isCovariateData(plpData$covariateData))
    stop("Needs correct covariateData")
  if(colnames(population)[ncol(population)]!='indexes'){
    warning('indexes column not present as last column - setting all index to 1')
    population$indexes <- rep(1, nrow(population))
  }
  start <- Sys.time()
  # python side uses 0-based row ids
  population$rowIdPython <- population$rowId-1
  pPopulation <- as.matrix(population[,c('rowIdPython','outcomeCount','indexes')])
  # temporal sparse representation for the CNN (TRUE, not T)
  result <- toSparseTorchPython(plpData, population, map=NULL, temporal=TRUE)
  # scratch directory for model artefacts; clear any leftovers
  outLoc <- createTempModelLoc()
  for(file in dir(outLoc))
    file.remove(file.path(outLoc,file))
  # grid search: train with CV for each setting, collect AUCs
  hyperParamSel <- lapply(param, function(x) do.call(trainCNNTorch, listAppend(x,
                                                                               list(plpData = result$data,
                                                                                    population = pPopulation,
                                                                                    train=TRUE,
                                                                                    modelOutput=outLoc)) ))
  hyperSummary <- cbind(do.call(rbind, param), unlist(hyperParamSel))
  # pick the AUC furthest from 0.5 (direction-agnostic discrimination)
  bestInd <- which.max(abs(unlist(hyperParamSel)-0.5))[1]
  # refit the winning setting on the full data (train=FALSE returns predictions)
  finalModel <- do.call(trainCNNTorch, listAppend(param[[bestInd]],
                                                  list(plpData = result$data,
                                                       population = pPopulation,
                                                       train=FALSE,
                                                       modelOutput=outLoc)))
  covariateRef <- as.data.frame(plpData$covariateData$covariateRef)
  incs <- rep(1, nrow(covariateRef))
  covariateRef$included <- incs
  covariateRef$covariateValue <- rep(0, nrow(covariateRef))
  modelTrained <- file.path(outLoc)
  param.best <- param[[bestInd]]
  # FIX: elapsed time was computed as start - Sys.time(), giving a negative
  # duration; report the positive training time instead
  comp <- Sys.time() - start
  # predictions come back 0-based; shift row ids and flip the score so that
  # 'value' is the probability of the outcome
  pred <- as.matrix(finalModel)
  pred[,1] <- pred[,1] + 1
  colnames(pred) <- c('rowId','outcomeCount','indexes', 'value')
  pred <- as.data.frame(pred)
  attr(pred, "metaData") <- list(predictionType="binary")
  pred$value <- 1-pred$value
  prediction <- merge(population, pred[,c('rowId','value')], by='rowId')
  result <- list(model = modelTrained,
                 trainCVAuc = -1,
                 hyperParamSearch = hyperSummary,
                 modelSettings = list(model='fitCNNTorch',modelParameters=param.best),
                 metaData = plpData$metaData,
                 populationSettings = attr(population, 'metaData'),
                 outcomeId=outcomeId,
                 cohortId=cohortId,
                 varImp = covariateRef,
                 trainingTime =comp,
                 dense=1,
                 covariateMap=result$map,
                 predictionTrain = prediction
  )
  class(result) <- 'plpModel'
  attr(result, 'type') <- 'pythonReticulate'
  attr(result, 'predictionType') <- 'binary'
  return(result)
}
# Train (train=TRUE, cross-validated, returns the CV AUC) or refit
# (train=FALSE, returns predictions) the torch CNN via the python helper
# deepTorchFunctions.py sourced through reticulate.
trainCNNTorch <- function(plpData, population, epochs=50, nbfilters = 16, seed=0, class_weight= 0, type = 'CNN', train=TRUE, modelOutput, quiet=FALSE){
  # placeholder so the name exists; overwritten by source_python() below
  train_deeptorch <- function(){return(NULL)}
  e <- environment()
  reticulate::source_python(system.file(package='PatientLevelPrediction','python','deepTorchFunctions.py'), envir = e)
  result <- train_deeptorch(population = population,
                            plpData = plpData,
                            epochs = as.integer(epochs),
                            nbfilters = as.integer(nbfilters),
                            seed = as.integer(seed),
                            class_weight = as.double(class_weight),
                            model_type = as.character(type),
                            train = train,
                            modelOutput = modelOutput,
                            quiet = quiet
  )
  if(train){
    # CV predictions: flip the score so 'value' is the outcome probability,
    # then report the AUC
    pred <- as.matrix(result)
    colnames(pred) <- c('rowId','outcomeCount','indexes', 'value')
    pred <- as.data.frame(pred)
    attr(pred, "metaData") <- list(predictionType="binary")
    pred$value <- 1-pred$value
    auc <- computeAuc(pred)
    writeLines(paste0('Model obtained CV AUC of ', auc))
    return(auc)
  }
  return(result)
}
context("mle")
# Simulate NBD transaction data, re-estimate the parameters by maximum
# likelihood, and check parameter recovery plus conditional-expectation
# predictions on the holdout period.
test_that("NBD", {
  set.seed(1)
  params <- c(r = 0.85, alpha = 4.45)
  # data generation must accept vector T.cal and a start date without noise
  expect_silent(nbd.GenerateData(100, 32, c(16, 32), params, "2010-01-01"))
  cbs <- nbd.GenerateData(1000, 32, 32, params)$cbs
  est <- nbd.EstimateParameters(cbs[, c("x", "T.cal")])
  # estimates should recover the generating parameters
  expect_equal(params, est, tolerance = 0.05)
  # aggregate predicted holdout transactions should match the actuals
  cbs$x.est <- nbd.ConditionalExpectedTransactions(params, cbs$T.star, cbs$x, cbs$T.cal)
  expect_equal(sum(cbs$x.star), sum(cbs$x.est), tolerance = 0.05)
  # holdout counts must be non-negative integers
  expect_true(min(cbs$x.star) >= 0)
  expect_true(all(cbs$x.star == round(cbs$x.star)))
})
# Index plot of Cook's distances from an 'influence' object (see
# ?influenceDiag), labelling the n.label.id most extreme observations.
#
# object      object of class 'influence' with $cookDist and $family.
# label.id    labels to choose from (defaults to the observation index).
# n.label.id  how many extreme points to label (default 2).
# xlab, ylab, pos, ...  usual plotting arguments.
cookDist <- function(object, label.id, n.label.id, xlab, ylab, pos, ...) {
  if(!inherits(object, 'influence')) stop('cookDist can only be used with object of class', dQuote('influence'), ', see ?influenceDiag')
  cookd <- object$cookDist
  n.obs <- length(cookd)
  index <- seq_len(n.obs)
  if(missing(label.id)) label.id <- index
  if(missing(n.label.id)) n.label.id <- 2
  if(missing(xlab)) xlab <- 'Index'
  # FIX: use if/else rather than ifelse() -- ifelse() on a scalar condition
  # mangles a non-atomic 'yes' value such as this plotmath expression
  if(missing(ylab)) {
    ylab <- if(object$family == 'betabinomial') {
      expression((beta - beta(-i)) ~ V(beta)^{-1} ~ (beta - beta(-i))/p)
    } else {
      "Cook's distance"
    }
  }
  if(missing(pos)) pos <- 4
  points.lab <- getMaxIndex(cookd, label.id, k = n.label.id)
  # save and restore graphical parameters (TRUE, not the reassignable T)
  backup.par <- par(no.readonly = TRUE)
  on.exit(par(backup.par))
  par(mgp=c(2,1,0))
  plot(cookd, xlab = xlab, ylab = ylab, type = "h", ...)
  text(x = index, y = cookd, label = points.lab, pos = pos)
}
# User-facing Groebner basis computation (value-returning variant).
# Lazily captures ..., unwraps a single m2_ideal / m2_ideal_pointer
# argument if that is all that was passed, and forwards everything to gb_().
gb <- function(..., control = list(), raw_chars = FALSE, code = FALSE) {
  # evaluate the dots in the caller's frame
  x <- list(x = lapply(dots(...), eval, envir = parent.frame()))
  # a single ideal (or pointer to one) is used directly as 'x'
  if(is.list(x) && (length(x) == 1) && is.m2_ideal(x[[c(1,1)]])) x <- x[[1]]
  if(is.list(x) && (length(x) == 1) && is.m2_ideal_pointer(x[[c(1,1)]])) x <- x[[1]]
  # remaining named arguments, re-evaluated and forwarded
  otherArgs <- as.list(match.call(expand.dots = FALSE))[-c(1:2)]
  args <- lapply(c(x, otherArgs), eval)
  do.call("gb_", args)
}
# Pointer-returning variant of gb(): identical argument handling, but
# forwards to gb_.() which returns the raw Macaulay2 pointer instead of a
# parsed R object.
gb. <- function(..., control = list(), raw_chars = FALSE, code = FALSE) {
  # evaluate the dots in the caller's frame
  x <- list(x = lapply(dots(...), eval, envir = parent.frame()))
  # a single ideal (or pointer to one) is used directly as 'x'
  if(is.list(x) && (length(x) == 1) && is.m2_ideal(x[[c(1,1)]])) x <- x[[1]]
  if(is.list(x) && (length(x) == 1) && is.m2_ideal_pointer(x[[c(1,1)]])) x <- x[[1]]
  # remaining named arguments, re-evaluated and forwarded
  otherArgs <- as.list(match.call(expand.dots = FALSE))[-c(1:2)]
  args <- lapply(c(x, otherArgs), eval)
  do.call("gb_.", args)
}
# Standard-evaluation Groebner basis: run the Macaulay2 computation via
# gb_.() and parse the resulting pointer into an mpolyList-backed
# m2_grobner_basis object. With code=TRUE, returns the pointer invisibly.
gb_ <- function(x, control = list(), raw_chars = FALSE, code = FALSE, ...) {
  args <- as.list(match.call())[-1]
  eargs <- lapply(args, eval, envir = parent.frame())
  pointer <- do.call(gb_., eargs)
  if(code) return(invisible(pointer))
  parsed_out <- m2_parse(pointer)
  # 'gens' yields a one-row matrix of generators; take that row
  out <- m2_structure(
    parsed_out[1,],
    m2_name = m2_name(pointer),
    m2_class = "m2_grobner_basis",
    m2_meta = list(ideal = m2_meta(pointer, "ideal")),
    base_class = "mpolyList"
  )
  out
}
# Low-level Groebner basis: build and run the Macaulay2 code
# "<name> = gb(<ideal>, <options>); gens <name>" and return the resulting
# pointer. With code=TRUE, only message and return the generated code.
gb_. <- function(x, control = list(), raw_chars = FALSE, code = FALSE, ...) {
  # fresh Macaulay2-side name, e.g. gb1, gb2, ...
  gb_name <- name_and_increment("gb", "m2_gb_count")
  if (raw_chars) {
    # x is taken as raw polynomial strings; build an inline ideal
    ideal_name <- paste0("ideal(", paste0(x, collapse = ", "), ")")
  } else {
    if (is.m2_ideal(x)) {
      ideal_name <- m2_name(x)
    } else if (is.m2_ideal_pointer(x)) {
      ideal_name <- m2_name(x)
    } else {
      # anything else: construct an ideal on the fly and reference it
      x <- do.call(ideal_., list(x = x))
      ideal_name <- m2_name(x)
    }
  }
  # translate control options into Macaulay2 "Key => value" syntax
  if (length(control) > 0) {
    control_string <- paste0(", ",
                             paste(names(control), r_to_m2(unlist(control)), sep = " => ", collapse = ", ")
    )
  } else {
    control_string <- ""
  }
  m2_code <- sprintf("%1$s = gb(%2$s%3$s); gens %1$s", gb_name, ideal_name, control_string)
  if(code) { message(m2_code); return(invisible(m2_code)) }
  out <- m2.(m2_code)
  m2_name(out) <- gb_name
  # remember which ideal this basis came from (unless built from raw strings)
  if(!raw_chars) m2_meta(out) <- c(m2_meta(out), list(ideal = ideal_name))
  out
}
# Convert an R value to its Macaulay2 string representation; vectors are
# converted elementwise. Always returns character.
r_to_m2 <- function(x) {
  if (length(x) > 1) return(vapply(x, r_to_m2, character(1)))
  if(is.logical(x)) return(tolower(as.character(x)))   # TRUE -> "true"
  # FIX: previously returned the numeric unchanged, which made the vapply()
  # branch above error for numeric vectors of length > 1 ("values must be
  # type 'character'"); callers paste() the result, so character is safe.
  if(is.numeric(x)) return(as.character(x))
  if(is.character(x)) return(x)
  stop("unexpected input.")
}
# Test helper: the column-wise medians of the posterior linear predictor
# draws should match the fitted object's linear.predictors within 'tol'.
expect_linpred_equal <- function(object, tol = 0.1) {
  draws <- posterior_linpred(object)
  draw_medians <- apply(draws, 2, median)
  expect_equal(draw_medians, object$linear.predictors,
               tolerance = tol,
               check.attributes = FALSE)
}
# Exploratory analysis of FTSE vs SMI log-returns: volatility clustering,
# time-varying cross-correlation, correlation-vs-volatility regressions,
# and a comparison with simulated t / normal samples.
library(xts)
library(nvmix)
library(qrmdata)
library(qrmtools)
# daily index series from qrmdata
data("FTSE")
data("SMI")
# log-returns, inner-joined on common trading days
FTSE.X <- returns(FTSE)
SMI.X <- returns(SMI)
X <- merge(FTSE = FTSE.X, SMI = SMI.X, all = FALSE)
# serial correlation of raw vs absolute returns (volatility clustering)
acf(X)
acf(abs(X))
# monthly cross-correlation (off-diagonal of cor matrix) and volatilities
X.cor <- apply.monthly(X, FUN = cor)[,2]
X.vols <- apply.monthly(X, FUN = function(x) apply(x, 2, sd))
X.cor.vols <- merge(X.cor, X.vols)
names(X.cor.vols) <- c("Cross-correlation", "Volatility FTSE", "Volatility SMI")
plot.zoo(X.cor.vols, xlab = "Time", main = "Cross-correlation and volatility estimates")
FTSE.sig <- as.numeric(X.vols[,"FTSE"])
SMI.sig <- as.numeric(X.vols[,"SMI"])
# Fisher z-transform of the correlations for the regressions below
fisher <- function(r) log((1 + r)/(1 - r))/2
rho <- fisher(X.cor)
# regress transformed correlation on each index's volatility
plot(FTSE.sig, rho, xlab = "Estimated volatility", ylab = "Estimated cross-correlation")
reg <- lm(rho ~ FTSE.sig)
summary(reg)
abline(reg)
plot(SMI.sig, rho, xlab = "Estimated volatility", ylab = "Estimated cross-correlation")
reg <- lm(rho ~ SMI.sig)
summary(reg)
abline(reg)
# simulate t(3) and normal samples with the empirical correlation structure
set.seed(271)
X.t <- xts(rStudent(n = nrow(X), df = 3, scale = cor(X)), time(X))
X.N <- xts(rNorm(n = nrow(X), scale = cor(X)), time(X))
plot.zoo(X, xlab = "Time", main = "Log-returns")
# pseudo-observations (componentwise scaled ranks)
X. <- apply(X, 2, rank) / (nrow(X) + 1)
plot(X., main = "Componentwise scaled ranks")
# Count the number of words in a string, text document, or corpus.
# S3 generic; dispatch on the class of x.
wordcount <- function(x) {
  UseMethod("wordcount", x)
}

# Default method: a single string; words are delimited by ICU word
# boundaries.
wordcount.default <- function(x) {
  assert_that(is.string(x))
  str_count(x, boundary("word"))
}

# A TextDocument stores its text in $content.
wordcount.TextDocument <- function(x) {
  wordcount(x$content)
}

# A corpus yields one integer count per document.
wordcount.TextReuseCorpus <- function(x) {
  vapply(x$documents, wordcount, integer(1))
}
# Format a numeric vector "prettily": more decimal places for small
# magnitudes, fewer for large, capped at 'maxdig' decimals. Returns
# character strings in fixed (non-scientific) notation; NA inputs stay NA.
roundpretty <- function(kvec, maxdig)
{
  # empty input previously raised an obscure error; return empty result
  if (length(kvec) == 0) return(character(0))
  if (length(kvec) > 1) {
    result <- rep(NA, length(kvec))
    # seq_along() is safe for any length (1:length(kvec) is not)
    for (i in seq_along(kvec)) {
      result[i] <- roundpretty.sub(kvec[i], maxdig)
    }
  } else {
    result <- roundpretty.sub(kvec, maxdig)
  }
  return(result)
}

# Scalar worker: pick the number of decimals from the magnitude of k
# (8 decimals below 1e-5, down to 0 decimals at 100 and above), cap at
# maxdig, then round and format without scientific notation.
roundpretty.sub <- function(k, maxdig)
{
  if (is.na(k)) return(k)
  # Right-open thresholds replicate the original strict '<' ladder:
  # k < 1e-5 -> 8 decimals, k < 1e-4 -> 7, ..., k < 100 -> 1, else 0.
  cuts <- c(0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100)
  dig <- min(maxdig, (8:0)[findInterval(k, cuts) + 1])
  format(round(k, dig), nsmall = dig, scientific = FALSE)
}
# Variance-shortcut preprocessing: walk the clusters in the order given by
# freq_id and flag (update_dim = 1) every cluster whose Z design matrices or
# Z index slice differ from the immediately preceding cluster's, so that
# downstream computations can reuse results for identical consecutive
# clusters.
mlnormal_proc_variance_shortcut_Z_R <- function( Z_list, Z_index, G, freq_id){
  # first cluster serves as the running reference
  gg0 <- freq_id[ 1, "orig_id" ]
  Z_list_gg0 <- Z_list[[gg0]]
  NZ <- length(Z_list_gg0)
  Z_index_gg0 <- Z_index[gg0,,]
  for (gg in 2:G){
    gg1 <- freq_id[ gg, "orig_id" ]
    Z_list_gg <- Z_list[[gg1]]
    Z_index_gg <- Z_index[gg1,,]
    # only clusters not already flagged need the comparison
    if ( freq_id[ gg, "update_dim"]==0 ){
      # identical index slice AND identical list of design matrices?
      crit1 <- mlnormal_equal_matrix( mat1=Z_index_gg0, mat2=Z_index_gg )
      crit2 <- mlnormal_equal_list_matrices( list_mat1=Z_list_gg0,
                                             list_mat2=Z_list_gg, dim_list=NZ)
      crit <- 1 * crit1 * crit2
      if ( crit==0 ){
        # differs from the previous cluster -> must be recomputed
        freq_id[ gg, "update_dim" ] <- 1
      }
    }
    # current cluster becomes the reference for the next iteration
    Z_list_gg0 <- Z_list_gg
    Z_index_gg0 <- Z_index_gg
    gg0 <- gg1
  }
  res <- list( "freq_id"=freq_id, "rcpp_args"=NULL)
  return(res)
}
# Plot per-individual inbreeding profiles from one or several zres objects:
# one panel per population, grey lines per individual, red line for the
# population mean. With cumulative=TRUE, values are cumulated over HBD
# classes (F_{G-T} as a function of the threshold T).
#
# input   a zres object or a (preferably named) list of zres objects.
# toplot  optional list (one vector of ids per input element) to subset
#         the individuals that are drawn.
# ncols   number of panel columns in the layout.
zooplot_individuals <- function (input, cumulative=TRUE, toplot=NULL, ncols=2) {
  layout ( matrix (1:2, nrow=1), widths=c(0.90, 0.1))
  par (mar =c(1, 0, 4, 3))
  par (oma=c(6, 6, 0, 0))
  # accept a single zres or a list of zres; anything else is an error
  if (is (input, "list")) {
    if(any (lapply (input, class) != "zres")) {
      stop ("Some objects are NOT of class \"zres\"\n")
    }
  }else {
    if(is (input,"zres")){
      input <- list(input)
    }
    else {
      stop ("input should be a list\n")
    }}
  if(length(names(input))==0 & length(input) > 1){
    warning("No names were provided for the input list!\n We will use capital letters.\n")
    names(input)=LETTERS[1:length(input)]}
  # pool the realized inbreeding of all inputs into one data frame,
  # remembering each input's number of columns to check model consistency
  ks <- c()
  for (i in 1:length (input)) {
    myres <- input[[i]]@realized
    ks [i] <- ncol (myres)
    myres <- myres [, -c(ncol(myres))]   # drop the last (non-HBD) column
    myres <- data.frame (id=input[[i]]@ids, fullid=input[[i]]@sampleids, myres)
    myres$pop <- i
    if (i ==1) {
      allres <- myres
    }else {
      allres <- rbind(allres, myres)
    }
  }
  # all inputs must come from the same model (same number of HBD classes)
  if (length (unique (ks)) >1) {
    stop ("different models used for the data\n")
  }else {
    k <- unique (ks) -1
  }
  # axis labels and a common y-range across all panels
  if (cumulative ==TRUE) {
    xlab=expression("Value of the threshold T used to estimate F"['G-T'] * " (using HBD classes with R"['K']<=" T)")
    ylab =expression ("Genomic inbreeding coefficient" ~ ( F [G-T] ))
    ylim <- c(0, max(apply (allres [, 3:(k+2)], 1, cumsum))) * 1.06
  }else {
    xlab="Rate of the HBD class"
    ylab="Proportion of the genome in HBD class"
    ylim <- c(0, max(allres [, 3:(k+2)])) *1.06
  }
  # panel grid: pad odd population counts to keep the matrix rectangular
  npop = length(input)
  if(npop>1 & npop%%2==1){npop=npop+1}
  mymat <- matrix (1:npop, ncol=ncols, byrow=TRUE)
  layout (mymat)
  for (j in 1:length (input)) {
    myres <- allres [allres$pop ==j, ]
    mymean <- apply (myres[, 3:(k+2)] , 2, mean)
    # optional subsetting of individuals for this population
    if (!is.null (toplot)) {
      if (length (toplot) != length (input)) {
        stop ('length of topot should match the length of input\n')
      }
      if (sum (myres$id %in% toplot [[j]]) == length (toplot [[j]]) ) {
        myres <- myres [myres$id %in% toplot [[j]], ]
        cat ('population ', j, ": ", nrow (myres), "\n")
      }else {
        warning ("some ids for data ", j, " was not found\n")
      }
    }else {
      if (j ==1) {
        warning ("\nAll individuals are plotted; use toplot to select individuals\n")
      }
    }
    # empty panel, x-axis labelled with the model's HBD class rates
    plot (1, type="n",xlim=c(1, k), ylim=ylim, xaxt="n", cex.lab=2, cex.axis=1.5, bty="l", xlab="", ylab="", main=names(input)[j])
    axis (side=1, at=1:k, labels=input[[1]]@krates [1, 1:k], cex.axis=1.5, cex.lab=2)
    # grey line per individual; mean in red drawn after the last individual
    for (n in 1:nrow (myres)){
      if (cumulative==TRUE) {
        lines (1:k, cumsum( as.numeric(myres[n, 3:(k+2)]) ) , type="l", col="gray")
        if (n == nrow (myres)) {
          lines (1:k, cumsum(mymean) , type="l", col="indianred2", lwd=2)
        }
      }else {
        lines (1:k, myres[n, 3:(k+2)] , type="l", col="gray")
        if (n == nrow (myres)) {
          lines (1:k, mymean , type="l", col="indianred2", lwd=2)
        }
      }
    }
  }
  # shared axis titles in the outer margins
  mtext(side=1, xlab, line=3, cex=1, outer=TRUE)
  mtext(side=2, ylab, line=3, cex=1, outer=TRUE)
}
# Group basis-swap and volatility trades into their hedging sets.
# Basis swaps are grouped by the pair of leg reference rates
# ("Basis_<pay> <rec>"), volatility trades by underlying instrument
# ("Vol_<underlying>"). Returns a list with trade_ids (list of external-id
# vectors per hedging set), trade_ids_all (flat vector) and hedging_sets.
HandleBasisVol <- function(trades) {
  vol_trades <- list()
  basis_swap_trades <- list()
  results <- list()
  # classify trades: Vol objects and basis Swaps get dedicated hedging sets
  for (i in seq_along(trades))
  {
    if (is(trades[[i]], "Vol"))
      vol_trades[[length(vol_trades) + 1]] <- trades[[i]]
    else if (is(trades[[i]], "Swap"))
    {
      # FIX: was '[<-' with a single object, which corrupts the list;
      # '[[<-' stores the trade object itself (matching the Vol branch)
      if (trades[[i]]$isBasisSwap())
        basis_swap_trades[[length(basis_swap_trades) + 1]] <- trades[[i]]
    }
  }
  if (length(basis_swap_trades) != 0)
  {
    # hedging set key: "pay_leg_ref rec_leg_ref"
    basis_hedging_sets <- unique(lapply(basis_swap_trades, function(x) paste(x$pay_leg_ref, x$rec_leg_ref)))
    basis_trade_ids <- list()
    basis_trade_ids_all <- array()   # starts with a single NA, filtered below
    for (i in seq_along(basis_hedging_sets))
    {
      split_pair <- strsplit(basis_hedging_sets[[i]], " ")
      trades_temp <- basis_swap_trades[sapply(basis_swap_trades, function(x) x$pay_leg_ref == split_pair[[1]][1] && x$rec_leg_ref == split_pair[[1]][2])]
      basis_trade_ids[[i]] <- sapply(trades_temp, function(x) x$external_id)
      basis_trade_ids_all <- c(basis_trade_ids_all, basis_trade_ids[[i]])
    }
    # drop the NA seeded by array()
    basis_trade_ids_all <- basis_trade_ids_all[!is.na(basis_trade_ids_all)]
    basis_hedging_sets <- paste0("Basis_", basis_hedging_sets)
    results$trade_ids <- c(results$trade_ids, basis_trade_ids)
    results$trade_ids_all <- c(results$trade_ids_all, basis_trade_ids_all)
    results$hedging_sets <- c(results$hedging_sets, basis_hedging_sets)
  }
  if (length(vol_trades) != 0)
  {
    # hedging set key: the underlying instrument
    vol_hedging_sets <- unique(lapply(vol_trades, function(x) x$Underlying_Instrument))
    vol_trade_ids <- list()
    vol_trade_ids_all <- array()   # starts with a single NA, filtered below
    for (i in seq_along(vol_hedging_sets))
    {
      # '[[i]]' (was '[i]') so a character scalar, not a 1-element list,
      # is compared against each trade's underlying
      group_trades <- vol_trades[sapply(vol_trades, function(x) x$Underlying_Instrument == vol_hedging_sets[[i]])]
      vol_trade_ids[[i]] <- sapply(group_trades, function(x) x$external_id)
      vol_trade_ids_all <- c(vol_trade_ids_all, vol_trade_ids[[i]])
    }
    vol_trade_ids_all <- vol_trade_ids_all[!is.na(vol_trade_ids_all)]
    vol_hedging_sets <- paste0("Vol_", vol_hedging_sets)
    results$trade_ids <- c(results$trade_ids, vol_trade_ids)
    results$trade_ids_all <- c(results$trade_ids_all, vol_trade_ids_all)
    results$hedging_sets <- c(results$hedging_sets, vol_hedging_sets)
  }
  return(results)
}
coordOfFilt <- function(mat, cond, sortByRows=FALSE, silent=FALSE, callFrom=NULL) {
  ## Convert a filter on 'mat' -- either a logical vector or a vector of
  ## linear (column-major) indices -- into a 2-column matrix of (row, col)
  ## coordinates.  With sortByRows=TRUE the result is ordered by row number.
  fxNa <- .composeCallName(callFrom, newNa="coordOfFilt")
  if(length(dim(mat)) != 2 || any(dim(mat) < 2:1)) {
    stop("Invalid argument 'mat'; must be matrix or data.frame with min 2 lines and 1 col") }
  idx <- if(is.logical(cond)) which(cond) else as.integer(cond)
  idx <- idx[!is.na(idx)]                       # silently drop NA positions
  if(min(idx) < 1 || max(idx) > prod(dim(mat))) stop("invalid entry for 'cond'")
  nr <- nrow(mat)
  rowNo <- idx %% nr
  rowNo[rowNo == 0] <- nr                       # last row of a column maps to 0 under %%
  colNo <- (idx + nr - 1) %/% nr
  out <- cbind(row=rowNo, col=colNo)
  ## carry index names over as rownames, but only when they are unique
  if(length(unique(names(idx))) == length(idx)) rownames(out) <- names(idx)
  if(sortByRows) out <- out[order(out[,1]), ]
  out }
|
# Unit tests for timelineR::plot_timeline input-argument checking.
library(testthat)
library(timelineR)
# Pin the timezone so the POSIXct fixtures are reproducible across machines.
Sys.setenv(TZ="Asia/Kolkata")
context("data_cleaner: check_input_arguments")
# Remove the plot file that plotting during the tests leaves behind.
teardown({
unlink("Rplots.pdf")
})
test_that("Check if is sorted", {
# Timestamps are deliberately out of order; plot_timeline is expected to
# emit output matching "WARN.*" rather than fail.
test_df = data.frame(timestamp = as.POSIXct(c("2017-01-01","2017-01-03", "2017-01-02")),
col_1 = c("value1", "value2", "value3"),
col_2 = c("value1", "value2", "value3"))
expect_output(plot_timeline(test_df), "WARN.*")
})
lnre.vgc <- function (model, N, m.max=0, variances=FALSE)
{
  ## Expected vocabulary growth curve for an LNRE model.
  ##
  ## model:     fitted LNRE model (object inheriting from "lnre")
  ## N:         vector of sample sizes at which expectations are evaluated
  ## m.max:     if > 0, also tabulate expected spectrum elements V_1..V_m.max
  ## variances: if TRUE, include variance estimates (VV / VVm) in the result
  ## Returns a 'vgc' object flagged as expected=TRUE.
  if (!inherits(model, "lnre")) stop("first argument must belong to a subclass of 'lnre'")
  if (!(is.numeric(N) && all(N >= 0)))
    stop("'N' argument must be a vector of non-negative numbers")
  if (!missing(m.max) && !(length(m.max) == 1 && is.numeric(m.max) && 0 <= m.max && m.max <= 9))
    stop("'m.max' must be a single integer in the range 1 ... 9")
  ## expected vocabulary size, plus expected spectrum elements if requested
  exp.V <- EV(model, N)
  exp.Vm <- if (m.max > 0) lapply(seq_len(m.max), function (.m) EVm(model, .m, N)) else list()
  if (variances) {
    var.V <- VV(model, N)
    var.Vm <- if (m.max > 0) lapply(seq_len(m.max), function (.m) VVm(model, .m, N)) else list()
    vgc(N=N, V=exp.V, VV=var.V, Vm=exp.Vm, VVm=var.Vm, expected=TRUE)
  } else {
    vgc(N=N, V=exp.V, Vm=exp.Vm, expected=TRUE)
  }
}
# Test for the whitebox wbt_z_scores tool wrapper.
context("wbt_z_scores")
test_that("Standardizes the values in an input raster by converting to z-scores", {
# Requires the WhiteboxTools binary; skip on CRAN and when it is missing.
skip_on_cran()
skip_if_not(check_whitebox_binary())
dem <- system.file("extdata", "DEM.tif", package = "whitebox")
ret <- wbt_z_scores(input = dem, output = "output.tif")
# On success the tool's captured console output contains a timing line.
expect_match(ret, "Elapsed Time")
})
get_tol <- function(searchterm) {
  ## Query the Tree of Life web service for a group name and return the
  ## "ID" attribute of the first child node of the XML response.
  ## Relies on RCurl::getURL and the XML package being attached.
  service <- "http://tolweb.org/onlinecontributors/app?service=external&page=xml/GroupSearchService&group="
  raw_xml <- getURL(paste0(service, searchterm))
  root <- xmlRoot(xmlTreeParse(raw_xml))
  as.character(xmlAttrs(root[[1]], name="ID"))
}
# Ad-hoc exploration of the Tree of Life web service with httr + xml2.
# NOTE(review): these top-level calls hit the network when sourced.
library(httr)
tt <- GET('http://tolweb.org/onlinecontributors/app?service=external&page=xml/GroupSearchService&group=Bembidion')
stop_for_status(tt)
content(tt)
# Fetch the tree structure for a known node id (146766).
out <- GET('http://tolweb.org/onlinecontributors/app?service=external&page=xml/TreeStructureService&node_id=146766')
stop_for_status(out)
txt <- content(out, "text")
cat(txt)
# Re-parse the same response body with xml2 and grab the first child node.
library("xml2")
txt2 <- content(out, "text")
xml <- xml2::read_xml(txt2)
one <- xml2::xml_children(xml)[[1]]
vcov.jmodelMult <- function (object, ...) {
  ## vcov() method for 'jmodelMult' fits: the variance-covariance matrix is
  ## precomputed during fitting and stored in the 'Vcov' component.
  vc <- object$Vcov
  vc
}
# Bayesian linear mixed model (random intercept + random slope) fitted with
# JAGS on repeated measurements, after missForest imputation of the
# measurement columns.
#   m          - start column(s) of the measurement block(s)
#   tmax       - number of repeated measurements per block
#   timepoints - columns holding the time covariate
#   group      - column holding the grouping covariate (e.g. gender)
#   chains, iter - MCMC settings forwarded to jags()
#   data       - data frame: covariate columns followed by measurements
Bysmxmss<-function(m,tmax,timepoints,group,chains,iter,data){
gendergrp<-group
# Columns w1..w2 span all measurement blocks; impute them with missForest
# and splice the imputed block back into 'data'.
w1<-m[1]
w2<-(m[length(m)])+tmax-1
data1<-data[,w1:w2]
data2<-data[,1:(w1-1)]
data1.imp <- missForest(data1)
data1 <- data.frame(data1.imp$ximp)
data <- data.frame(data2,data1)
var1 <- data[,timepoints]
dmat <- matrix(nrow=0,ncol=2)  # NOTE(review): never used below
rlt=list()                     # NOTE(review): never used below
for( k in m){
m1=k
m2=k+tmax-1
Distance = as.matrix(data[,m1:m2])
colnames(Distance)<-NULL
mn=length(m1:m2)
var1 <- var1[1:mn]
var2 <- data[,gendergrp]
# Data list handed to JAGS: vague Wishart scale R, zero prior mean mu0.
mdata <- list(R=matrix(c(.001,0,0,.001),2,2), mu0=c(0,0) ,
a2=var2,a1 = var1,
Distance = Distance,N=nrow(data),MN=mn)
# BUGS model (this function is never executed in R; jags() deparses it):
# per-subject random effects b[i,1:2], fixed effects beta[1:3].
mixedmodinslp <- function(){
for( i in 1:N){
for(j in 1:MN){
Distance[ i , j ] ~ dnorm(m[ i , j ], tau)
m[ i , j ] <- (beta[1] + b[ i , 1 ]) + (beta[2] + b[ i , 2 ])*a1[ j ] + beta[3]*a2[ i ]
}
}
for( i in 1:N){
b[ i , 1:2 ] ~ dmnorm(mu0[1:2], prec[1:2,1:2])
}
prec[1:2,1:2] ~ dwish(R[ 1:2 , 1:2 ], 2)
for(i in 1:3){ beta[ i ] ~ dnorm(0,0.0001) }
tau ~dgamma(0.0001,0.0001)
sigma <- sqrt(1/(tau*tau))
}
inits<-function(){list(beta =c(0,0,0), tau=1, prec=matrix(c(.001,0,0,.001),2,2))}
param = c("tau","sigma")
v1.sim.rem1 <- jags(mdata, inits, model.file = mixedmodinslp,
parameters.to.save = param,
n.chains = chains, n.iter = iter, n.burnin = iter/2 )
# NOTE(review): this return() exits on the FIRST iteration of 'for(k in m)',
# so only the first measurement block is ever fitted.  If all blocks were
# intended, results should be accumulated (e.g. in 'rlt') and returned
# after the loop -- confirm intended behavior before changing.
return(v1.sim.rem1)
}
}
# Silence R CMD check notes for symbols that only occur in the BUGS model.
utils::globalVariables(c("b","tau","Male"))
# Plot daily download counts for one package over October 2019, comparing
# the raw count ('ct', red squares) against a filtered count (black
# circles), with lowess trend lines, Wednesday markers on the top axis, and
# a subtitle summarising the download "inflation" implied by the filter.
# Data come from packageRank::blog.data.
inflationPlot <- function(package = "cholera", filter = "size",
legend.loc = "topleft") {
if (package %in% c("cholera", "ggplot2", "VR") == FALSE) {
stop('package must be "cholera", "ggplot2", or "VR".')
}
if (!filter %in% c("size", "version", "size.version")) {
stop('Filter must be "size", "version", or "size.version".')
}
# Build the 31 dates of October 2019 and locate the Wednesdays.
pt1 <- paste0("0", 1:9)
oct.root <- "2019-10-"
oct <- as.Date(c(paste0(oct.root, pt1), paste0(oct.root, 10:31)))
id <- which(weekdays(oct, abbreviate = TRUE) == "Wed")
dat <- packageRank::blog.data[[paste0(package, ".data")]]
plot(oct, dat$ct, type = "o", col = "red", pch = 15,
ylim = range(dat[, 1:4]), xlab = "Date", ylab = "Count")
# The filtered-count column is named "ct.<filter>".
filter.id <- paste0("ct.", tolower(filter))
lines(oct, dat[, filter.id], type = "o", col = "black", pch = 16)
lines(stats::lowess(oct, dat$ct), lty = "dotted", col = "red")
lines(stats::lowess(oct, dat[, filter.id]), lty = "dotted")
# Mark Wednesdays with vertical guides and "W" ticks on the top axis.
abline(v = oct[id], col = "gray", lty = "dotted")
axis(3, at = oct[id], labels = rep("W", length(id)), cex.axis = 0.5,
col.ticks = "black", mgp = c(3, 0.5, 0))
if (filter == "size") {
title(main = paste0(package, ": Size"))
sel <- "ct.size"
} else if (filter == "version") {
title(main = paste0(package, ": Version"))
sel <- "ct.version"
} else if (filter == "size.version") {
title(main = paste0(package, ": Size & Version"))
sel <- "ct.size.version"
}
# Subtitle: total vs filtered downloads and the implied inflation percent.
tot <- colSums(dat)
ptA <- paste0("Downloads: all = ", format(tot["ct"], big.mark = ","),
"; filtered = ")
ptB <- paste0("% | ", unique(dat$version.ct), " versions")
delta.pct <- round(100 * (tot["ct"] - tot[sel]) / tot[sel], 1)
title(sub = paste0(ptA, format(tot[sel], big.mark = ","), "; inflation = ",
format(delta.pct, big.mark = ","), ptB))
legend(x = legend.loc,
legend = c("all", "filtered"),
col = c("red", "black"),
pch = c(15, 16),
bg = "white",
cex = 2/3,
lwd = 1,
title = NULL)
}
inflationPlot2 <- function(dataset = "october", filter = "small", wed = FALSE,
  subtitle = TRUE, legend.loc = "topleft") {
  ## Plot unfiltered vs filtered monthly download counts (in millions) for
  ## one of the samples bundled in packageRank::blog.data.
  ##
  ## dataset:    "october" or "july"
  ## filter:     name of the filtered-count column to compare against
  ## wed:        mark Wednesdays with vertical guides and a top axis
  ## subtitle:   append totals and the implied inflation percentage
  ## legend.loc: legend placement keyword
  dat <- switch(dataset,
    october = packageRank::blog.data$october.downloads,
    july = packageRank::blog.data$july.downloads,
    stop('"dataset" must be "october" or "july".'))
  count.cols <- names(dat) != "date"
  totals <- colSums(dat[, count.cols])
  n.all <- totals["unfiltered"]
  n.filtered <- totals[filter]
  pct.inflation <- round(100 * (n.all - n.filtered) / n.filtered, 1)
  dat[, count.cols] <- dat[, count.cols] / 10^6   # counts -> millions
  wednesdays <- which(weekdays(dat$date, abbreviate = TRUE) == "Wed")
  plot(dat$date, dat$unfiltered, type = "o", pch = 15, col = "red",
    ylim = range(dat[, count.cols]), xlab = "Date",
    ylab = "Downloads (millions)")
  lines(dat$date, dat[, filter], type = "o", pch = 16)
  title(main = paste("Package Download Counts:", filter))
  if (wed) {
    abline(v = dat$date[wednesdays], col = "gray", lty = "dotted")
    axis(3, at = dat$date[wednesdays], labels = rep("W", length(wednesdays)),
      cex.axis = 0.5, col.ticks = "black", mgp = c(3, 0.5, 0))
  }
  legend(x = legend.loc, legend = c("unfiltered", "filtered"),
    col = c("red", "black"), pch = c(15, 16), bg = "white", cex = 2/3, lwd = 1,
    title = NULL)
  if (subtitle) {
    sub.txt <- paste0("unfiltered = ",
      format(round(n.all, 2), big.mark = ","),
      "; filtered = ",
      format(round(n.filtered, 2), big.mark = ","),
      "; inflation = ", paste0(pct.inflation, "%"))
    title(sub = sub.txt)
  }
}
cranInflationPlot <- function(dataset = "october") {
  ## Plot total (unfiltered) package download counts, in millions, for the
  ## October or July sample bundled with packageRank::blog.data.
  ##
  ## dataset: "october" or "july".
  if (dataset == "october") dat <- packageRank::blog.data$october.downloads
  else if (dataset == "july") dat <- packageRank::blog.data$july.downloads
  else stop('"dataset" must be "october" or "july".')
  vars <- names(dat) != "date"
  dat[, vars] <- dat[, vars] / 10^6   # counts -> millions
  # NOTE: the Wednesday-marker computation present in inflationPlot2() was
  # dead code here (its result was never plotted), so it has been removed.
  plot(dat$date, dat$unfiltered, type = "o", pch = 16,
    ylim = range(dat[, vars]), xlab = "Date", ylab = "Downloads (millions)")
  title(main = "Package Downloads")
}
# Clean up any plot-device output left by earlier tests.
unlink("Rplots.pdf")
# Round-trip tests between ICAMS (PCAWG) and SigProfiler indel-catalog
# row-name conventions.
test_that("Check sigprofiler-formatted indel catalog rownames",
{
spIDCat <- read.csv("testdata/sigProfiler_ID_signatures.csv",
row.names = 1,
stringsAsFactors = FALSE)
expect_equal(catalog.row.headers.sp$ID83,rownames(spIDCat))
unlink("Rplots.pdf")
})
test_that("TransRownames.ID.PCAWG.SigPro function",
{
inputRownames <- ICAMS::catalog.row.order$ID
outputRownames <- TransRownames.ID.PCAWG.SigPro(inputRownames)
# Order may differ between conventions; only set equality is required.
expect_true(setequal(outputRownames,catalog.row.headers.sp$ID83))
})
test_that("TransRownames.ID.SigPro.PCAWG function",
{
inputRownames <- catalog.row.headers.sp$ID83
outputRownames <- TransRownames.ID.SigPro.PCAWG(inputRownames)
expect_true(setequal(outputRownames,ICAMS::catalog.row.order$ID))
})
# Bias of the penalized coefficient estimate:
#   -lambda0 * solve(Derv2.pen) %*% Dm %*% beta.val.help
# with every input read from the environment 'penden.env'.
bias.par <- function(penden.env) {
M <- get("M",penden.env)
N <- get("N",penden.env)
K <- get("K",penden.env)
# Concatenate 'beta.val' while skipping the block of indices
# (N*(M-1)+1)..(N*M) -- presumably the constrained/reference coefficient
# block; confirm against the package's parameterization.
beta.val.help <- c(get("beta.val",penden.env)[1:(N*(M-1))],get("beta.val",penden.env)[(N*M+1):(K*N)])
# my.positive.definite.solve() is a package-local inverse for p.d. matrices.
val <- -get("lambda0",penden.env)*my.positive.definite.solve(get("Derv2.pen",penden.env))%*%get("Dm",penden.env)%*%beta.val.help
return(val)
}
# Dynamic UI: heading (plus optional description) and a Help button for the
# residual diagnostic currently selected in input$restrial1.  Each Help
# button opens the matching olsrr reference page in a new browser tab.
output$ui_resdiaglink <- renderUI({
if (input$restrial1 == "Residual vs Predicted Plot") {
fluidRow(
column(6, align = 'left',
h4('Residual vs Predicted Plot'),
p('Plot to detect non-linearity, unequal error variances, and outliers.')
),
column(6, align = 'right',
actionButton(inputId='rvsp1', label="Help", icon = icon("question-circle"),
onclick ="window.open('https://olsrr.rsquaredacademy.com/reference/ols_plot_resid_fit.html', '_blank')")
)
)
} else if (input$restrial1 == "Residual Box Plot") {
fluidRow(
column(6, align = 'left',
h4('Residual Box Plot')
),
column(6, align = 'right',
actionButton(inputId='rbplot1', label="Help", icon = icon("question-circle"),
onclick ="window.open('https://olsrr.rsquaredacademy.com/reference/ols_plot_resid_box.html', '_blank')")
)
)
} else if (input$restrial1 == "Residual Histogram") {
fluidRow(
column(6, align = 'left',
h4('Residual Histogram'),
p('Histogram of residuals for detecting violation of normality assumption.')
),
column(6, align = 'right',
actionButton(inputId='rhist1', label="Help", icon = icon("question-circle"),
onclick ="window.open('https://olsrr.rsquaredacademy.com/reference/ols_plot_resid_hist.html', '_blank')")
)
)
} else if (input$restrial1 == "Residual QQ Plot") {
fluidRow(
column(6, align = 'left',
h4('Residual QQ Plot'),
p('Graph for detecting violation of normality assumption.')
),
column(6, align = 'right',
actionButton(inputId='rqq1', label="Help", icon = icon("question-circle"),
onclick ="window.open('https://olsrr.rsquaredacademy.com/reference/ols_plot_resid_qq.html', '_blank')")
)
)
} else if (input$restrial1 == "Normality Test") {
fluidRow(
column(6, align = 'left',
h4('Normality Test'),
p('Test for detecting violation of normality assumption.')
),
column(6, align = 'right',
actionButton(inputId='resnorm1', label="Help", icon = icon("question-circle"),
onclick ="window.open('https://olsrr.rsquaredacademy.com/reference/ols_test_normality.html', '_blank')")
)
)
}
})
# Dynamic UI: a model-formula text input (with tooltip) for the selected
# diagnostic.  Each branch uses a distinct inputId so the eventReactive()s
# further down can read the matching formula.
output$ui_resdiagfmla <- renderUI({
if (input$restrial1 == "Residual vs Predicted Plot") {
fluidRow(
column(2, align = 'right', br(), h5('Model Formula:')),
column(10, align = 'left',
textInput("respred_fmla", label = '', width = '660px',
value = ""),
bsTooltip("respred_fmla", "Specify model formula",
"left", options = list(container = "body")))
)
} else if (input$restrial1 == "Residual Box Plot") {
fluidRow(
column(2, align = 'right', br(), h5('Model Formula:')),
column(10, align = 'left',
textInput("resbox_fmla", label = '', width = '660px',
value = ""),
bsTooltip("resbox_fmla", "Specify model formula",
"left", options = list(container = "body")))
)
} else if (input$restrial1 == "Residual Histogram") {
fluidRow(
column(2, align = 'right', br(), h5('Model Formula:')),
column(10, align = 'left',
textInput("reshist_fmla", label = '', width = '660px',
value = ""),
bsTooltip("reshist_fmla", "Specify model formula",
"left", options = list(container = "body")))
)
} else if (input$restrial1 == "Residual QQ Plot") {
fluidRow(
column(2, align = 'right', br(), h5('Model Formula:')),
column(10, align = 'left',
textInput("resqq_fmla", label = '', width = '660px',
value = ""),
bsTooltip("resqq_fmla", "Specify model formula",
"left", options = list(container = "body")))
)
} else if (input$restrial1 == "Normality Test") {
fluidRow(
column(2, align = 'right', br(), h5('Model Formula:')),
column(10, align = 'left',
textInput("resnorm_fmla", label = '', width = '660px',
value = ""),
bsTooltip("resnorm_fmla", "Specify model formula",
"left", options = list(container = "body")))
)
}
})
# Dynamic UI: the Submit button for the selected diagnostic.  Each branch
# has its own inputId; the eventReactive()s below trigger on these buttons.
output$ui_resdiagsubmit <- renderUI({
if (input$restrial1 == "Residual vs Predicted Plot") {
fluidRow(
column(12, align = 'center',
br(),
br(),
actionButton(inputId = 'submit_respred_plot', label = 'Submit', width = '120px', icon = icon('check')),
bsTooltip("submit_respred_plot", "Click here to view regression result.",
"bottom", options = list(container = "body")))
)
} else if (input$restrial1 == "Residual Box Plot") {
fluidRow(
column(12, align = 'center',
br(),
br(),
actionButton(inputId = 'submit_resbox_plot', label = 'Submit', width = '120px', icon = icon('check')),
bsTooltip("submit_resbox_plot", "Click here to view regression result.",
"bottom", options = list(container = "body")))
)
} else if (input$restrial1 == "Residual Histogram") {
fluidRow(
column(12, align = 'center',
br(),
br(),
actionButton(inputId = 'submit_reshist_plot', label = 'Submit', width = '120px', icon = icon('check')),
bsTooltip("submit_reshist_plot", "Click here to view regression result.",
"bottom", options = list(container = "body")))
)
} else if (input$restrial1 == "Residual QQ Plot") {
fluidRow(
column(12, align = 'center',
br(),
br(),
actionButton(inputId = 'submit_resqq_plot', label = 'Submit', width = '120px', icon = icon('check')),
bsTooltip("submit_resqq_plot", "Click here to view regression result.",
"bottom", options = list(container = "body")))
)
} else if (input$restrial1 == "Normality Test") {
fluidRow(
column(12, align = 'center',
br(),
br(),
actionButton(inputId = 'submit_resnorm', label = 'Submit', width = '120px', icon = icon('check')),
bsTooltip("submit_resnorm", "Click here to view normality test result.",
"bottom", options = list(container = "body")))
)
}
})
# Dynamic UI: a checkbox that lets the user reuse the model fitted on the
# Regression tab (read via all_use_n() in the eventReactive()s below)
# instead of typing a new formula.
output$ui_resdiagprev <- renderUI({
if (input$restrial1 == "Residual vs Predicted Plot") {
fluidRow(
column(2, align = 'right', br(), h5('Use previous model:')),
column(2, align = 'left', br(),
checkboxInput(inputId = 'respred_use_prev', label = '',
value = FALSE),
bsTooltip("respred_use_prev", "Use model from Regression Tab.",
"left", options = list(container = "body"))
)
)
} else if (input$restrial1 == "Residual Box Plot") {
fluidRow(
column(2, align = 'right', br(), h5('Use previous model:')),
column(2, align = 'left', br(),
checkboxInput(inputId = 'resbox_use_prev', label = '',
value = FALSE),
bsTooltip("resbox_use_prev", "Use model from Regression Tab.",
"left", options = list(container = "body"))
)
)
} else if (input$restrial1 == "Residual Histogram") {
fluidRow(
column(2, align = 'right', br(), h5('Use previous model:')),
column(2, align = 'left', br(),
checkboxInput(inputId = 'reshist_use_prev', label = '',
value = FALSE),
bsTooltip("reshist_use_prev", "Use model from Regression Tab.",
"left", options = list(container = "body"))
)
)
} else if (input$restrial1 == "Residual QQ Plot") {
fluidRow(
column(2, align = 'right', br(), h5('Use previous model:')),
column(2, align = 'left', br(),
checkboxInput(inputId = 'resqq_use_prev', label = '',
value = FALSE),
bsTooltip("resqq_use_prev", "Use model from Regression Tab.",
"left", options = list(container = "body"))
)
)
} else if (input$restrial1 == "Normality Test") {
fluidRow(
column(2, align = 'right', br(), h5('Use previous model:')),
column(2, align = 'left', br(),
checkboxInput(inputId = 'resnorm_use_prev', label = '',
value = FALSE),
bsTooltip("resnorm_use_prev", "Use model from Regression Tab.",
"left", options = list(container = "body"))
)
)
}
})
# Dynamic UI: the output slot for the selected diagnostic -- a plotOutput
# for the four plot diagnostics, a verbatimTextOutput for the test.
output$ui_resdiagout <- renderUI({
if (input$restrial1 == "Residual vs Predicted Plot") {
fluidRow(
br(),
column(12, align = 'center', plotOutput('resvsplot', height = '500px'))
)
} else if (input$restrial1 == "Residual Box Plot") {
fluidRow(
br(),
column(12, align = 'center', plotOutput('resboxplot', height = '500px'))
)
} else if (input$restrial1 == "Residual Histogram") {
fluidRow(
br(),
column(12, align = 'center', plotOutput('reshistplot', height = '500px'))
)
} else if (input$restrial1 == "Residual QQ Plot") {
fluidRow(
br(),
column(12, align = 'center', plotOutput('resqqplot', height = '500px'))
)
} else if (input$restrial1 == "Normality Test") {
fluidRow(
br(),
column(12, align = 'center', verbatimTextOutput('resnormtest'))
)
}
})
# One eventReactive per diagnostic: on Submit, either reuse the model from
# the Regression tab (all_use_n()) or fit a fresh lm() from the typed
# formula on the training split, then run the matching olsrr diagnostic.
d_respred_mod <- eventReactive(input$submit_respred_plot, {
if(input$respred_use_prev) {
ols_plot_resid_fit(all_use_n())
} else {
k <- lm(input$respred_fmla, data = final_split$train)
ols_plot_resid_fit(k)
}
})
d_resbox_mod <- eventReactive(input$submit_resbox_plot, {
if(input$resbox_use_prev) {
ols_plot_resid_box(all_use_n())
} else {
k <- lm(input$resbox_fmla, data = final_split$train)
ols_plot_resid_box(k)
}
})
d_reshist_mod <- eventReactive(input$submit_reshist_plot, {
if(input$reshist_use_prev) {
ols_plot_resid_hist(all_use_n())
} else {
k <- lm(input$reshist_fmla, data = final_split$train)
ols_plot_resid_hist(k)
}
})
d_resqq_mod <- eventReactive(input$submit_resqq_plot, {
if(input$resqq_use_prev) {
ols_plot_resid_qq(all_use_n())
} else {
k <- lm(input$resqq_fmla, data = final_split$train)
ols_plot_resid_qq(k)
}
})
d_resnorm_mod <- eventReactive(input$submit_resnorm, {
if(input$resnorm_use_prev) {
ols_test_normality(all_use_n())
} else {
k <- lm(input$resnorm_fmla, data = final_split$train)
ols_test_normality(k)
}
})
# Render the diagnostic results produced by the eventReactive()s above.
output$resvsplot <- renderPlot({
print(d_respred_mod())
})
output$resboxplot <- renderPlot({
print(d_resbox_mod())
})
output$reshistplot <- renderPlot({
print(d_reshist_mod())
})
output$resqqplot <- renderPlot({
print(d_resqq_mod())
})
output$resnormtest <- renderPrint({
print(d_resnorm_mod())
})
# Derive a default ordering of group levels from 'formula' when the caller
# did not supply one; otherwise return 'group_order' unchanged.
processInput_group_order = function(formula, group_order){
if(is.null(group_order)){
# The grouping term is the right-hand-side label of the formula.  Two
# cases are distinguished by whether that label contains a '$': a bare
# variable name vs. a 'df$col'-style term.
# NOTE(review): eval()/get() resolve in this function's call chain, so the
# grouping data must be visible from here -- fragile; confirm that callers
# always have it in scope before refactoring.
if(sum(charToRaw(labels(terms(formula))) == charToRaw('$')) == 0){
group_order = sort(unique(eval(get(all.vars(formula)[3]))))
}else if(sum(charToRaw(labels(terms(formula))) == charToRaw('$')) >= 1){
group_order = sort(unique(eval(parse(text = labels(terms(formula))))))
}
}
return(group_order)
}
trip1 <- function(x) {
  ## Score 'x' using its value tally from mycount1():
  ## if no value occurs at least three times, return 0; otherwise return
  ## 225 * (largest value occurring 3+ times) + 15 * (largest remaining
  ## element of x) + (second-largest remaining element), where the
  ## remainder is padded with two zeros.
  tally <- mycount1(x)
  vals <- tally$v
  cts <- tally$ct
  if (sum(cts > 2.5) < 0.5) {
    return(0)
  }
  best <- sort(vals[cts > 2.5], decreasing = TRUE)[1]
  rest <- sort(c(0, 0, x[(x != best)]), decreasing = TRUE)
  225 * best + 15 * rest[1] + rest[2]
}
listdown <- function(package = NULL,
                     decorator = list(),
                     decorator_chunk_opts = list(),
                     default_decorator = identity,
                     setup_expr = NULL,
                     init_expr = NULL,
                     load_cc_expr = NULL,
                     ...,
                     chunk_opts = NULL) {
  ## Create a 'listdown' object: a description of how a computational-
  ## component list should be written out as R Markdown chunks.
  ##
  ## package              - package(s) to library() in the generated document
  ## decorator            - named list mapping element types to decorators
  ## decorator_chunk_opts - per-type chunk options (validated below)
  ## default_decorator    - decorator for types without an entry
  ## setup_expr/init_expr - expressions run before/after packages load
  ## load_cc_expr         - expression that reads the component list
  ## ... / chunk_opts     - chunk options applied to every chunk
  ##
  ## BUG FIX: the original tested names(as.list(match.call)) -- note the
  ## missing "()" -- which inspects the formals of the match.call *function*
  ## itself, so a user-supplied default_decorator was always replaced by
  ## `identity`.  Also keep the caller's expression unevaluated (the old
  ## as.list() wrapper errors on a plain symbol) so it deparses cleanly.
  if ( !("default_decorator" %in% names(as.list(match.call()))) ) {
    default_decorator <- as.symbol("identity")
  } else {
    default_decorator <- match.call()$default_decorator
  }
  if (is.null(chunk_opts)) {
    chunk_opts <- list(...)
  }
  # Reject chunk options that knitr does not recognize.
  not_r_chunk_opts <- not_r_chunk_opts(names(chunk_opts))
  if (length(not_r_chunk_opts) > 0) {
    stop(red("Unrecognized options:\n\t",
             paste(not_r_chunk_opts, collapse = "\n\t"),
             "\n", sep = ""))
  }
  for (i in seq_along(decorator_chunk_opts)) {
    not_r_chunk_opts <- not_r_chunk_opts(names(decorator_chunk_opts[[i]]))
    if (length(not_r_chunk_opts) > 0) {
      stop(red("Unrecognized options for element type",
               names(decorator_chunk_opts)[i], ":\n\t",
               paste(not_r_chunk_opts, collapse = "\n\t"),
               "\n", sep = ""))
    }
  }
  if ( !("decorator" %in% names(match.call())) ) {
    decorator <- NULL
  } else {
    # A literal list(...) is kept unevaluated (minus the `list` head);
    # anything else is evaluated to obtain the decorator mapping.
    if (as.character(as.list(match.call()$decorator)[[1]]) == "list") {
      decorator <- as.list(match.call()$decorator)[-1]
    } else {
      decorator <- eval(match.call()$decorator)
    }
    if ("list" %in% names(decorator)) {
      stop("You may not decorate a list. Consider making a list element you ",
           "would\nlike to present a class instance and define a decorator.")
    }
  }
  if (!is.null(load_cc_expr)) {
    load_cc_expr <- create_load_cc_expr(match.call()$load_cc_expr)
  }
  # Store init/setup/load expressions unevaluated for later deparsing.
  ret <- list(load_cc_expr = load_cc_expr,
              decorator = decorator,
              package = package,
              init_expr = match.call()$init_expr,
              setup_expr = match.call()$setup_expr,
              decorator_chunk_opts = decorator_chunk_opts,
              default_decorator = default_decorator,
              chunk_opts = chunk_opts)
  class(ret) <- "listdown"
  ret
}
# Human-readable summary of a 'listdown' object: prints each configured
# section (packages, setup/init expressions, load expression, decorators,
# chunk options) with crayon bold/yellow emphasis.  Returns x invisibly.
print.listdown <- function(x, ...) {
cat(bold("\nListdown object description\n"))
cat("\n")
if ("package" %in% names(x)) {
cat(bold("  Package(s) imported:\n"))
for (package in x$package) {
cat("\t", package, "\n", sep = "")
}
} else {
warning(yellow("No packages imported."))
}
if ("setup_expr" %in% names(x)) {
cat("\n")
cat(bold("  Setup expression(s) (run before packages are loaded):\n"))
cat("\t")
if (length(x$setup_expr) == 0) {
cat("(none)\n")
} else {
cat(deparse(x$setup_expr), sep = "\n\t")
}
}
if ("init_expr" %in% names(x)) {
cat("\n")
cat(bold("  Initial expression(s) (run after packages are loaded):\n"))
cat("\t")
if (length(x$init_expr) == 0) {
cat("(none)\n")
} else {
cat(deparse(x$init_expr), sep = "\n\t")
}
}
if ("load_cc_expr" %in% names(x)) {
cat("\n")
cat(bold("  Expression to read data:\n"))
cat("\t", deparse(x$load_cc_expr), "\n", sep = "")
} else {
warning(yellow("No load_cc expression provided."))
}
if ("decorator" %in% names(x)) {
cat("\n")
cat(bold("  Decorator(s):\n"))
if (length(x$decorator) == 0) {
cat("\t(none)\n")
} else {
# Two aligned columns: element type and the deparsed decorator method,
# with a bold header row.
ns <- format(c("Type", names(x$decorator)))
cv <- c("Method", as.vector(unlist(sapply(x$decorator, deparse))))
for (i in seq_along(ns)) {
if (i == 1) {
cat("\t", bold(ns[i]), "\t", bold(cv[i]), "\n", sep = "")
} else {
cat("\t", ns[i], "\t", cv[i], "\n", sep = "")
}
}
}
}
if ("default_decorator" %in% names(x)) {
cat("\n")
cat(bold("  Defaut decorator:\n"))
cat("\t", deparse(x$default_decorator), "\n", sep = "")
}
if ("chunk_opts" %in% names(x)) {
cat("\n")
cat(bold("  Chunk option(s):\n"))
if (length(x$chunk_opts) == 0) {
cat("\t(none)\n")
} else {
for (i in seq_along(x$chunk_opts)) {
cat("\t", names(x$chunk_opts)[i], " = ",
deparse(x$chunk_opts[[i]]), "\n",
sep = "")
}
}
}
if ("decorator_chunk_opts" %in% names(x)) {
cat("\n")
cat(bold("  Decorator chunk option(s):\n"))
if (length(x$decorator_chunk_opts) == 0) {
cat("\t(none)\n")
} else {
for (i in seq_along(x$decorator_chunk_opts)) {
cat("\t", bold("Type: "), names(x$decorator_chunk_opts)[i], ":",
sep = "")
# Unnamed entries are positional chunk names; label them as such.
ns <- names(x$decorator_chunk_opts[[i]])
ns[ns == ''] <- "(chunk name)"
ns <- c("Option", ns)
ns <- format(ns)
cv <- unlist(x$decorator_chunk_opts[[i]])
cv <- c("Value", cv)
for (j in seq_along(ns)) {
if (j == 1) {
cat("\n\t\t", bold(ns[j]), " ", bold(cv[j]))
} else {
cat("\n\t\t", ns[j], " ", cv[j])
}
}
cat("\n")
}
}
}
cat("\n")
invisible(x)
}
# S3 generic: render an object to a character vector of R Markdown lines;
# methods dispatch on the class of 'ld' (see ld_make_chunks.listdown).
ld_make_chunks <- function(ld) {
UseMethod("ld_make_chunks", ld)
}
ld_make_chunks.default <- function(ld) {
  ## Fallback for ld_make_chunks(): no renderer exists for this class, so
  ## fail with a message naming the full class vector.
  class.str <- paste(class(ld), collapse = ":")
  msg <- paste0("Don't know how to render an object of class ", class.str, ".")
  stop(red(msg))
}
expr_to_string <- function(expr) {
  ## Deparse an unevaluated expression to character lines.  A `{` block is
  ## flattened to one deparsed string per inner statement (braces dropped);
  ## anything else is deparsed as-is.
  braced <- deparse(expr[[1]]) == "{"
  if (!braced) {
    return(deparse(expr))
  }
  unlist(lapply(as.list(expr)[-1], deparse))
}
# Render a 'listdown' object to R Markdown lines: an optional setup chunk,
# then the main chunk (library() calls, init expressions, reading the
# component list), then one chunk per component (via depth_first_concat()).
ld_make_chunks.listdown <- function(ld) {
if (is.null(ld$load_cc_expr)) {
stop("The load_cc_expr needs to be specified. ",
"Use `create_load_cc_expr()` to set it.")
}
# Evaluate the load expression now so the component list can be walked;
# a character result is treated as code and parsed/evaluated again.
cc_list <- eval(ld$load_cc_expr)
if (is.character(cc_list)) {
cc_list <- eval(parse(text = cc_list))
}
ret_string <- ""
if (length(ld$setup_expr)) {
ret_string <- c(ret_string,
"```{r setup, include = FALSE}",
expr_to_string(ld$setup_expr),
"```",
"")
}
ret_string <-
c(ret_string,
sprintf("```{r%s}", make_chunk_option_string(ld$chunk_opts)))
if (length(ld$package) > 0) {
# One library() line per requested package.
ret_string <-
c(ret_string,
as.character(vapply(eval(ld$package),
function(x) sprintf("library(%s)", as.character(x)),
NA_character_)),
"")
}
if (length(ld$init_expr)) {
ret_string <-
c(ret_string,
expr_to_string(ld$init_expr),
"")
}
# The generated document re-reads the component list itself.
c(ret_string,
sprintf("cc_list <- %s", deparse(ld$load_cc_expr)),
"```",
depth_first_concat(cc_list, ld))
}
# Tests for subjectProfileSummaryTable: the 'text' aesthetic.
context("Create a subject profile summary plot with a table")
library(ggplot2)
library(plyr)
test_that("A text variable is correctly set", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n"
)
expect_s3_class(gg, "ggplot")
# Pull the data behind the text layer and compare with the input table.
isGeomText <- sapply(gg$layers, function(l) inherits(l$geom, "GeomText"))
ggDataText <- layer_data(gg, which(isGeomText))
expect_identical(
object = unname(ggDataText[, c("x", "label")]),
expected = unname(summaryTable)
)
})
test_that("An error is generated if the text variable is not available", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
expect_error(
subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n2"
),
"'n2' should be among the columns of 'data'"
)
})
test_that("A text variable is correctly set as an expression", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = c("A", "B")
)
# NOTE(review): the next line appears truncated in this copy of the file
# (unterminated string after 'paste("') -- restore it from upstream.
textExpr <- bquote(paste("
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = textExpr
)
isGeomText <- sapply(gg$layers, function(l) inherits(l$geom, "GeomText"))
ggDataText <- layer_data(gg, which(isGeomText))
summaryTable$label <- with(summaryTable, eval(textExpr))
expect_identical(
object = unname(ggDataText[, c("x", "label")]),
expected = unname(summaryTable[, c("visit", "label")])
)
})
# Tests for text size and x-axis configuration (label, axis labels, limits).
test_that("The size of the text is correctly set", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
textSize <- 67
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n", textSize = textSize
)
isGeomText <- sapply(gg$layers, function(l) inherits(l$geom, "GeomText"))
ggDataText <- layer_data(gg, which(isGeomText))
# Every text element of the layer should carry the requested size.
expect_setequal(
object = ggDataText$size,
expected = textSize
)
})
test_that("The label for the x variable is correctly set", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
xLab <- "Study visit"
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
xLab = xLab
)
expect_identical(
object = gg$labels$x,
expected = xLab
)
})
test_that("The x-axis labels are correctly set for a continuous x variable", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
xAxisLabs <- c(1, 4)
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
xAxisLabs = xAxisLabs
)
# For a continuous x, the labels are applied as scale limits.
ggScales <- gg$scales$scales
isScaleX <- sapply(ggScales, function(x)
"x" %in% x[["aesthetics"]]
)
expect_equal(
object = gg$scales$scales[[which(isScaleX)]]$limits,
expected = xAxisLabs
)
})
test_that("The x-axis labels are correctly set for a categorical x variable", {
summaryTable <- data.frame(
visit = c("Visit 0", "Visit 1"),
n = c(10, 20)
)
xAxisLabs <- c(
`Visit 0` = "Baseline",
"Visit 1" = "First visit"
)
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
xAxisLabs = xAxisLabs
)
# For a categorical x, the labels are applied as scale breaks.
ggScales <- gg$scales$scales
isScaleX <- sapply(ggScales, function(x)
"x" %in% x[["aesthetics"]]
)
expect_equal(
object = gg$scales$scales[[which(isScaleX)]]$breaks,
expected = xAxisLabs
)
})
test_that("The limits are correctly set for the x-axis", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
xLim <- c(1, 10)
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
xLim = xLim
)
expect_equal(
object = ggplot_build(gg)$layout$coord$limits$x,
expected = xLim
)
})
# Tests for the color variable: y-axis placement, coloring, palette,
# legend label and point size.
test_that("The labels of the color variable are correctly displayed in the y-axis", {
summaryTable <- data.frame(
visit = c(1, 1, 2, 2),
n = c("1", "2", "3", "4"),
TRT = factor(c("A", "B", "A", "B"), levels = c("B", "A"))
)
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT"
)
isGeomText <- sapply(gg$layers, function(l) inherits(l$geom, "GeomText"))
ggDataText <- layer_data(gg, which(isGeomText))
ggDataText$y <- as.numeric(ggDataText$y)
ggDataText <- ggDataText[with(ggDataText, order(x, y)), ]
# Factor levels are reversed on the y-axis: first level ends up on top.
dataPlotReference <- data.frame(
x = c(1, 1, 2, 2),
y = c(1, 2, 1, 2),
label = c("1", "2", "3", "4")
)
expect_equal(
object = ggDataText[, c("x", "y", "label")],
expected = dataPlotReference,
check.attributes = FALSE
)
})
test_that("The text and point are colored based on the specified color variable", {
summaryTable <- data.frame(
visit = c(1, 1, 2, 2),
n = sample.int(4),
TRT = factor(c("A", "B", "A", "B"), levels = c("B", "A"))
)
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT"
)
# Each y position (one per treatment) should map to a single distinct color.
isGeomTextPoint <- sapply(gg$layers, function(l) inherits(l$geom, c("GeomText", "GeomPoint")))
ggDataTextPoint <- do.call(plyr::rbind.fill, ggplot_build(gg)$data[isGeomTextPoint])
colors <- with(ggDataTextPoint, tapply(colour, y, unique))
expect_type(colors, "character")
expect_length(colors, 2)
expect_length(unique(colors), 2)
})
test_that("A color palette is correctly set", {
summaryTable <- data.frame(
visit = c(1, 1, 2, 2),
n = sample.int(4),
TRT = c("A", "B", "A", "B"),
stringsAsFactors = TRUE
)
colorPalette <- c(A = "red", B = "yellow")
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT", colorPalette = colorPalette
)
# Recover which y position corresponds to which treatment, then check that
# each treatment is drawn with its palette color.
summaryTable$y <- as.numeric(factor(summaryTable$TRT, levels = rev(levels(summaryTable$TRT))))
isGeomTextPoint <- sapply(gg$layers, function(l) inherits(l$geom, c("GeomText", "GeomPoint")))
ggDataTextPoint <- do.call(plyr::rbind.fill, ggplot_build(gg)$data[isGeomTextPoint])
ggDataTextPointWithInput <- merge(ggDataTextPoint, summaryTable,
by.x = c("x", "y"),
by.y = c("visit", "y"),
all = TRUE
)
colors <- with(ggDataTextPointWithInput, tapply(colour, TRT, unique))
expect_equal(
object = as.vector(colors[names(colorPalette)]),
expected = unname(colorPalette)
)
})
test_that("A label for the color variable is correctly set", {
summaryTable <- data.frame(
visit = c(1, 1, 2, 2),
n = sample.int(4),
TRT = c("A", "B", "A", "B")
)
colorLab <- "Study Treatment"
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT",
colorLab = colorLab
)
# Exactly one color scale should exist, carrying the requested name.
ggScales <- gg$scales$scales
isColorAes <- sapply(ggScales, function(x)
all(x[["aesthetics"]] == "colour")
)
expect_equal(
object = sum(isColorAes),
expected = 1
)
expect_equal(
object = ggScales[[which(isColorAes)]]$name,
expected = colorLab
)
})
test_that("The size of the points (in the legend) is correctly set", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = c("a", "b")
)
pointSize <- 10
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT",
pointSize = pointSize
)
expect_equal(
object = gg$guides$colour$override.aes$size,
expected = pointSize
)
})
# When 'labelVars' is provided, the legend title for the colour variable
# should be looked up from it rather than using the raw column name.
test_that("The variable labels are correctly extracted from the labels of all variables", {
# NOTE(review): visit/n (length 2) are recycled by data.frame() against
# TRT (length 4), giving 4 rows — presumably intended; confirm.
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = c("A", "B", "A", "B")
)
labelVars <- c(visit = "Study visit", TRT = "Study treatment")
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT",
labelVars = labelVars
)
# Exactly one scale should target the colour aesthetic ...
ggScales <- gg$scales$scales
isColorAes <- sapply(ggScales, function(x)
all(x[["aesthetics"]] == "colour")
)
expect_equal(sum(isColorAes), 1)
# ... and its title should be the label registered for TRT.
expect_equal(ggScales[[which(isColorAes)]]$name, labelVars["TRT"])
})
# With a factor colour variable, y-axis tick labels should be shown and
# coloured per group (only levels present in the data are used).
test_that("The labels of the y-axis are correctly included with a color variable as a factor", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = factor(c("A", "B", "A", "B"), levels = c("B", "A", "C", "Z"))
)
colorPalette <- c(A = "red", B = "blue")
# Muffle only the ggplot2 "Vectorized input" warning raised when a vector
# of colours is passed to element_text(); anything else propagates.
withCallingHandlers(
expr = {
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT",
colorPalette = colorPalette,
yAxisLabs = TRUE
)
},
warning = function(w){
if(grepl("Vectorized input", conditionMessage(w)))
invokeRestart("muffleWarning")
}
)
# Labels are displayed (not blanked) and coloured by group.
expect_false(inherits(gg$theme$axis.text.y, "element_blank"))
expect_equal(
object = gg$theme$axis.text.y$colour,
expected = c("red", "blue")
)
})
# Requesting y-axis labels without a colour variable is not supported and
# should warn with an explicit message.
test_that("A warning is generated if the labels for the y-axis are requested but no color variable is specified", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = factor(c("A", "B", "A", "B"), levels = c("B", "A", "C", "Z"))
)
expect_warning(
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
yAxisLabs = TRUE
),
"Labels for the y-axis are not included because color variable is not specified."
)
})
# With a character colour variable, groups are ordered alphabetically;
# both the y-axis label colours and the text-layer colours should follow
# that ordering (A = "red" below B = "blue" when reversed for plotting).
test_that("The labels of the y-axis are correctly included with a color variable as a character", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = c("B", "A", "B", "A"),
stringsAsFactors = FALSE
)
colorPalette <- c(A = "red", B = "blue")
# Muffle only the expected "Vectorized input" warning from ggplot2.
withCallingHandlers(
expr = {
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT",
colorPalette = colorPalette,
yAxisLabs = TRUE
)
},
warning = function(w){
if(grepl("Vectorized input", conditionMessage(w)))
invokeRestart("muffleWarning")
}
)
# Axis tick labels carry the per-group colours, bottom-up.
expect_equal(
object = gg$theme$axis.text.y$colour,
expected = c("blue", "red")
)
# The plotted text uses the same colour per y position.
isGeomText <- sapply(gg$layers, function(l) inherits(l$geom, "GeomText"))
ggDataText <- layer_data(gg, which(isGeomText))
expect_equal(
unname(c(with(ggDataText, tapply(colour, y, unique)))),
c("blue", "red")
)
})
# With yAxisLabs = FALSE the y-axis text should be blanked out entirely.
test_that("The labels of the y-axis are correctly not included", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = c("A", "B", "A", "B")
)
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT",
yAxisLabs = FALSE
)
expect_s3_class(gg$theme$axis.text.y, "element_blank")
})
# The colour palette should also be applied to the y-axis tick labels
# (one label per group, ordered bottom-up: B first, then A).
test_that("A color palette is correctly set for the labels of the y-axis ", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20),
TRT = c("A", "B")
)
colorPalette <- c(A = "red", B = "blue")
# Muffle only the expected "Vectorized input" warning from ggplot2.
withCallingHandlers(
expr = {
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
colorVar = "TRT",
colorPalette = colorPalette,
yAxisLabs = TRUE
)
},
warning = function(w){
if(grepl("Vectorized input", conditionMessage(w)))
invokeRestart("muffleWarning")
}
)
expect_equal(
object = gg$theme$axis.text.y$colour,
expected = c("blue", "red")
)
})
# 'fontsize' should set the base text size of the plot theme.
test_that("The font size of the text is correctly set", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
fontsize <- 10
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
fontsize = fontsize
)
expect_equal(
object = gg$theme$text$size,
expected = fontsize
)
})
# 'fontface' (e.g. 3 = italic) should be applied to the text layer data.
test_that("The font face of the text is correctly set", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
fontface <- 3
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
fontface = fontface
)
# Extract the built data of the text layer and check every row uses it.
isGeomText <- sapply(gg$layers, function(l) inherits(l$geom, "GeomText"))
ggDataText <- layer_data(gg, which(isGeomText))
expect_setequal(
object = ggDataText$fontface,
expected = fontface
)
})
# 'fontname' should set the font family on the plot theme.
test_that("The font of the text is correctly set", {
summaryTable <- data.frame(
visit = c(1, 2),
n = c(10, 20)
)
fontname <- "Arial"
gg <- subjectProfileSummaryTable(
data = summaryTable,
xVar = "visit",
text = "n",
fontname = fontname
)
expect_equal(
object = gg$theme$text$family,
expected = fontname
)
})
test_that("A theme is correctly set for the plot", {
  inputTable <- data.frame(
    visit = c(1, 2),
    n = c(10, 20)
  )
  # A custom theme function should be applied on top of the default theme.
  gg <- subjectProfileSummaryTable(
    data = inputTable,
    xVar = "visit",
    text = "n",
    themeFct = function() ggplot2::theme(aspect.ratio = 0.75)
  )
  expect_equal(gg$theme$aspect.ratio, 0.75)
})
test_that("A legend is correctly included", {
  inputTable <- data.frame(
    visit = c(1, 1, 2, 2),
    n = sample.int(4),
    TRT = c("A", "B", "A", "B")
  )
  gg <- subjectProfileSummaryTable(
    data = inputTable,
    xVar = "visit",
    text = "n",
    colorVar = "TRT",
    showLegend = TRUE
  )
  # Any legend position other than "none" means the legend is displayed.
  expect_false(gg$theme$legend.position == "none")
})
test_that("A legend is correctly not included", {
  inputTable <- data.frame(
    visit = c(1, 1, 2, 2),
    n = sample.int(4),
    TRT = c("A", "B", "A", "B")
  )
  gg <- subjectProfileSummaryTable(
    data = inputTable,
    xVar = "visit",
    text = "n",
    colorVar = "TRT",
    showLegend = FALSE
  )
  # showLegend = FALSE should suppress the legend entirely.
  expect_equal(gg$theme$legend.position, "none")
})
master_is_gateway <- function(master) {
  # TRUE when `master` matches "[sparklyr://]address:port[/sessionid]",
  # i.e. the connection string designates a sparklyr gateway.
  gateway_pattern <- "^(sparklyr://)?[^:]+:[0-9]+(/[0-9]+)?$"
  any(grepl(gateway_pattern, master))
}
gateway_connection <- function(master, config) {
  # Open a connection to an already-running sparklyr gateway.
  #
  # master: connection string "[sparklyr://]address:port[/sessionid]".
  # config: sparklyr configuration list, forwarded to the gateway handshake.
  # Returns a spark_gateway_connection; stops on any failure.
  if (!master_is_gateway(master)) {
    stop("sparklyr gateway master expected to be formatted as sparklyr://address:port")
  }
  # The "sparklyr://" prefix is optional per master_is_gateway(); take the
  # part after "//" when present, otherwise the whole string.
  parts <- strsplit(master, "//")[[1]]
  components <- strsplit(parts[[length(parts)]], ":")[[1]]
  gatewayAddress <- components[[1]]
  portAndSession <- strsplit(components[[2]], "/")[[1]]
  gatewayPort <- as.integer(portAndSession[[1]])
  # The session id after the port is optional and defaults to 0.
  sessionId <- if (length(portAndSession) > 1) as.integer(portAndSession[[2]]) else 0
  gatewayInfo <- spark_connect_gateway(
    gatewayAddress = gatewayAddress,
    gatewayPort = gatewayPort,
    sessionId = sessionId,
    config = config
  )
  if (is.null(gatewayInfo)) {
    stop("Failed to connect to gateway: ", master)
  }
  sc <- spark_gateway_connection(master, config, gatewayInfo, gatewayAddress)
  # BUG FIX: this guard previously re-tested `gatewayInfo`, which is known
  # to be non-NULL at this point; the intent is to validate the connection.
  if (is.null(sc)) {
    stop("Failed to open connection from gateway: ", master)
  }
  sc
}
# Build the connection object for a gateway-based Spark connection:
# opens the backend and monitoring sockets against the port advertised by
# the gateway, then wraps everything in a spark_gateway_connection.
spark_gateway_connection <- function(master, config, gatewayInfo, gatewayAddress) {
tryCatch(
{
# Socket timeout / blocking behaviour is driven by the configured
# backend polling interval (seconds).
interval <- spark_config_value(config, "sparklyr.backend.interval", 1)
backend <- socketConnection(
host = gatewayAddress,
port = gatewayInfo$backendPort,
server = FALSE,
blocking = interval > 0,
open = "wb",
timeout = interval
)
class(backend) <- c(class(backend), "shell_backend")
# NOTE(review): the monitoring socket connects to the same backendPort
# as the main backend socket — presumably intended (two sessions on one
# port); confirm against the gateway protocol.
monitoring <- socketConnection(
host = gatewayAddress,
port = gatewayInfo$backendPort,
server = FALSE,
blocking = interval > 0,
open = "wb",
timeout = interval
)
class(monitoring) <- c(class(monitoring), "shell_backend")
},
error = function(err) {
# On any socket failure, release the gateway connection before failing.
close(gatewayInfo$gateway)
stop("Failed to open connection to backend:", err$message)
}
)
sc <- new_spark_gateway_connection(list(
master = master,
method = "gateway",
app_name = "sparklyr",
config = config,
state = new.env(),
spark_home = NULL,
backend = backend,
monitoring = monitoring,
gateway = gatewayInfo$gateway,
output_file = NULL
))
# NOTE(review): the finalizer is registered on baseenv(), so it runs at
# R session shutdown (onexit = TRUE) rather than when `sc` is collected —
# looks deliberate (keep the connection alive for the session); confirm.
reg.finalizer(baseenv(), function(x) {
if (connection_is_open(sc)) {
stop_shell(sc)
}
}, onexit = TRUE)
sc
}
# S3 methods for spark_gateway_connection: most behaviour is shared with
# spark_shell_connection, so those methods are reused directly by
# assignment; log/web access is not supported over a gateway and errors.
connection_is_open.spark_gateway_connection <- connection_is_open.spark_shell_connection
# spark_log is unavailable for gateway connections: the driver log lives
# with the process that started Spark, not with this client.
spark_log.spark_gateway_connection <- function(sc, n = 100, filter = NULL, ...) {
stop("spark_log is not available while connecting through an sparklyr gateway")
}
# Likewise, the Spark web UI URL cannot be resolved through the gateway.
spark_web.spark_gateway_connection <- function(sc, ...) {
stop("spark_web is not available while connecting through an sparklyr gateway")
}
invoke_method.spark_gateway_connection <- invoke_method.spark_shell_connection
j_invoke_method.spark_gateway_connection <- j_invoke_method.spark_shell_connection
print_jobj.spark_gateway_connection <- print_jobj.spark_shell_connection
chisq.test2 <-
function(obj, chisq.test.perm, chisq.test.B, chisq.test.seed){
  # P-value for association in the contingency table `obj`.
  #
  # Degenerate tables (no dims, any dimension < 2, or fewer than two
  # non-empty rows/columns) return NaN. When every expected cell count is
  # >= 5 the asymptotic chi-squared test is used; otherwise either a
  # simulated chi-squared test (chisq.test.perm = TRUE, with chisq.test.B
  # replicates and optional chisq.test.seed) or Fisher's exact test is
  # used. Any test failure also returns NaN.
  if (is.null(dim(obj)) || any(dim(obj) < 2) ||
      sum(rowSums(obj) > 0) < 2 || sum(colSums(obj) > 0) < 2) {
    return(NaN)
  }
  # Drop all-zero columns; the guard above ensures at least two remain.
  obj <- obj[, colSums(obj) > 0]
  # Expected counts under independence.
  expected <- outer(rowSums(obj), colSums(obj)) / sum(obj)
  result <- if (!any(expected < 5)) {
    try(chisq.test(obj), silent = TRUE)
  } else if (chisq.test.perm) {
    if (!is.null(chisq.test.seed)) set.seed(chisq.test.seed)
    try(chisq.test(obj, simulate.p.value = chisq.test.perm, B = chisq.test.B),
        silent = TRUE)
  } else {
    try(fisher.test(obj), silent = TRUE)
  }
  if (inherits(result, "try-error")) {
    return(NaN)
  }
  result$p.value
}
NULL
getTargomoRoutes <- function(source_data = NULL, source_lat = NULL, source_lng = NULL,
                             target_data = NULL, target_lat = NULL, target_lng = NULL,
                             source_id = NULL, target_id = NULL,
                             options = targomoOptions(),
                             api_key = Sys.getenv("TARGOMO_API_KEY"),
                             region = Sys.getenv("TARGOMO_REGION"),
                             config = list(),
                             verbose = FALSE,
                             progress = FALSE,
                             timeout = NULL) {
  # Query the Targomo "route" service once per requested travel mode and
  # return a named list of processed responses (one element per mode).
  travelModes <- options$travelType
  sourcePoints <- createPoints(source_data, source_lat, source_lng, source_id)
  targetPoints <- createPoints(target_data, target_lat, target_lng, target_id)
  targets <- deriveTargets(targetPoints)
  messageMultipleTravelModes(travelModes)
  routes <- list()
  for (mode in travelModes) {
    # The API accepts a single travel mode per request.
    options$travelType <- mode
    modeOptions <- deriveOptions(options)
    sources <- deriveSources(sourcePoints, modeOptions)
    requestBody <- createRequestBody("route", sources, targets, modeOptions)
    response <- callTargomoAPI(
      api_key = api_key, region = region,
      service = "route", body = requestBody,
      config = config,
      verbose = verbose, progress = progress,
      timeout = timeout
    )
    routes[[mode]] <- processResponse(response, service = "route")
  }
  return(routes)
}
drawTargomoRoutes <- function(map, routes, drawOptions = routeDrawOptions(), group = NULL, ...) {
  # Draw previously fetched Targomo routes on a leaflet map, one travel
  # mode at a time, returning the augmented map.
  for (mode in names(routes)) {
    for (route in routes[[mode]]) {
      features <- route$features
      # Route geometry lives in the LINESTRING features; point features
      # carry the source/target markers.
      segments <- features[sf::st_is(features$geometry, "LINESTRING"), ]
      if (drawOptions$showMarkers) {
        sourceFeatures <- features[!is.na(features$sourceId), ]
        targetFeatures <- features[!is.na(features$targetId), ]
        map <- map %>%
          leaflet::addMarkers(data = sourceFeatures, label = ~paste("Source:", sourceId), group = group) %>%
          leaflet::addMarkers(data = targetFeatures, label = ~paste("Target:", targetId), group = group)
      }
      if (mode == "car") {
        map <- drawCar(map, segments, drawOptions, group, ...)
      } else if (mode == "bike") {
        map <- drawBike(map, segments, drawOptions, group, ...)
      } else if (mode == "walk") {
        map <- drawWalk(map, segments, drawOptions, group, ...)
      } else if (mode == "transit") {
        # Transit routes mix walking legs and transit legs; draw both.
        walkLegs <- segments[segments$travelType == "WALK", ]
        transitLegs <- segments[segments$travelType == "TRANSIT", ]
        map <- map %>%
          drawWalk(segment = walkLegs, drawOptions = drawOptions, group = group, ...) %>%
          drawTransit(segment = transitLegs, drawOptions = drawOptions, group = group, ...)
      }
      if (mode == "transit" && drawOptions$showTransfers &&
          any(features$travelType == "TRANSFER", na.rm = TRUE)) {
        # Collapse transfer geometries to unique points before plotting.
        transfers <- suppressWarnings({
          features[features$travelType == "TRANSFER", ] %>%
            sf::st_cast(to = "POINT") %>%
            unique()
        })
        map <- map %>%
          leaflet::addCircleMarkers(data = transfers,
                                    color = drawOptions$transferColour,
                                    radius = drawOptions$transferRadius,
                                    group = group)
      }
    }
  }
  return(map)
}
addTargomoRoutes <- function(map,
                             source_data = NULL, source_lat = NULL, source_lng = NULL, source_id = NULL,
                             target_data = NULL, target_lat = NULL, target_lng = NULL, target_id = NULL,
                             options = targomoOptions(),
                             drawOptions = routeDrawOptions(),
                             group = NULL,
                             api_key = Sys.getenv("TARGOMO_API_KEY"),
                             region = Sys.getenv("TARGOMO_REGION"),
                             config = list(),
                             verbose = FALSE, progress = FALSE,
                             timeout = NULL) {
  # Convenience wrapper: fetch routes from the Targomo API and draw them
  # onto the supplied leaflet map in a single call.
  fetched <- getTargomoRoutes(
    api_key = api_key, region = region,
    source_data = source_data, source_lat = source_lat,
    source_lng = source_lng, source_id = source_id,
    target_data = target_data, target_lat = target_lat,
    target_lng = target_lng, target_id = target_id,
    options = options, config = config,
    verbose = verbose, progress = progress,
    timeout = timeout
  )
  drawTargomoRoutes(
    map = map,
    routes = fetched,
    drawOptions = drawOptions,
    group = group
  )
}
# Styling options for drawing routes. Each travel mode has a colour,
# a line weight and an optional dash pattern; transfers (public transport
# interchanges) are drawn as circle markers. NULL entries are removed by
# leaflet::filterNULL so that leaflet falls back to its own defaults.
routeDrawOptions <- function(showMarkers = TRUE,
showTransfers = TRUE,
walkColour = "green",
walkWeight = 5,
walkDashArray = "1,10",
carColour = "blue",
carWeight = 5,
carDashArray = NULL,
bikeColour = "orange",
bikeWeight = 5,
bikeDashArray = NULL,
transitColour = "red",
transitWeight = 5,
transitDashArray = NULL,
transferColour = "blue",
transferRadius = 10) {
leaflet::filterNULL(
list(showMarkers = showMarkers,
showTransfers = showTransfers,
walkColour = walkColour,
walkWeight = walkWeight,
walkDashArray = walkDashArray,
carColour = carColour,
carWeight = carWeight,
carDashArray = carDashArray,
bikeColour = bikeColour,
bikeWeight = bikeWeight,
bikeDashArray = bikeDashArray,
transitColour = transitColour,
transitWeight = transitWeight,
transitDashArray = transitDashArray,
transferColour = transferColour,
transferRadius = transferRadius
)
)
}
NULL
drawRouteSegment <- function(map, segment, drawOptions, type, group, ...) {
  # Add one travel-mode segment to the map as a polyline, styled from the
  # "<type>Colour" / "<type>Weight" / "<type>DashArray" entries of
  # drawOptions, with an informational popup attached.
  segmentStyle <- drawOptions[paste0(type, c("Colour", "Weight", "DashArray"))]
  names(segmentStyle) <- c("colour", "weight", "dashArray")
  leaflet::addPolylines(
    map = map,
    data = segment,
    color = segmentStyle$colour,
    weight = segmentStyle$weight,
    dashArray = segmentStyle$dashArray,
    label = "Click for more information",
    popup = createRoutePopup(segment, transit = (type == "transit")),
    group = group,
    ...
  )
}
# Thin per-mode wrappers around drawRouteSegment.
drawWalk <- function(map, segment, drawOptions, group, ...) {
  # Walking legs.
  drawRouteSegment(map, segment, drawOptions, "walk", group, ...)
}
drawBike <- function(map, segment, drawOptions, group, ...) {
  # Cycling legs.
  drawRouteSegment(map, segment, drawOptions, "bike", group, ...)
}
drawCar <- function(map, segment, drawOptions, group, ...) {
  # Driving legs.
  drawRouteSegment(map, segment, drawOptions, "car", group, ...)
}
drawTransit <- function(map, segment, drawOptions, group, ...) {
  # Public-transport legs.
  drawRouteSegment(map, segment, drawOptions, "transit", group, ...)
}
# Build the HTML popup text for each route segment. For transit segments
# the header is derived from the GTFS-style routeType code (1 =
# underground, 2 = train, 3 = bus, anything else = generic public
# transport) plus the short route name; otherwise the raw travelType is
# used. When startEnd is TRUE the start/end stop names are included.
# NOTE(review): a missing (NA) routeType propagates NA through the nested
# ifelse, so the header becomes NA rather than "PUBLIC TRANSPORT" —
# confirm this is intended before restructuring.
createRoutePopup <- function(data, transit = FALSE, startEnd = transit) {
if (transit) {
header <- ifelse(data$routeType == 1, "UNDERGROUND",
ifelse(data$routeType == 2, "TRAIN",
ifelse(data$routeType == 3, "BUS",
"PUBLIC TRANSPORT")))
header <- paste(header, "-", data$routeShortName)
} else {
header <- data$travelType
}
# Journey time is formatted per segment via prettyEdgeWeight.
paste0("<b>", header, "</b></br>",
if(startEnd) paste0("Start: ", data$startName, "</br>",
"End: ", data$endName, "</br>"),
"Journey time: ", sapply(data$travelTime, prettyEdgeWeight, type = "time"))
}
context("write_pzfx")
# Round-trip check: a plain data frame written as a 'Column' table should
# read back identical to the reference fixture.
test_that("Test writing 'Column' type table", {
tmp <- tempfile(fileext=".pzfx")
on.exit(unlink(tmp))
expected_file <- system.file("testdata/column.tab", package="pzfx", mustWork=TRUE)
expected <- read.table(expected_file, sep="\t", header=TRUE, stringsAsFactors=FALSE)
write_pzfx(expected, tmp, row_names=FALSE)
pzfx <- read_pzfx(tmp)
expect_equal(pzfx, expected)
})
# Round-trip check for an 'XY' table: row names become the ROWTITLE column
# and the x column is selected by name via x_col.
test_that("Test writing 'XY' type table", {
tmp <- tempfile(fileext=".pzfx")
on.exit(unlink(tmp))
expected_file <- system.file("testdata/x_y_no_rep.tab", package="pzfx", mustWork=TRUE)
expected <- read.table(expected_file, sep="\t", header=TRUE, stringsAsFactors=FALSE)
to_write <- expected[, colnames(expected) != "ROWTITLE"]
rownames(to_write) <- expected$ROWTITLE
write_pzfx(to_write, tmp, row_names=TRUE, x_col="XX")
pzfx <- read_pzfx(tmp)
expect_equal(pzfx, expected)
})
# Several tables can be written at once as a named list; each table can be
# read back either by name or by position.
test_that("Test multiple input tables work", {
tmp <- tempfile(fileext=".pzfx")
on.exit(unlink(tmp))
expected_file <- system.file("testdata/column.tab", package="pzfx", mustWork=TRUE)
expected <- read.table(expected_file, sep="\t", header=TRUE, stringsAsFactors=FALSE)
to_write <- list("T1"=expected, "T2"=expected)
write_pzfx(to_write, tmp, row_names=FALSE)
pzfx1 <- read_pzfx(tmp, table="T1")
pzfx2 <- read_pzfx(tmp, table=2)
expect_equal(pzfx1, expected)
expect_equal(pzfx2, expected)
})
test_that("Test writing matrix works", {
  out <- tempfile(fileext = ".pzfx")
  on.exit(unlink(out))
  # A numeric matrix should round-trip the same way a data frame does.
  ref_file <- system.file("testdata/column.tab", package = "pzfx", mustWork = TRUE)
  ref <- read.table(ref_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
  write_pzfx(as.matrix(ref), out, row_names = FALSE)
  expect_equal(read_pzfx(out), ref)
})
# Input validation: only data frames, matrices, or lists thereof (with all
# numeric columns) are accepted; everything else must raise an error with
# an informative message.
test_that("Should raise when provided with wrong type of input", {
tmp <- tempfile(fileext=".pzfx")
on.exit(unlink(tmp))
expect_error(write_pzfx(1:10, tmp, row_names=FALSE), "Cannot process x of class integer")
expect_error(write_pzfx(rnorm(10), tmp, row_names=FALSE), "Cannot process x of class numeric")
expect_error(write_pzfx("Existence is pain", tmp, row_names=FALSE),
"Cannot process x of class character")
expect_error(write_pzfx(data.frame("X"=c("a", "b"), "Y"=1:2), tmp, row_names=FALSE),
"These tables are not all numeric: Data 1")
expect_error(write_pzfx(list("a"=1:10), tmp, row_names=FALSE),
"These elements are not data frame or matrix: a")
})
# 'x_col' must reference an existing column and must be scalar or match
# the number of tables supplied.
test_that("Should raise when provided with wrong 'x_col'", {
tmp <- tempfile(fileext=".pzfx")
on.exit(unlink(tmp))
expect_error(write_pzfx(data.frame("SingleColumn"=1:10), tmp, x_col=2),
"Not enough columns for table Data 1")
expect_error(write_pzfx(list(data.frame(1:2), data.frame(3:4)), tmp, x_col=c(1, 1, 1)),
"Argument 'x_col' can only be of length 1 or the length of 'x'")
})
# 'row_names' must likewise be scalar or one value per table.
test_that("Should raise when provided with wrong 'row_names'", {
tmp <- tempfile(fileext=".pzfx")
on.exit(unlink(tmp))
expect_error(write_pzfx(list(data.frame(1:2), data.frame(3:4)), tmp, row_names=c(TRUE, FALSE, TRUE)),
"Argument 'row_names' can only be of length 1 or the length of 'x'")
})
fncLRpROC <- function(){
varPosListn <- function(vars, var){
if (is.null(var)) return(NULL)
if (any(!var %in% vars)) NULL
else apply(outer(var, vars, "=="), 1, which) - 1
}
.activeModel <- ActiveModel()
if (is.null(.activeModel)) {
errorCondition(recall=fncLRpROC, message=gettext("No GLM model selected.", domain="R-RcmdrPlugin.ROC"))
return()
}
defaults <- list(
initial.narm = 1, initial.percent = 0, initial.direction = "auto",
initial.smooth = 0, initial.smoothingmethod = "binormal",
initial.smoothinbandwidth = "nrd0", initial.bandwidthnumeric = "",
initial.bandwidthadjustment = "1", initial.bandwidthwindow = "gaussian",
initial.distributioncontrols = "normal", initial.distributioncases = "normal",
initial.cicompute = 1, initial.cilevel = "0.95", initial.cimethod = "bootstrap",
initial.cibootn = "2000", initial.cibootstratified = 0,
initial.citype = "se", initial.cithresholds = "local maximas",
initial.civalues = "seq(0, 1, 0.05)", initial.ciplottype = "shape",
initial.auc = 1, initial.partialauc = 0,
initial.partialfrom = 0, initial.partialto = 1,
initial.partialfocus = "specificity", initial.partialcorrect = 0,
initial.plot = 1, initial.add = 0,
initial.printauc = 0, initial.aucpolygon = 0, initial.maxaucpolygon = 0,
initial.grid = 0, initial.identity = 1, initial.ciplot = 0, initial.values = 0,
initial.printthresrb = "no", initial.customthres = "c(0.5, 1, 10, 100)",
initial.colorroc=palette()[1],
initial.ltyroc="solid",
initial.xlab=gettextRcmdr("<auto>"), initial.ylab=gettextRcmdr("<auto>"),
initial.main=gettextRcmdr("<auto>"),
initial.tab=0)
dialog.values <- getDialog("pROC", defaults)
initializeDialog(title=gettext("Plot ROC curve", domain="R-RcmdrPlugin.ROC"), use.tabs=TRUE, tabs=c("dataTab", "smoothingTab", "aucTab", "ciTab", "optionsTab"))
generalFrame <- tkframe(dataTab)
generaldataFrame <- ttklabelframe(generalFrame, text = gettext("Data", domain="R-RcmdrPlugin.ROC"))
checkBoxes(window = generalFrame,
frame = "dataoptionsFrame",
boxes = c("narm", "percent"),
initialValues = c(dialog.values$initial.narm, dialog.values$initial.percent),
labels = gettextRcmdr(c("Remove NAs", "Show/input % instead of 0-1")),
title = gettext("Options", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
radioButtons(dataoptionsFrame,
name="directionrb",
buttons=c("auto", "gt", "lt"),
values=c("auto", ">", "<"),
labels=gettextRcmdr(c("auto", "Control > cases", "Control <= cases")),
title=gettext("Direction", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.direction)
smoothingFrame <- tkframe(smoothingTab)
smoothingleftpaneFrame <- tkframe(smoothingFrame)
smoothinggeneralFrame <- ttklabelframe(smoothingleftpaneFrame, text = gettext("General", domain="R-RcmdrPlugin.ROC"))
smoothingdensityFrame <- ttklabelframe(smoothingleftpaneFrame, text = gettext("Density options", domain="R-RcmdrPlugin.ROC"))
smoothingdistributionFrame <- ttklabelframe(smoothingFrame, text = gettext("Distributions options", domain="R-RcmdrPlugin.ROC"))
radioButtons(smoothinggeneralFrame,
name="smoothingmethodrb",
buttons=c("binormal", "density", "fitdistr", "logcondens", "logcondens.smooth"),
values=c("binormal", "density", "fitdistr", "logcondens", "logcondens.smooth"),
labels=gettextRcmdr(c("binormal", "density", "fit distribution", "logcondens", "logcondens.smooth")),
title=gettext("Smoothing method", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.smoothingmethod)
radioButtons(smoothingdensityFrame,
name="smoothinbandwidthrb",
buttons=c("nrd0", "nrd", "ucv", "bcv", "SJ", "numeric"),
values=c("nrd0", "nrd", "ucv", "bcv", "SJ", "numeric"),
labels=gettextRcmdr(c("nrd0", "nrd", "ucv", "bcv", "SJ", "<numeric>")),
title=gettext("Bandwidth", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.smoothinbandwidth)
bandwidthnumericVar <- tclVar(dialog.values$initial.bandwidthnumeric)
bandwidthnumericEntry <- ttkentry(smoothingdensityFrame, width = "25", textvariable = bandwidthnumericVar)
bandwidthnumericScroll <- ttkscrollbar(smoothingdensityFrame, orient = "horizontal",
command = function(...) tkxview(bandwidthnumericEntry, ...))
tkconfigure(bandwidthnumericEntry, xscrollcommand = function(...) tkset(bandwidthnumericScroll,
...))
tkbind(bandwidthnumericEntry, "<FocusIn>", function() tkselection.clear(bandwidthnumericEntry))
bandwidthadjustmentVar <- tclVar(dialog.values$initial.bandwidthadjustment)
bandwidthadjustmentEntry <- ttkentry(smoothingdensityFrame, width = "25", textvariable = bandwidthadjustmentVar)
radioButtons(smoothingdensityFrame,
name="bandwidthwindowrb",
buttons=c("gaussian", "epanechnikov", "rectangular", "triangular", "biweight", "cosine", "optcosine"),
values=c("gaussian", "epanechnikov", "rectangular", "triangular", "biweight", "cosine", "optcosine"),
labels=gettextRcmdr(c("gaussian", "epanechnikov", "rectangular", "triangular", "biweight", "cosine", "optcosine")),
title=gettext("Kernel", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.bandwidthwindow)
radioButtons(smoothingdistributionFrame,
name="distributioncontrolsrb",
buttons=c("normal", "lognormal", "logistic", "exponential", "weibull", "gamma", "cauchy"),
values=c("normal", "lognormal", "logistic", "exponential", "weibull", "gamma", "cauchy"),
labels=gettextRcmdr(c("normal", "lognormal", "logistic", "exponential", "weibull", "gamma", "cauchy")),
title=gettext("Distribution of controls", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.distributioncontrols)
radioButtons(smoothingdistributionFrame,
name="distributioncasesrb",
buttons=c("normal", "lognormal", "logistic", "exponential", "weibull", "gamma", "cauchy"),
values=c("normal", "lognormal", "logistic", "exponential", "weibull", "gamma", "cauchy"),
labels=gettextRcmdr(c("normal", "lognormal", "logistic", "exponential", "weibull", "gamma", "cauchy")),
title=gettext("Distribution of cases", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.distributioncases)
ciFrame <- tkframe(ciTab)
checkBoxes(window = ciFrame, frame = "cibootstrapFrame",
boxes = c("cibootstratified"), initialValues = c(
dialog.values$initial.cibootstratified
),labels = gettextRcmdr(c(
"Stratified")), title = gettext("Bootstrap options", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
checkBoxes(window = ciFrame, frame = "cigeneralFrame",
boxes = c("cicompute"), initialValues = c(
dialog.values$initial.cicompute
),labels = gettextRcmdr(c(
"Compute Confidence Interval (CI)")), title = gettext("General", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
cilevelVar <- tclVar(dialog.values$initial.cilevel)
cilevelEntry <- ttkentry(cigeneralFrame, width = "25", textvariable = cilevelVar)
radioButtons(cigeneralFrame, name="cimethodrb", buttons=c("delong", "bootstrap", "auto"), values=c("delong", "bootstrap", "auto"),
labels=gettextRcmdr(c("delong", "bootstrap", "auto")), title=gettext("Method", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.cimethod)
radioButtons(cigeneralFrame, name="cityperb", buttons=c("auc", "se", "sp", "thresholds"), values=c("auc", "se", "sp", "thresholds"),
labels=gettextRcmdr(c("auc", "se", "sp", "thresholds")), title=gettext("Type of CI", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.citype)
radioButtons(cigeneralFrame, name="cithresholdsrb", buttons=c("all", "localmaximas", "custom"), values=c("all", "local maximas", "custom"),
labels=gettextRcmdr(c("all", "local maximas", "<custom>")), title=gettext("Thresholds", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.cithresholds)
civaluesVar <- tclVar(dialog.values$initial.civalues)
civaluesEntry <- ttkentry(cigeneralFrame, width = "25", textvariable = civaluesVar)
civaluesScroll <- ttkscrollbar(cigeneralFrame, orient = "horizontal",
command = function(...) tkxview(civaluesEntry, ...))
tkconfigure(civaluesEntry, xscrollcommand = function(...) tkset(civaluesScroll,
...))
tkbind(civaluesEntry, "<FocusIn>", function() tkselection.clear(civaluesEntry))
cibootnVar <- tclVar(dialog.values$initial.cibootn)
cibootnEntry <- ttkentry(cibootstrapFrame, width = "5", textvariable = cibootnVar)
tkgrid(labelRcmdr(cibootstrapFrame, text = gettext("Confidence level number of replicates", domain="R-RcmdrPlugin.ROC")), cibootnEntry, sticky = "ew", padx=6)
aucFrame <- tkframe(aucTab)
checkBoxes(window = aucFrame, frame = "generalaucFrame",
boxes = c("auc", "partialauc"), initialValues = c(
dialog.values$initial.auc, dialog.values$initial.partialauc
),labels = gettextRcmdr(c(
"Compute Area Under Curve (AUC)", "Compute partial AUC")), title = gettext("General", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
checkBoxes(window = aucFrame, frame = "partialaucFrame",
boxes = c("partialcorrect"), initialValues = c(
dialog.values$initial.partialcorrect),labels = gettextRcmdr(c(
"Correct partial AUC")), title = gettext("Partial AUC", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
partialfromVar <- tclVar(dialog.values$initial.partialfrom)
partialfromEntry <- ttkentry(partialaucFrame, width = "25", textvariable = partialfromVar)
tkgrid(labelRcmdr(partialaucFrame, text = gettext("From:", domain="R-RcmdrPlugin.ROC")), partialfromEntry, sticky = "ew", padx=6)
partialtoVar <- tclVar(dialog.values$initial.partialto)
partialtoEntry <- ttkentry(partialaucFrame, width = "25", textvariable = partialtoVar)
tkgrid(labelRcmdr(partialaucFrame, text = gettext("To:", domain="R-RcmdrPlugin.ROC")), partialtoEntry, sticky = "ew", padx=6)
radioButtons(partialaucFrame, name="partialfocus", buttons=c("specificity", "sensitivity"), values=c("specificity", "sensitivity"),
labels=gettextRcmdr(c("specificity", "sensitivity")), title=gettext("Focus", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.partialfocus)
optionsParFrame <- tkframe(optionsTab)
optFrame <- ttklabelframe(optionsParFrame, text = gettext("Plot Options", domain="R-RcmdrPlugin.ROC"))
parFrame <- ttklabelframe(optionsParFrame, text = gettext("Plot Labels", domain="R-RcmdrPlugin.ROC"))
checkBoxes(window = optFrame, frame = "optionsFrame",
boxes = c("plot", "add", "smooth", "grid","identity","ciplot","values"), initialValues = c(
dialog.values$initial.plot, dialog.values$initial.add, dialog.values$initial.smooth,
dialog.values$initial.grid, dialog.values$initial.identity, dialog.values$initial.ciplot, dialog.values$initial.values),labels = gettextRcmdr(c(
"Plot", "Add curve to existing plot", "Smooth","Display grid","Display identity line",
"Display confidence interval","Display values (Se, Sp, Thresholds)")), title = gettext("General", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
checkBoxes(window = optFrame, frame = "aucpolygonFrame",
boxes = c("aucpolygon", "maxaucpolygon"), initialValues = c(
dialog.values$initial.aucpolygon, dialog.values$initial.maxaucpolygon),labels = gettextRcmdr(c(
"Polygon of AUC", "Polygon of maximal AUC")), title = gettext("Display area as polygon", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
checkBoxes(window = optFrame, frame = "informationFrame",
boxes = c("printauc"), initialValues = c(
dialog.values$initial.printauc),labels = gettextRcmdr(c(
"AUC")), title = gettext("Display information on plot", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
radioButtons(informationFrame, name="printthresrb", buttons=c("no", "best", "all", "localmaximas", "customthres"), values=c("no", "best", "all", "local maximas", "customthres"),
labels=gettextRcmdr(c("no", "best: max(sum(Se + Sp))", "all", "local maximas", "<custom>")), title=gettext("Display threshold(s)", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.printthresrb)
customthresVar <- tclVar(dialog.values$initial.customthres)
customthresEntry <- ttkentry(informationFrame, width = "25", textvariable = customthresVar)
customthresScroll <- ttkscrollbar(informationFrame, orient = "horizontal",
command = function(...) tkxview(customthresEntry, ...))
tkconfigure(customthresEntry, xscrollcommand = function(...) tkset(customthresScroll,
...))
tkbind(customthresEntry, "<FocusIn>", function() tkselection.clear(customthresEntry))
xlabVar <- tclVar(dialog.values$initial.xlab)
ylabVar <- tclVar(dialog.values$initial.ylab)
mainVar <- tclVar(dialog.values$initial.main)
xlabEntry <- ttkentry(parFrame, width = "25", textvariable = xlabVar)
xlabScroll <- ttkscrollbar(parFrame, orient = "horizontal",
command = function(...) tkxview(xlabEntry, ...))
tkconfigure(xlabEntry, xscrollcommand = function(...) tkset(xlabScroll,
...))
tkbind(xlabEntry, "<FocusIn>", function() tkselection.clear(xlabEntry))
tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("x-axis label")), xlabEntry, sticky = "ew", padx=6)
tkgrid(labelRcmdr(parFrame, text =""), xlabScroll, sticky = "ew", padx=6)
ylabEntry <- ttkentry(parFrame, width = "25", textvariable = ylabVar)
ylabScroll <- ttkscrollbar(parFrame, orient = "horizontal",
command = function(...) tkxview(ylabEntry, ...))
tkconfigure(ylabEntry, xscrollcommand = function(...) tkset(ylabScroll,
...))
tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("y-axis label")), ylabEntry, sticky = "ew", padx=6)
tkgrid(labelRcmdr(parFrame, text=""), ylabScroll, sticky = "ew", padx=6)
mainEntry <- ttkentry(parFrame, width = "25", textvariable = mainVar)
mainScroll <- ttkscrollbar(parFrame, orient = "horizontal",
command = function(...) tkxview(mainEntry, ...))
tkconfigure(mainEntry, xscrollcommand = function(...) tkset(mainScroll,
...))
tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("Graph title")), mainEntry, sticky = "ew", padx=6)
tkgrid(labelRcmdr(parFrame, text=""), mainScroll, sticky = "ew", padx=6)
radioButtons(parFrame, name="ciplottyperb", buttons=c("shape", "bars"), values=c("shape", "bars"),
labels=gettextRcmdr(c("shape", "bars")), title=gettext("CI plot type", domain="R-RcmdrPlugin.ROC"),
initialValue = dialog.values$initial.ciplottype)
colorrocBox <- variableListBox(parFrame, palette(), title=gettext("Color of ROC (from Palette)", domain="R-RcmdrPlugin.ROC"),
initialSelection=varPosListn(palette(), dialog.values$initial.colorroc))
ltys <- c("solid", "dashed", "dotted", "dotdash", "longdash", "twodash", "blank")
ltyrocBox <- variableListBox(parFrame, ltys, title=gettext("Line type of ROC (from Palette)", domain="R-RcmdrPlugin.ROC"),
initialSelection=varPosListn(ltys, dialog.values$initial.ltyroc))
    onOK <- function(){
        # Gather every widget value from the dialog, validate it, persist it
        # via putDialog(), then build and execute the pROC commands for a
        # logistic-regression ROC analysis.
        # Remember which notebook tab is active (0 = data tab) for recall.
        tab <- if (as.character(tkselect(notebook)) == dataTab$ID) 0 else 1
        # Checkbox tcl values "1"/"0" become the strings "TRUE"/"FALSE" so
        # they can be pasted verbatim into the generated command text.
        narm <- as.character("1" == tclvalue(narmVariable))
        percent <- as.character("1" == tclvalue(percentVariable))
        direction <- as.character(tclvalue(directionrbVariable))
        smoothingmethod <- as.character(tclvalue(smoothingmethodrbVariable))
        smoothinbandwidth <- as.character(tclvalue(smoothinbandwidthrbVariable))
        bandwidthnumeric <- as.character(tclvalue(bandwidthnumericVar))
        bandwidthadjustment <- as.character(tclvalue(bandwidthadjustmentVar))
        bandwidthwindow <- as.character(tclvalue(bandwidthwindowrbVariable))
        distributioncases <- as.character(tclvalue(distributioncasesrbVariable))
        distributioncontrols <- as.character(tclvalue(distributioncontrolsrbVariable))
        cicompute <- as.character("1" == tclvalue(cicomputeVariable))
        cilevel <- as.numeric(as.character(tclvalue(cilevelVar)))
        cimethod <- as.character(tclvalue(cimethodrbVariable))
        citype <- as.character(tclvalue(cityperbVariable))
        cithresholds <- as.character(tclvalue(cithresholdsrbVariable))
        civalues <- as.character(tclvalue(civaluesVar))
        cibootn <- as.integer(as.character(tclvalue(cibootnVar)))
        cibootstratified <- as.character("1" == tclvalue(cibootstratifiedVariable))
        auc <- as.character("1" == tclvalue(aucVariable))
        partialauc <- as.character("1" == tclvalue(partialaucVariable))
        partialfrom <- as.character(tclvalue(partialfromVar))
        partialto <- as.character(tclvalue(partialtoVar))
        partialfocus <- as.character(tclvalue(partialfocusVariable))
        partialcorrect <- as.character("1" == tclvalue(partialcorrectVariable))
        add <- as.character("1" == tclvalue(addVariable))
        plot <- as.character("1" == tclvalue(plotVariable))
        smooth <- as.character("1" == tclvalue(smoothVariable))
        printauc <- as.character("1" == tclvalue(printaucVariable))
        aucpolygon <- as.character("1" == tclvalue(aucpolygonVariable))
        maxaucpolygon <- as.character("1" == tclvalue(maxaucpolygonVariable))
        grid <- as.character("1" == tclvalue(gridVariable))
        identity <- as.character("1" == tclvalue(identityVariable))
        ciplot <- as.character("1" == tclvalue(ciplotVariable))
        values <- as.character("1" == tclvalue(valuesVariable))
        printthresrb <- as.character(tclvalue(printthresrbVariable))
        customthres <- as.character(tclvalue(customthresVar))
        # Axis labels/title: "<auto>" means "let pROC choose"; any other text
        # becomes a ready-to-paste ', xlab="..."' command fragment.
        xlab <- trim.blanks(tclvalue(xlabVar))
        xlab <- if (xlab == gettextRcmdr("<auto>"))
            ""
        else paste(", xlab=\"", xlab, "\"", sep = "")
        ylab <- trim.blanks(tclvalue(ylabVar))
        ylab <- if (ylab == gettextRcmdr("<auto>"))
            ""
        else paste(", ylab=\"", ylab, "\"", sep = "")
        main <- trim.blanks(tclvalue(mainVar))
        main <- if (main == gettextRcmdr("<auto>"))
            ""
        else paste(", main=\"", main, "\"", sep = "")
        ciplottype <- as.character(tclvalue(ciplottyperbVariable))
        colorroc <- getSelection(colorrocBox)
        # Helper: convert a palette color name into its hex RGB form.
        convert <- function (color){
            f=col2rgb(color)
            rgb(f[1],f[2],f[3],maxColorValue=255)
        }
        # NOTE(review): the next line is truncated in this copy of the file
        # (unterminated string literal); it presumably converted non-hex
        # colors via convert() -- verify against the package sources.
        if(substr(colorroc,1,1) != "
        ltyroc <- as.character(getSelection(ltyrocBox))
        # Persist every setting so the dialog reopens with the same choices.
        putDialog ("pROC", list(
            initial.narm = tclvalue(narmVariable), initial.percent = tclvalue(percentVariable),
            initial.direction = as.character(tclvalue(directionrbVariable)),
            initial.smooth = tclvalue(smoothVariable), initial.smoothingmethod = tclvalue(smoothingmethodrbVariable),
            initial.smoothinbandwidth = tclvalue(smoothinbandwidthrbVariable), initial.bandwidthnumeric = tclvalue(bandwidthnumericVar),
            initial.bandwidthadjustment = "1", initial.bandwidthwindow = tclvalue(bandwidthwindowrbVariable),
            initial.distributioncontrols = tclvalue(distributioncontrolsrbVariable), initial.distributioncases = tclvalue(distributioncasesrbVariable),
            initial.cicompute = tclvalue(cicomputeVariable), initial.cilevel = tclvalue(cilevelVar), initial.cimethod = tclvalue(cimethodrbVariable),
            initial.cibootn = tclvalue(cibootnVar), initial.cibootstratified = tclvalue(cibootstratifiedVariable),
            initial.citype = tclvalue(cityperbVariable), initial.cithresholds = tclvalue(cithresholdsrbVariable),
            initial.civalues = tclvalue(civaluesVar), initial.ciplottype = tclvalue(ciplottyperbVariable),
            initial.auc = tclvalue(aucVariable), initial.partialauc = tclvalue(partialaucVariable),
            initial.partialfrom = tclvalue(partialfromVar), initial.partialto = tclvalue(partialtoVar),
            initial.partialfocus = tclvalue(partialfocusVariable), initial.partialcorrect = tclvalue(partialcorrectVariable),
            initial.plot = tclvalue(plotVariable), initial.add = tclvalue(addVariable),
            initial.printauc = tclvalue(printaucVariable), initial.aucpolygon = tclvalue(aucpolygonVariable), initial.maxaucpolygon = tclvalue(maxaucpolygonVariable),
            initial.grid = tclvalue(gridVariable), initial.identity = tclvalue(identityVariable),
            initial.ciplot = tclvalue(ciplotVariable), initial.values = tclvalue(valuesVariable), initial.printthresrb = tclvalue(printthresrbVariable), initial.customthres = as.character(tclvalue(customthresVar)),
            initial.colorroc = getSelection(colorrocBox),
            initial.ltyroc = getSelection(ltyrocBox),
            initial.xlab=tclvalue(xlabVar), initial.ylab=tclvalue(ylabVar),
            initial.main=tclvalue(mainVar),
            initial.tab=tab))
        closeDialog()
        # Upper bound for partial-AUC limits depends on the percent setting.
        if (percent == "TRUE") {
            percentupper = 100
        } else {
            percentupper = 1
        }
        # Validate confidence-interval inputs; on error, reopen the dialog.
        if (cicompute == "TRUE") {
            if (0 == length(cilevel)) {
                errorCondition(recall=fncLRpROC, message=gettext("You must set a confidence interval level.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
            cilevel = as.numeric(cilevel)
            if ((cilevel < 0) || (cilevel > 1)) {
                errorCondition(recall=fncLRpROC, message=gettext("Confidence interval level outside of range.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
            if (0 == length(cibootn)) {
                errorCondition(recall=fncLRpROC, message=gettext("You must set a confidence interval number of replicates.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
            if (cibootn < 0) {
                # NOTE(review): "pozitive" in this user-visible message is a
                # typo carried over from the original sources.
                errorCondition(recall=fncLRpROC, message=gettext("Confidence interval number of replicates should be a pozitive number.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
        }
        # Validate partial-AUC limits and warn about likely unit mistakes
        # (0-1 values with percent on, or 0-100 values with percent off).
        if (partialauc == "TRUE") {
            if (0 == length(partialto)) {
                errorCondition(recall=fncLRpROC, message=gettext("You must set a partial AUC 'to' limit.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
            partialto = as.numeric(partialto)
            partialfrom = as.numeric(partialfrom)
            if ((partialto < 0) | (partialto > percentupper)) {
                errorCondition(recall=fncLRpROC, message=gettext("Partial AUC 'to' limit outside of range.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
            if (0 == length(partialfrom)) {
                errorCondition(recall=fncLRpROC, message=gettext("You must set a partial AUC 'from' limit.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
            if ((partialfrom < 0) | (partialfrom > percentupper)) {
                errorCondition(recall=fncLRpROC, message=gettext("Partial AUC 'from' limit outside of range.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
            if ((max(c(partialfrom, partialto)) <= 1) & (percent=="TRUE")) {
                Message(message="Maybe you didn't specified well the values, you probably wanted to set the values between 0-100 instead of between 0-1, since percent is checked", type="warning")
            }
            if ((max(c(partialfrom, partialto)) > 1) & (percent=="FALSE")) {
                Message(message="Maybe you didn't specified well the values, you probably wanted to set the values between 0-1 instead of between 0-100, since percent is not checked", type="warning")
            }
        }
        if ((printthresrb == "custom") & (0 == length(customthres))) {
            errorCondition(recall=fncLRpROC, message=gettext("Custom threshold should not be empty.", domain="R-RcmdrPlugin.ROC"))
            return()
        }
        .activeDataSet <- ActiveDataSet()
        # NOTE(review): the validation above tests printthresrb == "custom"
        # but this branch tests "customthres"; one of the two literals is
        # almost certainly wrong -- confirm which value the radio buttons
        # actually store.
        if (printthresrb == "customthres") {
            threshold = customthres
        } else {
            threshold = paste("'", printthresrb, "'", sep="")
        }
        if (partialauc == "TRUE") {
            partialauc = paste("c(", partialfrom, ", ", partialto, ")", sep="")
        }
        .activeDataSet <- ActiveDataSet()
        # Echo the model summary, compute predicted probabilities, derive a
        # 0/1 outcome from the response factor's second level, then build
        # the ROC object with pROC::roc().
        command <- paste("summary(", .activeModel, ")", sep = "")
        doItAndPrint(command)
        command <- paste("glm.prediction <- predict(", .activeModel, ", type=c('response'))", sep = "")
        doItAndPrint(command)
        command <- paste(".depname <- ", "as.character((attr(", .activeModel, "$terms, 'variables')[2]))", sep = "")
        doItAndPrint(command)
        command <- paste(".outcome <- ifelse(", .activeDataSet, "$", .depname, "==levels(as.factor(", .activeDataSet, "$", .depname, "))[2], 1, 0 )", sep = "")
        doItAndPrint(command)
        command <- paste("roc.obj <- pROC::roc(.outcome ~ glm.prediction, data=", .activeDataSet, ", na.rm=", narm, ", percent=", percent, ", direction='", direction, "'",
                         ", partial.auc=", partialauc, ", partial.auc.focus='", partialfocus, "'", ", partial.auc.correct=", partialcorrect,
                         ", auc=", auc, ", plot=FALSE, ci=TRUE, of='auc', conf.level=", cilevel, ", ci.method='", cimethod,"', boot.n=", cibootn, ", boot.stratified=", cibootstratified,")", sep = "")
        doItAndPrint(command)
        if (plot == "TRUE") {
            command <- paste("plot(roc.obj, add=", add,
                             ", print.auc=", printauc, ", auc.polygon=", aucpolygon, ", max.auc.polygon=", maxaucpolygon,
                             ", print.auc.x=ifelse(roc.obj$percent, 50, .5), print.auc.y=ifelse(roc.obj$percent, 45, .45), print.auc.pattern='AUC: %.2f (%.2f, %.2f)'",
                             ", grid=", grid, ", identity=", identity, ", col='", colorroc, "', lty='", ltyroc, "'",
                             ", print.thres=", threshold, ", print.thres.adj=c(0,0.5), print.thres.cex=0.7, print.thres.pattern='%.2f (%.2f, %.2f)'",
                             xlab, ylab, main, ")", sep = "")
            doItAndPrint(command)
        }
        # NOTE(review): the next two paste() calls are truncated in this copy
        # of the file (unterminated strings) -- verify against the sources.
        command <- paste("roc.obj$levels[1]
        doItAndPrint(command)
        command <- paste("roc.obj$levels[2]
        doItAndPrint(command)
        # Turn the CI options into reusable ", conf.level=..." fragments.
        if (cicompute == "TRUE") {
            cilevel = paste(", conf.level=", cilevel, sep="")
            cimethod = paste(", method='", cimethod, "'", sep="")
        }
        if (ciplot == "TRUE") {
            if (citype == "thresholds") {
                if (cithresholds == "custom") {
                    threshold = civalues
                } else {
                    threshold = paste("'", cithresholds, "'", sep="")
                }
                command <- paste("roc.ci.obj <- ci(roc.obj, of='thresholds', thresholds=", threshold, cilevel, cimethod,", boot.n=", cibootn, ", boot.stratified=", cibootstratified,")", sep = "")
                doItAndPrint(command)
                command <- paste("plot(roc.ci.obj, type='", ciplottype, "', col='
                doItAndPrint(command)
            } else {
                # NOTE(review): `&` makes this condition always FALSE (citype
                # cannot equal both "se" and "sp"); `|` was almost certainly
                # intended, so these unit-mismatch warnings never fire.
                if ((citype == "se") & (citype == "sp")) {
                    if ((max(eval(parse(text=as.character(civalues)))) <= 1) & (percent=="TRUE")) {
                        Message(message="Maybe you didn't specified well the values, since percent is selected you probably wanted to set seq(0,100,5) (or values between 0-100%) instead of seq(0,1,0.05), since percent is checked", type="warning")
                    }
                    if ((max(eval(parse(text=as.character(civalues)))) > 1) & (percent=="FALSE")) {
                        Message(message="Maybe you didn't specified well the values, since percent is selected you probably wanted to set seq(0,1,0.05) (or values between 0-1) instead of seq(0,100,5), since percent is not checked", type="warning")
                    }
                }
                if (citype == "se") {
                    command <- paste("roc.ci.obj <- ci(roc.obj, of='se', specificities=", civalues, cilevel, cimethod,", boot.n=", cibootn, ", boot.stratified=", cibootstratified,")", sep = "")
                    doItAndPrint(command)
                }
                if (citype == "sp") {
                    command <- paste("roc.ci.obj <- ci(roc.obj, of='sp', sensitivities=", civalues, cilevel, cimethod,", boot.n=", cibootn, ", boot.stratified=", cibootstratified,")", sep = "")
                    doItAndPrint(command)
                }
                if (citype == "auc") {
                    command <- paste("roc.ci.obj <- ci(roc.obj, of='auc'", cilevel, cimethod,", boot.n=", cibootn, ", boot.stratified=", cibootstratified,")", sep = "")
                    doItAndPrint(command)
                    doItAndPrint("roc.ci.obj")
                }
                command <- paste("plot(roc.ci.obj, type='", ciplottype, "', col='
                doItAndPrint(command)
            }
        }
        # Optionally overlay a smoothed ROC curve.
        if (smooth == "TRUE") {
            bandwidth = ""
            density = ""
            if (smoothingmethod == "density") {
                if (smoothinbandwidth == "numeric") {
                    bandwidth = paste(", bw=", bandwidthnumeric, "", sep="")
                } else {
                    bandwidth = paste(", bw='", smoothinbandwidth, "'", sep="")
                }
            }
            if (smoothingmethod == "fitdistr") {
                density = paste(", density.cases='", distributioncases, "', density.controls='", distributioncontrols, "'", sep="")
            }
            command <- paste("lines(smooth(roc.obj, method = '", smoothingmethod, "'", bandwidth, density, "), col='
            doItAndPrint(command)
        }
        # Optionally print the raw coordinate vectors of the curve.
        if (values == "TRUE") {
            doItAndPrint("roc.obj$sensitivities")
            doItAndPrint("roc.obj$specificities")
            doItAndPrint("roc.obj$thresholds")
        }
        # Clean up the temporary objects created in the user's workspace.
        command <- paste("remove(roc.obj)", sep = "")
        doItAndPrint(command)
        command <- paste("remove(glm.prediction)", sep = "")
        doItAndPrint(command)
        command <- paste("remove(.depname)", sep = "")
        doItAndPrint(command)
        command <- paste("remove(.outcome)", sep = "")
        doItAndPrint(command)
        if (ciplot == "TRUE") {
            command <- paste("remove(roc.ci.obj)", sep = "")
            doItAndPrint(command)
        }
        activateMenus()
        tkfocus(CommanderWindow())
    }
OKCancelHelp(helpSubject="plot.roc", reset = "fncLRpROC", apply="fncLRpROC")
tkgrid(directionrbFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(generaldataFrame , dataoptionsFrame, sticky = "nswe", padx=6, pady=6)
tkgrid(generalFrame, sticky = "we")
tkgrid(smoothingmethodrbFrame, sticky = "w", padx=6, pady=c(6, 6))
tkgrid(smoothinbandwidthrbFrame, sticky = "w", padx=6, pady=c(6, 0))
tkgrid(labelRcmdr(smoothingdensityFrame, text = gettext("Numeric bandwidth", domain="R-RcmdrPlugin.ROC")), bandwidthnumericEntry, sticky = "ew", padx=6, pady=c(6, 0))
tkgrid(labelRcmdr(smoothingdensityFrame, text =""), bandwidthnumericScroll, sticky = "ew", padx=6)
tkgrid(labelRcmdr(smoothingdensityFrame, text = gettext("Adjustment", domain="R-RcmdrPlugin.ROC")), bandwidthadjustmentEntry, sticky = "ew", padx=6, pady=c(6, 0))
tkgrid(bandwidthwindowrbFrame, sticky = "w", padx=6, pady=c(6, 6))
tkgrid(distributioncontrolsrbFrame, sticky = "w", padx=6, pady=c(6, 6))
tkgrid(distributioncasesrbFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(smoothinggeneralFrame, sticky = "w")
tkgrid(smoothingdensityFrame, sticky = "w")
tkgrid(smoothingleftpaneFrame , smoothingdistributionFrame, sticky = "nswe", padx=6, pady=6)
tkgrid(smoothingFrame, sticky = "we")
tkgrid(labelRcmdr(cigeneralFrame, text = gettext("Confidence level", domain="R-RcmdrPlugin.ROC")), cilevelEntry, sticky = "ew", padx=6)
tkgrid(cimethodrbFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(cityperbFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(cithresholdsrbFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(labelRcmdr(cigeneralFrame, text = gettext("Values (Se/Sp/Custom thres.)", domain="R-RcmdrPlugin.ROC")), civaluesEntry, sticky = "ew", padx=6, pady=c(0, 6))
tkgrid(labelRcmdr(cigeneralFrame, text =""), civaluesScroll, sticky = "ew", padx=6, pady=c(0, 6))
tkgrid(cigeneralFrame , cibootstrapFrame, sticky = "nswe", padx=6, pady=6)
tkgrid(ciFrame, sticky = "we")
tkgrid(partialfocusFrame, sticky = "w", padx=6, pady=c(6, 6))
tkgrid(generalaucFrame , partialaucFrame, sticky = "nswe", padx=6, pady=6)
tkgrid(aucFrame, sticky = "we")
tkgrid(optionsFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(aucpolygonFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(printthresrbFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(labelRcmdr(informationFrame, text = gettext("Custom threshold", domain="R-RcmdrPlugin.ROC")), customthresEntry, sticky = "ew", padx=6)
tkgrid(labelRcmdr(informationFrame, text =""), customthresScroll, sticky = "ew", padx=6)
tkgrid(informationFrame, sticky = "w", padx=6, pady=c(0, 6))
tkgrid(ciplottyperbFrame, sticky = "w", padx=6, pady=c(6, 6))
tkgrid(getFrame(colorrocBox), sticky = "w", padx=6, pady=c(6, 0))
tkgrid(getFrame(ltyrocBox), sticky = "w", padx=6, pady=c(6, 18))
tkgrid(optFrame , parFrame, sticky = "nswe", padx=6, pady=6)
tkgrid(optionsParFrame, sticky = "we")
tkgrid(ttklabel(dataTab, text=""))
tkgrid(ttklabel(dataTab, text=""))
tkgrid(labelRcmdr(top, text = " "), padx=6)
dialogSuffix(use.tabs=TRUE, grid.buttons=TRUE, tabs=c("dataTab", "smoothingTab", "aucTab", "ciTab", "optionsTab"),
tab.names=c("General", "Smoothing", "AUC", "CI", "Plot"))
} |
d_vMF <- function(x, mu, kappa, log = FALSE) {
  # Density of the von Mises-Fisher distribution, evaluated at the rows of x.
  #
  # x:     matrix of unit row vectors (a bare vector is promoted to one row).
  # mu:    unit mean-direction vector with length(mu) == ncol(x).
  # kappa: concentration parameter, passed through to c_vMF.
  # log:   if TRUE, return the log-density.
  #
  # Returns the (log-)density values, one per row of x.
  if (is.null(dim(x))) x <- rbind(x)
  # Normalize inputs onto the unit sphere (check_unit_norm warns if needed).
  x <- check_unit_norm(x = x, warnings = TRUE)
  mu <- check_unit_norm(x = mu, warnings = TRUE)
  p <- ncol(x)
  if (length(mu) != p) {
    stop("x and mu do not have the same dimension.")
  }
  # log-density = log normalizing constant + kappa * <x, mu>.
  log_dens <- kappa * (x %*% mu) + c_vMF(p = p, kappa = kappa, log = TRUE)
  if (log) log_dens else exp(log_dens)
}
c_vMF <- function(p, kappa, log = FALSE) {
  # Normalizing constant of the von Mises-Fisher density on S^{p-1}.
  #
  # p:     dimension of the ambient space.
  # kappa: vector of non-negative concentration parameters.
  # log:   if TRUE, return the log-constant.
  if (any(kappa < 0)) {
    stop("kappa must be non-negative.")
  }
  nu <- 0.5 * (p - 2)
  # Use the exponentially-scaled Bessel function to avoid overflow for
  # large kappa: log I_nu(kappa) = kappa + log(besselI(expon.scaled = TRUE)).
  log_c_vMF <- nu * log(kappa) - (0.5 * p) * log(2 * pi) -
    kappa - log(besselI(nu = nu, x = kappa, expon.scaled = TRUE))
  # kappa = 0 reduces to the uniform density, i.e. 1 / surface area of S^{p-1}.
  log_c_vMF[kappa == 0] <- -w_p(p = p, log = TRUE)
  switch(log + 1, exp(log_c_vMF), log_c_vMF)
}
r_vMF <- function(n, mu, kappa) {
  # Simulate n draws from the von Mises-Fisher distribution with mean
  # direction mu and concentration kappa.
  mu <- check_unit_norm(x = mu, warnings = TRUE)
  if (kappa < 0) {
    stop("kappa must be non-negative.")
  }
  p <- length(mu)
  # kappa = 0: the vMF law degenerates to the uniform law on the sphere.
  if (kappa == 0) {
    return(r_unif_sphere(n = n, p = p))
  }
  # p = 1: the "sphere" is just {-1, 1}; sample the two atoms directly
  # with their vMF probabilities.
  if (p <= 1) {
    return(sample(x = c(-1, 1), size = n, replace = TRUE,
                  prob = d_vMF(x = cbind(c(-1, 1)), mu = mu, kappa = kappa)))
  }
  # General case: tangent-normal decomposition about theta = mu, drawing
  # the cosine component V and a uniform tangent direction U.
  r_V <- function(n) r_g_vMF(n = n, p = p, kappa = kappa)
  r_U <- function(n) r_unif_sphere(n = n, p = p - 1)
  r_tang_norm(n = n, theta = mu, r_V = r_V, r_U = r_U)
}
g_vMF <- function(t, p, kappa, scaled = TRUE, log = FALSE) {
  # Angular density g(t) of the cosine t = x'mu under a von Mises-Fisher
  # distribution on S^{p-1}; zero (-Inf on the log scale) outside [-1, 1].
  #
  # t:      numeric vector of evaluation points.
  # p:      dimension of the ambient space (forwarded to c_vMF).
  # kappa:  non-negative concentration parameter.
  # scaled: if TRUE, include the vMF log normalizing constant.
  # log:    if TRUE, return the log-density.
  if (kappa < 0) {
    stop("kappa must be non-negative.")
  }
  # Plain `if` instead of ifelse() on a scalar condition (ifelse is meant
  # for vectors); behavior is unchanged -- in particular c_vMF() is still
  # only evaluated when scaled = TRUE, matching ifelse()'s lazy `yes`.
  log_const <- if (scaled) c_vMF(p = p, kappa = kappa, log = TRUE) else 0
  g_c <- log_const + kappa * t
  # Outside the support the density vanishes.
  g_c[abs(t) > 1] <- -Inf
  return(switch(log + 1, exp(g_c), g_c))
}
r_g_vMF <- function(n, p, kappa) {
  # Sample n values of the cosine component for the tangent-normal
  # decomposition of the vMF distribution; the actual sampling is done in
  # compiled code (.r_g_vMF_Cpp). Error messages are kept verbatim.
  if (n < 1) stop("n has to be an integer larger or equal to 1")
  if (p < 1) stop("p has to be an integer larger or equal to 1")
  if (kappa < 0) stop("kappa has to be non-negative")
  .r_g_vMF_Cpp(n = n, p = p, kappa = kappa)
}
quadtrafo <- function(e, f=NULL, g=NULL, h=NULL)
{
    # Map 4-part compositions (rows summing to 1) onto the 3D coordinates
    # of a regular tetrahedron for plotting.
    #
    # e:       matrix of compositions (one per row), a single composition
    #          vector, or the first of four coordinate vectors.
    # f, g, h: remaining coordinate vectors when given separately.
    #
    # Returns a matrix with columns "x", "y", "z".
    if (is.matrix(e)) {
        dat <- e
    } else if (is.null(f)) {
        # A single composition supplied as a plain vector: make it a row.
        dat <- t(e)
    } else {
        dat <- cbind(e, f, g, h)
    }
    # Tetrahedron corner coordinates, one column per output axis.
    simplex <- matrix(c(1, 0.5, 0.5, 0,
                        0, sqrt(3)/2, sqrt(3)/6, 0,
                        0, 0, sqrt(6)/3, 0), nrow = 4)
    out <- dat %*% simplex
    colnames(out) <- c("x", "y", "z")
    return(out)
}
quadlines <- function(e, f=NULL, g=NULL, h=NULL, sp, ...)
{
    # Draw connected line segments through the transformed simplex
    # coordinates on an existing scatterplot3d object `sp`.
    # Invisibly returns the transformed coordinates.
    xyz <- quadtrafo(e, f, g, h)
    sp$points3d(xyz[, 1], xyz[, 2], xyz[, 3], type = "l", ...)
    invisible(xyz)
}
quadpoints <- function(e, f=NULL, g=NULL, h=NULL, sp, ...)
{
    # Plot the transformed simplex coordinates as points on an existing
    # scatterplot3d object `sp`.
    # Invisibly returns the transformed coordinates.
    xyz <- quadtrafo(e, f, g, h)
    sp$points3d(xyz[, 1], xyz[, 2], xyz[, 3], type = "p", ...)
    invisible(xyz)
}
quadplot <- function(e=NULL, f=NULL, g=NULL, h=NULL, angle=75, scale.y=0.6,
                     label=1:4, labelcol=rainbow(4), labelpch=19,
                     labelcex=1.5, main="",
                     s3d.control = list(), simplex.control = list(),
                     legend.control = list(), ...)
{
    # 3D barycentric plot of 4-part compositions: draws the reference
    # tetrahedron via scatterplot3d, labels its corners, and plots the data.
    # Returns the scatterplot3d object so callers can add to the plot.
    corners <- quadtrafo(diag(4))
    if(!requireNamespace("scatterplot3d", quietly=TRUE)){
        message("Package 'scatterplot3d' is required for functionality from the quadplot function")
        return(NULL)
    }
    # Empty 3D canvas sized to the tetrahedron's bounding box.
    s3d <- do.call(scatterplot3d::scatterplot3d,
                   c(list(0.5, 0.2886751, 0.2041241, type="n",
                          xlim=range(corners[,1]), ylim=range(corners[,2]), zlim=range(corners[,3]),
                          axis = FALSE, grid=FALSE, angle = angle, scale.y = scale.y, main = main),
                     s3d.control)
    )
    # Reset the 2D user coordinates to the projected corners and push the
    # new "usr" into the closure used by s3d$points3d; par is restored on
    # exit, so statement order here is load-bearing.
    usr <- as.vector(sapply(s3d$xyz.convert(corners[-2,]), range))
    opar <- par(usr = usr, xpd = NA)
    assign("usr", usr, envir = environment(s3d$points3d))
    on.exit(par(opar))
    # Tetrahedron edges: the corner index path 1-2-3-4-1-3-2-4 traverses
    # all six edges.
    do.call("quadlines",
            c(list(e = diag(4)[c(1:4,1,3,2,4),], sp = s3d),
              simplex.control)
    )
    # Corner markers.
    # NOTE(review): unlike the other do.call()s, no *.control list is
    # appended here -- confirm whether a control argument was intended.
    do.call("quadpoints",
            c(list(e = diag(4), sp = s3d, pch = labelpch, col = labelcol, cex = labelcex)))
    do.call("legend",
            c(list(usr[1], usr[4], legend = label, col = labelcol, pch = labelpch, cex = labelcex),
              legend.control)
    )
    # Finally plot the data points, if any were supplied.
    if (!is.null(e))
        quadpoints(e, f, g, h, sp = s3d, ...)
    return(s3d)
}
# Guardrails for the `type` argument of the continuous/binned colour and
# fill scale constructors: invalid constructors must fail with an
# informative error.
test_that("type argument is checked for proper input", {
  # A function that does not return a scale object is rejected.
  expect_error(
    scale_colour_continuous(type = function() "abc"),
    "is not a scale function"
  )
  # Any non-scale function (here a geom constructor) is likewise rejected.
  expect_error(
    scale_fill_continuous(type = geom_point),
    "is not a scale function"
  )
  # A scale that does not cover the aesthetic being constructed is rejected,
  # and the error names the aesthetics the scale does support.
  expect_error(
    scale_colour_binned(type = function(...) scale_colour_binned(aesthetics = c("fill", "point_colour"))),
    "works with the following aesthetics: fill, point_colour"
  )
  # A discrete scale cannot back a binned (continuous) scale.
  expect_error(
    scale_fill_binned(type = scale_fill_brewer),
    "provided scale is discrete"
  )
})
setCosts = function(pred, costs) {
  # Re-threshold a classification Prediction using a misclassification-cost
  # matrix: each observation's response becomes the class with the lowest
  # expected cost given the predicted probabilities.
  #
  # pred:  a "Prediction" object with predict.type == "prob".
  # costs: square numeric matrix with row and column names equal to the
  #        class levels (presumably rows = true class, columns = predicted
  #        class, as in the binary threshold formula below -- confirm
  #        against the package documentation).
  # Returns `pred` with updated $data$response and $threshold.
  assertClass(pred, classes = "Prediction")
  assertMatrix(costs, any.missing = FALSE)
  td = pred$task.desc
  if (td$type != "classif") {
    stop("Costs can only be set for classification predictions!")
  }
  if (pred$predict.type != "prob") {
    stop("Costs can only be set for predict.type 'prob'!")
  }
  levs = td$class.levels
  # Validate dimensions and dimnames (skipped for a degenerate 0x0 matrix;
  # any(dim(costs)) is TRUE whenever either dimension is non-zero).
  if (any(dim(costs))) {
    if (any(dim(costs) != length(levs))) {
      stop("Dimensions of costs have to be the same as number of class levels!")
    }
    rns = rownames(costs)
    cns = colnames(costs)
    if (!setequal(rns, levs) || !setequal(cns, levs)) {
      stop("Row and column names of cost matrix have to equal class levels!")
    }
  }
  p = getPredictionProbabilities(pred, cl = levs)
  # Reorder the matrix to the canonical class-level order.
  costs = costs[levs, levs]
  # Vector recycling runs diag(costs) down each column, so each entry
  # becomes costs[i, j] - costs[i, i]: the diagonal is normalized to zero.
  costs = costs - diag(costs)
  # Expected cost per observation and class; the cheapest class wins
  # (negate so the max-index helper finds the minimum cost).
  p = as.matrix(p) %*% costs
  ind = getMaxIndexOfRows(-p)
  pred$data$response = factor(ind, levels = seq_along(levs), labels = levs)
  ncl = length(levs)
  if (ncl == 2L) {
    # Binary case: express the cost decision as an equivalent probability
    # threshold on the positive class.
    threshold = costs[td$negative, td$positive] / (costs[td$negative, td$positive] + costs[td$positive, td$negative])
    threshold = c(threshold, 1 - threshold)
    names(threshold) = c(td$positive, td$negative)
  } else {
    # Multiclass: a per-class threshold representation only exists when each
    # row holds at most one distinct off-diagonal cost value.
    u = apply(costs, 1, function(x) length(unique(x)))
    if (all(u < 3)) {
      threshold = (ncl - 1) / rowSums(costs)
      threshold = threshold / sum(threshold)
    } else {
      # No equivalent threshold representation; mark as unknown.
      threshold = rep_len(NA, ncl)
    }
    names(threshold) = levs
  }
  pred$threshold = threshold
  return(pred)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.