licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 477 | module Backends # begin submodule PredictMD.Backends
# Singleton "tag" types, one per optional backend.  They carry no data;
# they exist only so that PredictMD methods can dispatch on which backend
# should perform a given task.  The type names mirror the names of the
# corresponding backend packages.
import ..AbstractBackend
struct ClassImbalance <: AbstractBackend
end
struct DecisionTree <: AbstractBackend
end
struct Documenter <: AbstractBackend
end
struct Flux <: AbstractBackend
end
struct GLM <: AbstractBackend
end
struct Knet <: AbstractBackend
end
struct LIBSVM <: AbstractBackend
end
struct PGFPlots <: AbstractBackend
end
struct PGFPlotsX <: AbstractBackend
end
end # end submodule PredictMD.Backends
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 5865 | import StatsModels
# Sentinel singleton types.  Instances stand in for data that does not (yet)
# exist, so that estimator fields never have to hold `nothing`.
struct FeatureContrastsNotYetGenerated <: AbstractNonExistentFeatureContrasts # feature contrasts have not been generated yet
end
struct FitNotYetRunUnderlyingObject <: AbstractNonExistentUnderlyingObject # no fit has been run yet
end
struct FitFailedUnderlyingObject <: AbstractNonExistentUnderlyingObject # a fit was attempted but failed
end
"""
"""
mutable struct GLMModel <: AbstractEstimator
name::T1 where T1 <: AbstractString
isclassificationmodel::T2 where T2 <: Bool
isregressionmodel::T3 where T3 <: Bool
formula::T4 where T4 <: Any
family::T5 where T5 <: Any
link::T6 where T6 <: Any
# parameters (learned from data):
underlyingglm::T7 where T7 <: Any
end
"""
"""
mutable struct KnetModel <: AbstractEstimator
name::T1 where T1 <: AbstractString
isclassificationmodel::T2 where T2 <: Bool
isregressionmodel::T3 where T3 <: Bool
# hyperparameters (not learned from data):
predict_function_source::T4 where T4 <: AbstractString
loss_function_source::T5 where T5 <: AbstractString
predict_function::T6 where T6 <: Any
loss_function::T7 where T7 <: Any
losshyperparameters::T8 where T8 <: AbstractDict
optimizationalgorithm::T9 where T9 <: Symbol
optimizerhyperparameters::T10 where T10 <: AbstractDict
minibatchsize::T11 where T11 <: Integer
maxepochs::T12 where T12 <: Integer
printlosseverynepochs::T13 where T13 <: Integer
# parameters (learned from data):
modelweights::T14 where T14 <: AbstractArray
modelweightoptimizers::T15 where T15 <: Any
# learning state
history::T16 where T16 <: Any
end
"""
"""
mutable struct SimplePipeline{S<:AbstractString, T<:AbstractVector} <: AbstractPipeline
name::S
objectsvector::T
end
struct PGFPlotsXPlot <: AbstractPlot
underlying_object::T where T <: Any
end
"""
"""
struct ImmutablePackageMultiLabelPredictionTransformer <: AbstractEstimator
label_names::T1 where T1 <: AbstractVector{Symbol}
end
"""
"""
struct ImmutablePackageSingleLabelPredictionTransformer <: AbstractEstimator
single_label_name::T1 where T1 <: Symbol
end
"""
"""
struct ImmutablePackageSingleLabelPredictProbaTransformer <:
AbstractEstimator
single_label_name::T1 where T1 <: Symbol
end
"""
"""
struct ImmutablePredictionsSingleLabelInt2StringTransformer <:
AbstractEstimator
index::T1 where T1 <: Integer
levels::T2 where T2 <: AbstractVector
end
"""
"""
struct ImmutablePredictProbaSingleLabelInt2StringTransformer <:
AbstractEstimator
index::T1 where T1 <: Integer
levels::T2 where T2 <: AbstractVector
end
"""
"""
struct DataFrameFeatureContrasts <: AbstractFeatureContrasts
columns::T1 where T1 <: AbstractVector{Symbol}
num_df_columns::T2 where T2 <: Integer
schema_without_intercept::T3 where T3 <: StatsModels.Schema
formula_without_intercept::T4 where T4 <: StatsModels.AbstractTerm
num_array_columns_without_intercept::T5 where T5 <: Integer
schema_with_intercept::T6 where T6 <: StatsModels.Schema
formula_with_intercept::T7 where T7 <: StatsModels.AbstractTerm
num_array_columns_with_intercept::T8 where T8 <: Integer
end
"""
"""
mutable struct MutableDataFrame2DecisionTreeTransformer <: AbstractEstimator
feature_names::T1 where T1 <: AbstractVector
single_label_name::T2 where T2 <: Symbol
levels::T3 where T3 <: AbstractVector
dffeaturecontrasts::T4 where T4 <: AbstractFeatureContrasts
end
"""
"""
struct ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer <:
AbstractEstimator
label::T1 where T1 <: Symbol
positive_class::T2 where T2 <: AbstractString
end
"""
"""
mutable struct MutableDataFrame2ClassificationKnetTransformer <:
AbstractEstimator
feature_names::T1 where T1 <: AbstractVector
label_names::T2 where T2 <: AbstractVector{Symbol}
label_levels::T3 where T3 <: AbstractDict
index::T4 where T4 <: Integer
transposefeatures::T5 where T5 <: Bool
transposelabels::T6 where T6 <: Bool
dffeaturecontrasts::T7 where T7 <: AbstractFeatureContrasts
end
"""
"""
mutable struct MutableDataFrame2RegressionKnetTransformer <:
AbstractEstimator
feature_names::T1 where T1 <: AbstractVector
label_names::T2 where T2 <: AbstractVector{Symbol}
transposefeatures::T3 where T3 <: Bool
transposelabels::T4 where T4 <: Bool
dffeaturecontrasts::T5 where T5 <: AbstractFeatureContrasts
function MutableDataFrame2RegressionKnetTransformer(
feature_names::AbstractVector,
label_names::AbstractVector{Symbol};
transposefeatures::Bool = true,
transposelabels::Bool = false,
)
result = new(
feature_names,
label_names,
transposefeatures,
transposelabels,
)
return result
end
end
"""
"""
struct ImmutableFeatureArrayTransposerTransformer <: AbstractEstimator
end
"""
"""
mutable struct LIBSVMModel <: AbstractEstimator
name::T1 where T1 <: AbstractString
isclassificationmodel::T2 where T2 <: Bool
isregressionmodel::T3 where T3 <: Bool
levels::T4 where T4 <: AbstractVector
# hyperparameters (not learned from data):
hyperparameters::T5 where T5 <: AbstractDict
# parameters (learned from data):
underlyingsvm::T6 where T6 <: Any
end
"""
"""
mutable struct DecisionTreeModel <:
AbstractEstimator
name::T1 where T1 <: AbstractString
isclassificationmodel::T2 where T2 <: Bool
isregressionmodel::T3 where T3 <: Bool
single_label_name::T4 where T4 <: Symbol
levels::T5 where T5 <: AbstractVector
# hyperparameters (not learned from data):
hyperparameters::T6 where T6 <: AbstractDict
# parameters (learned from data):
underlyingrandomforest::T7 where T7 <: Any
end
"""
"""
struct CrossValidation{T}
leavein::Vector{CrossValidation{T}}
leaveout::Vector{Vector{T}}
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 464 | import PredictMDAPI
# Fallback (no-op) implementations of the PredictMDAPI interface.  Concrete
# fittable types override these; anything that does not gets a harmless
# default that returns `nothing`.
PredictMDAPI.get_history(::AbstractFittable; kwargs...)::Nothing = nothing # no training history by default
PredictMDAPI.get_underlying(::AbstractFittable; kwargs...)::Nothing = nothing # no underlying object by default
PredictMDAPI.parse_functions!(::AbstractFittable)::Nothing = nothing # nothing to parse by default
PredictMDAPI.set_feature_contrasts!(::AbstractFittable,
    ::AbstractFeatureContrasts)::Nothing = nothing # ignore contrasts by default
PredictMDAPI.set_max_epochs!(::AbstractFittable,::Integer)::Nothing = nothing # ignore epoch limit by default
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 391 | """
"""
module Cleaning # begin submodule PredictMD.Cleaning
############################################################################
# PredictMD.Cleaning source files #############################################
############################################################################
# submodules/Cleaning/hcup/
include("hcup/hcup.jl") # HCUP NIS data-cleaning utilities
end # end submodule PredictMD.Cleaning
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 14660 | # import CSV
import CSVFiles
import DataFrames
import FileIO
# import selected names from PredictMD
import ..convert_value_to_missing!
import ..filename_extension
import ..fix_type
import ..is_nothing
"""
"""
function x_contains_y(
x::AbstractString,
y::AbstractVector{<:AbstractString},
)
if length(y) == 0
return false
end
for i = 1:length(y)
if occursin(y[i], x)
return true
end
end
return false
end
"""
"""
function symbol_begins_with(
x::Symbol,
y::AbstractString
)
if length(y) <= length(string(x)) && string(x)[1:length(y)] == y
return true
else
return false
end
return nothing
end
"""
Given a dataframe, return the column names corresponding to CCS "one-hot"
columns.
# Examples
```julia
import CSVFiles
import FileIO
import PredictMD
df = DataFrames.DataFrame(
FileIO.load(
MY_CSV_FILE_NAME;
type_detect_rows = 30_000,
)
)
@info(PredictMD.Cleaning.ccs_onehot_names(df))
@info(PredictMD.Cleaning.ccs_onehot_names(df, "ccs_onehot_"))
```
"""
function ccs_onehot_names(
df::DataFrames.AbstractDataFrame,
ccs_onehot_prefix::AbstractString = "ccs_onehot_",
)
result = column_names_with_prefix(
df,
ccs_onehot_prefix,
)
return result
end
"""
"""
function column_names_with_prefix(
df::DataFrames.AbstractDataFrame,
prefix::AbstractString,
)
all_names = DataFrames.names(df)
name_begins_with_prefix = Vector{Bool}(length(all_names))
for j = 1:length(all_names)
name_begins_with_prefix[j] = symbol_begins_with(
all_names[j],
prefix,
)
end
vector_of_matching_names = all_names[name_begins_with_prefix]
return vector_of_matching_names
end
"""
Given a single ICD 9 code, import the relevant patients from the
Health Care Utilization Project (HCUP) National Inpatient Sample (NIS)
database.
# Examples:
```julia
import CSVFiles
import FileIO
import PredictMD
icd_code_list = ["8841"]
icd_code_type=:procedure
input_file_name_list = [
"./data/nis_2012_core.csv",
"./data/nis_2013_core.csv",
"./data/nis_2014_core.csv",
]
output_file_name = "./output/hcup_nis_pr_8841.csv"
PredictMD.Cleaning.clean_hcup_nis_csv_icd9(
icd_code_list,
input_file_name_list,
output_file_name;
icd_code_type=icd_code_type,
rows_for_type_detect = 30_000,
)
df = DataFrames.DataFrame(
FileIO.load(
output_file_name;
type_detect_rows = 30_000,
)
)
@info(PredictMD.Cleaning.ccs_onehot_names(df))
```
"""
function clean_hcup_nis_csv_icd9(
        icd_code_list::AbstractVector{<:AbstractString},
        input_file_name_list::AbstractVector{<:AbstractString},
        output_file_name::AbstractString;
        header_row::Bool = true,
        print_every_n_lines::Integer = 1_000_000,
        icd_code_type::Union{Nothing, Symbol} = nothing,
        num_dx_columns::Integer = 25,
        num_pr_columns::Integer = 15,
        ccs_onehot_prefix::AbstractString = "ccs_onehot_",
        rows_for_type_detect::Union{Nothing, Integer} = nothing,
        )
    # ----- argument validation ---------------------------------------------
    if is_nothing(rows_for_type_detect)
        error("you need to specify rows_for_type_detect")
    end
    if rows_for_type_detect <= 0
        error("rows_for_type_detect must be > 0")
    end
    if is_nothing(icd_code_type)
        error("you need to specify icd_code_type")
    end
    if icd_code_type != :diagnosis && icd_code_type != :procedure
        error("\"icd_code_type\" must be one of: :diagnosis, :procedure")
    end
    if length(input_file_name_list) == 0
        error("length(input_file_name_list) == 0")
    end
    input_file_name_list = strip.(input_file_name_list)
    for i = 1:length(input_file_name_list)
        if filename_extension(input_file_name_list[i]) != ".csv"
            error("all input files must be .csv")
        end
    end
    # BUGFIX: `strip.` was broadcast over a single string; plain `strip` is
    # what is intended.
    output_file_name = strip(output_file_name)
    if filename_extension(output_file_name) != ".csv"
        error("output file must be .csv")
    end
    if ispath(output_file_name)
        error(
            string(
                "Output file already exists. ",
                "Rename, move, or delete the file, and then try again.",
            )
        )
    end
    # ----- pass 1: cheap text pre-filter of each input CSV ------------------
    # BUGFIX (here and below): uninitialized-array constructors need `undef`
    # on Julia >= 1.0; the 0.6-style `Vector{T}(n)` form is a MethodError.
    temp_file_name_vector = Vector{String}(undef, length(input_file_name_list))
    for i = 1:length(input_file_name_list)
        temp_file_name_vector[i] = string(tempname(), "_", i, ".csv")
    end
    icd_code_list = strip.(icd_code_list)
    for i = 1:length(input_file_name_list)
        if ispath(temp_file_name_vector[i])
            error("ispath(temp_file_name_vector[i])")
        end
        @info(
            string(
                "Starting to read input file ",
                i,
                " of ",
                length(input_file_name_list),
                ".",
            )
        )
        open(input_file_name_list[i], "r") do f_input
            open(temp_file_name_vector[i], "w") do f_temp_output
                line_number = 1
                for line in eachline(f_input)
                    # Keep the header row plus any line that textually
                    # contains one of the requested ICD-9 codes; exact
                    # column-level matching happens in pass 2.
                    if line_number == 1 && header_row
                        write(f_temp_output, line)
                        write(f_temp_output, "\n")
                    elseif x_contains_y(line, icd_code_list)
                        write(f_temp_output, line)
                        write(f_temp_output, "\n")
                    end
                    line_number += 1
                    if (print_every_n_lines >= 0) &&
                            (line_number % print_every_n_lines == 0)
                        @info(
                            string(
                                "Input file ",
                                i,
                                " of ",
                                length(input_file_name_list),
                                ". Current line number: ",
                                line_number,
                            )
                        )
                    end
                end
            end
        end
        @info(
            string(
                "Finished reading input file ",
                i,
                " of ",
                length(input_file_name_list),
                ".",
            )
        )
    end
    # ----- load the filtered temporary files as dataframes ------------------
    df_vector = Vector{DataFrames.DataFrame}(undef, length(input_file_name_list))
    for i = 1:length(temp_file_name_vector)
        @info(
            string(
                "Starting to read temporary file ",
                i,
                " of ",
                length(input_file_name_list),
                ".",
            )
        )
        # We can't use DataFrames.readtable because it is deprecated.
        df_vector[i] = DataFrames.DataFrame(
            FileIO.load(
                temp_file_name_vector[i];
                type_detect_rows = rows_for_type_detect,
            )
        )
        @info(
            string(
                "Finished reading temporary file ",
                i,
                " of ",
                length(input_file_name_list),
                ".",
            )
        )
    end
    for i = 1:length(temp_file_name_vector)
        Base.Filesystem.rm(
            temp_file_name_vector[i];
            force = true,
            recursive = true,
        )
    end
    # Keep only the columns common to every input file, then stack them.
    all_column_names_vectors = [
        DataFrames.names(df) for df in df_vector
    ]
    shared_column_names = intersect(all_column_names_vectors...)
    for i = 1:length(df_vector)
        extra_column_names = setdiff(
            names(df_vector[i]),
            shared_column_names,
        )
        DataFrames.deletecols!(df_vector[i], extra_column_names,)
    end
    combined_df = vcat(df_vector...)
    for i = 1:length(df_vector)
        df_vector[i] = DataFrames.DataFrame() # release per-file memory
    end
    # ----- pass 2: exact matching against the DX/PR columns -----------------
    if icd_code_type == :diagnosis
        icd_code_column_names = Symbol[
            Symbol( string("DX", j) ) for j = 1:num_dx_columns
        ]
    else # :procedure (the only other value; validated above)
        icd_code_column_names = Symbol[
            Symbol( string("PR", j) ) for j = 1:num_pr_columns
        ]
    end
    row_i_has_kth_icd_code_matrix = Matrix{Bool}(
        undef,
        size(combined_df, 1),
        length(icd_code_list),
    )
    for k = 1:length(icd_code_list)
        current_icd_code = icd_code_list[k]
        row_i_has_current_icd_code_in_col_j_matrix =
            Matrix{Bool}(
                undef,
                size(combined_df, 1),
                length(icd_code_column_names),
            )
        for j = 1:length(icd_code_column_names)
            @info(
                string(
                    "icd9 code ",
                    k,
                    " of ",
                    length(icd_code_list),
                    ". DX column ",
                    j,
                    " of ",
                    length(icd_code_column_names),
                    ".",
                )
            )
            for i = 1:size(combined_df, 1)
                cell_value = combined_df[i, icd_code_column_names[j]]
                if DataFrames.ismissing(cell_value)
                    row_i_has_current_icd_code_in_col_j_matrix[i, j] = false
                else
                    cell_value = strip(string(cell_value))
                    row_i_has_current_icd_code_in_col_j_matrix[i, j] =
                        cell_value == current_icd_code
                end
            end
        end
        # BUGFIX: `sum(A, 2)` is Julia 0.6 syntax; Julia >= 1.0 requires the
        # `dims` keyword.
        row_i_has_current_icd_code_in_any_icdcode_column = vec(
            sum(row_i_has_current_icd_code_in_col_j_matrix; dims = 2) .> 0
        )
        row_i_has_kth_icd_code_matrix[:, k] =
            row_i_has_current_icd_code_in_any_icdcode_column
    end
    matching_rows = findall(
        Bool.(vec(sum(row_i_has_kth_icd_code_matrix; dims = 2) .> 0))
    )
    num_rows_before = size(combined_df, 1)
    combined_df = combined_df[matching_rows, :]
    num_rows_after = size(combined_df, 1)
    @info(
        string(
            "I initially identified ",
            num_rows_before,
            " rows that could possibly have matched your ICD code(s).",
            " I checked each row, and ",
            num_rows_after,
            " of those rows actually matched your ICD code(s).",
            "I removed the ",
            num_rows_before - num_rows_after,
            " rows that did not match.",
        )
    )
    # ----- build CCS one-hot columns from the V-code diagnoses --------------
    dx_column_names = [Symbol(string("DX", i)) for i = 1:num_dx_columns]
    dx_ccs_column_names =
        [Symbol(string("DXCCS", i)) for i = 1:num_dx_columns]
    index_to_ccs = strip.(
        string.(
            unique(
                DataFrames.skipmissing(
                    vcat(
                        [combined_df[:, col] for
                            col in dx_ccs_column_names]...
                    )
                )
            )
        )
    )
    index_to_ccs = index_to_ccs[findall(index_to_ccs .!= "")]
    index_to_ccs = unique(index_to_ccs)
    # Sort the CCS codes numerically, then go back to strings for naming.
    index_to_ccs = parse.(Int, index_to_ccs)
    sort!(index_to_ccs)
    index_to_ccs = string.(index_to_ccs)
    ccs_to_index = Dict()
    for k = 1:length(index_to_ccs)
        ccs_to_index[ index_to_ccs[ k ] ] = k
    end
    ccs_to_index = fix_type(ccs_to_index)
    # BUGFIX: this matrix was previously allocated uninitialized, but only
    # the `true` entries are ever written below, so the remaining cells held
    # garbage memory.  Every cell must start out `false`.
    row_i_has_vcode_dx_in_kth_ccs = fill(
        false,
        size(combined_df, 1),
        length(index_to_ccs),
    )
    for j = 1:length(dx_column_names)
        @info(
            string(
                "Processing DXCCS column ",
                j,
                " of ",
                length(dx_column_names),
                ".",
            )
        )
        jth_dx_col_name = dx_column_names[j]
        jth_dx_ccs_col_name = dx_ccs_column_names[j]
        for i = 1:size(combined_df, 1)
            dx_value = combined_df[i, jth_dx_col_name]
            if !DataFrames.ismissing(dx_value)
                dx_value = strip(string(dx_value))
                # BUGFIX: the original also compared the first Char against
                # the String "V", which can never be equal; only the Char
                # comparison is meaningful.
                if length(dx_value) > 0 && dx_value[1] == 'V'
                    ccs_value = combined_df[i, jth_dx_ccs_col_name]
                    if DataFrames.ismissing(ccs_value)
                        # BUGFIX: was `error(error(...))` — the inner call
                        # threw first, gluing the message words together.
                        error(
                            string(
                                "dx value was not missing but ",
                                "ccs value was missing",
                            )
                        )
                    else
                        ccs_value = strip(string(ccs_value))
                        row_i_has_vcode_dx_in_kth_ccs[
                            i,
                            ccs_to_index[ccs_value]
                        ] = true
                    end
                end
            end
        end
    end
    for k = 1:length(index_to_ccs)
        kth_ccs = index_to_ccs[k]
        kth_ccs_onehot_column_name = Symbol(
            string(
                ccs_onehot_prefix,
                kth_ccs,
            )
        )
        temporary_column_ints = Int.(row_i_has_vcode_dx_in_kth_ccs[:, k])
        # Only create a one-hot column when at least one row is positive.
        if sum(temporary_column_ints) > 0
            temporary_column_strings =
                Vector{String}(undef, size(combined_df, 1))
            for i = 1:size(combined_df, 1)
                temporary_column_strings[i] =
                    temporary_column_ints[i] > 0 ? "Yes" : "No"
            end
            combined_df[kth_ccs_onehot_column_name] =
                temporary_column_strings
        end
    end
    # ----- recode HCUP sentinel values as missing and write the output ------
    convert_value_to_missing!(
        combined_df,
        "A",
        DataFrames.names(combined_df),
    )
    convert_value_to_missing!(
        combined_df,
        "C",
        DataFrames.names(combined_df),
    )
    convert_value_to_missing!(
        combined_df,
        -99,
        DataFrames.names(combined_df),
    )
    try
        mkpath(dirname(output_file_name))
    catch
    end
    @info(string("Attempting to write output file..."))
    FileIO.save(output_file_name, combined_df)
    @info(
        string(
            "Wrote ",
            size(combined_df, 1),
            " rows to output file: \"",
            output_file_name,
            "\"",
        )
    )
    return output_file_name
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 510 | """
"""
module Compilation # begin submodule PredictMD.Compilation
############################################################################
# PredictMD.Compilation source files #######################################
############################################################################
# submodules/Server/buildexecutable/
include("buildexecutable/buildexecutable.jl") # executable-building support
# submodules/Server/packagecompiler/
include("packagecompiler/packagecompiler.jl") # PackageCompiler support
end # end submodule PredictMD.Compilation
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 26 | # import BuildExecutable
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 48 | # import PackageCompiler
# import SnoopCompile
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 319 | """
"""
module GPU # begin submodule PredictMD.GPU
############################################################################
# PredictMD.GPU source files ###############################################
############################################################################
# Currently an empty placeholder module: no GPU source files are included.
end # end submodule PredictMD.GPU
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 675 | """
"""
module Server # begin submodule PredictMD.Server
############################################################################
# PredictMD.Server source files ############################################
############################################################################
# Most server integrations are currently disabled (commented out); only the
# HTTP integration is included.
# submodules/Server/cryptography/
# include("cryptography/mbedtls.jl")
# include("cryptography/transportlayersecurity.jl")
# submodules/Server/web/
# include("web/genie.jl")
include("web/http.jl")
# include("web/httpclient.jl")
# include("web/httpserver.jl")
# include("web/juliawebapi.jl")
# include("web/mux.jl")
# include("web/websockets.jl")
end # end submodule PredictMD.Server
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 33 | # import TransportLayerSecurity
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 51 | # import Flax
# import Genie
# import SearchLight
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 3487 | import ClassImbalance
import DataFrames
import Random
"""
"""
function calculate_smote_pct_under(
;
pct_over::Real = 0,
minority_to_majority_ratio::Real = 0,
)
if pct_over < 0
error("pct_over must be >=0")
end
if minority_to_majority_ratio <= 0
error("minority_to_majority_ratio must be >0")
end
result = 100*minority_to_majority_ratio*(100+pct_over)/pct_over
return result
end
"""
"""
function smote(
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
feature_names::AbstractVector{Symbol},
label_name::Symbol;
majorityclass::AbstractString = "",
minorityclass::AbstractString = "",
pct_over::Real = 0,
minority_to_majority_ratio::Real = 0,
k::Integer = 5,
)
result = smote(
Random.GLOBAL_RNG,
features_df,
labels_df,
feature_names,
label_name,
majorityclass = majorityclass,
minorityclass = minorityclass,
pct_over = pct_over,
minority_to_majority_ratio = minority_to_majority_ratio,
k = k,
)
return result
end
"""
"""
function smote(
rng::Random.AbstractRNG,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
feature_names::AbstractVector{Symbol},
label_name::Symbol;
majorityclass::AbstractString = "",
minorityclass::AbstractString = "",
pct_over::Real = 0,
minority_to_majority_ratio::Real = 0,
k::Integer = 5,
)
if majorityclass == ""
error("you need to specify majorityclass")
end
if minorityclass == ""
error("you need to specify minorityclass")
end
pct_under = calculate_smote_pct_under(
;
pct_over = pct_over,
minority_to_majority_ratio = minority_to_majority_ratio,
)
if size(features_df, 1) != size(labels_df, 1)
error("size(features_df, 1) != size(labels_df, 1)")
end
if !(size(features_df, 1) > 0)
error("!(size(features_df, 1) > 0)")
end
labelsstringarray = labels_df[label_name]
labelsbinaryarray = fill(Int(0), length(labelsstringarray))
for i = 1:length(labelsstringarray)
# Paul's smote code assumes 1 = minority, 0 = majority
if labelsstringarray[i] == minorityclass
labelsbinaryarray[i] = 1
elseif labelsstringarray[i] == majorityclass
labelsbinaryarray[i] = 0
else
error("value in labels column is neither majority nor minority")
end
end
smotedfeatures_df, smotedlabelsbinaryarray = ClassImbalance.smote(
features_df[feature_names],
labelsbinaryarray;
k = k,
pct_over = pct_over,
pct_under = pct_under,
)
smotedlabelsstringarray = Array{String}(
undef,
length(smotedlabelsbinaryarray),
)
for i = 1:length(smotedlabelsbinaryarray)
if smotedlabelsbinaryarray[i] == 1
smotedlabelsstringarray[i] = minorityclass
elseif smotedlabelsbinaryarray[i] == 0
smotedlabelsstringarray[i] = majorityclass
else
error("if you see this error, you will be very sad.")
end
end
smotedlabels_df = DataFrames.DataFrame()
smotedlabels_df[label_name] = smotedlabelsstringarray
return smotedfeatures_df, smotedlabels_df
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 3586 | # function require_julia_version(varargs...)::VersionNumber
# current_julia_version = convert(VersionNumber, Base.VERSION)
# version_meets_requirements = does_given_version_meet_requirements(
# current_julia_version,
# collect(varargs),
# )
# if !version_meets_requirements
# error(
# string(
# "Current Julia version (",
# current_julia_version,
# ") does not match any of the ",
# "user-specified version intervals.",
# )
# )
# end
# return current_julia_version
# end
# function require_predictmd_version(varargs...)::VersionNumber
# current_predictmd_version = convert(VersionNumber, version())
# version_meets_requirements = does_given_version_meet_requirements(
# current_predictmd_version,
# collect(varargs),
# )
# if !version_meets_requirements
# error(
# string(
# "Current PredictMD version (",
# current_predictmd_version,
# ") does not match any of the ",
# "user-specified version intervals.",
# )
# )
# end
# return current_predictmd_version
# end
# function does_given_version_meet_requirements(
# given_version::VersionNumber,
# version_requirements::AbstractVector,
# )::Bool
# num_version_requirements = length(version_requirements)
# if num_version_requirements == 0
# answer = true
# else
# if iseven(num_version_requirements)
# num_intervals = Int((num_version_requirements)/(2))
# answer_for_each_interval = Vector{Bool}(undef, num_intervals)
# for interval = (1):(num_intervals)
# answer_for_each_interval[interval] =
# does_given_version_meet_requirements(
# given_version,
# version_requirements[2*interval - 1],
# version_requirements[2*interval],
# )
# end
# else
# num_intervals = Int((num_version_requirements+1)/(2))
# answer_for_each_interval = Vector{Bool}(undef, num_intervals)
# for interval = (1):(num_intervals - 1)
# answer_for_each_interval[interval] =
# does_given_version_meet_requirements(
# given_version,
# version_requirements[2*interval - 1],
# version_requirements[2*interval],
# )
# end
# answer_for_each_interval[num_intervals] =
# does_given_version_meet_requirements(
# given_version,
# version_requirements[2*num_intervals - 1],
# )
# end
# answer = any(answer_for_each_interval)
# end
# return answer
# end
# function does_given_version_meet_requirements(
# given_version::VersionNumber,
# min_version,
# )::Bool
# min_version = VersionNumber(min_version)
# answer = min_version <= given_version
# return answer
# end
# function does_given_version_meet_requirements(
# given_version::VersionNumber,
# min_version,
# max_version,
# )::Bool
# min_version = VersionNumber(min_version)
# max_version = VersionNumber(max_version)
# answer = min_version <= given_version < max_version
# return answer
# end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 27 | # import CSV
import FileIO
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 52 | import CSVFiles
import FileIO
import IterableTables
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 31 | import RData
import RDatasets
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2073 | function cp_files_and_directories(
        src::AbstractString,
        dst::AbstractString;
        overwrite::Bool,
        )::Nothing
    # Recursively mirror `src` into `dst`.  Directories are always created;
    # an existing destination file is replaced only when `overwrite` is true
    # (or when no file exists there yet).  Paths that are neither files nor
    # directories are silently skipped.
    if isdir(src)
        mkpath(dst)
        for entry in readdir(src)
            cp_files_and_directories(
                joinpath(src, entry),
                joinpath(dst, entry);
                overwrite = overwrite,
            )
        end
    elseif isfile(src)
        if overwrite || !ispath(dst)
            rm(dst; force = true, recursive = true)
            cp(src, dst)
        end
    end
    return nothing
end
# Copy files from the local CI cache directory into a working path.
# `from` gives path components relative to `cache`; `to` gives the
# destination path components.  Existing destination files are NOT
# overwritten (overwrite = false), so freshly generated files win over
# cached ones.  Both endpoints are created if absent.
function cache_to_path!(
        ;
        from::AbstractVector{<:AbstractString},
        to::AbstractVector{<:AbstractString},
        cache = joinpath(homedir(), "predictmd_cache_travis"),
        )::Nothing
    cache_path_src::String = joinpath(cache, from...)
    path_dst::String = joinpath(to...)
    mkpath(cache_path_src)
    mkpath(path_dst)
    @debug("cache_path_src: ", cache_path_src,)
    @debug("path_dst: ", path_dst,)
    cp_files_and_directories(
        cache_path_src,
        path_dst;
        overwrite = false,
    )
    return nothing
end
# Copy files from a working path back into the local CI cache directory.
# `from` gives the source path components; `to` gives path components
# relative to `cache`.  Existing cache files ARE overwritten
# (overwrite = true), so the cache always reflects the latest results.
function path_to_cache!(
        ;
        from::AbstractVector{<:AbstractString},
        to::AbstractVector{<:AbstractString},
        cache = joinpath(homedir(), "predictmd_cache_travis"),
        )::Nothing
    path_src::String = joinpath(from...)
    cache_path_dst::String = joinpath(cache, to...)
    mkpath(path_src)
    mkpath(cache_path_dst)
    @debug("path_src: ", path_src,)
    @debug("cache_path_dst: ", cache_path_dst,)
    cp_files_and_directories(
        path_src,
        cache_path_dst;
        overwrite = true,
    )
    return nothing
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 9701 | import Literate
# Preprocessing shared by every example-generation mode: replace the
# `## %PREDICTMD_GENERATED_BY%` placeholder line with a banner recording the
# PredictMD version (via `version()`, defined elsewhere in the package) and
# a link to the documentation site.
# `output_directory` is accepted for signature parity with the other
# preprocessors; it is not used here.
function preprocess_example_shared(
        content::AbstractString;
        output_directory::AbstractString,
        )::String
    content = replace(content,
        "## %PREDICTMD_GENERATED_BY%\n" =>
        string("## This file was generated by PredictMD ",
            "version $(version())",
            "\n",
            "## For help, please visit ",
            "https://predictmd.net",
            "\n",))
    return content
end
"""
    get_if_include_test_statements_regex()

Return the regular expression matching one
`# PREDICTMD IF INCLUDE TEST STATEMENTS` ... `# PREDICTMD ELSE` ...
`# PREDICTMD ENDIF INCLUDE TEST STATEMENTS` block: capture group 1 is the
"if" body, capture group 2 is the "else" body.
"""
function get_if_include_test_statements_regex()::Regex
    return r"# PREDICTMD IF INCLUDE TEST STATEMENTS\n([\S\s]*?)# PREDICTMD ELSE\n([\S\s]*?)# PREDICTMD ENDIF INCLUDE TEST STATEMENTS\n{0,5}"
end
# Produce the user-facing variant of an example file: within each
# `# PREDICTMD IF INCLUDE TEST STATEMENTS` block, keep only the ELSE branch
# (capture group 2) and drop the test-only branch.  Afterwards, strip the
# literal text "logger_stream, " — NOTE(review): presumably an argument used
# only by the test variant; confirm against the example templates.
function preprocess_example_do_not_include_test_statements(
        content::AbstractString;
        output_directory::AbstractString,
        )::String
    content::String = preprocess_example_shared(
        content;
        output_directory = output_directory,
    )
    test_statements_regex::Regex = get_if_include_test_statements_regex()
    for m in eachmatch(test_statements_regex, content)
        original_text::String = String(m.match)
        replacement_text = string(strip(String(m[2])), "\n\n",)
        content = replace(content, original_text => replacement_text)
    end
    content = replace(content, r"logger_stream, " => "")
    return content
end
# Produce the test variant of an example file: within each
# `# PREDICTMD IF INCLUDE TEST STATEMENTS` block, keep only the IF branch
# (capture group 1) and drop the ELSE branch.
function preprocess_example_include_test_statements(
        content::AbstractString;
        output_directory::AbstractString,
        )::String
    content::String = preprocess_example_shared(
        content;
        output_directory = output_directory,
    )
    test_statements_regex::Regex = get_if_include_test_statements_regex()
    for m in eachmatch(test_statements_regex, content)
        content = replace(content,
            String(m.match) =>
            string(strip(String(m[1])), "\n\n",))
    end
    return content
end
# In a generated Markdown file, downgrade Documenter `@example` code fences
# to plain `julia` fences.  Non-.md files are left untouched.
# Note: the file is deleted and rewritten in place.
function fix_example_blocks(filename::AbstractString)::Nothing
    if filename_extension(filename) == ".md"
        content = read(filename, String)
        rm(filename; force = true, recursive = true,)
        content = replace(content, r"```@example \w*\n" => "```julia\n")
        write(filename, content)
    end
    return nothing
end
"""
    generate_examples(output_directory; execute_notebooks = false,
                      markdown = false, notebooks = false, scripts = false,
                      include_test_statements = false)

Render the bundled CPU example templates ("boston_housing" and
"breast_cancer_biopsy") with Literate.jl into `output_directory`.

At least one of `markdown`, `notebooks`, `scripts` must be `true`, and
`output_directory` must not already exist. Notebook execution is disabled
on Windows. Returns `output_directory`.
"""
function generate_examples(
        output_directory::AbstractString;
        execute_notebooks = false,
        markdown = false,
        notebooks = false,
        scripts = false,
        include_test_statements::Bool = false,
        )::String
    _abspath_output_directory::String = abspath(output_directory)
    # Executing notebooks is not supported on Windows.
    if Sys.iswindows()
        execute_notebooks = false
    end
    ENV["PREDICTMD_IS_MAKE_EXAMPLES"] = "true"
    if !markdown && !notebooks && !scripts
        error(
            string(
                "At least one of markdown, notebooks, scripts must be true.",
                )
            )
    end
    if ispath(output_directory)
        error(
            string(
                "The output directory already exists. ",
                "Delete the output directory and then ",
                "re-run generate_examples."
                )
            )
    end
    # Pick the preprocessor variant depending on whether the generated
    # examples should retain their embedded test statements.
    if include_test_statements
        preprocess_example = (x) ->
            preprocess_example_include_test_statements(
                x;
                output_directory = _abspath_output_directory,
                )
    else
        preprocess_example = (x) ->
            preprocess_example_do_not_include_test_statements(
                x;
                output_directory = _abspath_output_directory,
                )
    end
    @debug("Starting to generate examples...")
    # Generate into a temporary tree first, then copy to the destination.
    temp_examples_dir = joinpath(
        maketempdir(),
        "generate_examples",
        "PredictMDTemp",
        "docs",
        "src",
        "examples",
        )
    try
        mkpath(temp_examples_dir)
    catch
    end
    examples_input_parent_directory =
        PredictMD.package_directory("templates", "examples")
    cpu_examples_input_parent_directory = joinpath(
        examples_input_parent_directory,
        "cpu_examples",
        )
    cpu_examples_output_parent_directory = joinpath(
        temp_examples_dir,
        "cpu_examples",
        )
    try
        mkpath(cpu_examples_output_parent_directory)
    catch
    end
    # Both CPU examples are processed identically; the per-example logic
    # lives in _generate_single_cpu_example below.
    for example_name in ("boston_housing", "breast_cancer_biopsy")
        _generate_single_cpu_example(
            example_name,
            cpu_examples_input_parent_directory,
            cpu_examples_output_parent_directory,
            preprocess_example;
            markdown = markdown,
            notebooks = notebooks,
            scripts = scripts,
            execute_notebooks = execute_notebooks,
            )
    end
    # Convert any remaining `@example` fences to plain `julia` fences.
    for (root, dirs, files) in walkdir(temp_examples_dir)
        for f in files
            filename = joinpath(root, f)
            fix_example_blocks(filename)
        end
    end
    try
        mkpath(dirname(output_directory))
    catch
    end
    cp(
        temp_examples_dir,
        output_directory;
        force = true,
        )
    @debug(
        string(
            "Finished generating examples. ",
            "Files were written to: \"",
            output_directory,
            "\".",
            )
        )
    ENV["PREDICTMD_IS_MAKE_EXAMPLES"] = "false"
    return output_directory
end

"""
    _generate_single_cpu_example(example_name, input_parent_directory,
        output_parent_directory, preprocess_example; markdown, notebooks,
        scripts, execute_notebooks)

Render one example template directory
(`input_parent_directory/example_name/src`) into
`output_parent_directory/example_name/src` with Literate.jl, and copy
the scaffolding files. Internal helper for `generate_examples`.
"""
function _generate_single_cpu_example(
        example_name::AbstractString,
        input_parent_directory::AbstractString,
        output_parent_directory::AbstractString,
        preprocess_example;
        markdown::Bool,
        notebooks::Bool,
        scripts::Bool,
        execute_notebooks::Bool,
        )::Nothing
    input_directory = joinpath(input_parent_directory, example_name)
    output_directory = joinpath(output_parent_directory, example_name)
    input_src_directory = joinpath(input_directory, "src")
    output_src_directory = joinpath(output_directory, "src")
    try
        mkpath(output_directory)
        mkpath(output_src_directory)
    catch
    end
    # Copy the project scaffolding files verbatim.
    for x in [".gitignore", "Project.toml", "README.md"]
        cp(joinpath(input_directory, x),
           joinpath(output_directory, x);
           force=true)
    end
    input_file_list = readdir(input_src_directory)
    filter!(x -> endswith(x, ".jl"), input_file_list)
    sort!(input_file_list)
    for input_file in input_file_list
        input_file_full_path = joinpath(
            input_src_directory,
            input_file,
            )
        if markdown
            Literate.markdown(
                input_file_full_path,
                output_src_directory;
                codefence = "```@example $(example_name)" => "```",
                documenter = true,
                preprocess = preprocess_example,
                )
        end
        if notebooks
            Literate.notebook(
                input_file_full_path,
                output_src_directory;
                documenter = true,
                execute = execute_notebooks,
                preprocess = preprocess_example,
                )
        end
        if scripts
            Literate.script(
                input_file_full_path,
                output_src_directory;
                documenter = true,
                keep_comments = true,
                preprocess = preprocess_example,
                )
        end
    end
    return nothing
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 3624 | import BSON
# import CSV
import CSVFiles
import DataFrames
import FileIO
import JLD2
import ProgressMeter
"""
"""
function save_model(
filename::AbstractString,
fittable_object_to_save::AbstractFittable,
)
if filename_extension(filename) == ".jld2"
save_result = save_model_jld2(filename,fittable_object_to_save)
elseif filename_extension(filename) == ".bson"
save_result = save_model_bson(filename,fittable_object_to_save)
else
error("extension must be one of: .jld2, .bson")
end
return filename
end
"""
    save_model_jld2(filename, fittable_object_to_save)

Write `fittable_object_to_save` to the JLD2 file `filename` under the key
`"jld2_saved_model"`, creating the parent directory if needed.
"""
function save_model_jld2(
        filename::AbstractString,
        fittable_object_to_save::AbstractFittable,
        )
    if filename_extension(filename) != ".jld2"
        error(
            string(
                "Filename \"",
                filename,
                "\" does not end in \".jld2\"")
            )
    end
    @debug("Attempting to save model...")
    # Make sure the parent directory exists before writing.
    try
        Base.Filesystem.mkpath(Base.Filesystem.dirname(filename))
    catch
    end
    # FileIO writes each dictionary entry as a named dataset.
    FileIO.save(filename, Dict("jld2_saved_model" => fittable_object_to_save))
    @debug(string("Saved model to file \"", filename, "\""))
    return filename
end
"""
    save_model_bson(filename, fittable_object_to_save)

Write `fittable_object_to_save` to the BSON file `filename` under the key
`:bson_saved_model`, creating the parent directory if needed.
"""
function save_model_bson(
        filename::AbstractString,
        fittable_object_to_save::AbstractFittable,
        )
    if filename_extension(filename) != ".bson"
        error(
            string(
                "Filename \"",
                filename,
                "\" does not end in \".bson\"")
            )
    end
    @debug("Attempting to save model...")
    # Make sure the parent directory exists before writing.
    try
        Base.Filesystem.mkpath(Base.Filesystem.dirname(filename))
    catch
    end
    BSON.bson(filename, Dict(:bson_saved_model => fittable_object_to_save))
    @debug(string("Saved model to file \"", filename, "\""))
    return filename
end
"""
"""
function load_model(filename::AbstractString)
if filename_extension(filename) == ".jld2"
load_result = load_model_jld2(filename)
return load_result
elseif filename_extension(filename) == ".bson"
load_result = load_model_bson(filename)
return load_result
else
error("extension must be one of: .jld2, .bson")
end
end
"""
    load_model_jld2(filename)

Load and return the object stored under key `"jld2_saved_model"` in the
JLD2 file `filename`.
"""
function load_model_jld2(filename::AbstractString)
    if filename_extension(filename) != ".jld2"
        error(
            string(
                "Filename \"",
                filename,
                "\" does not end in \".jld2\"")
            )
    end
    @debug("Attempting to load model...")
    loaded_fittable_object = FileIO.load(filename)["jld2_saved_model"]
    @debug(string("Loaded model from file \"", filename, "\""))
    return loaded_fittable_object
end
"""
    load_model_bson(filename)

Load and return the object stored under key `:bson_saved_model` in the
BSON file `filename`.
"""
function load_model_bson(filename::AbstractString)
    if filename_extension(filename) != ".bson"
        error(
            string(
                "Filename \"",
                filename,
                "\" does not end in \".bson\"")
            )
    end
    @debug("Attempting to load model...")
    loaded_fittable_object = BSON.load(filename)[:bson_saved_model]
    @debug(string("Loaded model from file \"", filename, "\""))
    return loaded_fittable_object
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 10536 | import DataFrames
import GLM
import StatsModels
"""
    GLMModel(formula, family, link; name = "", isclassificationmodel = false,
             isregressionmodel = false)

Construct an unfitted `GLMModel`. The underlying GLM object is the
placeholder `FitNotYetRunUnderlyingObject()` until `fit!` is run.
"""
function GLMModel(
        formula::StatsModels.AbstractTerm,
        family::GLM.Distribution,
        link::GLM.Link;
        name::AbstractString = "",
        isclassificationmodel::Bool = false,
        isregressionmodel::Bool = false,
        )
    # Placeholder; fit! replaces it with the fitted GLM (or a
    # FitFailedUnderlyingObject if fitting errors).
    underlyingglm = FitNotYetRunUnderlyingObject()
    result = GLMModel(
        name,
        isclassificationmodel,
        isregressionmodel,
        formula,
        family,
        link,
        underlyingglm,
        )
    return result
end
"""
"""
function get_underlying(x::GLMModel;
saving::Bool = false,
loading::Bool = false)
return x.underlyingglm
end
"""
"""
function fit!(
estimator::GLMModel,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
)
labelsandfeatures_df = hcat(labels_df, features_df)
@info(string("Starting to train GLM model."))
glm = try
GLM.glm(
estimator.formula,
labelsandfeatures_df,
estimator.family,
estimator.link,
)
catch e
@warn(
string(
"while training GLM model, ignored error: ",
e,
)
)
FitFailedUnderlyingObject()
end
# glm =
@info(string("Finished training GLM model."))
estimator.underlyingglm = glm
return estimator
end
"""
"""
function predict(
estimator::GLMModel,
features_df::DataFrames.AbstractDataFrame,
)
if estimator.isclassificationmodel && !estimator.isregressionmodel
probabilitiesassoc = predict_proba(
estimator,
features_df,
)
predictionsvector = single_labelprobabilitiestopredictions(
probabilitiesassoc
)
result = DataFrames.DataFrame()
label_name = estimator.formula.lhs.sym
result[label_name] = predictionsvector
return result
elseif !estimator.isclassificationmodel && estimator.isregressionmodel
if isa(estimator.underlyingglm, AbstractNonExistentUnderlyingObject)
glmpredictoutput = fill(Float64(0), size(features_df,1))
else
glmpredictoutput = GLM.predict(
estimator.underlyingglm,
features_df,
)
end
result = DataFrames.DataFrame()
label_name = estimator.formula.lhs.sym
result[label_name] = glmpredictoutput
return result
else
error(
"Could not figure out if model is classification or regression"
)
end
end
"""
"""
function predict(
estimator::GLMModel,
features_df::DataFrames.AbstractDataFrame,
positive_class::Integer,
threshold::AbstractFloat,
)
if estimator.isclassificationmodel && !estimator.isregressionmodel
probabilitiesassoc = predict_proba(
estimator,
features_df,
)
predictionsvector = single_labelprobabilitiestopredictions(
probabilitiesassoc,
positive_class,
threshold,
)
result = DataFrames.DataFrame()
label_name = estimator.formula.lhs.sym
result[label_name] = predictionsvector
return result
else
error(
"Can only use the `threshold` argument with classification models"
)
end
end
"""
"""
function predict_proba(
estimator::GLMModel,
features_df::DataFrames.AbstractDataFrame,
)
if estimator.isclassificationmodel && !estimator.isregressionmodel
if isa(estimator.underlyingglm, AbstractNonExistentUnderlyingObject)
glmpredictoutput = fill(Float64(0), size(features_df, 1))
else
glmpredictoutput = GLM.predict(
estimator.underlyingglm,
features_df,
)
end
result = Dict()
result[1] = glmpredictoutput
result[0] = 1 .- glmpredictoutput
result = fix_type(result)
return result
elseif !estimator.isclassificationmodel && estimator.isregressionmodel
error("predict_proba is not defined for regression models")
else
error(
"Could not figure out if model is classification or regression"
)
end
end
"""
"""
function singlelabelbinaryclassdataframelogisticclassifier_GLM(
feature_names::AbstractVector,
single_label_name::Symbol,
single_label_levels::AbstractVector;
intercept::Bool = true,
interactions::Integer = 1,
name::AbstractString = "",
)
negative_class = single_label_levels[1]
positive_class = single_label_levels[2]
formula = generate_formula(
[single_label_name],
feature_names;
intercept = intercept,
interactions = interactions,
)
dftransformer =
ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer(
single_label_name,
positive_class,
)
glmestimator = GLMModel(
formula,
GLM.Binomial(),
GLM.LogitLink();
isclassificationmodel = true,
isregressionmodel = false,
)
predictlabelfixer =
ImmutablePredictionsSingleLabelInt2StringTransformer(
0,
single_label_levels,
)
predprobalabelfixer =
ImmutablePredictProbaSingleLabelInt2StringTransformer(
0,
single_label_levels,
)
probapackager = ImmutablePackageSingleLabelPredictProbaTransformer(
single_label_name,
)
finalpipeline = dftransformer |>
glmestimator |>
predictlabelfixer |>
predprobalabelfixer |>
probapackager
finalpipeline.name = name
return finalpipeline
end
"""
"""
function singlelabelbinaryclassdataframelogisticclassifier(
feature_names::AbstractVector,
single_label_name::Symbol,
single_label_levels::AbstractVector;
package::Symbol = :none,
intercept::Bool = true,
interactions::Integer = 1,
name::AbstractString = "",
)
if package == :GLM
result = singlelabelbinaryclassdataframelogisticclassifier_GLM(
feature_names,
single_label_name,
single_label_levels;
intercept = intercept,
interactions = interactions,
name = name,
)
return result
else
error("$(package) is not a valid value for package")
end
end
"""
"""
# function singlelabelbinaryclassdataframeprobitclassifier_GLM(
# feature_names::AbstractVector,
# single_label_name::Symbol,
# single_label_levels::AbstractVector;
# intercept::Bool = true,
# interactions::Integer = 1,
# name::AbstractString = "",
# )
# negative_class = single_label_levels[1]
# positive_class = single_label_levels[2]
# formula = generate_formula(
# [single_label_name],
# feature_names;
# intercept = intercept,
# interactions = interactions,
# )
# dftransformer =
# ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer(
# single_label_name,
# positive_class,
# )
# glmestimator = GLMModel(
# formula,
# GLM.Binomial(),
# GLM.ProbitLink();
# isclassificationmodel = true,
# isregressionmodel = false,
# )
# predictlabelfixer =
# ImmutablePredictionsSingleLabelInt2StringTransformer(
# 0,
# single_label_levels,
# )
# predprobalabelfixer =
# ImmutablePredictProbaSingleLabelInt2StringTransformer(
# 0,
# single_label_levels,
# )
# probapackager = ImmutablePackageSingleLabelPredictProbaTransformer(
# single_label_name,
# )
# finalpipeline = dftransformer |>
# glmestimator |>
# predictlabelfixer |>
# predprobalabelfixer |>
# probapackager
# finalpipeline.name = name
# return finalpipeline
# end
"""
"""
# function singlelabelbinaryclassdataframeprobitclassifier(
# feature_names::AbstractVector,
# single_label_name::Symbol,
# single_label_levels::AbstractVector;
# package::Symbol = :none,
# intercept::Bool = true,
# interactions::Integer = 1,
# name::AbstractString = "",
# )
# if package == :GLM
# result = singlelabelbinaryclassdataframeprobitclassifier_GLM(
# feature_names,
# single_label_name,
# single_label_levels;
# intercept = intercept,
# interactions = interactions,
# name = name,
# )
# return result
# else
# error("$(package) is not a valid value for package")
# end
# end
"""
"""
function single_labeldataframelinearregression_GLM(
feature_names::AbstractVector,
single_label_name::Symbol;
intercept::Bool = true,
interactions::Integer = 1,
name::AbstractString = "",
)
formula = generate_formula(
[single_label_name],
feature_names;
intercept = intercept,
interactions = interactions,
)
glmestimator = GLMModel(
formula,
GLM.Normal(),
GLM.IdentityLink();
isclassificationmodel = false,
isregressionmodel = true,
)
finalpipeline = SimplePipeline(
AbstractFittable[
glmestimator,
];
name = name,
)
return finalpipeline
end
"""
"""
function single_labeldataframelinearregression(
feature_names::AbstractVector,
single_label_name::Symbol;
package::Symbol = :none,
intercept::Bool = true,
interactions::Integer = 1,
name::AbstractString = "",
)
if package == :GLM
result = single_labeldataframelinearregression_GLM(
feature_names,
single_label_name;
intercept = intercept,
interactions = interactions,
name = name,
)
return result
else
error("$(package) is not a valid value for package")
end
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1402 | import Statistics
"""
simple_linear_regression(x::AbstractVector, y::AbstractVector)
Simple linear regression - given a set of two-dimensional points (x, y), use
the ordinary least squares method to find the best fit line of the form
y = a + b*x (where a and b are real numbers) and return the tuple (a, b).
"""
function simple_linear_regression(
x::AbstractVector,
y::AbstractVector,
)::Tuple
if length(x) != length(y)
error("length(x) != length(y)")
end
if length(x) == 0
error("length(x) == 0")
end
x_bar = Statistics.mean(x)
y_bar = Statistics.mean(y)
var_x = Statistics.var(x)
cov_x_y = Statistics.cov(x,y)
@assert(isfinite(x_bar))
@assert(isfinite(y_bar))
@assert(isfinite(var_x))
@assert(isfinite(cov_x_y))
coefficient = cov_x_y/var_x
if isfinite(coefficient)
intercept = y_bar - coefficient*x_bar
@debug(
string("Found best fit line: "),
intercept,
coefficient,
)
else
@warn(
string(
"The best fit line does not have a finite slope. ",
"I will ignore this result and will instead return ",
"intercept = 0 and coefficient = 0",
)
)
intercept = 0
coefficient = 0
end
return intercept, coefficient
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 581 | import MLBase
import NumericalIntegration
import StatsBase
"""
"""
function auprc(
ytrue::AbstractVector{<:Integer},
yscore::AbstractVector{<:Real},
)
allprecisions, allrecalls, allthresholds = prcurve(
ytrue,
yscore,
)
#
permutation = sortperm(allthresholds; rev=true)
allprecisions = allprecisions[permutation]
allrecalls = allrecalls[permutation]
allthresholds = allthresholds[permutation]
#
x = allrecalls
y = allprecisions
areaunderprcurve = trapz(x, y)
return areaunderprcurve
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1456 | import MLBase
import ROCAnalysis
import StatsBase
# Trapezoidal-rule AUROC: integrate the true-positive rate over the
# false-positive rate, with curve points ordered by descending threshold.
function _aurocc_trapz(
        ytrue::AbstractVector{<:Integer},
        yscore::AbstractVector{<:Real},
        )
    fpr, tpr, thresholds = roccurve(
        ytrue,
        yscore,
        )
    order = sortperm(thresholds; rev=true)
    return trapz(fpr[order], tpr[order])
end
# Independent AUROC computation via ROCAnalysis, used to cross-check the
# trapezoidal estimate; the value from ROCAnalysis.auc is treated as the
# complement of the AUROC, so it is subtracted from 1.
function _aurocc_verify(
        ytrue::AbstractVector{<:Integer},
        yscore::AbstractVector{<:Real},
        )
    target_scores = yscore[ytrue .== 1]
    nontarget_scores = yscore[ytrue .== 0]
    roc_object = ROCAnalysis.roc(target_scores, nontarget_scores)
    return 1 - ROCAnalysis.auc(roc_object)
end
"""
"""
function aurocc(
ytrue::AbstractVector{<:Integer},
yscore::AbstractVector{<:Real},
)
aurocc_trapz_value = _aurocc_trapz(
ytrue,
yscore,
)
aurocc_verify_value = _aurocc_verify(
ytrue,
yscore,
)
if !( isapprox(aurocc_trapz_value, aurocc_verify_value; atol=0.00000001) )
error("Was not able to accurately compute the AUROCC.")
end
return aurocc_trapz_value
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1142 | import MLBase
import StatsBase
"""
"""
function avg_precision(
allprecisions::AbstractVector{<:Real},
allrecalls::AbstractVector{<:Real},
allthresholds::AbstractVector{<:Real},
)
if length(allprecisions) != length(allrecalls)
error("length(allprecisions) != length(allrecalls)")
end
if length(allprecisions) < 2
error("length(allprecisions) < 2")
end
#
permutation = sortperm(allthresholds; rev = true)
allprecisions = allprecisions[permutation]
allrecalls = allrecalls[permutation]
#
N = length(allprecisions)
result = 0
for k = 2:N
result += (allrecalls[k] - allrecalls[k-1]) * allprecisions[k]
end
return result
end
"""
"""
function averageprecisionscore(
ytrue::AbstractVector{<:Integer},
yscore::AbstractVector{<:Real},
)
allprecisions, allrecalls, allthresholds = prcurve(
ytrue,
yscore,
)
x = allrecalls
y = allprecisions
avgprecision = avg_precision(
allprecisions,
allrecalls,
allthresholds,
)
return avgprecision
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 587 | """
binary_brier_score(ytrue, yscore)
Computes the binary formulation of the Brier score, defined as:
```math
\\frac{1}{N}\\sum\\limits _{t=1}^{N}(f_t-o_t)^2 \\,\\!
```
Lower values are better. Best value is 0.
"""
function binary_brier_score(
        ytrue::AbstractVector{<:Integer},
        yscore::AbstractVector{<:AbstractFloat},
        )
    length(ytrue) == length(yscore) || error("length(ytrue) != length(yscore)")
    isempty(ytrue) && error("length(ytrue) == 0")
    # The binary Brier score is exactly the mean squared error of the
    # probabilistic scores against the 0/1 outcomes.
    return mean_square_error(ytrue, yscore)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 720 | import Statistics
import StatsBase
"""
r2_score(ytrue, ypred)
Computes coefficient of determination. Higher values are better. Best value
is 1.
"""
function r2_score(
ytrue::AbstractVector{<:Real},
ypred::AbstractVector{<:Real},
)
if length(ytrue) != length(ypred)
error("length(ytrue) != length(ypred)")
end
if length(ytrue) == 0
error("length(ytrue) == 0")
end
# ybar = mean of the true y values
ybar = Statistics.mean(ytrue)
# SStot = total sum of squares
SStot = sum( (ytrue .- ybar).^2 )
# SSres = sum of squares of residuals
residuals = ytrue .- ypred
SSres = sum( residuals.^2 )
R2 = 1 - SSres/SStot
return R2
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1922 | import LinearAlgebra
import MLBase
"""
"""
is_square(m::AbstractMatrix) = size(m, 1) == size(m, 2)
"""
"""
function cohen_kappa(contingency_table::AbstractMatrix)
if !is_square(contingency_table)
error("contingency_table must be a square matrix")
end
numclasses = size(contingency_table, 1)
if numclasses < 2
error("number of classes must be >= 2")
end
totalnumcases = sum(contingency_table)
# p_o = "relative observed agreement among raters (identical to accuracy)"
p_o = sum(LinearAlgebra.diag(contingency_table))/totalnumcases
# n_k1 = number of times rater 1 predicted class k
# n_k2 = number of times rater 2 predicted class k
# rater 1 = rows
# rater 2 = columns
n_k1 = [sum( contingency_table[k,:] ) for k = 1:numclasses]
n_k2 = [sum( contingency_table[:,k] ) for k = 1:numclasses]
# p_e = "hypothetical probability of chance agreement"
p_e = sum(n_k1 .* n_k2)/(totalnumcases^2)
kappa = (p_o - p_e)/(1 - p_e)
return kappa
end
"""
"""
function compute_contingency_table(y1::AbstractVector, y2::AbstractVector)
classes = sort(unique(vcat(y1, y2)); rev = false,)
numclasses = length(classes)
if numclasses < 2
error("number of classes must be >= 2")
end
return contingency_table
end
"""
"""
function cohen_kappa(y1::AbstractVector, y2::AbstractVector)
contingency_table = compute_contingency_table(y1, y2)
result = cohen_kappa(contingency_table)
return result
end
"""
"""
function compute_contingency_table(rocnums::MLBase.ROCNums)
# we will arbitrarily set rows = predicted, columns = true/gold
contingency_table = [rocnums.tp rocnums.fp; rocnums.fn rocnums.tp]
return contingency_table
end
"""
"""
function cohen_kappa(rocnums::MLBase.ROCNums)
contingency_table = compute_contingency_table(rocnums)
result = cohen_kappa(contingency_table)
return result
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 598 | import StatsBase
"""
"""
function get_binary_thresholds(
yscore::AbstractVector{<:Real};
additionalthreshold::AbstractFloat = 0.5,
)
if !all(0 .<= yscore .<= 1)
error("not all scores are in [0,1]")
end
result = sort(
unique(
vcat(
0 - eps(),
0,
0 + eps(),
1 - eps(),
1,
1 + eps(),
additionalthreshold,
unique(yscore),
)
);
rev = false,
)
return result
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 911 | import Statistics
"""
mean_square_error(ytrue, ypred)
"""
function mean_square_error(
ytrue::AbstractVector{<:Real},
ypred::AbstractVector{<:Real},
)
result = Statistics.mean(abs2, ytrue .- ypred)
return result
end
"""
root_mean_square_error(ytrue, ypred)
"""
root_mean_square_error(ytrue,ypred) = sqrt(mean_square_error(ytrue,ypred))
# convenience aliases for mean squared error:
mean_squared_error(ytrue,ypred) = mean_square_error(ytrue,ypred)
mean_square_deviation(ytrue,ypred) = mean_square_error(ytrue,ypred)
mean_squared_deviation(ytrue,ypred) = mean_square_error(ytrue,ypred)
# convenience aliases for root mean squared error:
root_mean_squared_error(ytrue,ypred) = root_mean_square_error(ytrue,ypred)
root_mean_square_deviation(ytrue,ypred) = root_mean_square_error(ytrue,ypred)
root_mean_squared_deviation(ytrue,ypred) = root_mean_square_error(ytrue,ypred)
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 812 | import MLBase
import StatsBase
"""
"""
function prcurve(
ytrue::AbstractVector{<:Integer},
yscore::AbstractVector{<:Real},
)
allrocnums, allthresholds = getallrocnums(
ytrue,
yscore,
)
result = prcurve(
allrocnums,
allthresholds,
)
return result
end
"""
"""
function prcurve(
allrocnums::AbstractVector{<:MLBase.ROCNums},
allthresholds::AbstractVector{<:Real},
)
allprecisions = [precision(x) for x in allrocnums]
allrecalls = [recall(x) for x in allrocnums]
permutation = sortperm(allthresholds; rev = false)
allprecisions = allprecisions[permutation]
allrecalls = allrecalls[permutation]
allthresholds = allthresholds[permutation]
return allprecisions, allrecalls, allthresholds
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 3269 | import DataFrames
import Statistics
"""
"""
function risk_score_cutoff_values(
estimator::AbstractFittable,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
single_label_name::Symbol,
positive_class::AbstractString;
multiply_by::Real = 1.0,
average_function = Statistics.mean,
)
#
ytrue = Int.(
singlelabelbinaryytrue(
labels_df[single_label_name],
positive_class,
)
)
#
predictedprobabilitiesalllabels =
predict_proba(estimator, features_df)
yscore = Float64.(
singlelabelbinaryyscore(
predictedprobabilitiesalllabels[single_label_name],
positive_class,
)
)
#
cutoffs, risk_group_prevalences = risk_score_cutoff_values(
ytrue,
yscore;
multiply_by = multiply_by,
average_function = average_function,
)
return cutoffs, risk_group_prevalences
end
"""
"""
function risk_score_cutoff_values(
ytrue::AbstractVector{<:Integer},
yscore::AbstractVector{<:AbstractFloat};
multiply_by::Real = 1.0,
average_function = Statistics.mean,
)
true_negative_rows = findall(
ytrue .== 0
)
true_positive_rows = findall(
ytrue .== 1
)
#
average_score_true_negatives = average_function(
yscore[true_negative_rows]
)
average_score_true_positives = average_function(
yscore[true_positive_rows]
)
#
lower_cutoff = multiply_by * average_score_true_negatives
higher_cutoff = multiply_by * average_score_true_positives
#
cutoffs = (lower_cutoff, higher_cutoff,)
#
low_risk_group_rows = findall(
yscore .<= average_score_true_negatives
)
medium_risk_group_rows = findall(
average_score_true_negatives .<=
yscore .<=
average_score_true_positives
)
high_risk_group_rows = findall(
average_score_true_positives .<= yscore
)
#
risk_group_prevalences = DataFrames.DataFrame()
risk_group_prevalences[:Risk_group] = [
"Low risk",
"Medium risk",
"High risk",
]
risk_group_prevalences[:User_supplied_average_function] = [
average_function( ytrue[low_risk_group_rows] ),
average_function( ytrue[medium_risk_group_rows] ),
average_function( ytrue[high_risk_group_rows] ),
]
risk_group_prevalences[:Arithmetic_mean] = [
Statistics.mean( ytrue[low_risk_group_rows] ),
Statistics.mean( ytrue[medium_risk_group_rows] ),
Statistics.mean( ytrue[high_risk_group_rows] ),
]
risk_group_prevalences[:Median] = [
Statistics.median( ytrue[low_risk_group_rows] ),
Statistics.median( ytrue[medium_risk_group_rows] ),
Statistics.median( ytrue[high_risk_group_rows] ),
]
if average_function==Statistics.mean || average_function==Statistics.median
DataFrames.deletecols!(
risk_group_prevalences,
[:User_supplied_average_function],
)
end
return cutoffs, risk_group_prevalences
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 811 | import MLBase
import StatsBase
"""
"""
function roccurve(
ytrue::AbstractVector{<:Integer},
yscore::AbstractVector{<:Real},
)
allrocnums, allthresholds = getallrocnums(
ytrue,
yscore,
)
result = roccurve(
allrocnums,
allthresholds,
)
return result
end
"""
"""
function roccurve(
allrocnums::AbstractVector{<:MLBase.ROCNums},
allthresholds::AbstractVector{<:Real},
)
allfpr = [false_positive_rate(x) for x in allrocnums]
alltpr = [true_positive_rate(x) for x in allrocnums]
#
permutation = sortperm(allthresholds; rev = false)
allfpr = allfpr[permutation]
alltpr = alltpr[permutation]
allthresholds = allthresholds[permutation]
return allfpr, alltpr, allthresholds
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1892 | import MLBase
import StatsBase
"""
"""
function getallrocnums(
ytrue::AbstractVector{<:Integer},
yscore::AbstractVector{<:Real};
additionalthreshold::AbstractFloat = 0.5,
)
allthresholds = get_binary_thresholds(
yscore;
additionalthreshold = additionalthreshold,
)
allrocnums = MLBase.roc(
ytrue,
yscore,
allthresholds,
)
return allrocnums, allthresholds
end
"""
"""
accuracy(x::MLBase.ROCNums) = (x.tp + x.tn)/(x.p + x.n)
"""
"""
true_positive_rate(x::MLBase.ROCNums) = (x.tp)/(x.p)
"""
"""
true_negative_rate(x::MLBase.ROCNums) = (x.tn)/(x.n)
"""
"""
false_positive_rate(x::MLBase.ROCNums) = (x.fp)/(x.n)
"""
"""
false_negative_rate(x::MLBase.ROCNums) = (x.fn)/(x.p)
"""
"""
function positive_predictive_value(x::MLBase.ROCNums)
if (x.tp == 0) && (x.tp + x.fp == 0)
result = 1
elseif (x.tp != 0) && (x.tp + x.fp == 0)
error("x.tp != 0) && (x.tp + x.fp == 0)")
else
result = (x.tp) / (x.tp + x.fp)
end
return result
end
"""
"""
function negative_predictive_value(x::MLBase.ROCNums)
if (x.tn == 0) && (x.tn + x.fn ==0)
result = 1
elseif (x.tn != 0) && (x.tn + x.fn == 0)
error("(x.tn != 0) && (x.tn + x.fn == 0)")
else
result = (x.tn) / (x.tn + x.fn)
end
return result
end
"""
"""
sensitivity(x::MLBase.ROCNums) = true_positive_rate(x)
"""
"""
specificity(x::MLBase.ROCNums) = true_negative_rate(x)
"""
"""
precision(x::MLBase.ROCNums) = positive_predictive_value(x)
"""
"""
recall(x::MLBase.ROCNums) = true_positive_rate(x)
"""
"""
function fbetascore(
x::MLBase.ROCNums,
beta::Real,
)
p = precision(x)
r = recall(x)
result = ( 1 + beta^2 ) * ( p*r ) / ( ((beta^2) * p) + r )
return result
end
"""
"""
f1score(x::MLBase.ROCNums) = fbetascore(x, 1)
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 9893 | import DataFrames
import MLBase
import StatsBase
"""
"""
function singlelabelbinaryytrue(
labels::AbstractVector,
positive_class::AbstractString;
inttype::Type=Int,
)
if !(inttype<:Integer)
error("!(inttype<:Integer)")
end
result = inttype.(labels .== positive_class)
return result
end
"""
"""
function singlelabelbinaryyscore(
single_labelprobabilities::AbstractDict,
positive_class::AbstractString;
float_type::Type{<:AbstractFloat} = Float64,
)
result = float_type.(single_labelprobabilities[positive_class])
return result
end
"""
"""
function singlelabelbinaryclassificationmetrics_tunableparam(
kwargsassoc::AbstractDict,
)
tunableparams = [
:threshold,
:sensitivity,
:specificity,
:maximize,
]
maximizableparams = [
:f1score,
:cohen_kappa,
]
kwargshastunableparam = [
haskey(kwargsassoc, x) for x in tunableparams
]
if sum(kwargshastunableparam) != 1
msg = "you must specify one (and only one) of the following: " *
join(tunableparams, ", ")
error(msg)
end
if length(tunableparams[kwargshastunableparam]) != 1
error("oh boy you definitely should never see this error message")
end
selectedtunableparam =
tunableparams[kwargshastunableparam][1]
if selectedtunableparam == :maximize
selectedparamtomax = kwargsassoc[:maximize]
if !in(selectedparamtomax, maximizableparams)
msg = "Cannot max $(selectedparamtomax). Select one " *
"of the following: " * join(maximizableparams, ", ")
error(msg)
end
else
selectedparamtomax = :notapplicable
end
#
metricdisplaynames = Dict()
metricdisplaynames[:AUPRC] = string("AUPRC")
metricdisplaynames[:AUROCC] = string("AUROCC")
metricdisplaynames[:AveragePrecision] = string("Average precision")
if selectedtunableparam == :threshold
metricdisplaynames[:threshold] = string("[fix] * Threshold")
else
metricdisplaynames[:threshold] = string("* Threshold")
end
metricdisplaynames[:accuracy] = string("* Accuracy")
if selectedtunableparam == :maximize && selectedparamtomax ==
:cohen_kappa
metricdisplaynames[:cohen_kappa] =
string("[max] * Cohen's Kappa statistic")
else
metricdisplaynames[:cohen_kappa] =
string("* Cohen's Kappa statistic")
end
if selectedtunableparam == :maximize && selectedparamtomax ==
:f1score
metricdisplaynames[:f1score] = string("[max] * F1 score")
else
metricdisplaynames[:f1score] = string("* F1 Score")
end
metricdisplaynames[:precision] =
string("* Precision (positive predictive value)")
metricdisplaynames[:negative_predictive_value] =
string("* Negative predictive value")
metricdisplaynames[:recall] =
string("* Recall (sensitivity, true positive rate)")
if selectedtunableparam == :sensitivity
metricdisplaynames[:sensitivity] =
string("[fix] * Sensitivity (recall, true positive rate)")
else
metricdisplaynames[:sensitivity] =
string("* Sensitivity (recall, true positive rate)")
end
if selectedtunableparam == :specificity
metricdisplaynames[:specificity] =
string("[fix] * Specificity (true negative rate)")
else
metricdisplaynames[:specificity] =
string("* Specificity (true negative rate)")
end
metricdisplaynames = fix_type(metricdisplaynames)
return selectedtunableparam, selectedparamtomax, metricdisplaynames
end
"""
"""
function singlelabelbinaryclassificationmetrics_resultdict(
estimator::AbstractFittable,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
single_label_name::Symbol,
positive_class::AbstractString;
kwargs...
)
#
kwargsdict = Dict(kwargs)
kwargsdict = fix_type(kwargsdict)
selectedtunableparam, selectedparamtomax, metricdisplaynames =
singlelabelbinaryclassificationmetrics_tunableparam(kwargsdict)
#
predictedprobabilitiesalllabels = predict_proba(estimator, features_df)
yscore = Float64.(
singlelabelbinaryyscore(
predictedprobabilitiesalllabels[single_label_name],
positive_class,
)
)
ytrue = Int.(
singlelabelbinaryytrue(
labels_df[single_label_name],
positive_class,
)
)
results = Dict()
results[:ytrue] = ytrue
results[:yscore] = yscore
results[:AUROCC] = aurocc(ytrue, yscore)
results[:AUPRC] = auprc(ytrue, yscore)
results[:AveragePrecision] = averageprecisionscore(ytrue, yscore)
if selectedtunableparam == :threshold
additionalthreshold = kwargsdict[:threshold]
else
additionalthreshold = 0.5
end
allrocnums, allthresholds = getallrocnums(
ytrue,
yscore;
additionalthreshold = additionalthreshold,
)
if selectedtunableparam == :threshold
selectedthreshold = kwargsdict[:threshold]
bestindex = argmin(abs.(allthresholds .- selectedthreshold))
elseif selectedtunableparam == :sensitivity
selectedsensitivity = kwargsdict[:sensitivity]
allsensitivity = [sensitivity(x) for x in allrocnums]
bestindex = argmin(abs.(allsensitivity .- selectedsensitivity))
elseif selectedtunableparam == :specificity
selectedspecificity = kwargsdict[:specificity]
allspecificity = [specificity(x) for x in allrocnums]
bestindex = argmin(abs.(allspecificity .- selectedspecificity))
elseif selectedtunableparam == :maximize
selectedparamtomax = kwargsdict[:maximize]
if selectedparamtomax == :f1score
allf1score = [fbetascore(x, 1) for x in allrocnums]
bestindex = argmin(allf1score)
elseif selectedparamtomax == :cohen_kappa
allcohen_kappa = [cohen_kappa(x) for x in allrocnums]
bestindex = argmin(allcohen_kappa)
else
error("this is an error that should never happen")
end
else
error("this is another error that should never happen")
end
results[:allrocnums] = allrocnums
results[:allthresholds] = allthresholds
results[:bestindex] = bestindex
bestrocnums = allrocnums[bestindex]
bestthreshold = allthresholds[bestindex]
results[:threshold] = bestthreshold
results[:accuracy] = accuracy(bestrocnums)
results[:sensitivity] = sensitivity(bestrocnums)
results[:specificity] = specificity(bestrocnums)
results[:precision] = precision(bestrocnums)
results[:negative_predictive_value] =
negative_predictive_value(bestrocnums)
results[:recall] = recall(bestrocnums)
results[:f1score] = f1score(bestrocnums)
results[:cohen_kappa] = cohen_kappa(bestrocnums)
results = fix_type(results)
return results
end
"""
"""
function singlelabelbinaryclassificationmetrics(
estimator::AbstractFittable,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
single_label_name::Symbol,
positive_class::AbstractString;
kwargs...
)
vectorofestimators = AbstractFittable[estimator]
result = singlelabelbinaryclassificationmetrics(
vectorofestimators,
features_df,
labels_df,
single_label_name,
positive_class;
kwargs...
)
return result
end
"""
"""
function singlelabelbinaryclassificationmetrics(
vectorofestimators::AbstractVector{AbstractFittable},
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
single_label_name::Symbol,
positive_class::AbstractString;
kwargs...
)
kwargsdict = Dict(kwargs)
kwargsdict = fix_type(kwargsdict)
selectedtunableparam, selectedparamtomax, metricdisplaynames =
singlelabelbinaryclassificationmetrics_tunableparam(kwargsdict)
metricsforeachestimator = [
singlelabelbinaryclassificationmetrics_resultdict(
est,
features_df,
labels_df,
single_label_name,
positive_class;
kwargs...
)
for est in vectorofestimators
]
result = DataFrames.DataFrame()
result[:metric] = [
metricdisplaynames[:AUPRC],
metricdisplaynames[:AUROCC],
metricdisplaynames[:AveragePrecision],
metricdisplaynames[:threshold],
metricdisplaynames[:accuracy],
metricdisplaynames[:cohen_kappa],
metricdisplaynames[:f1score],
metricdisplaynames[:precision],
metricdisplaynames[:negative_predictive_value],
metricdisplaynames[:recall],
metricdisplaynames[:sensitivity],
metricdisplaynames[:specificity],
]
for i = 1:length(vectorofestimators)
result[Symbol(vectorofestimators[i].name)] = [
metricsforeachestimator[i][:AUPRC],
metricsforeachestimator[i][:AUROCC],
metricsforeachestimator[i][:AveragePrecision],
metricsforeachestimator[i][:threshold],
metricsforeachestimator[i][:accuracy],
metricsforeachestimator[i][:cohen_kappa],
metricsforeachestimator[i][:f1score],
metricsforeachestimator[i][:precision],
metricsforeachestimator[i][:negative_predictive_value],
metricsforeachestimator[i][:recall],
metricsforeachestimator[i][:sensitivity],
metricsforeachestimator[i][:specificity],
]
end
return result
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2754 | import DataFrames
import MLBase
import StatsBase
"""
"""
function singlelabelregressionytrue(
labels::AbstractVector;
float_type::Type{<:AbstractFloat} = Float64,
)
result = float_type.(labels)
return result
end
"""
"""
function singlelabelregressionypred(
labels::AbstractVector;
float_type::Type{<:AbstractFloat} = Float64,
)
result = float_type.(labels)
return result
end
"""
"""
function singlelabelregressionmetrics_resultdict(
estimator::AbstractFittable,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
single_label_name::Symbol,
)
ytrue = singlelabelregressionytrue(
labels_df[single_label_name],
)
predictionsalllabels = predict(estimator, features_df)
ypred = singlelabelregressionypred(
predictionsalllabels[single_label_name],
)
results = Dict()
results[:r2_score] = r2_score(
ytrue,
ypred,
)
results[:mean_square_error] = mean_square_error(
ytrue,
ypred,
)
results[:root_mean_square_error] = root_mean_square_error(
ytrue,
ypred,
)
results = fix_type(results)
return results
end
"""
"""
function singlelabelregressionmetrics(
estimator::AbstractFittable,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
single_label_name::Symbol,
)
vectorofestimators = AbstractFittable[estimator]
result = singlelabelregressionmetrics(
vectorofestimators,
features_df,
labels_df,
single_label_name,
)
return result
end
"""
"""
function singlelabelregressionmetrics(
vectorofestimators::AbstractVector{AbstractFittable},
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
single_label_name::Symbol;
kwargs...
)
metricsforeachestimator = [
singlelabelregressionmetrics_resultdict(
est,
features_df,
labels_df,
single_label_name,
)
for est in vectorofestimators
]
result = DataFrames.DataFrame()
result[:metric] = [
"R^2 (coefficient of determination)",
"Mean squared error (MSE)",
"Root mean square error (RMSE)",
]
for i = 1:length(vectorofestimators)
result[Symbol(vectorofestimators[i].name)] = [
metricsforeachestimator[i][:r2_score],
metricsforeachestimator[i][:mean_square_error],
metricsforeachestimator[i][:root_mean_square_error],
]
end
return result
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 8124 | function get_number_in_each_fold(num_samples::Integer, num_folds::Integer)::Vector{Int}
if num_samples < num_folds
throw(ArgumentError("`num_samples` must be greater than or equal to `num_folds`"))
end
result::Vector{Int} = zeros(Int, num_folds)
if num_samples % num_folds == 0
result .= Int(num_samples//num_folds)
else
min_number_per_fold = round(Int, floor(num_samples/num_folds))
remaining_number = num_samples - min_number_per_fold*num_folds
result[1:remaining_number] .= min_number_per_fold + 1
result[(remaining_number+1):end] .= min_number_per_fold
end
@assert sum(result) == num_samples
return result
end
"""
    get_indices_in_each_fold(all_indices, num_folds) -> Vector{Vector{Int}}

Partition the unique, sorted values of `all_indices` into `num_folds`
contiguous folds whose sizes are given by `get_number_in_each_fold`.
The input is not mutated.
"""
function get_indices_in_each_fold(all_indices::AbstractVector{<:Integer}, num_folds::Integer)::Vector{Vector{Int}}
    sorted_indices = sort(unique(all_indices))
    fold_sizes = get_number_in_each_fold(length(sorted_indices), num_folds)
    result::Vector{Vector{Int}} = Vector{Vector{Int}}(undef, num_folds)
    stop::Int = 0
    for fold = 1:num_folds
        start = stop + 1
        stop = start + fold_sizes[fold] - 1
        result[fold] = sorted_indices[start:stop]
    end
    # every input index appears in exactly one fold
    @assert all(sort(unique(sorted_indices)) .== sort(unique(vcat(result...))))
    return result
end
"""
    get_leavein_indices(all_indices, num_folds, fold_choice) -> Vector{Int}

Return the indices belonging to every fold EXCEPT `fold_choice`, i.e. the
leave-in (training) portion for that fold.
"""
function get_leavein_indices(all_indices::AbstractVector{<:Integer}, num_folds::Integer, fold_choice::Integer)::Vector{Int}
    folds = get_indices_in_each_fold(all_indices, num_folds)
    other_folds = [folds[k] for k = 1:num_folds if k != fold_choice]
    return reduce(vcat, other_folds)
end
"""
    get_leaveout_indices(all_indices, num_folds, fold_choice) -> Vector{Int}

Return the indices belonging to fold `fold_choice`, i.e. the held-out
portion for that fold.
"""
function get_leaveout_indices(all_indices::AbstractVector{<:Integer}, num_folds::Integer, fold_choice::Integer)::Vector{Int}
    return get_indices_in_each_fold(all_indices, num_folds)[fold_choice]
end
"""
    vector_to_ranges(vector) -> Vector{UnitRange{Int}}

Compress a collection of integers into the minimal sorted list of
contiguous unit ranges covering its unique values, e.g.
`vector_to_ranges([1, 2, 3, 7]) == [1:3, 7:7]`. Returns an empty vector
for empty input.
"""
function vector_to_ranges(vector::AbstractVector{<:Integer})::Vector{UnitRange{Int}}
    values = sort(unique(vector))
    ranges = UnitRange{Int}[]
    if isempty(values)
        return ranges
    end
    range_start::Int = values[1]
    range_stop::Int = values[1]
    for v in values
        if v > range_stop + 1
            # gap found: close the current run and begin a new one
            push!(ranges, range_start:range_stop)
            range_start = v
        end
        range_stop = v
    end
    push!(ranges, range_start:range_stop)
    return ranges
end
"""
    ranges_to_vector(ranges) -> Vector{Int}

Expand a collection of integer ranges into a sorted vector of the unique
values they cover. The input is not mutated.
"""
function ranges_to_vector(ranges::AbstractVector{<:AbstractRange{<:Integer}})::Vector{Int}
    expanded = Int[]
    for r in sort(unique(ranges))
        append!(expanded, r)
    end
    return sort(unique(expanded))
end
"""
    ==(x::CrossValidation{T}, y::CrossValidation{T})

Two cross-validation schemes are equal when both their leave-out index
lists and their nested leave-in schemes are equal.
"""
function Base.:(==)(x::CrossValidation{T}, y::CrossValidation{T})::Bool where T
    return (x.leaveout == y.leaveout) && (x.leavein == y.leavein)
end
"""
    CrossValidation{T}(; num_folds_per_level, all_indices)

Recursively build a nested cross-validation scheme over the unique,
sorted values of `all_indices`. `num_folds_per_level` gives the number of
folds at each nesting level; when it is empty, a leaf is built whose
single `leaveout` entry holds all indices and whose `leavein` is empty.
"""
function CrossValidation{T}(
        ;
        num_folds_per_level,
        all_indices,
        )::CrossValidation{T} where T
    all_indices_deepcopy = deepcopy(all_indices)
    unique!(all_indices_deepcopy)
    sort!(all_indices_deepcopy)
    if length(num_folds_per_level) < 1
        # Leaf: no further splitting; all indices go into leaveout[1].
        leaveout = Vector{Vector{T}}(undef, 1)
        leaveout[1] = all_indices_deepcopy
        leavein = Vector{CrossValidation{T}}(undef, 0)
    else
        num_folds_this_level = num_folds_per_level[1]
        remaining_num_folds_per_level = num_folds_per_level[2:end]
        leaveout = Vector{Vector{T}}(undef, num_folds_this_level)
        leavein = Vector{CrossValidation{T}}(undef, num_folds_this_level)
        for fold_choice = 1:num_folds_this_level
            # held-out indices for this fold ...
            leaveout[fold_choice] = get_leaveout_indices(all_indices, num_folds_this_level, fold_choice)
            # ... and a nested scheme over the remaining (leave-in) indices
            leavein[fold_choice] = CrossValidation{T}(
                ;
                num_folds_per_level = remaining_num_folds_per_level,
                all_indices = get_leavein_indices(all_indices, num_folds_this_level, fold_choice)
                )
        end
    end
    result::CrossValidation{T} = CrossValidation{T}(leavein, leaveout)
    return result
end
"""
    CrossValidation{T}(cv::CrossValidation{R}) where {T, R<:AbstractUnitRange{<:Integer}}

Convert a range-encoded `CrossValidation` into one whose index lists are
plain vectors of element type `T`, recursively expanding every stored
range via `ranges_to_vector`.
"""
function CrossValidation{T}(
        cv::CrossValidation{R},
        )::CrossValidation{T} where
        T where
        R <: AbstractUnitRange{S} where
        S <: Integer
    if isleaf(cv)
        # leaf: expand the single stored index list
        leaveout = Vector{Vector{T}}(undef, 1)
        leaveout[1] = ranges_to_vector(get_leavein_indices(cv))
        leavein = Vector{CrossValidation{T}}(undef, 0)
    else
        num_folds = get_top_level_num_folds(cv)
        leaveout = Vector{Vector{T}}(undef, num_folds)
        leavein = Vector{CrossValidation{T}}(undef, num_folds)
        for fold_choice = 1:num_folds
            leaveout[fold_choice] = ranges_to_vector(get_leaveout_indices(cv, fold_choice))
            # recurse into the nested leave-in scheme
            leavein[fold_choice] = CrossValidation{T}(get_leavein_cv(cv, fold_choice))
        end
    end
    result::CrossValidation{T} = CrossValidation{T}(leavein, leaveout)
    return result
end
"""
    CrossValidation{T}(cv::CrossValidation{<:Integer})

Convert an integer-encoded `CrossValidation` into one whose index lists
are compressed into ranges (element type `T`), recursively compressing
every stored index vector via `vector_to_ranges`.
"""
function CrossValidation{T}(
        cv::CrossValidation{<:Integer},
        )::CrossValidation{T} where T
    if isleaf(cv)
        # leaf: compress the single stored index list
        leaveout = Vector{Vector{T}}(undef, 1)
        leaveout[1] = vector_to_ranges(get_leavein_indices(cv))
        leavein = Vector{CrossValidation{T}}(undef, 0)
    else
        num_folds = get_top_level_num_folds(cv)
        leaveout = Vector{Vector{T}}(undef, num_folds)
        leavein = Vector{CrossValidation{T}}(undef, num_folds)
        for fold_choice = 1:num_folds
            leaveout[fold_choice] = vector_to_ranges(get_leaveout_indices(cv, fold_choice))
            # recurse into the nested leave-in scheme
            leavein[fold_choice] = CrossValidation{T}(get_leavein_cv(cv, fold_choice))
        end
    end
    result::CrossValidation{T} = CrossValidation{T}(leavein, leaveout)
    return result
end
"""
    isleaf(cv::CrossValidation) -> Bool

A `CrossValidation` is a leaf when it has no nested (leave-in) levels.
"""
function isleaf(cv::CrossValidation{T})::Bool where T
    return isempty(cv.leavein)
end
"""
    get_leavein_cv(cv::CrossValidation, fold_choice) -> CrossValidation

Return the nested leave-in scheme for fold `fold_choice`. Throws an
`ArgumentError` when `cv` is a leaf.
"""
function get_leavein_cv(cv::CrossValidation{T}, fold_choice)::CrossValidation{T} where T
    if isleaf(cv)
        throw(ArgumentError("`cv` is a leaf, so `get_leavein_cv(cv, fold_choice)` is not defined"))
    end
    return cv.leavein[fold_choice]
end
"""
    get_leavein_indices(cv::CrossValidation) -> Vector

For a leaf `cv`, return its single stored index list. Throws an
`ArgumentError` when `cv` is not a leaf.
"""
function get_leavein_indices(cv::CrossValidation{T})::Vector{T} where T
    if !isleaf(cv)
        throw(ArgumentError("`cv` is not a leaf, so `get_leavein_indices(cv)` is not defined"))
    end
    return cv.leaveout[1]
end
"""
    get_leaveout_indices(cv::CrossValidation, fold_choice) -> Vector

Return the held-out index list for fold `fold_choice`. Throws an
`ArgumentError` when `cv` is a leaf.
"""
function get_leaveout_indices(cv::CrossValidation{T}, fold_choice)::Vector{T} where T
    if isleaf(cv)
        throw(ArgumentError("`cv` is a leaf, so `get_leaveout_indices(cv, fold_choice)` is not defined"))
    end
    return cv.leaveout[fold_choice]
end
"""
    get_all_indices(cv::CrossValidation) -> Vector

Return the sorted, de-duplicated union of all leave-out index lists,
i.e. every index covered by the scheme's top level.
"""
function get_all_indices(cv::CrossValidation{T})::Vector{T} where T
    combined = reduce(vcat, cv.leaveout)
    return sort(unique(combined))
end
"""
    get_top_level_num_folds(cv::CrossValidation) -> Int

Return the number of folds at the top level of `cv` (0 for a leaf).
Throws a `DimensionMismatch` when the leave-out and leave-in lists
disagree in length.
"""
function get_top_level_num_folds(cv::CrossValidation{T})::Int where T
    if isleaf(cv)
        return 0
    end
    if length(cv.leaveout) != length(cv.leavein)
        throw(DimensionMismatch("length(cv.leaveout) != length(cv.leavein)"))
    end
    return length(cv.leaveout)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1534 | import DataFrames
import Random
import StatsBase
"""
"""
function split_data(
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
split::Real,
)
result = split_data(
Random.GLOBAL_RNG,
features_df,
labels_df,
split,
)
return result
end
"""
"""
function split_data(
rng::Random.AbstractRNG,
features_df::DataFrames.AbstractDataFrame,
labels_df::DataFrames.AbstractDataFrame,
split::Real,
)
#
if !(0 < split < 1)
error("split must be >0 and <1")
end
if size(features_df, 1) != size(labels_df, 1)
error("features_df and labels_df do not have the same number of rows")
end
#
num_rows = size(features_df, 1)
num_partition_1 = round(Int, split * num_rows)
num_partition_2 = num_rows - num_partition_1
#
allrows = convert(Array, 1:num_rows)
partition_1_rows = StatsBase.sample(
rng,
allrows,
num_partition_1;
replace = false,
)
partition_2_rows = setdiff(allrows, partition_1_rows)
#
partition_1_features_df = features_df[partition_1_rows, :]
partition_2_features_df = features_df[partition_2_rows, :]
#
partition_1_labels_df = labels_df[partition_1_rows, :]
partition_2_labels_df = labels_df[partition_2_rows, :]
#
return partition_1_features_df,
partition_1_labels_df,
partition_2_features_df,
partition_2_labels_df
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 107 | # import Flux
# import GPUArrays
# import NNlib
import ProgressMeter
import ValueHistories
# import Zygote
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 19173 | # import GPUArrays
import Knet
import ProgressMeter
import ValueHistories
"""
    KnetModel(; kwargs...)

Keyword constructor for `KnetModel`. Maps `optimizationalgorithm` (a
`Symbol` such as `:Adam` or `:Momentum`) to the corresponding Knet
optimizer type, creates per-weight optimizer state via `Knet.optimizers`,
and seeds an empty training history so that the iteration/epoch counters
start at (0, 0).
"""
function KnetModel(
        ;
        name::AbstractString = "",
        predict_function_source::AbstractString = "",
        loss_function_source::AbstractString = "",
        predict_function::Function = identity,
        loss_function::Function = identity,
        losshyperparameters::AbstractDict = Dict(),
        optimizationalgorithm::Symbol = :nothing,
        optimizerhyperparameters::AbstractDict = Dict(),
        minibatchsize::Integer = 0,
        modelweights::AbstractArray = [],
        isclassificationmodel::Bool = false,
        isregressionmodel::Bool = false,
        maxepochs::Integer = 0,
        printlosseverynepochs::Integer = 0,
        )
    # translate the algorithm symbol into the matching Knet optimizer type
    optimizersymbol2type=Dict()
    optimizersymbol2type[:Sgd] = Knet.Sgd
    optimizersymbol2type[:Momentum] = Knet.Momentum
    optimizersymbol2type[:Nesterov] = Knet.Nesterov
    optimizersymbol2type[:Rmsprop] = Knet.Rmsprop
    optimizersymbol2type[:Adagrad] = Knet.Adagrad
    optimizersymbol2type[:Adadelta] = Knet.Adadelta
    optimizersymbol2type[:Adam] = Knet.Adam
    optimizersymbol2type=fix_type(optimizersymbol2type)
    # one optimizer state object per weight array
    modelweightoptimizers = Knet.optimizers(
        modelweights,
        optimizersymbol2type[optimizationalgorithm];
        optimizerhyperparameters...
        )
    # seed the history so counters resume correctly on repeated `fit!`
    history = ValueHistories.MVHistory()
    ValueHistories.push!(
        history,
        :epoch_at_iteration,
        0,
        0,
        )
    losshyperparameters = fix_type(losshyperparameters)
    optimizerhyperparameters = fix_type(optimizerhyperparameters)
    # NOTE: the positional order below must match the field order of the
    # `KnetModel` struct declaration.
    result = KnetModel(
        name,
        isclassificationmodel,
        isregressionmodel,
        predict_function_source,
        loss_function_source,
        predict_function,
        loss_function,
        losshyperparameters,
        optimizationalgorithm,
        optimizerhyperparameters,
        minibatchsize,
        maxepochs,
        printlosseverynepochs,
        modelweights,
        modelweightoptimizers,
        history,
        )
    return result
end
"""
"""
function set_max_epochs!(
x::KnetModel,
new_max_epochs::Integer,
)
x.maxepochs = new_max_epochs
return nothing
end
"""
"""
function get_underlying(
x::KnetModel;
saving::Bool = false,
loading::Bool = false,
)
result = (x.modelweights, x.modelweightoptimizers,)
return result
end
"""
"""
function get_history(
x::KnetModel;
saving::Bool = false,
loading::Bool = false,
)
result = x.history
return result
end
"""
    parse_functions!(estimator::KnetModel)

Re-create `predict_function` and `loss_function` by parsing and
evaluating their stored source strings (needed e.g. after
deserialization, since functions themselves cannot be saved). Returns
`nothing`.
"""
function parse_functions!(estimator::KnetModel)
    # NOTE: eval of stored source is by design here; the source strings
    # are model metadata supplied when the model was constructed.
    estimator.predict_function =
        eval(Meta.parse(strip(estimator.predict_function_source)))
    estimator.loss_function =
        eval(Meta.parse(strip(estimator.loss_function_source)))
    return nothing
end
"""
"""
function fit!(
estimator::KnetModel,
training_features_array::AbstractArray,
training_labels_array::AbstractArray,
tuning_features_array::Union{Nothing, AbstractArray} = nothing,
tuning_labels_array::Union{Nothing, AbstractArray} = nothing,
)
has_tuning_data::Bool =
!is_nothing(tuning_labels_array) && !is_nothing(tuning_features_array)
training_features_array = Float64.(training_features_array)
if estimator.isclassificationmodel && !estimator.isregressionmodel
training_labels_array = Int.(training_labels_array)
elseif !estimator.isclassificationmodel && estimator.isregressionmodel
training_labels_array = Float64.(training_labels_array)
else
error(
"Could not figure out if model is classification or regression"
)
end
training_data = Knet.minibatch(
training_features_array,
training_labels_array,
estimator.minibatchsize,
)
loss_function_gradient = Knet.grad(
estimator.loss_function,
2,
)
all_iterations_so_far, all_epochs_so_far = ValueHistories.get(
estimator.history,
:epoch_at_iteration,
)
last_iteration = all_iterations_so_far[end]
last_epoch = all_epochs_so_far[end]
@info(
string(
"Starting to train Knet model. Max epochs: ",
estimator.maxepochs,
".",
)
)
training_lossbeforetrainingstarts = estimator.loss_function(
estimator.predict_function,
estimator.modelweights,
training_features_array,
training_labels_array;
estimator.losshyperparameters...
)
if has_tuning_data
tuning_lossbeforetrainingstarts = estimator.loss_function(
estimator.predict_function,
estimator.modelweights,
tuning_features_array,
tuning_labels_array;
estimator.losshyperparameters...
)
end
if (estimator.printlosseverynepochs) > 0
if has_tuning_data
@info(
string(
"Epoch: ",
last_epoch,
". Loss (training set): ",
training_lossbeforetrainingstarts,
". Loss (tuning set): ",
tuning_lossbeforetrainingstarts,
".",
)
)
else
@info(
string(
"Epoch: ",
lastepoch,
". Loss: ",
lossbeforetrainingstarts,
"."
)
)
end
end
while last_epoch < estimator.maxepochs
for (x_training, y_training) in training_data
grads = loss_function_gradient(
estimator.predict_function,
estimator.modelweights,
x_training,
y_training;
estimator.losshyperparameters...
)
Knet.update!(
estimator.modelweights,
grads,
estimator.modelweightoptimizers,
)
last_iteration += 1
training_currentiterationloss = estimator.loss_function(
estimator.predict_function,
estimator.modelweights,
x_training,
y_training;
estimator.losshyperparameters...
)
ValueHistories.push!(
estimator.history,
:training_loss_at_iteration,
last_iteration,
training_currentiterationloss,
)
end # end for
last_epoch += 1
ValueHistories.push!(
estimator.history,
:epoch_at_iteration,
last_iteration,
last_epoch,
)
training_currentepochloss = estimator.loss_function(
estimator.predict_function,
estimator.modelweights,
training_features_array,
training_labels_array;
estimator.losshyperparameters...
)
ValueHistories.push!(
estimator.history,
:training_loss_at_epoch,
last_epoch,
training_currentepochloss,
)
if has_tuning_data
tuning_currentepochloss = estimator.loss_function(
estimator.predict_function,
estimator.modelweights,
tuning_features_array,
tuning_labels_array;
estimator.losshyperparameters...
)
ValueHistories.push!(
estimator.history,
:tuning_loss_at_epoch,
last_epoch,
tuning_currentepochloss,
)
end
printlossthisepoch = (estimator.printlosseverynepochs > 0) &&
( (last_epoch == estimator.maxepochs) ||
( (last_epoch %
estimator.printlosseverynepochs) == 0 ) )
if printlossthisepoch
if has_tuning_data
@info(
string(
"Epoch: ",
last_epoch,
". Loss (training set): ",
training_currentepochloss,
". Loss (tuning set): ",
tuning_currentepochloss,
".",
),
)
else
@info(
string(
"Epoch: ",
last_epoch,
". Loss: ",
training_currentepochloss,
".",
),
)
end
end
end # end while
@info(string("Finished training Knet model."))
return estimator
end
"""
"""
function predict(
estimator::KnetModel,
featuresarray::AbstractArray,
)
if estimator.isclassificationmodel
probabilitiesassoc = predict_proba(
estimator,
featuresarray,
)
predictionsvector = single_labelprobabilitiestopredictions(
probabilitiesassoc
)
return predictionsvector
elseif estimator.isregressionmodel
output = estimator.predict_function(
estimator.modelweights,
featuresarray;
)
outputtransposed = transpose(output)
result = convert(Array, outputtransposed)
return result
else
error("unable to predict")
end
end
"""
"""
function predict(
estimator::KnetModel,
featuresarray::AbstractArray,
positive_class::Integer,
threshold::AbstractFloat,
)
if estimator.isclassificationmodel
probabilitiesassoc = predict_proba(
estimator,
featuresarray,
)
predictionsvector = single_labelprobabilitiestopredictions(
probabilitiesassoc,
positive_class,
threshold,
)
return predictionsvector
else
error("can only use the `threshold` argument with classification models")
end
end
"""
"""
function predict_proba(
estimator::KnetModel,
featuresarray::AbstractArray,
)
if estimator.isclassificationmodel
output = estimator.predict_function(
estimator.modelweights,
featuresarray;
probabilities = true,
)
outputtransposed = transpose(output)
numclasses = size(outputtransposed, 2)
result = Dict()
for i = 1:numclasses
result[i] = outputtransposed[:, i]
end
return result
elseif estimator.isregressionmodel
error("predict_proba is not defined for regression models")
else
error("unable to predict")
end
end
"""
"""
function single_labelmulticlassdataframeknetclassifier_Knet(
feature_names::AbstractVector,
single_label_name::Symbol,
single_label_levels::AbstractVector;
name::AbstractString = "",
predict_function_source::AbstractString = "",
loss_function_source::AbstractString = "",
losshyperparameters::AbstractDict = Dict(),
optimizationalgorithm::Symbol = :nothing,
optimizerhyperparameters::AbstractDict = Dict(),
minibatchsize::Integer = 0,
modelweights::AbstractArray = [],
maxepochs::Integer = 0,
printlosseverynepochs::Integer = 0,
feature_contrasts::Union{Nothing, AbstractFeatureContrasts} =
nothing,
)
label_names = [single_label_name]
label_levels = Dict()
label_levels[single_label_name] = single_label_levels
label_levels = fix_type(label_levels)
dftransformer_index = 1
dftransformer_transposefeatures = true
dftransformer_transposelabels = true
dftransformer = MutableDataFrame2ClassificationKnetTransformer(
feature_names,
label_names,
label_levels,
dftransformer_index;
transposefeatures = dftransformer_transposefeatures,
transposelabels = dftransformer_transposelabels,
)
knetestimator = KnetModel(
;
name = name,
predict_function_source = predict_function_source,
loss_function_source = loss_function_source,
losshyperparameters = losshyperparameters,
optimizationalgorithm = optimizationalgorithm,
optimizerhyperparameters = optimizerhyperparameters,
minibatchsize = minibatchsize,
modelweights = modelweights,
isclassificationmodel = true,
isregressionmodel = false,
maxepochs = maxepochs,
printlosseverynepochs = printlosseverynepochs,
)
predprobalabelfixer =
ImmutablePredictProbaSingleLabelInt2StringTransformer(
1,
single_label_levels
)
predictlabelfixer =
ImmutablePredictionsSingleLabelInt2StringTransformer(
1,
single_label_levels
)
probapackager = ImmutablePackageSingleLabelPredictProbaTransformer(
single_label_name,
)
predpackager = ImmutablePackageSingleLabelPredictionTransformer(
single_label_name,
)
finalpipeline = dftransformer |>
knetestimator |>
predprobalabelfixer |>
predictlabelfixer |>
probapackager |>
predpackager
finalpipeline.name = name
if !is_nothing(feature_contrasts)
set_feature_contrasts!(finalpipeline, feature_contrasts)
end
return finalpipeline
end
"""
"""
function single_labelmulticlassdataframeknetclassifier(
feature_names::AbstractVector,
single_label_name::Symbol,
single_label_levels::AbstractVector;
package::Symbol = :none,
name::AbstractString = "",
predict_function_source::AbstractString = "",
loss_function_source::AbstractString = "",
losshyperparameters::AbstractDict = Dict(),
optimizationalgorithm::Symbol = :nothing,
optimizerhyperparameters::AbstractDict = Dict(),
minibatchsize::Integer = 0,
modelweights::AbstractArray = [],
maxepochs::Integer = 0,
printlosseverynepochs::Integer = 0,
feature_contrasts::Union{Nothing, AbstractFeatureContrasts} =
nothing,
)
if package == :Knet
result = single_labelmulticlassdataframeknetclassifier_Knet(
feature_names,
single_label_name,
single_label_levels;
name = name,
predict_function_source = predict_function_source,
loss_function_source = loss_function_source,
losshyperparameters = losshyperparameters,
optimizationalgorithm = optimizationalgorithm,
optimizerhyperparameters = optimizerhyperparameters,
minibatchsize = minibatchsize,
modelweights = modelweights,
maxepochs = maxepochs,
printlosseverynepochs = printlosseverynepochs,
feature_contrasts = feature_contrasts
)
return result
else
error("$(package) is not a valid value for package")
end
end
"""
"""
function single_labeldataframeknetregression_Knet(
feature_names::AbstractVector,
single_label_name::Symbol;
name::AbstractString = "",
predict_function_source::AbstractString = "",
loss_function_source::AbstractString = "",
losshyperparameters::AbstractDict = Dict(),
optimizationalgorithm::Symbol = :nothing,
optimizerhyperparameters::AbstractDict = Dict(),
minibatchsize::Integer = 0,
modelweights::AbstractArray = [],
maxepochs::Integer = 0,
printlosseverynepochs::Integer = 0,
feature_contrasts::Union{Nothing, AbstractFeatureContrasts} =
nothing,
)
label_names = [single_label_name]
dftransformer_index = 1
dftransformer_transposefeatures = true
dftransformer_transposelabels = true
dftransformer = MutableDataFrame2RegressionKnetTransformer(
feature_names,
label_names;
transposefeatures = true,
transposelabels = true,
)
knetestimator = KnetModel(
;
name = name,
predict_function_source = predict_function_source,
loss_function_source = loss_function_source,
losshyperparameters = losshyperparameters,
optimizationalgorithm = optimizationalgorithm,
optimizerhyperparameters = optimizerhyperparameters,
minibatchsize = minibatchsize,
modelweights = modelweights,
isclassificationmodel = false,
isregressionmodel = true,
maxepochs = maxepochs,
printlosseverynepochs = printlosseverynepochs,
)
predpackager = ImmutablePackageMultiLabelPredictionTransformer(
[single_label_name,],
)
finalpipeline = dftransformer |>
knetestimator |>
predpackager
finalpipeline.name = name
if !is_nothing(feature_contrasts)
set_feature_contrasts!(finalpipeline, feature_contrasts)
end
return finalpipeline
end
"""
    single_labeldataframeknetregression(feature_names, single_label_name; package, kwargs...)

Dispatch on `package` to construct a single-label regression pipeline.
`:Knet` is the only supported value; any other value raises an error.
All remaining keyword arguments are forwarded unchanged to the
package-specific constructor.
"""
function single_labeldataframeknetregression(
        feature_names::AbstractVector,
        single_label_name::Symbol;
        package::Symbol = :none,
        name::AbstractString = "",
        predict_function_source::AbstractString = "",
        loss_function_source::AbstractString = "",
        losshyperparameters::AbstractDict = Dict(),
        optimizationalgorithm::Symbol = :nothing,
        optimizerhyperparameters::AbstractDict = Dict(),
        minibatchsize::Integer = 0,
        modelweights::AbstractArray = [],
        maxepochs::Integer = 0,
        printlosseverynepochs::Integer = 0,
        feature_contrasts::Union{Nothing, AbstractFeatureContrasts} =
            nothing,
        )
    # Guard clause: reject any backend other than Knet up front.
    if package != :Knet
        error("$(package) is not a valid value for package")
    end
    return single_labeldataframeknetregression_Knet(
        feature_names,
        single_label_name;
        name = name,
        predict_function_source = predict_function_source,
        loss_function_source = loss_function_source,
        losshyperparameters = losshyperparameters,
        optimizationalgorithm = optimizationalgorithm,
        optimizerhyperparameters = optimizerhyperparameters,
        minibatchsize = minibatchsize,
        modelweights = modelweights,
        maxepochs = maxepochs,
        printlosseverynepochs = printlosseverynepochs,
        feature_contrasts = feature_contrasts,
    )
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
# Lazy-initialization flag and lookup tables for the bundled HCUP CCS
# "Appendix A, single-level diagnosis" file. All four are wrapped in `Ref`s
# so they can be filled in on first use by
# `parse_icd_icd9_ccs_appendixasingledx_file!`.
# (This line was previously corrupted by concatenated metadata residue;
# the constant definition is reconstructed here.)
const _ICD_ICD9_CCS_APPENDIXASINGLEDX_FILE_HAS_BEEN_PARSED = Ref(false)
# ICD-9 code (full stops removed) => single-level diagnosis CCS number.
const _ICD9_CODE_TO_SINGLE_LEVEL_DX_CCS = Ref(Dict{String, Int}())
# Single-level diagnosis CCS number => human-readable category name.
const _SINGLE_LEVEL_DX_CCS_NUMBER_TO_NAME = Ref(Dict{Int, String}())
# Single-level diagnosis CCS number => list of member ICD-9 codes.
const _SINGLE_LEVEL_DX_CCS_TO_LIST_OF_ICD9_CODES = Ref(Dict{Int, Vector{String}}())
"""
    remove_all_full_stops(x::AbstractString)::String

Return a copy of `x` with every full stop (`.`) removed, e.g. turning a
dotted ICD-9 code such as `"V30.01"` into `"V3001"`.
"""
function remove_all_full_stops(x::AbstractString)::String
    return filter(c -> c != '.', x)
end
# Parse the bundled HCUP "Appendix A, single-level diagnosis CCS" file
# exactly once, populating the three global lookup tables that map between
# ICD-9 diagnosis codes and single-level CCS categories. Subsequent calls
# are no-ops. Returns `nothing`.
function parse_icd_icd9_ccs_appendixasingledx_file!()::Nothing
    global _ICD_ICD9_CCS_APPENDIXASINGLEDX_FILE_HAS_BEEN_PARSED
    global _ICD9_CODE_TO_SINGLE_LEVEL_DX_CCS
    global _SINGLE_LEVEL_DX_CCS_NUMBER_TO_NAME
    global _SINGLE_LEVEL_DX_CCS_TO_LIST_OF_ICD9_CODES
    if _ICD_ICD9_CCS_APPENDIXASINGLEDX_FILE_HAS_BEEN_PARSED[]
        # Already parsed; nothing to do.
    else
        filename = package_directory(
            "assets",
            "icd",
            "icd9",
            "ccs",
            "AppendixASingleDX.txt"
        )
        # The file is blank-line-separated sections; each data section has
        # the CCS number and category name on its first line and the list
        # of member ICD-9 codes on its second line.
        file_sections = strip.(split(strip(read(filename, String)), "\n\n"))
        for section in file_sections
            if isempty(section)
            elseif startswith(section, "Appendix")  # file header; skip
            elseif startswith(section, "Revised")   # revision notice; skip
            else
                # First whitespace-delimited token is the CCS number.
                ccs_number::Int = parse(
                    Int,
                    strip.(
                        split(strip(section))
                    )[1],
                )
                # Remaining words of the first line form the category name.
                ccs_name::String = strip(
                    join(
                        strip.(
                            split(
                                strip(
                                    split(strip(section), "\n")[1]
                                )
                            )[2:end]
                        ), " "
                    )
                )
                _SINGLE_LEVEL_DX_CCS_NUMBER_TO_NAME[][ccs_number] = ccs_name
                # Second line: whitespace-delimited ICD-9 codes, stored
                # with their full stops removed.
                icd9_code_list::Vector{String} = strip.(
                    remove_all_full_stops.(
                        split(
                            strip(
                                split(strip(section), "\n")[2]
                            )
                        )
                    )
                )
                _SINGLE_LEVEL_DX_CCS_TO_LIST_OF_ICD9_CODES[][
                    ccs_number
                ] = icd9_code_list
                for icd9_code in icd9_code_list
                    _ICD9_CODE_TO_SINGLE_LEVEL_DX_CCS[][icd9_code] = ccs_number
                end
            end
        end
        _ICD_ICD9_CCS_APPENDIXASINGLEDX_FILE_HAS_BEEN_PARSED[] = true
    end
    return nothing
end
"""
    single_level_dx_ccs_number_to_name(ccs_number::Int)::String

Return the human-readable name of the given single-level diagnosis CCS
category, parsing the bundled CCS appendix file on first use.
"""
function single_level_dx_ccs_number_to_name(
        ccs_number::Int,
        )::String
    parse_icd_icd9_ccs_appendixasingledx_file!()
    global _SINGLE_LEVEL_DX_CCS_NUMBER_TO_NAME
    return _SINGLE_LEVEL_DX_CCS_NUMBER_TO_NAME[][ccs_number]
end
"""
    single_level_dx_ccs_to_list_of_icd9_codes(ccs_number::Int)::Vector{String}

Return the list of ICD-9 codes (full stops removed) belonging to the given
single-level diagnosis CCS category, parsing the bundled CCS appendix file
on first use.
"""
function single_level_dx_ccs_to_list_of_icd9_codes(
        ccs_number::Int,
        )::Vector{String}
    parse_icd_icd9_ccs_appendixasingledx_file!()
    global _SINGLE_LEVEL_DX_CCS_TO_LIST_OF_ICD9_CODES
    return _SINGLE_LEVEL_DX_CCS_TO_LIST_OF_ICD9_CODES[][ccs_number]
end
"""
    icd9_code_to_single_level_dx_ccs(icd9_code::AbstractString)::Int

Return the single-level diagnosis CCS category number for `icd9_code`.
Full stops in the code are removed before lookup, so both `"V30.01"` and
`"V3001"` resolve to the same category.
"""
function icd9_code_to_single_level_dx_ccs(
        icd9_code::AbstractString,
        )::Int
    parse_icd_icd9_ccs_appendixasingledx_file!()
    global _ICD9_CODE_TO_SINGLE_LEVEL_DX_CCS
    normalized_code = remove_all_full_stops(string(icd9_code))
    return _ICD9_CODE_TO_SINGLE_LEVEL_DX_CCS[][normalized_code]
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 7479 | import MacroTools
"""
    SimplePipeline(objectsvector::AbstractVector{<:AbstractFittable}; name = "")

Construct a linear pipeline that applies each fittable stage in
`objectsvector` in order. `name` is an optional human-readable label.
"""
function SimplePipeline(
    objectsvector::T;
    name::S = "",
    ) where
    S <: AbstractString where
    T <: AbstractVector{F} where
    F <: AbstractFittable
    result = SimplePipeline{S,T}(name,
        objectsvector)
    return result
end
# Wrap a single fittable object in a one-element pipeline; an existing
# pipeline passes through unchanged.
SimplePipeline(x::T) where T = SimplePipeline(T[x])
SimplePipeline(p::SimplePipeline) = p
# Iteration/indexing traits are inherited from the wrapped vector type.
Base.IteratorEltype(itertype::Type{SimplePipeline{S, T}}) where S where T =
    Base.IteratorEltype(T)
Base.IteratorSize(itertype::Type{SimplePipeline{S, T}}) where S where T =
    Base.IteratorSize(T)
Base.IndexStyle(itertype::Type{SimplePipeline{S, T}}) where S where T =
    Base.IndexStyle(T)
# Forward the standard container API to the `objectsvector` field, so a
# pipeline can be indexed/iterated like the vector of stages it wraps.
MacroTools.@forward SimplePipeline.objectsvector Base.axes
MacroTools.@forward SimplePipeline.objectsvector Base.eachindex
MacroTools.@forward SimplePipeline.objectsvector Base.eltype
MacroTools.@forward SimplePipeline.objectsvector Base.empty!
MacroTools.@forward SimplePipeline.objectsvector Base.findall
MacroTools.@forward SimplePipeline.objectsvector Base.findfirst
MacroTools.@forward SimplePipeline.objectsvector Base.findlast
MacroTools.@forward SimplePipeline.objectsvector Base.findnext
MacroTools.@forward SimplePipeline.objectsvector Base.findprev
MacroTools.@forward SimplePipeline.objectsvector Base.firstindex
MacroTools.@forward SimplePipeline.objectsvector Base.getindex
MacroTools.@forward SimplePipeline.objectsvector Base.isassigned
MacroTools.@forward SimplePipeline.objectsvector Base.isempty
MacroTools.@forward SimplePipeline.objectsvector Base.iterate
MacroTools.@forward SimplePipeline.objectsvector Base.lastindex
MacroTools.@forward SimplePipeline.objectsvector Base.length
MacroTools.@forward SimplePipeline.objectsvector Base.ndims
MacroTools.@forward SimplePipeline.objectsvector Base.setindex!
MacroTools.@forward SimplePipeline.objectsvector Base.size
MacroTools.@forward SimplePipeline.objectsvector Base.vec
MacroTools.@forward SimplePipeline.objectsvector Base.view
# `a |> b` on two fittables promotes each side to a pipeline and
# concatenates them.
Base.:|>(x::AbstractFittable, y::AbstractFittable) = SimplePipeline(x) |>
    SimplePipeline(y)
# alternatively, we could replace `Union{F1, F2}` with `typejoin(F1, F2)`
function Base.:|>(p1::SimplePipeline{S1, T1},
    p2::SimplePipeline{S2, T2}) where
    S1 where T1 <:AbstractVector{F1} where F1 where
    S2 where T2 <:AbstractVector{F2} where F2
    # Concatenate the two stage vectors into a vector whose element type is
    # the Union of the two element types, and join the two names.
    length_1 = length(p1)
    length_2 = length(p2)
    new_objectsvector = Vector{Union{F1, F2}}(undef, length_1 + length_2)
    for i = 1:length_1
        new_objectsvector[i] = p1[i]
    end
    for i = 1:length_2
        new_objectsvector[length_1 + i] = p2[i]
    end
    result = SimplePipeline(new_objectsvector;
        name = string(p1.name, p2.name))
    return result
end
"""
    ispipeline(x)::Bool

Return `true` when `x` is an `AbstractPipeline`, `false` otherwise.
"""
function ispipeline end
ispipeline(::Any) = false
ispipeline(::AbstractPipeline) = true
"""
    isflat(x::SimplePipeline)::Bool

Return `true` when no stage of the pipeline is itself a pipeline.
"""
function isflat end
isflat(x::SimplePipeline) = !any(ispipeline.(x.objectsvector))
"""
    flatten(SimplePipeline, p::SimplePipeline)

Return an equivalent pipeline with all nested pipelines expanded in place.
"""
function flatten(::Type{SimplePipeline}, p::SimplePipeline)
    result = _flatten(SimplePipeline, p)
    return result
end
flatten(p::SimplePipeline) = flatten(SimplePipeline, p::SimplePipeline)
# alternatively, we could replace `Union{typeof.(temp_objects)...}` with
# `typejoin(typeof.(temp_objects)...)`
# Recursively flatten nested pipelines into a single flat pipeline; stage
# names are concatenated in traversal order.
function _flatten(::Type{SimplePipeline}, p::SimplePipeline)
    temp_names = Vector{Any}(undef, 0)
    temp_objects = Vector{Any}(undef, 0)
    for i = 1:length(p)
        object = p[i]
        push!(temp_names, _flatten_name(SimplePipeline, object))
        append!(temp_objects, _flatten_objects(SimplePipeline, object))
    end
    # Narrow the scratch Vector{Any} to a vector of the Union of the
    # concrete stage types actually collected.
    new_F = Union{typeof.(temp_objects)...}
    new_objects::Vector{new_F} = Vector{new_F}(undef,length(temp_objects))
    for j = 1:length(temp_objects)
        new_objects[j] = temp_objects[j]
    end
    new_name::String = string(p.name, join(temp_names, ""))
    new_pipeline = SimplePipeline(new_objects; name = new_name)
    return new_pipeline
end
# Name contribution of a stage when flattening: only nested pipelines
# contribute their (flattened) name; plain stages contribute nothing.
_flatten_name(::Type{SimplePipeline}, x) = ""
_flatten_name(::Type{SimplePipeline}, p::SimplePipeline) =
    _flatten(SimplePipeline, p).name
# Object contribution of a stage: a plain stage yields itself; a nested
# pipeline yields its flattened stage vector.
_flatten_objects(::Type{SimplePipeline}, x) = [x]
_flatten_objects(::Type{SimplePipeline}, p::SimplePipeline) =
    _flatten(SimplePipeline, p).objectsvector
"""
    set_max_epochs!(p::SimplePipeline, new_max_epochs::Integer)

Propagate `set_max_epochs!` to every stage of the pipeline. Returns `nothing`.
"""
function set_max_epochs!(
        p::SimplePipeline,
        new_max_epochs::Integer,
        )
    for stage in p
        set_max_epochs!(stage, new_max_epochs)
    end
    return nothing
end
"""
    set_feature_contrasts!(p::SimplePipeline, feature_contrasts::AbstractFeatureContrasts)

Propagate the feature contrasts to every stage of the pipeline. Returns
`nothing`.
"""
function set_feature_contrasts!(
        p::SimplePipeline,
        feature_contrasts::AbstractFeatureContrasts,
        )
    for stage in p
        set_feature_contrasts!(stage, feature_contrasts)
    end
    return nothing
end
"""
    get_underlying(p::SimplePipeline; saving = false, loading = false)

Collect `get_underlying` from every stage. Unless `saving` or `loading` is
set, `nothing` entries are removed; an empty result collapses to `nothing`
and a one-element result collapses to that single object.
"""
function get_underlying(
        p::SimplePipeline;
        saving::Bool = false,
        loading::Bool = false,
        )
    collected = [
        get_underlying(stage; saving = saving, loading = loading)
        for stage in p.objectsvector
    ]
    if !(saving || loading)
        delete_nothings!(collected)
        if isempty(collected)
            return nothing
        elseif length(collected) == 1
            return collected[1]
        end
    end
    return collected
end
"""
    get_history(p::SimplePipeline; saving = false, loading = false)

Collect `get_history` from every stage. Unless `saving` or `loading` is
set, `nothing` entries are removed; an empty result collapses to `nothing`
and a one-element result collapses to that single history.
"""
function get_history(
        p::SimplePipeline;
        saving::Bool = false,
        loading::Bool = false,
        )
    collected = [
        get_history(stage; saving = saving, loading = loading)
        for stage in p.objectsvector
    ]
    if !(saving || loading)
        delete_nothings!(collected)
        if isempty(collected)
            return nothing
        elseif length(collected) == 1
            return collected[1]
        end
    end
    return collected
end
"""
    parse_functions!(simplelinearpipeline::SimplePipeline)

Invoke `parse_functions!` on every stage of the pipeline. Returns `nothing`.
"""
function parse_functions!(simplelinearpipeline::SimplePipeline)
    foreach(parse_functions!, simplelinearpipeline.objectsvector)
    return nothing
end
"""
    fit!(simplelinearpipeline::SimplePipeline, varargs...; kwargs...)

Fit the first stage on `varargs`, then feed each stage's (tuplified) output
into the next stage, forwarding `kwargs` to every stage. Returns the output
of the final stage.
"""
function fit!(
        simplelinearpipeline::SimplePipeline,
        varargs...;
        kwargs...
        )
    current_output = fit!(simplelinearpipeline[1], varargs...; kwargs...)
    for stage_index = 2:length(simplelinearpipeline)
        stage_input = tuplify(current_output)
        current_output = fit!(
            simplelinearpipeline[stage_index],
            stage_input...;
            kwargs...
        )
    end
    return current_output
end
"""
    predict(simplelinearpipeline::SimplePipeline, varargs...; kwargs...)

Run `predict` through the pipeline: the first stage receives `varargs`, and
each subsequent stage receives the (tuplified) output of the previous one.
`kwargs` are forwarded to every stage. Returns the output of the final
stage.
"""
function predict(
        simplelinearpipeline::SimplePipeline,
        varargs...;
        kwargs...
        )
    current_output = predict(simplelinearpipeline[1], varargs...; kwargs...)
    for stage_index = 2:length(simplelinearpipeline)
        stage_input = tuplify(current_output)
        current_output = predict(
            simplelinearpipeline[stage_index],
            stage_input...;
            kwargs...
        )
    end
    return current_output
end
"""
    predict_proba(simplelinearpipeline::SimplePipeline, varargs...; kwargs...)

Run `predict_proba` through the pipeline: the first stage receives
`varargs`, and each subsequent stage receives the (tuplified) output of the
previous one. `kwargs` are forwarded to every stage — including the first,
matching the behavior of `fit!` and `predict` (previously the first stage
silently dropped `kwargs`). Returns the output of the final stage.
"""
function predict_proba(
    simplelinearpipeline::SimplePipeline,
    varargs...;
    kwargs...
    )
    output = predict_proba(
        simplelinearpipeline[1],
        varargs...;
        kwargs...
    )
    for i = 2:length(simplelinearpipeline)
        input = tuplify(output)
        output = predict_proba(
            simplelinearpipeline[i],
            input...;
            kwargs...
        )
    end
    return output
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
"""
    is_force_test_plots(a::AbstractDict = ENV)::Bool

Return `true` when `a` (by default the process environment) sets
`PREDICTMD_TEST_PLOTS` to `"true"`, compared case-insensitively and with
surrounding whitespace stripped.
"""
# (The signature line was previously corrupted by concatenated metadata
# residue; the definition is reconstructed here.)
function is_force_test_plots(a::AbstractDict = ENV)
    result = lowercase(strip(get(a, "PREDICTMD_TEST_PLOTS", ""))) == "true"
    return result
end
# Central error handler for plotting: when PREDICTMD_TEST_PLOTS forces plot
# testing, rethrow the error; otherwise log it and continue. Returns
# `nothing` (unless rethrowing).
function handle_plotting_error(e::Union{Exception, Any}, a::AbstractDict = ENV)::Nothing
    if !is_force_test_plots(a)
        @warn(string("ignoring error:", e))
        return nothing
    end
    @warn(
        string(
            "PREDICTMD_TEST_PLOTS is true ,",
            "so rethrowing the error.",
        )
    )
    rethrow(e)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 27 | import DefaultApplication
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1391 | import PGFPlotsX
# Accessor for the raw PGFPlotsX object wrapped by a `PGFPlotsXPlot`.
function get_underlying(p::PGFPlotsXPlot)
    return p.underlying_object
end
# Display the wrapped PGFPlotsX figure, unless the test suite is running
# with plot-opening disabled. Any display failure is routed through
# `handle_plotting_error`. Returns `nothing`.
function Base.display(p::PGFPlotsXPlot)::Nothing
    if is_runtests() && !open_plots_during_tests()
        @debug(
            string(
                "PREDICTMD_OPEN_PLOTS_DURING_TESTS is false, therefore ",
                "the plot will not be opened.",
            )
        )
        return nothing
    end
    @debug(string("Attempting to display plot..."))
    try
        Base.display(get_underlying(p))
        @debug(string("Displayed plot."))
    catch e
        handle_plotting_error(e)
    end
    return nothing
end
# Save the wrapped figure to `filename`, creating the destination directory
# first; any failure is routed through `handle_plotting_error`. Returns
# `nothing`.
function PGFPlotsX.save(
        filename::String,
        p::PGFPlotsXPlot;
        kwargs...,
        )::Nothing
    figure = get_underlying(p)
    try
        @debug(string("Attempting to save plot..."))
        mkpath(dirname(filename))
        PGFPlotsX.save(filename, figure; kwargs...)
        @debug(string("Saved plot to file: \"", filename, "\"",))
    catch e
        handle_plotting_error(e)
    end
    return nothing
end
"""
    save_plot(filename::String, p::PGFPlotsXPlot; kwargs...)

Write the plot `p` to `filename` by delegating to `PGFPlotsX.save`.
Returns `nothing`.
"""
function save_plot(
        filename::String,
        p::PGFPlotsXPlot;
        kwargs...,
        )::Nothing
    PGFPlotsX.save(filename, p; kwargs...)
    return nothing
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 9736 | import LaTeXStrings
import PGFPlotsX
import StatsBase
import ValueHistories
"""
    plotlearningcurve(inputobject::AbstractFittable, curvetype = :loss_vs_iteration; kwargs...)

Extract the training history from `inputobject` and plot the requested
learning curve; all keyword arguments are forwarded to the history-based
method.
"""
function plotlearningcurve(
        inputobject::AbstractFittable,
        curvetype::Symbol = :loss_vs_iteration;
        kwargs...,
        )
    history::ValueHistories.MultivalueHistory = get_history(inputobject)
    return plotlearningcurve(history, curvetype; kwargs...)
end
"""
    plotlearningcurve(history::ValueHistories.MultivalueHistory, curvetype = :loss_vs_iteration; kwargs...)

Plot the training (and, when recorded, tuning) loss series stored in
`history`.

`curvetype` selects the x-axis and the history keys used:
`:loss_vs_iteration` or `:loss_vs_epoch`. `startat`/`endat` (indices, or
the symbols `:start`/`:end`) trim the series; `include_tuning = false`
suppresses the tuning curve even when it exists. The remaining keyword
arguments are forwarded to the vector-based plotting method.
"""
function plotlearningcurve(
    history::ValueHistories.MultivalueHistory,
    curvetype::Symbol = :loss_vs_iteration;
    window::Integer = 0,
    legend_pos::AbstractString = "outer north east",
    sampleevery::Integer = 1,
    startat::Union{Integer, Symbol} = :start,
    endat::Union{Integer, Symbol} = :end,
    include_tuning::Bool = true,
    show_raw::Bool = true,
    show_smoothed::Bool = true,
    )
    legend_pos::String = convert(String, legend_pos)
    # Select history keys, axis labels, and legend text for the requested
    # curve type; tuning data is only used when it was actually recorded.
    if curvetype==:loss_vs_iteration
        has_tuning = include_tuning &&
            haskey(history, :tuning_loss_at_iteration)
        xlabel = "Iteration"
        ylabel = "Loss"
        legendentry = "Loss function"
        training_xvalues, training_yvalues = ValueHistories.get(
            history,
            :training_loss_at_iteration,
        )
        if has_tuning
            tuning_xvalues, tuning_yvalues = ValueHistories.get(
                history,
                :tuning_loss_at_iteration,
            )
        end
    elseif curvetype==:loss_vs_epoch
        has_tuning = include_tuning &&
            haskey(history, :tuning_loss_at_epoch)
        xlabel = "Epoch"
        ylabel = "Loss"
        legendentry = "Loss function"
        training_xvalues, training_yvalues = ValueHistories.get(
            history,
            :training_loss_at_epoch,
        )
        if has_tuning
            tuning_xvalues, tuning_yvalues = ValueHistories.get(
                history,
                :tuning_loss_at_epoch,
            )
        end
    else
        error("\"curvetype\" must be one of: :loss_vs_iteration, :loss_vs_epoch")
    end
    # Sanity checks: training and tuning series must be aligned.
    if length(training_xvalues) != length(training_yvalues)
        error("length(training_xvalues) != length(training_yvalues)")
    end
    if has_tuning
        if length(training_xvalues) != length(tuning_yvalues)
            error("length(training_xvalues) != length(tuning_yvalues)")
        end
        if length(training_xvalues) != length(tuning_xvalues)
            error("length(training_xvalues) != length(tuning_xvalues)")
        end
        if !all(training_xvalues .== tuning_xvalues)
            error("!all(training_xvalues .== tuning_xvalues)")
        end
    end
    # Resolve the symbolic :start/:end bounds into concrete indices.
    if startat == :start
        startat = 1
    elseif typeof(startat) <: Symbol
        error("$(startat) is not a valid value for startat")
    end
    if endat == :end
        endat = length(training_xvalues)
    elseif typeof(endat) <: Symbol
        error("$(endat) is not a valid value for endat")
    end
    if startat > endat
        error("startat > endat")
    end
    # Trim the series to the requested index range.
    training_xvalues = training_xvalues[startat:endat]
    training_yvalues = training_yvalues[startat:endat]
    if has_tuning
        tuning_xvalues = tuning_xvalues[startat:endat]
        tuning_yvalues = tuning_yvalues[startat:endat]
    end
    # Re-validate alignment after trimming.
    if length(training_xvalues) != length(training_yvalues)
        error("length(training_xvalues) != length(training_yvalues)")
    end
    if has_tuning
        if length(tuning_xvalues) != length(tuning_yvalues)
            error("length(tuning_xvalues) != length(tuning_yvalues)")
        end
        if length(training_xvalues) != length(tuning_xvalues)
            error("length(training_xvalues) != length(tuning_xvalues)")
        end
        if !all(training_xvalues .== tuning_xvalues)
            error("!all(training_xvalues .== tuning_xvalues)")
        end
        result = plotlearningcurve(
            training_xvalues,
            training_yvalues,
            xlabel,
            ylabel,
            legendentry;
            window = window,
            legend_pos = legend_pos,
            sampleevery = sampleevery,
            tuning_yvalues = tuning_yvalues,
            show_raw = show_raw,
            show_smoothed = show_smoothed,
            )
    else
        result = plotlearningcurve(
            training_xvalues,
            training_yvalues,
            xlabel,
            ylabel,
            legendentry;
            window = window,
            legend_pos = legend_pos,
            sampleevery = sampleevery,
            show_raw = show_raw,
            show_smoothed = show_smoothed,
            )
    end
    return result
end
"""
    plotlearningcurve(xvalues, training_yvalues, xlabel, ylabel, legendentry; kwargs...)

Plot a learning curve from raw vectors. The training series is always
plotted; a tuning series is added when `tuning_yvalues` is given.
`sampleevery` thins the series; `show_raw` draws the raw values and
`show_smoothed` (together with a positive `window`) adds a simple
moving-average overlay. Returns a `PGFPlotsXPlot`.
"""
function plotlearningcurve(
    xvalues::AbstractVector{<:Real},
    training_yvalues::AbstractVector{<:Real},
    xlabel::AbstractString,
    ylabel::AbstractString,
    legendentry::AbstractString;
    window::Integer = 0,
    legend_pos::AbstractString = "outer north east",
    sampleevery::Integer = 1,
    tuning_yvalues::Union{Nothing, AbstractVector{<:Real}} = nothing,
    show_raw::Bool = true,
    show_smoothed::Bool = true,
    )
    legend_pos::String = convert(String, legend_pos)
    if !show_raw && !show_smoothed
        error("At least one of show_raw, show_smoothed must be true.")
    end
    # A tuning curve is drawn only when tuning values were supplied.
    if is_nothing(tuning_yvalues)
        has_tuning = false
    else
        has_tuning = true
    end
    if has_tuning
        training_legendentry = string(strip(legendentry),
            ", training set")
        tuning_legendentry = string(strip(legendentry),
            ", tuning set")
    else
        training_legendentry = string(strip(legendentry),
            ", training set")
    end
    if sampleevery < 1
        error("sampleevery must be >=1")
    end
    if length(xvalues) != length(training_yvalues)
        error("length(xvalues) != length(training_yvalues)")
    end
    if length(xvalues) == 0
        error("length(xvalues) == 0")
    end
    # Thin the series to every `sampleevery`-th point.
    xvalues = xvalues[1:sampleevery:end]
    training_yvalues = training_yvalues[1:sampleevery:end]
    if has_tuning
        tuning_yvalues = tuning_yvalues[1:sampleevery:end]
    end
    # Accumulate alternating Plot/LegendEntry objects for the final Axis.
    all_plots_and_legends = []
    if show_raw
        training_linearplotobject_yraw = PGFPlotsX.@pgf(
            PGFPlotsX.Plot(
                PGFPlotsX.Coordinates(
                    xvalues,
                    training_yvalues,
                )
            )
        )
        training_legendentry_yraw = PGFPlotsX.@pgf(
            PGFPlotsX.LegendEntry(
                LaTeXStrings.LaTeXString(training_legendentry)
            )
        )
        push!(all_plots_and_legends, training_linearplotobject_yraw)
        push!(all_plots_and_legends, training_legendentry_yraw)
        if has_tuning
            tuning_linearplotobject_yraw = PGFPlotsX.@pgf(
                PGFPlotsX.Plot(
                    PGFPlotsX.Coordinates(
                        xvalues,
                        tuning_yvalues,
                    )
                )
            )
            tuning_legendentry_yraw = PGFPlotsX.@pgf(
                PGFPlotsX.LegendEntry(
                    LaTeXStrings.LaTeXString(tuning_legendentry)
                )
            )
            push!(all_plots_and_legends, tuning_linearplotobject_yraw)
            push!(all_plots_and_legends, tuning_legendentry_yraw)
        end
    end
    # The smoothed overlay is only meaningful for a positive window size.
    if show_smoothed && window > 0
        training_yvaluessmoothed = simple_moving_average(
            training_yvalues,
            window,
        )
        training_legendentry_smoothed = string(
            strip(training_legendentry),
            " (smoothed)",
        )
        training_linearplotobject_ysmoothed = PGFPlotsX.@pgf(
            PGFPlotsX.Plot(
                PGFPlotsX.Coordinates(
                    xvalues,
                    training_yvaluessmoothed,
                )
            )
        )
        training_legendentry_ysmoothed = PGFPlotsX.@pgf(
            PGFPlotsX.LegendEntry(
                LaTeXStrings.LaTeXString(training_legendentry_smoothed)
            )
        )
        push!(all_plots_and_legends, training_linearplotobject_ysmoothed)
        push!(all_plots_and_legends, training_legendentry_ysmoothed)
        if has_tuning
            tuning_yvaluessmoothed = simple_moving_average(
                tuning_yvalues,
                window,
            )
            tuning_legendentry_smoothed = string(
                strip(tuning_legendentry),
                " (smoothed)",
            )
            tuning_linearplotobject_ysmoothed = PGFPlotsX.@pgf(
                PGFPlotsX.Plot(
                    PGFPlotsX.Coordinates(
                        xvalues,
                        tuning_yvaluessmoothed,
                    )
                )
            )
            tuning_legendentry_ysmoothed = PGFPlotsX.@pgf(
                PGFPlotsX.LegendEntry(
                    LaTeXStrings.LaTeXString(tuning_legendentry_smoothed)
                )
            )
            push!(all_plots_and_legends, tuning_linearplotobject_ysmoothed)
            push!(all_plots_and_legends, tuning_legendentry_ysmoothed)
        end
    end
    p = PGFPlotsX.@pgf(
        PGFPlotsX.Axis(
            {
                xlabel = LaTeXStrings.LaTeXString(xlabel),
                ylabel = LaTeXStrings.LaTeXString(ylabel),
                no_markers,
                legend_pos = legend_pos,
            },
            all_plots_and_legends...,
        ),
    )
    wrapper = PGFPlotsXPlot(p)
    return wrapper
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2475 | import LaTeXStrings
import PGFPlotsX
"""
"""
    plotprcurve(estimator, features_df, labels_df, single_label_name, positive_class; kwargs...)

Plot the precision-recall curve for a single estimator by delegating to the
vector method.
"""
function plotprcurve(
    estimator::AbstractFittable,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol,
    positive_class::AbstractString;
    kwargs...,
    )
    # Use an explicit `AbstractFittable` element type: a bare `[estimator]`
    # would produce a `Vector{typeof(estimator)}`, which does not match the
    # `AbstractVector{AbstractFittable}` signature of the vector method.
    vectorofestimators = AbstractFittable[estimator]
    result = plotprcurve(
        vectorofestimators,
        features_df,
        labels_df,
        single_label_name,
        positive_class;
        kwargs...,
    )
    return result
end
"""
    plotprcurve(vectorofestimators, features_df, labels_df, single_label_name, positive_class; legend_pos)

Plot precision-recall curves — one per estimator — on a single set of axes.
Each estimator is scored on `features_df` against `labels_df`, with
`positive_class` as the positive label. Throws an error when
`vectorofestimators` is empty. Returns a `PGFPlotsXPlot`.
"""
function plotprcurve(
    # `<:AbstractFittable` (instead of the invariant
    # `AbstractVector{AbstractFittable}`) also accepts vectors of a concrete
    # estimator type; vectors typed `AbstractFittable[]` still dispatch here.
    vectorofestimators::AbstractVector{<:AbstractFittable},
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol,
    positive_class::AbstractString;
    legend_pos::AbstractString = "outer north east",
    )::PGFPlotsXPlot
    legend_pos::String = convert(String, legend_pos)
    if length(vectorofestimators) == 0
        error("length(vectorofestimators) == 0")
    end
    all_plots_and_legends = []
    for estimator_i in vectorofestimators
        # `threshold` only affects point metrics; the full curve is built
        # from the raw scores below.
        metrics_i = singlelabelbinaryclassificationmetrics_resultdict(
            estimator_i,
            features_df,
            labels_df,
            single_label_name,
            positive_class;
            threshold = 0.5,
        )
        allprecisions, allrecalls, allthresholds = prcurve(
            metrics_i[:ytrue],
            metrics_i[:yscore],
        )
        plot_i = PGFPlotsX.@pgf(
            PGFPlotsX.Plot(
                PGFPlotsX.Coordinates(
                    allrecalls,
                    allprecisions,
                ),
            ),
        )
        legend_i = PGFPlotsX.@pgf(
            PGFPlotsX.LegendEntry(
                LaTeXStrings.LaTeXString(estimator_i.name)
            ),
        )
        push!(all_plots_and_legends, plot_i)
        push!(all_plots_and_legends, legend_i)
    end
    p = PGFPlotsX.@pgf(
        PGFPlotsX.Axis(
            {
                xlabel = "Recall",
                ylabel = "Precision",
                no_markers,
                legend_pos = legend_pos,
            },
            all_plots_and_legends...,
        ),
    )
    wrapper = PGFPlotsXPlot(p)
    return wrapper
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2474 | import LaTeXStrings
import PGFPlotsX
"""
    plotroccurve(estimator, features_df, labels_df, single_label_name, positive_class; kwargs...)

Plot the ROC curve for a single estimator by delegating to the vector
method.
"""
function plotroccurve(
    estimator::AbstractFittable,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol,
    positive_class::AbstractString;
    kwargs...,
    )
    # Use an explicit `AbstractFittable` element type: a bare `[estimator]`
    # would produce a `Vector{typeof(estimator)}`, which does not match the
    # `AbstractVector{AbstractFittable}` signature of the vector method.
    vectorofestimators = AbstractFittable[estimator]
    result = plotroccurve(
        vectorofestimators,
        features_df,
        labels_df,
        single_label_name,
        positive_class;
        kwargs...,
    )
    return result
end
"""
    plotroccurve(vectorofestimators, features_df, labels_df, single_label_name, positive_class; legend_pos)

Plot ROC curves — one per estimator — on a single set of axes. Each
estimator is scored on `features_df` against `labels_df`, with
`positive_class` as the positive label. Throws an error when
`vectorofestimators` is empty.
"""
function plotroccurve(
    # `<:AbstractFittable` (instead of the invariant
    # `AbstractVector{AbstractFittable}`) also accepts vectors of a concrete
    # estimator type; vectors typed `AbstractFittable[]` still dispatch here.
    vectorofestimators::AbstractVector{<:AbstractFittable},
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol,
    positive_class::AbstractString;
    legend_pos::AbstractString = "outer north east",
    )
    legend_pos::String = convert(String, legend_pos)
    if length(vectorofestimators) == 0
        error("length(vectorofestimators) == 0")
    end
    all_plots_and_legends = []
    for estimator_i in vectorofestimators
        # `threshold` only affects point metrics; the full curve is built
        # from the raw scores below.
        metrics_i = singlelabelbinaryclassificationmetrics_resultdict(
            estimator_i,
            features_df,
            labels_df,
            single_label_name,
            positive_class;
            threshold = 0.5,
        )
        allfpr_i, alltpr_i, allthresholds_i = roccurve(
            metrics_i[:ytrue],
            metrics_i[:yscore],
        )
        plot_i = PGFPlotsX.@pgf(
            PGFPlotsX.Plot(
                PGFPlotsX.Coordinates(
                    allfpr_i,
                    alltpr_i,
                ),
            ),
        )
        legend_i = PGFPlotsX.@pgf(
            PGFPlotsX.LegendEntry(
                LaTeXStrings.LaTeXString(estimator_i.name)
            ),
        )
        push!(all_plots_and_legends, plot_i)
        push!(all_plots_and_legends, legend_i)
    end
    p = PGFPlotsX.@pgf(
        PGFPlotsX.Axis(
            {
                xlabel = "False positive rate",
                ylabel = "True positive rate",
                no_markers,
                legend_pos = legend_pos,
            },
            all_plots_and_legends...,
        ),
    )
    wrapper = PGFPlotsXPlot(p)
    return wrapper
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 6269 | import LaTeXStrings
import PGFPlotsX
import StatsBase
# Number of bins chosen by StatsBase's automatic histogram binning for `v`.
function num_histogram_bins(
        v::AbstractVector;
        closed::Symbol = :left,
        )::Integer
    fitted_histogram = StatsBase.fit(
        StatsBase.Histogram,
        v;
        closed = closed,
    )
    return length(fitted_histogram.weights)
end
# Histogram weights of `v` over the bin `edges`, normalized to sum to one.
function normalized_histogram_weights(
        v::AbstractVector,
        edges::AbstractRange;
        closed::Symbol = :left,
        )
    fitted_histogram = StatsBase.fit(
        StatsBase.Histogram,
        v,
        edges;
        closed = closed,
    )
    raw_weights = fitted_histogram.weights
    return raw_weights / sum(raw_weights)
end
"""
    plotsinglelabelbinaryclassifierhistogram(estimator, features_df, labels_df, single_label_name, single_label_levels; kwargs...)

Plot overlaid, normalized histograms of the classifier score for the two
classes: `single_label_levels[1]` is the negative class (blue by default)
and `single_label_levels[2]` the positive class (red by default). When
`num_bins < 1` the bin count is chosen automatically as the larger of the
two per-class automatic bin counts. Returns a `PGFPlotsXPlot`.
"""
function plotsinglelabelbinaryclassifierhistogram(
    estimator::AbstractFittable,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol,
    single_label_levels::AbstractVector{<:AbstractString};
    num_bins::Integer = 0,
    closed::Symbol = :left,
    legend_pos::AbstractString = "outer north east",
    negative_style::AbstractString =
        "blue, opacity=1.0, fill=blue, fill opacity = 0.5",
    positive_style::AbstractString =
        "red, opacity=1.0, fill=red, fill opacity = 0.5",
    )::PGFPlotsXPlot
    # Exactly two distinct class levels are required.
    if length(single_label_levels) != length(unique(single_label_levels))
        error("there are duplicate values in single_label_levels")
    end
    if length(single_label_levels) != 2
        error("length(single_label_levels) != 2")
    end
    negative_class = single_label_levels[1]
    positive_class = single_label_levels[2]
    predictedprobabilitiesalllabels = predict_proba(estimator, features_df)
    # Score of the positive class for every row.
    yscore = Float64.(
        singlelabelbinaryyscore(
            predictedprobabilitiesalllabels[single_label_name],
            positive_class,
        )
    )
    # 0/1 encoding of the true label (1 = positive class).
    ytrue = Int.(
        singlelabelbinaryytrue(
            labels_df[single_label_name],
            positive_class,
        )
    )
    if num_bins < 1
        # Automatic bin count: whichever class needs more bins wins.
        num_bins_negative_class = num_histogram_bins(yscore[ytrue .== 0])
        num_bins_positive_class = num_histogram_bins(yscore[ytrue .== 1])
        num_bins = max(num_bins_negative_class, num_bins_positive_class)
    end
    bin_width = 1/num_bins
    edges_range = 0:bin_width:1
    negative_class_histogram = normalized_histogram_weights(
        yscore[ytrue .== 0],
        edges_range;
        closed = closed,
    )
    positive_class_histogram = normalized_histogram_weights(
        yscore[ytrue .== 1],
        edges_range;
        closed = closed,
    )
    x_values = collect(edges_range)
    # "ybar interval" consumes one more x than y; pad with a trailing zero.
    negative_class_y_values = vcat(negative_class_histogram..., 0)
    positive_class_y_values = vcat(positive_class_histogram..., 0)
    p = PGFPlotsX.@pgf(
        PGFPlotsX.Axis(
            {
                ymin = 0.0,
                ymax = 1.0,
                no_markers,
                legend_pos = legend_pos,
                xlabel = LaTeXStrings.LaTeXString(
                    "Classifier score"
                ),
                ylabel = LaTeXStrings.LaTeXString(
                    "Frequency"
                ),
            },
            PGFPlotsX.PlotInc(
                {
                    "ybar interval",
                    style = negative_style,
                },
                PGFPlotsX.Coordinates(
                    x_values,
                    negative_class_y_values,
                ),
            ),
            PGFPlotsX.PlotInc(
                {
                    "ybar interval",
                    style = positive_style,
                },
                PGFPlotsX.Coordinates(
                    x_values,
                    positive_class_y_values,
                ),
            ),
        ),
    )
    wrapper = PGFPlotsXPlot(p)
    return wrapper
end
# Examples from https://kristofferc.github.io/PGFPlotsX.jl/latest/examples/juliatypes.html
# one_dimensional_example = PGFPlotsX.@pgf(
# PGFPlotsX.Axis(
# {
# "ybar interval",
# "xticklabel interval boundaries",
# xmajorgrids = false,
# xticklabel = raw"$[\pgfmathprintnumber\tick,\pgfmathprintnumber\nexttick)$",
# "xticklabel style" = {font = raw"\tiny", },
# },
# PGFPlotsX.Plot(
# PGFPlotsX.Table(
# StatsBase.fit(
# StatsBase.Histogram,
# range(0; stop = 1, length = 100).^3;
# closed = :left,
# ),
# ),
# ),
# ),
# )
# w = range(-1; stop = 1, length = 100) .^ 3
# xy = vec(tuple.(w, w'))
# h = StatsBase.fit(
# StatsBase.Histogram,
# (first.(xy), last.(xy));
# closed = :left,
# )
# two_dimensional_example = PGFPlotsX.@pgf(
# PGFPlotsX.Axis(
# {
# view = (0, 90),
# colorbar,
# "colormap/jet",
# },
# PGFPlotsX.Plot3(
# {
# surf,
# shader = "flat",
# },
# PGFPlotsX.Table(h)
# ),
# ),
# )
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2805 | import LaTeXStrings
import PGFPlotsX
"""
    plotsinglelabelregressiontrueversuspredicted(estimator, features_df, labels_df, single_label_name; includeorigin = false, legend_pos = "outer north east")

Scatter-plot true versus predicted values for a single-label regression
estimator, together with the identity line (dotted red; perfect
predictions lie on it) and an ordinary least-squares best-fit line (dashed
blue). When `includeorigin` is true, the reference lines extend through
the origin. Returns a `PGFPlotsXPlot`.
"""
function plotsinglelabelregressiontrueversuspredicted(
    estimator::AbstractFittable,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol;
    includeorigin::Bool = false,
    legend_pos::AbstractString = "outer north east",
    )::PGFPlotsXPlot
    ytrue = singlelabelregressionytrue(
        labels_df[single_label_name],
    )
    predictionsalllabels = predict(estimator, features_df)
    ypred = singlelabelregressionypred(
        predictionsalllabels[single_label_name],
    )
    # Scatter of (predicted, true) pairs.
    truevalueversuspredictedvalue_linearplotobject = PGFPlotsX.@pgf(
        PGFPlotsX.Plot(
            {
                only_marks,
                style = "black, fill = black",
            },
            PGFPlotsX.Coordinates(
                ypred,
                ytrue,
            ),
        )
    )
    # X coordinates for the reference lines, optionally anchored at 0.
    if includeorigin
        perfectlinevalues = sort(
            unique(
                vcat(
                    0,
                    ytrue,
                ),
            );
            rev = false,
        )
    else
        perfectlinevalues = sort(
            unique(
                ytrue,
            );
            rev = false,
        )
    end
    # Identity line: a perfect predictor lies exactly on it.
    perfectline_linearplotobject = PGFPlotsX.@pgf(
        PGFPlotsX.Plot(
            {
                no_marks,
                style = "dotted, red, color=red, fill=red",
            },
            PGFPlotsX.Coordinates(
                perfectlinevalues,
                perfectlinevalues,
            ),
        ),
    )
    # Ordinary least squares fit of true value on predicted value.
    estimated_intercept,
        estimated_x_coefficient = simple_linear_regression(
            Float64.(ypred), # X
            Float64.(ytrue), # Y
        )
    bestfitline_linearplotobject = PGFPlotsX.@pgf(
        PGFPlotsX.Plot(
            {
                no_marks,
                style = "dashed, blue, color=blue, fill=blue",
            },
            PGFPlotsX.Coordinates(
                perfectlinevalues,
                estimated_intercept .+ estimated_x_coefficient*perfectlinevalues,
            ),
        ),
    )
    p = PGFPlotsX.@pgf(
        PGFPlotsX.Axis(
            {
                xlabel = LaTeXStrings.LaTeXString(
                    "Predicted value"
                ),
                ylabel = LaTeXStrings.LaTeXString(
                    "True value"
                ),
                legend_pos = legend_pos,
            },
            truevalueversuspredictedvalue_linearplotobject,
            perfectline_linearplotobject,
            bestfitline_linearplotobject,
        ),
    )
    wrapper = PGFPlotsXPlot(p)
    return wrapper
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 8023 | import LaTeXStrings
import PGFPlotsX
import Statistics
"""
    probability_calibration_scores_and_fractions(estimator, features_df, labels_df, single_label_name, positive_class; window = 0.1)

Predict positive-class probabilities with `estimator` on `features_df`,
encode the true labels as 0/1 against `positive_class`, and return the
calibration `(scores, fractions)` pair computed by the numeric method.
"""
function probability_calibration_scores_and_fractions(
        estimator::AbstractFittable,
        features_df::DataFrames.AbstractDataFrame,
        labels_df::DataFrames.AbstractDataFrame,
        single_label_name::Symbol,
        positive_class::AbstractString;
        window::Real = 0.1,
        )
    ytrue = Int.(
        singlelabelbinaryytrue(
            labels_df[single_label_name],
            positive_class,
        )
    )
    predicted_probabilities = predict_proba(estimator, features_df)
    yscore = Float64.(
        singlelabelbinaryyscore(
            predicted_probabilities[single_label_name],
            positive_class,
        )
    )
    return probability_calibration_scores_and_fractions(
        ytrue,
        yscore;
        window = window,
    )
end
"""
    probability_calibration_scores_and_fractions(ytrue, yscore; window = 0.1)

For every distinct score in `yscore` (plus the endpoints 0 and 1), compute
the observed fraction of positives (`ytrue` entries) among the rows whose
score lies within `± window` of that score. Scores whose window contains
no rows are dropped. Returns the tuple `(scores, fractions)`.
"""
function probability_calibration_scores_and_fractions(
        ytrue::AbstractVector{<: Integer},
        yscore::AbstractVector{F};
        window::Real = 0.1,
        )::Tuple{Vector{F}, Vector{F}} where
        F <: AbstractFloat
    # Candidate scores: every observed score plus the two endpoints.
    candidate_scores = sort(
        unique(
            vcat(
                yscore,
                zero(eltype(yscore)),
                one(eltype(yscore)),
            )
        )
    )
    observed_fractions = fill(F(0), size(candidate_scores))
    rows_per_score = fill(Int(0), length(candidate_scores))
    for idx in eachindex(candidate_scores)
        nearby_rows = findall(
            ( candidate_scores[idx] - window ) .<= ( yscore ) .<= ( candidate_scores[idx] + window )
        )
        rows_per_score[idx] = length(nearby_rows)
        if isempty(nearby_rows)
            # Marked NaN, but filtered out below anyway.
            observed_fractions[idx] = NaN
        else
            observed_fractions[idx] = Statistics.mean(ytrue[nearby_rows])
        end
    end
    keep = findall(rows_per_score .!= 0)
    kept_scores::Vector{F} = candidate_scores[keep]
    kept_fractions::Vector{F} = observed_fractions[keep]
    return kept_scores, kept_fractions
end
"""
    plot_probability_calibration_curve(estimator, features_df, labels_df, single_label_name, positive_class; window = 0.1, multiply_by = 1.0, legend_pos = "outer north east")

Compute calibration scores and observed positive fractions for `estimator`
and plot the calibration curve.
"""
function plot_probability_calibration_curve(
        estimator::AbstractFittable,
        features_df::DataFrames.AbstractDataFrame,
        labels_df::DataFrames.AbstractDataFrame,
        single_label_name::Symbol,
        positive_class::AbstractString;
        window::Real = 0.1,
        multiply_by::Real = 1.0,
        legend_pos::AbstractString = "outer north east",
        )
    calibration_scores, calibration_fractions =
        probability_calibration_scores_and_fractions(
            estimator,
            features_df,
            labels_df,
            single_label_name,
            positive_class;
            window = window,
        )
    return plot_probability_calibration_curve(
        calibration_scores,
        calibration_fractions;
        multiply_by = multiply_by,
        legend_pos = legend_pos,
    )
end
"""
    plot_probability_calibration_curve(scores, fractions; multiply_by = 1.0, legend_pos = "outer north east")

Plot probability calibration: observed positive fraction versus predicted
score (black scatter), the identity line (dotted red; a perfectly
calibrated classifier lies on it), and an ordinary least-squares best-fit
line (dashed blue). Both axes are scaled by `multiply_by` (e.g. use 100
for percentages). Returns a `PGFPlotsXPlot`.
"""
function plot_probability_calibration_curve(
    scores::AbstractVector{F},
    fractions::AbstractVector{F};
    multiply_by::Real = 1.0,
    legend_pos::AbstractString = "outer north east",
    )::PGFPlotsXPlot where F <: AbstractFloat
    legend_pos::String = convert(String, legend_pos)
    # Scale both axes by `multiply_by`.
    scores = convert(Vector{F}, scores * multiply_by)
    fractions = convert(Vector{F}, fractions * multiply_by)
    # Scatter of observed fraction versus predicted score.
    score_versus_fraction_linearplotobject = PGFPlotsX.@pgf(
        PGFPlotsX.Plot(
            {
                only_marks,
                style = "black, fill = black",
            },
            PGFPlotsX.Coordinates(
                scores,
                fractions,
            )
        )
    )
    zero_coordinate = convert(F, zero(F))
    one_coordinate = convert(F, one(F) * multiply_by)
    perfectline_xs = [
        zero_coordinate,
        one_coordinate,
    ]
    perfectline_ys = [
        zero_coordinate,
        one_coordinate,
    ]
    # Identity line: perfect calibration.
    perfectline_linearplotobject = PGFPlotsX.@pgf(
        PGFPlotsX.Plot(
            {
                no_marks,
                style = "dotted, red, color=red, fill=red",
            },
            PGFPlotsX.Coordinates(
                perfectline_xs,
                perfectline_ys,
            ),
        )
    )
    # Ordinary least squares fit of fraction on score.
    estimated_intercept, estimated_x_coefficient =
        simple_linear_regression(
            scores, # X
            fractions, # Y
        )
    bestfitline_xs = [
        zero_coordinate,
        one_coordinate,
    ]
    bestfitline_ys = [
        estimated_intercept,
        estimated_intercept + estimated_x_coefficient * one_coordinate,
    ]
    bestfitline_linearplotobject = PGFPlotsX.@pgf(
        PGFPlotsX.Plot(
            {
                no_marks,
                style = "dashed, blue, color=blue, fill=blue",
            },
            PGFPlotsX.Coordinates(
                bestfitline_xs,
                bestfitline_ys,
            ),
        )
    )
    p = PGFPlotsX.@pgf(
        PGFPlotsX.Axis(
            {
                xlabel = LaTeXStrings.LaTeXString(
                    "Probability of positive class"
                ),
                ylabel = LaTeXStrings.LaTeXString(
                    "Fraction of positive class"
                ),
                legend_pos = legend_pos,
            },
            score_versus_fraction_linearplotobject,
            perfectline_linearplotobject,
            bestfitline_linearplotobject,
        ),
    )
    wrapper = PGFPlotsXPlot(p)
    return wrapper
end
"""
    probability_calibration_metrics(estimator, features_df, labels_df, single_label_name, positive_class; window = 0.1)

Compute probability-calibration metrics for a single estimator by
delegating to the vector-of-estimators method.
"""
function probability_calibration_metrics(
    estimator::AbstractFittable,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol,
    positive_class::AbstractString;
    window::Real = 0.1,
    )
    vectorofestimators = AbstractFittable[estimator]
    # BUG FIX: `window` was previously accepted but silently dropped here;
    # forward it (the vector method takes `window` as a positional optional
    # argument).
    result = probability_calibration_metrics(
        vectorofestimators,
        features_df,
        labels_df,
        single_label_name,
        positive_class,
        window,
    )
    return result
end
"""
    probability_calibration_metrics(vectorofestimators, features_df, labels_df, single_label_name, positive_class[, window])

Return a `DataFrame` of probability-calibration metrics (R², binary Brier
score, and best-fit-line intercept/coefficient), with one column per
estimator in `vectorofestimators` (keyed by the estimator's name).

NOTE(review): `window` is a positional optional argument here, while the
single-estimator method takes it as a keyword — confirm intended API.
"""
function probability_calibration_metrics(
    vectorofestimators::AbstractVector{AbstractFittable},
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame,
    single_label_name::Symbol,
    positive_class::AbstractString,
    window::Real = 0.1,
    )
    result = DataFrames.DataFrame()
    result[:metric] = [
        "R^2 (coefficient of determination)",
        "Brier score (binary formulation)",
        "Best fit line: estimated intercept",
        "Best fit line: estimated coefficient",
    ]
    for i = 1:length(vectorofestimators)
        # Binary ground truth: 1 for positive_class, 0 otherwise.
        ytrue = Int.(
            singlelabelbinaryytrue(
                labels_df[single_label_name],
                positive_class,
            )
        )
        predictedprobabilitiesalllabels = predict_proba(
            vectorofestimators[i],
            features_df,
        )
        # Predicted probability assigned to the positive class.
        yscore = Float64.(
            singlelabelbinaryyscore(
                predictedprobabilitiesalllabels[single_label_name],
                positive_class,
            )
        )
        binary_brier_score_value = binary_brier_score(ytrue, yscore)
        scores, fractions = probability_calibration_scores_and_fractions(
            ytrue,
            yscore;
            window = window,
        )
        # R^2 of observed fractions against the predicted scores.
        r2_score_value = r2_score(scores, fractions)
        estimated_intercept, estimated_x_coefficient =
            simple_linear_regression(
                Float64.(scores), # X
                Float64.(fractions), # Y
            )
        result[Symbol(vectorofestimators[i].name)] = [
            r2_score_value,
            binary_brier_score_value,
            estimated_intercept,
            estimated_x_coefficient,
        ]
    end
    return result
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 850 | """
"""
function fit!(
    transformer::ImmutablePackageMultiLabelPredictionTransformer,
    varargs...;
    kwargs...
    )
    # Fitting this packaging transformer is a no-op; echo the pipeline
    # inputs through unchanged (unwrap a lone argument).
    return length(varargs) == 1 ? varargs[1] : varargs
end
"""
    predict(transformer::ImmutablePackageMultiLabelPredictionTransformer, single_labelpredictions, varargs...)

Package a predictions matrix into a `DataFrame` with one column per label
name stored on the transformer (column `i` takes matrix column `i`).
"""
function predict(
    transformer::ImmutablePackageMultiLabelPredictionTransformer,
    single_labelpredictions::AbstractMatrix,
    varargs...
    )
    result = DataFrames.DataFrame()
    for (i, label_name) in enumerate(transformer.label_names)
        result[label_name] = single_labelpredictions[:, i]
    end
    return result
end
"""
    predict_proba(transformer::ImmutablePackageMultiLabelPredictionTransformer, varargs...; kwargs...)

Pass-through: return the inputs unchanged (a lone argument is unwrapped).
"""
function predict_proba(
    transformer::ImmutablePackageMultiLabelPredictionTransformer,
    varargs...;
    kwargs...
    )
    return length(varargs) == 1 ? varargs[1] : varargs
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 820 | """
"""
function fit!(
    transformer::ImmutablePackageSingleLabelPredictionTransformer,
    varargs...;
    kwargs...
    )
    # No fitting required for this packaging transformer; pass the pipeline
    # inputs through (unwrap a lone argument).
    return length(varargs) == 1 ? varargs[1] : varargs
end
"""
    predict(transformer::ImmutablePackageSingleLabelPredictionTransformer, single_labelpredictions, varargs...)

Wrap a predictions vector in a one-column `DataFrame` keyed by the
transformer's single label name.
"""
function predict(
    transformer::ImmutablePackageSingleLabelPredictionTransformer,
    single_labelpredictions::AbstractVector,
    varargs...
    )
    result = DataFrames.DataFrame()
    result[transformer.single_label_name] = single_labelpredictions
    return result
end
"""
    predict_proba(transformer::ImmutablePackageSingleLabelPredictionTransformer, varargs...; kwargs...)

Pass-through: return the inputs unchanged (a lone argument is unwrapped).
"""
function predict_proba(
    transformer::ImmutablePackageSingleLabelPredictionTransformer,
    varargs...;
    kwargs...
    )
    return length(varargs) == 1 ? varargs[1] : varargs
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 813 | """
"""
function fit!(
    transformer::ImmutablePackageSingleLabelPredictProbaTransformer,
    varargs...;
    kwargs...
    )
    # Fitting is a no-op for this packaging transformer; echo inputs
    # (unwrap a lone argument).
    return length(varargs) == 1 ? varargs[1] : varargs
end
"""
    predict(transformer::ImmutablePackageSingleLabelPredictProbaTransformer, varargs...; kwargs...)

Pass-through: return the inputs unchanged (a lone argument is unwrapped).
"""
function predict(
    transformer::ImmutablePackageSingleLabelPredictProbaTransformer,
    varargs...;
    kwargs...
    )
    return length(varargs) == 1 ? varargs[1] : varargs
end
"""
    predict_proba(transformer::ImmutablePackageSingleLabelPredictProbaTransformer, single_labelprobabilities; kwargs...)

Wrap a per-class probability dict in an outer dict keyed by the
transformer's single label name.
"""
function predict_proba(
    transformer::ImmutablePackageSingleLabelPredictProbaTransformer,
    single_labelprobabilities::AbstractDict;
    kwargs...
    )
    wrapped = Dict()
    wrapped[transformer.single_label_name] = single_labelprobabilities
    return fix_type(wrapped)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1646 | import DataFrames
"""
    fit!(transformer::ImmutablePredictionsSingleLabelInt2StringTransformer, varargs...; kwargs...)

Pass-through: fitting is a no-op; return the inputs (a lone argument is
unwrapped).
"""
function fit!(
    transformer::ImmutablePredictionsSingleLabelInt2StringTransformer,
    varargs...;
    kwargs...
    )
    return length(varargs) == 1 ? varargs[1] : varargs
end
"""
    predict(transformer::ImmutablePredictionsSingleLabelInt2StringTransformer, single_labelpredictions, varargs...; kwargs...)

Map integer-coded predictions (supplied as parseable strings) back to
their string labels via the transformer's levels and index.
"""
function predict(
    transformer::ImmutablePredictionsSingleLabelInt2StringTransformer,
    single_labelpredictions::AbstractVector,
    varargs...;
    kwargs...
    )
    # Predictions arrive as strings of integers; decode before mapping.
    int_predictions = parse.(Int, single_labelpredictions)
    labelint2stringmap = getlabelint2stringmap(
        transformer.levels,
        transformer.index,
    )
    return String[labelint2stringmap[p] for p in int_predictions]
end
"""
    predict(transformer::ImmutablePredictionsSingleLabelInt2StringTransformer, single_labelpredictions::DataFrames.AbstractDataFrame, varargs...; kwargs...)

Apply the integer-to-string mapping column by column, returning a new
`DataFrame` with the same column names.
"""
function predict(
    transformer::ImmutablePredictionsSingleLabelInt2StringTransformer,
    single_labelpredictions::DataFrames.AbstractDataFrame,
    varargs...;
    kwargs...
    )
    result = DataFrames.DataFrame()
    for label_name in DataFrames.names(single_labelpredictions)
        result[label_name] = predict(
            transformer,
            single_labelpredictions[label_name];
            kwargs...
        )
    end
    return result
end
"""
    predict_proba(transformer::ImmutablePredictionsSingleLabelInt2StringTransformer, varargs...; kwargs...)

Pass-through: return the inputs unchanged (a lone argument is unwrapped).
"""
function predict_proba(
    transformer::ImmutablePredictionsSingleLabelInt2StringTransformer,
    varargs...;
    kwargs...
    )
    return length(varargs) == 1 ? varargs[1] : varargs
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 993 | """
"""
function fit!(
    transformer::ImmutablePredictProbaSingleLabelInt2StringTransformer,
    varargs...;
    kwargs...
    )
    # Fitting is a no-op for this transformer; echo the pipeline inputs
    # (unwrap a lone argument).
    return length(varargs) == 1 ? varargs[1] : varargs
end
"""
    predict(transformer::ImmutablePredictProbaSingleLabelInt2StringTransformer, varargs...; kwargs...)

Pass-through: return the inputs unchanged (a lone argument is unwrapped).
"""
function predict(
    transformer::ImmutablePredictProbaSingleLabelInt2StringTransformer,
    varargs...;
    kwargs...
    )
    return length(varargs) == 1 ? varargs[1] : varargs
end
"""
    predict_proba(transformer::ImmutablePredictProbaSingleLabelInt2StringTransformer, single_labelprobabilities; kwargs...)

Re-key a probability dict from integer class codes to their string labels
using the transformer's levels and index.
"""
function predict_proba(
    transformer::ImmutablePredictProbaSingleLabelInt2StringTransformer,
    single_labelprobabilities::AbstractDict;
    kwargs...
    )
    int2string = getlabelint2stringmap(
        transformer.levels,
        transformer.index,
    )
    remapped = Dict()
    for (int_label, probabilities) in pairs(single_labelprobabilities)
        remapped[int2string[int_label]] = probabilities
    end
    return fix_type(remapped)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2045 | import DataFrames
"""
    DataFrameFeatureContrasts(df, columns)

Build feature contrasts for the given `columns` of `df`: StatsModels
formulas and schemas both with and without an intercept term, together
with the resulting model-matrix column counts. `columns` must not contain
duplicates.
"""
function DataFrameFeatureContrasts(
    df::DataFrames.AbstractDataFrame,
    columns::AbstractVector{Symbol},
    )
    columns_deepcopy = deepcopy(columns)
    if length(columns_deepcopy) != length(unique(columns_deepcopy))
        error("length(columns_deepcopy) != length(unique(columns_deepcopy))")
    end
    num_df_columns_deepcopy = length(columns_deepcopy)
    # The LHS of each formula is arbitrary (first column); only the RHS
    # terms matter for generating the contrasts.
    formula_wo_intercept = generate_formula(
        columns_deepcopy[1],
        columns_deepcopy;
        intercept = false,
    )
    formula_with_intercept = generate_formula(
        columns_deepcopy[1],
        columns_deepcopy;
        intercept = true,
    )
    # Derive schemas from the data, then bind them to the formulas.
    schema_wo_intercept = StatsModels.schema(formula_wo_intercept, df)
    schema_with_intercept = StatsModels.schema(formula_with_intercept, df)
    formula_wo_intercept = StatsModels.apply_schema(formula_wo_intercept,
        schema_wo_intercept)
    formula_with_intercept = StatsModels.apply_schema(formula_with_intercept,
        schema_with_intercept)
    # Materialize once to learn how many columns the expanded
    # (dummy-coded) model matrices have.
    response_wo_intercept, predictors_wo_intercept =
        StatsModels.modelcols(formula_wo_intercept, df)
    response_with_intercept, predictors_with_intercept =
        StatsModels.modelcols(formula_with_intercept, df)
    num_array_columns_wo_intercept = size(predictors_wo_intercept, 2)
    num_array_columns_with_intercept = size(predictors_with_intercept, 2)
    result = DataFrameFeatureContrasts(
        columns_deepcopy,
        num_df_columns_deepcopy,
        schema_wo_intercept,
        formula_wo_intercept,
        num_array_columns_wo_intercept,
        schema_with_intercept,
        formula_with_intercept,
        num_array_columns_with_intercept,
    )
    return result
end
"""
    generate_feature_contrasts(df, columns)

Convenience wrapper that constructs a `DataFrameFeatureContrasts` for the
given columns of `df`.
"""
function generate_feature_contrasts(
    df::DataFrames.AbstractDataFrame,
    columns::AbstractVector{Symbol},
    )
    return DataFrameFeatureContrasts(df, columns)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2906 | import DataFrames
import StatsModels
"""
    MutableDataFrame2DecisionTreeTransformer(feature_names, single_label_name; levels = [])

Construct the transformer with a placeholder for feature contrasts; the
real contrasts are installed later via `set_feature_contrasts!`.
"""
function MutableDataFrame2DecisionTreeTransformer(
    feature_names::AbstractVector,
    single_label_name::Symbol;
    levels::AbstractVector = [],
    )
    return MutableDataFrame2DecisionTreeTransformer(
        feature_names,
        single_label_name,
        levels,
        FeatureContrastsNotYetGenerated(),
    )
end
"""
    set_feature_contrasts!(x::MutableDataFrame2DecisionTreeTransformer, contrasts)

Store previously generated feature contrasts on the transformer.
"""
function set_feature_contrasts!(
    x::MutableDataFrame2DecisionTreeTransformer,
    contrasts::AbstractFeatureContrasts,
    )
    x.dffeaturecontrasts = contrasts
    return nothing
end
"""
    get_underlying(x::MutableDataFrame2DecisionTreeTransformer; saving = false, loading = false)

Return the feature contrasts stored on the transformer.
"""
function get_underlying(
    x::MutableDataFrame2DecisionTreeTransformer;
    saving::Bool = false,
    loading::Bool = false,
    )
    return x.dffeaturecontrasts
end
"""
    transform(transformer::MutableDataFrame2DecisionTreeTransformer, features_df, labels_df; kwargs...)

Expand `features_df` using the stored StatsModels schema and extract the
single label column as a plain array. Returns `(featuresarray, labelsarray)`.
"""
function transform(
    transformer::MutableDataFrame2DecisionTreeTransformer,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    labelsarray = convert(
        Array,
        labels_df[transformer.single_label_name],
    )
    # Re-apply the stored schema so factor expansion matches training time.
    formula = StatsModels.apply_schema(
        transformer.dffeaturecontrasts.formula_without_intercept,
        transformer.dffeaturecontrasts.schema_without_intercept,
    )
    _, featuresarray = StatsModels.modelcols(formula, features_df)
    return featuresarray, labelsarray
end
"""
    transform(transformer::MutableDataFrame2DecisionTreeTransformer, features_df; kwargs...)

Expand `features_df` into a numeric array using the stored StatsModels
schema.
"""
function transform(
    transformer::MutableDataFrame2DecisionTreeTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    formula = StatsModels.apply_schema(
        transformer.dffeaturecontrasts.formula_without_intercept,
        transformer.dffeaturecontrasts.schema_without_intercept,
    )
    _, featuresarray = StatsModels.modelcols(formula, features_df)
    return featuresarray
end
"""
    fit!(transformer::MutableDataFrame2DecisionTreeTransformer, features_df, labels_df; kwargs...)

Fitting this transformer is just transformation; delegate to `transform`.
"""
function fit!(
    transformer::MutableDataFrame2DecisionTreeTransformer,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(transformer, features_df, labels_df)
end
"""
    predict(transformer::MutableDataFrame2DecisionTreeTransformer, features_df, varargs...; kwargs...)

Transform the features and pass any extra positional arguments through.
"""
function predict(
    transformer::MutableDataFrame2DecisionTreeTransformer,
    features_df::DataFrames.AbstractDataFrame,
    varargs...;
    kwargs...
    )
    transformed = transform(transformer, features_df)
    return (transformed, varargs...)
end
"""
    predict_proba(transformer::MutableDataFrame2DecisionTreeTransformer, features_df; kwargs...)

Transform the features (identical to `transform`).
"""
function predict_proba(
    transformer::MutableDataFrame2DecisionTreeTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(transformer, features_df)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2246 | import DataFrames
"""
    transform(transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer, features_df, labels_df; kwargs...)

Binarize the label column (1 for the transformer's positive class, 0
otherwise); features are returned unchanged.
"""
function transform(
    transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    label = transformer.label
    binarized = Int.(labels_df[label] .== transformer.positive_class)
    transformedlabels_df = DataFrames.DataFrame()
    transformedlabels_df[label] = binarized
    return features_df, transformedlabels_df
end
"""
    transform(transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer, features_df; kwargs...)

Features need no transformation for the GLM backend; return them as-is.
"""
function transform(
    transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return features_df
end
"""
    fit!(transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer, features_df, labels_df; kwargs...)

Fitting this transformer is just transformation; delegate to `transform`.
"""
function fit!(
    transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer,
    features_df::DataFrames.AbstractDataFrame,
    labels_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(transformer, features_df, labels_df)
end
"""
    predict(transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer, features_df; kwargs...)

Transform the features (identical to `transform`).
"""
function predict(
    transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(transformer, features_df)
end
"""
    predict(transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer, features_df, my_class, varargs...; kwargs...)

Transform both the features and the class string, passing any extra
positional arguments through untouched.
"""
function predict(
    transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer,
    features_df::DataFrames.AbstractDataFrame,
    my_class::AbstractString,
    varargs...;
    kwargs...
    )
    return (
        transform(transformer, features_df),
        transform(transformer, my_class),
        varargs...,
    )
end
"""
    transform(transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer, my_class; kwargs...)

Encode a class string as `1` when it equals the transformer's positive
class and `0` otherwise.
"""
function transform(
    transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer,
    my_class::AbstractString;
    kwargs...
    )
    return Int(my_class == transformer.positive_class)
end
"""
    predict_proba(transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer, features_df; kwargs...)

Transform the features (identical to `transform`).
"""
function predict_proba(
    transformer::ImmutableDataFrame2GLMSingleLabelBinaryClassTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(transformer, features_df)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 12107 | import DataFrames
import StatsModels
"""
    MutableDataFrame2ClassificationKnetTransformer(feature_names, label_names, label_levels, index; transposefeatures = true, transposelabels = false)

Construct the transformer with a placeholder for feature contrasts; the
real contrasts are installed later via `set_feature_contrasts!`.
"""
function MutableDataFrame2ClassificationKnetTransformer(
    feature_names::AbstractVector,
    label_names::AbstractVector{Symbol},
    label_levels::AbstractDict,
    index::Integer;
    transposefeatures::Bool = true,
    transposelabels::Bool = false,
    )
    return MutableDataFrame2ClassificationKnetTransformer(
        feature_names,
        label_names,
        label_levels,
        index,
        transposefeatures,
        transposelabels,
        FeatureContrastsNotYetGenerated(),
    )
end
"""
    get_underlying(x::MutableDataFrame2ClassificationKnetTransformer; saving = false, loading = false)

Return the feature contrasts stored on the transformer.
"""
function get_underlying(
    x::MutableDataFrame2ClassificationKnetTransformer;
    saving::Bool = false,
    loading::Bool = false,
    )
    return x.dffeaturecontrasts
end
"""
    get_underlying(x::MutableDataFrame2RegressionKnetTransformer; saving = false, loading = false)

Return the feature contrasts stored on the transformer.
"""
function get_underlying(
    x::MutableDataFrame2RegressionKnetTransformer;
    saving::Bool = false,
    loading::Bool = false,
    )
    return x.dffeaturecontrasts
end
"""
    set_feature_contrasts!(x::MutableDataFrame2ClassificationKnetTransformer, contrasts)

Store previously generated feature contrasts on the transformer.
"""
function set_feature_contrasts!(
    x::MutableDataFrame2ClassificationKnetTransformer,
    contrasts::AbstractFeatureContrasts,
    )
    x.dffeaturecontrasts = contrasts
    return nothing
end
"""
    set_feature_contrasts!(x::MutableDataFrame2RegressionKnetTransformer, contrasts)

Store previously generated feature contrasts on the transformer.
"""
function set_feature_contrasts!(
    x::MutableDataFrame2RegressionKnetTransformer,
    contrasts::AbstractFeatureContrasts,
    )
    x.dffeaturecontrasts = contrasts
    return nothing
end
"""
    fit!(transformer::MutableDataFrame2ClassificationKnetTransformer, training_features_df, training_labels_df[, tuning_features_df, tuning_labels_df]; kwargs...)

Fitting this transformer is just transformation; delegate to `transform`.
"""
function fit!(
    transformer::MutableDataFrame2ClassificationKnetTransformer,
    training_features_df::DataFrames.AbstractDataFrame,
    training_labels_df::DataFrames.AbstractDataFrame,
    tuning_features_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing,
    tuning_labels_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing;
    kwargs...
    )
    return transform(
        transformer,
        training_features_df,
        training_labels_df,
        tuning_features_df,
        tuning_labels_df;
        kwargs...
    )
end
"""
    fit!(transformer::MutableDataFrame2RegressionKnetTransformer, training_features_df, training_labels_df[, tuning_features_df, tuning_labels_df]; kwargs...)

Fitting this transformer is just transformation; delegate to `transform`.
"""
function fit!(
    transformer::MutableDataFrame2RegressionKnetTransformer,
    training_features_df::DataFrames.AbstractDataFrame,
    training_labels_df::DataFrames.AbstractDataFrame,
    tuning_features_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing,
    tuning_labels_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing;
    kwargs...
    )
    return transform(
        transformer,
        training_features_df,
        training_labels_df,
        tuning_features_df,
        tuning_labels_df;
        kwargs...
    )
end
"""
    predict(transformer::MutableDataFrame2ClassificationKnetTransformer, features_df; kwargs...)

Transform the features (identical to `transform`).
"""
function predict(
    transformer::MutableDataFrame2ClassificationKnetTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(transformer, features_df)
end
"""
    transform(transformer::MutableDataFrame2ClassificationKnetTransformer, positive_class; kwargs...)

Map a positive-class string to its integer code using the transformer's
label levels. Only single-label transformers are supported; zero or
multiple labels raise an error.
"""
function transform(
    transformer::MutableDataFrame2ClassificationKnetTransformer,
    positive_class::AbstractString;
    kwargs...
    )
    num_labels = length(transformer.label_names)
    if num_labels == 0
        error("length(transformer.label_names) == 0")
    elseif num_labels > 1
        # Mapping a single class string is ambiguous with several labels.
        error("unsupported behavior")
    end
    first_label = transformer.label_names[1]
    first_label_levels = transformer.label_levels[first_label]
    string2int = getlabelstring2intmap(
        first_label_levels,
        transformer.index,
    )
    return string2int[positive_class]
end
"""
    predict(transformer::MutableDataFrame2ClassificationKnetTransformer, features_df, positive_class, varargs...; kwargs...)

Transform both the features and the positive-class label, passing any
extra positional arguments through untouched.
"""
function predict(
    transformer::MutableDataFrame2ClassificationKnetTransformer,
    features_df::DataFrames.AbstractDataFrame,
    positive_class::AbstractString,
    varargs...;
    kwargs...
    )
    return (
        transform(transformer, features_df),
        transform(transformer, positive_class),
        varargs...,
    )
end
"""
    predict_proba(transformer::MutableDataFrame2ClassificationKnetTransformer, features_df; kwargs...)

Transform the features (identical to `transform`).
"""
function predict_proba(
    transformer::MutableDataFrame2ClassificationKnetTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(transformer, features_df)
end
"""
    predict(transformer::MutableDataFrame2RegressionKnetTransformer, features_df; kwargs...)

Transform the features (identical to `transform`), forwarding keyword
arguments.
"""
function predict(
    transformer::MutableDataFrame2RegressionKnetTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(
        transformer,
        features_df;
        kwargs...
    )
end
"""
    predict_proba(transformer::MutableDataFrame2RegressionKnetTransformer, features_df; kwargs...)

Transform the features (identical to `transform`), forwarding keyword
arguments.
"""
function predict_proba(
    transformer::MutableDataFrame2RegressionKnetTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    return transform(
        transformer,
        features_df;
        kwargs...
    )
end
"""
    transform(transformer::MutableDataFrame2ClassificationKnetTransformer, training_features_df, training_labels_df[, tuning_features_df, tuning_labels_df]; kwargs...)

Convert training (and optionally tuning) feature/label `DataFrame`s into
numeric arrays for Knet classification. Label strings are mapped to
integers via the transformer's label levels; features are expanded with
the stored StatsModels schema. Arrays are transposed when the
corresponding `transpose*` flags are set.
"""
function transform(
    transformer::MutableDataFrame2ClassificationKnetTransformer,
    training_features_df::DataFrames.AbstractDataFrame,
    training_labels_df::DataFrames.AbstractDataFrame,
    tuning_features_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing,
    tuning_labels_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing;
    kwargs...
    )
    # Tuning features and labels must be supplied together (or not at all).
    if is_nothing(tuning_features_df) &&
            is_nothing(tuning_labels_df)
        has_tuning_data = false
    elseif !is_nothing(tuning_features_df) &&
            !is_nothing(tuning_labels_df)
        has_tuning_data = true
    else
        error(
            string(
                "Either both tuning_features_df ",
                "and tuning_labels_df ",
                "must be defined, or neither can be defined."
            )
        )
    end
    if length(transformer.label_names) == 0
        error("length(transformer.label_names) == 0")
    elseif length(transformer.label_names) == 1
        label_1 = transformer.label_names[1]
        levels_1 = transformer.label_levels[label_1]
        labelstring2intmap_1 = getlabelstring2intmap(
            levels_1,
            transformer.index,
        )
        training_labels_array =
            [labelstring2intmap_1[y] for y in training_labels_df[label_1]]
    else
        # BUG FIX: this branch previously used the pre-1.0 constructor
        # `Array{Int}(dims...)` (missing `undef`) and read from an
        # undefined variable `labels_df`; both threw at runtime.
        training_labels_array = Array{Int}(
            undef,
            size(training_labels_df, 1),
            length(transformer.label_names),
        )
        for j = 1:length(transformer.label_names)
            label_j = transformer.label_names[j]
            levels_j = transformer.label_levels[label_j]
            labelstring2intmap_j = getlabelstring2intmap(
                levels_j,
                transformer.index,
            )
            training_labels_array[:, j] =
                [labelstring2intmap_j[y] for y in training_labels_df[label_j]]
        end
    end
    # Re-apply the stored schema so feature expansion matches training time.
    my_formula = transformer.dffeaturecontrasts.formula_without_intercept
    my_schema = transformer.dffeaturecontrasts.schema_without_intercept
    my_formula = StatsModels.apply_schema(my_formula, my_schema)
    response, training_features_array = StatsModels.modelcols(my_formula,
        training_features_df)
    if transformer.transposefeatures
        training_features_array = transpose(training_features_array)
    end
    if transformer.transposelabels
        training_labels_array = transpose(training_labels_array)
    end
    # Materialize lazy transposes into plain Arrays.
    training_features_array = convert(Array, training_features_array)
    training_labels_array = convert(Array, training_labels_array)
    if has_tuning_data
        # Process the tuning split through this same method.
        tuning_features_array, tuning_labels_array = transform(
            transformer,
            tuning_features_df,
            tuning_labels_df;
            kwargs...
        )
        return training_features_array,
            training_labels_array,
            tuning_features_array,
            tuning_labels_array
    else
        return training_features_array,
            training_labels_array
    end
end
"""
    transform(transformer::MutableDataFrame2ClassificationKnetTransformer, features_df; kwargs...)

Expand `features_df` into a numeric array using the stored StatsModels
schema, transposing when `transposefeatures` is set.
"""
function transform(
    transformer::MutableDataFrame2ClassificationKnetTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    formula = StatsModels.apply_schema(
        transformer.dffeaturecontrasts.formula_without_intercept,
        transformer.dffeaturecontrasts.schema_without_intercept,
    )
    _, featuresarray = StatsModels.modelcols(formula, features_df)
    if transformer.transposefeatures
        featuresarray = transpose(featuresarray)
    end
    return featuresarray
end
"""
    transform(transformer::MutableDataFrame2RegressionKnetTransformer, training_features_df, training_labels_df[, tuning_features_df, tuning_labels_df]; kwargs...)

Convert training (and optionally tuning) feature/label `DataFrame`s into
numeric arrays for Knet regression. Label columns are horizontally
concatenated; features are expanded using the stored StatsModels schema.
Arrays are transposed when the corresponding `transpose*` flags are set.
"""
function transform(
    transformer::MutableDataFrame2RegressionKnetTransformer,
    training_features_df::DataFrames.AbstractDataFrame,
    training_labels_df::DataFrames.AbstractDataFrame,
    tuning_features_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing,
    tuning_labels_df::Union{Nothing, DataFrames.AbstractDataFrame} =
        nothing;
    kwargs...
    )
    # Tuning features and labels must be supplied together (or not at all).
    if is_nothing(tuning_features_df) &&
            is_nothing(tuning_labels_df)
        has_tuning_data = false
    elseif !is_nothing(tuning_features_df) &&
            !is_nothing(tuning_labels_df)
        has_tuning_data = true
    else
        error(
            string(
                "Either both tuning_features_df ",
                "and tuning_labels_df ",
                "must be defined, or neither can be defined.",
            )
        )
    end
    # One column per label, in label_names order.
    training_labels_array = hcat(
        [
            training_labels_df[label] for label in transformer.label_names
        ]...
    )
    # Re-apply the stored schema so feature expansion matches training time.
    my_formula = transformer.dffeaturecontrasts.formula_without_intercept
    my_schema = transformer.dffeaturecontrasts.schema_without_intercept
    my_formula = StatsModels.apply_schema(my_formula, my_schema)
    response, training_features_array = StatsModels.modelcols(my_formula,
        training_features_df)
    if transformer.transposefeatures
        training_features_array = transpose(training_features_array)
    end
    if transformer.transposelabels
        training_labels_array = transpose(training_labels_array)
    end
    # Materialize lazy transposes into plain Arrays.
    training_features_array = convert(Array, training_features_array)
    training_labels_array = convert(Array, training_labels_array)
    if has_tuning_data
        # Process the tuning split through this same method.
        tuning_features_array, tuning_labels_array = transform(
            transformer,
            tuning_features_df,
            tuning_labels_df;
            kwargs...
        )
        return training_features_array,
            training_labels_array,
            tuning_features_array,
            tuning_labels_array
    else
        return training_features_array,
            training_labels_array
    end
end
"""
    transform(transformer::MutableDataFrame2RegressionKnetTransformer, features_df; kwargs...)

Expand `features_df` into a numeric array using the stored StatsModels
schema, transposing when `transposefeatures` is set.
"""
function transform(
    transformer::MutableDataFrame2RegressionKnetTransformer,
    features_df::DataFrames.AbstractDataFrame;
    kwargs...
    )
    # BUG FIX: `kwargs...` was previously separated from `features_df` by a
    # comma, making it a positional varargs slurp; callers passing keyword
    # arguments (e.g. `predict`/`predict_proba`) would hit a MethodError.
    # Use `;` as in the classification counterpart.
    my_formula = transformer.dffeaturecontrasts.formula_without_intercept
    my_schema = transformer.dffeaturecontrasts.schema_without_intercept
    my_formula = StatsModels.apply_schema(my_formula, my_schema)
    response, featuresarray = StatsModels.modelcols(my_formula,
        features_df)
    if transformer.transposefeatures
        featuresarray = transpose(featuresarray)
    end
    return featuresarray
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1826 | import DataFrames
import StatsModels
"""
    transform(transformer::ImmutableFeatureArrayTransposerTransformer, featuresarray, labelsarray; kwargs...)

Return the (lazily) transposed features together with the labels
unchanged.
"""
function transform(
    transformer::ImmutableFeatureArrayTransposerTransformer,
    featuresarray::AbstractMatrix,
    labelsarray::AbstractArray;
    kwargs...
    )
    return transpose(featuresarray), labelsarray
end
"""
    transform(transformer::ImmutableFeatureArrayTransposerTransformer, featuresarray; kwargs...)

Return the (lazily) transposed features.
"""
function transform(
    transformer::ImmutableFeatureArrayTransposerTransformer,
    featuresarray::AbstractMatrix;
    kwargs...
    )
    return transpose(featuresarray)
end
"""
    fit!(transformer::ImmutableFeatureArrayTransposerTransformer, featuresarray, labelsarray; kwargs...)

Fitting this transformer is just transformation; delegate to `transform`.
"""
function fit!(
    transformer::ImmutableFeatureArrayTransposerTransformer,
    featuresarray::AbstractMatrix,
    labelsarray::AbstractArray;
    kwargs...
    )
    return transform(transformer, featuresarray, labelsarray)
end
"""
    predict(transformer::ImmutableFeatureArrayTransposerTransformer, featuresarray, varargs...; kwargs...)

Transpose the features and pass any extra positional arguments through.
"""
function predict(
    transformer::ImmutableFeatureArrayTransposerTransformer,
    featuresarray::AbstractMatrix,
    varargs...;
    kwargs...
    )
    transposed = transform(transformer, featuresarray)
    return (transposed, varargs...)
end
"""
    predict_proba(transformer::ImmutableFeatureArrayTransposerTransformer, featuresarray; kwargs...)

Transpose the features (identical to `transform`).
"""
function predict_proba(
    transformer::ImmutableFeatureArrayTransposerTransformer,
    featuresarray::AbstractMatrix;
    kwargs...
    )
    return transform(transformer, featuresarray)
end
"""
    DataFrame2LIBSVMTransformer(feature_names, single_label_name; levels = [])

Compose a DataFrame-to-array transformer with a feature-array transposer
(LIBSVM expects samples as columns).
"""
function DataFrame2LIBSVMTransformer(
    feature_names::AbstractVector,
    single_label_name::Symbol;
    levels::AbstractVector = [],
    )
    df2array = MutableDataFrame2DecisionTreeTransformer(
        feature_names,
        single_label_name;
        levels = levels,
    )
    return df2array |> ImmutableFeatureArrayTransposerTransformer()
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 12738 | import LIBSVM
"""
    LIBSVMModel(; kwargs...)

Construct an unfitted wrapper around a LIBSVM support vector machine. The
hyperparameters are captured in a `Dict` and forwarded verbatim to
`LIBSVM.svmtrain` when `fit!` is called.
"""
function LIBSVMModel(
    ;
    single_label_levels::AbstractVector = [],
    name::AbstractString = "",
    isclassificationmodel::Bool = false,
    isregressionmodel::Bool = false,
    svmtype::Type=LIBSVM.SVC,
    kernel::LIBSVM.Kernel.KERNEL = LIBSVM.Kernel.RadialBasis,
    degree::Integer = 3,
    gamma::AbstractFloat = 0.1,
    coef0::AbstractFloat = 0.0,
    cost::AbstractFloat = 1.0,
    nu::AbstractFloat = 0.5,
    epsilon::AbstractFloat = 0.1,
    tolerance::AbstractFloat = 0.001,
    shrinking::Bool = true,
    weights::Union{Dict, Nothing} = nothing,
    cachesize::AbstractFloat = 100.0,
    verbose::Bool = true,
    )
    hyperparameters = Dict(
        :svmtype => svmtype,
        :kernel => kernel,
        :degree => degree,
        :gamma => gamma,
        :coef0 => coef0,
        :cost => cost,
        :nu => nu,
        :epsilon => epsilon,
        :tolerance => tolerance,
        :shrinking => shrinking,
        :weights => fix_type(weights),
        :cachesize => cachesize,
        :verbose => verbose,
    )
    return LIBSVMModel(
        name,
        isclassificationmodel,
        isregressionmodel,
        single_label_levels,
        fix_type(hyperparameters),
        FitNotYetRunUnderlyingObject(),
    )
end
"""
    get_underlying(x::LIBSVMModel; saving = false, loading = false)

Return the underlying LIBSVM object stored on the estimator.
"""
function get_underlying(
    x::LIBSVMModel;
    saving::Bool = false,
    loading::Bool = false,
    )
    return x.underlyingsvm
end
"""
    fit!(estimator::LIBSVMModel, featuresarray, labelsarray)

Train the underlying LIBSVM model and store it on `estimator`.
Classification models are trained with probability estimates enabled.
Training errors are logged and recorded as a `FitFailedUnderlyingObject`
instead of being rethrown.
"""
function fit!(
    estimator::LIBSVMModel,
    featuresarray::AbstractArray,
    labelsarray::AbstractArray,
    )
    if estimator.isclassificationmodel && !estimator.isregressionmodel
        probability = true
    elseif !estimator.isclassificationmodel && estimator.isregressionmodel
        probability = false
    else
        error(
            "Could not figure out if model is classification or regression"
        )
    end
    @info(string("Starting to train LIBSVM model."))
    svm = try
        LIBSVM.svmtrain(
            featuresarray,
            labelsarray;
            probability = probability,
            estimator.hyperparameters...
        )
    catch e
        # Best effort: record the failure rather than aborting a pipeline.
        @warn(
            string(
                "While training LIBSVM model, ignored error: ",
                e,
            )
        )
        FitFailedUnderlyingObject()
    end
    @info(string("Finished training LIBSVM model."))
    estimator.underlyingsvm = svm
    # BUG FIX: `estimator.levels` was previously assigned unconditionally,
    # which threw when training had failed (the FitFailedUnderlyingObject
    # sentinel has no `labels` field).
    if !(svm isa FitFailedUnderlyingObject)
        estimator.levels = estimator.underlyingsvm.labels
    end
    return estimator
end
"""
    predict(estimator::LIBSVMModel, featuresarray)

For classification models, return hard label predictions derived from
`predict_proba`. For regression models, return the predicted values from
`LIBSVM.svmpredict`, or a vector of zeros when no trained SVM is
available.
"""
function predict(
    estimator::LIBSVMModel,
    featuresarray::AbstractArray,
    )
    if estimator.isclassificationmodel && !estimator.isregressionmodel
        # Classification: derive hard labels from the per-class scores.
        probabilitiesassoc = predict_proba(
            estimator,
            featuresarray,
        )
        predictionsvector = single_labelprobabilitiestopredictions(
            probabilitiesassoc
        )
        return predictionsvector
    elseif !estimator.isclassificationmodel && estimator.isregressionmodel
        # NOTE(review): `is_nothing` presumably also covers the
        # "not yet fitted"/"fit failed" sentinel objects — confirm its
        # definition.
        if is_nothing(estimator.underlyingsvm)
            # Fallback: one zero per sample (samples are columns).
            predicted_values = fill(Float64(0), size(featuresarray, 2))
        else
            predicted_values, decision_values = LIBSVM.svmpredict(
                estimator.underlyingsvm,
                featuresarray,
            )
            if !(typeof(predicted_values) <: AbstractVector)
                error("!(typeof(predicted_values) <: AbstractVector)")
            end
        end
        return predicted_values
    else
        error(
            "Could not figure out if model is classification or regression"
        )
    end
end
"""
    predict(estimator::LIBSVMModel, featuresarray, positive_class, threshold)

Classification-only: return hard label predictions obtained by applying
`threshold` to the positive-class probabilities.
"""
function predict(
    estimator::LIBSVMModel,
    featuresarray::AbstractArray,
    positive_class::AbstractString,
    threshold::AbstractFloat,
    )
    # Thresholding only makes sense for classifiers.
    if !(estimator.isclassificationmodel && !estimator.isregressionmodel)
        error(
            "Can only use the `threshold argument` with classification models"
        )
    end
    probabilitiesassoc = predict_proba(
        estimator,
        featuresarray,
    )
    return single_labelprobabilitiestopredictions(
        probabilitiesassoc,
        positive_class,
        threshold,
    )
end
"""
    predict_proba(estimator::LIBSVMModel, featuresarray)

Return a `Dict` mapping each class label to its per-sample decision
values. Only valid for classification models; regression models throw.
"""
function predict_proba(
    estimator::LIBSVMModel,
    featuresarray::AbstractArray,
    )
    if estimator.isclassificationmodel && !estimator.isregressionmodel
        if is_nothing(estimator.underlyingsvm)
            # Fallback when no trained SVM is available: put all mass on
            # the first class.
            # NOTE(review): this branch still reads
            # `estimator.underlyingsvm.labels`, which cannot work when the
            # underlying SVM is truly absent — confirm intended sentinel
            # handling.
            decision_values = fill(
                Int(0),
                size(featuresarray, 2),
                length(estimator.underlyingsvm.labels),
            )
            # BUG FIX: scalar assignment to a slice requires broadcasting
            # (`.=`) in Julia >= 1.0; plain `=` throws an ArgumentError.
            decision_values[:, 1] .= 1
        else
            predicted_labels, decision_values =
                LIBSVM.svmpredict(estimator.underlyingsvm,featuresarray,)
            # svmpredict returns classes-by-samples; flip to
            # samples-by-classes for column extraction below.
            decision_values = transpose(decision_values)
        end
        result = Dict()
        for i = 1:length(estimator.underlyingsvm.labels)
            result[estimator.underlyingsvm.labels[i]] =
                decision_values[:, i]
        end
        result = fix_type(result)
        return result
    elseif !estimator.isclassificationmodel && estimator.isregressionmodel
        error("predict_proba is not defined for regression models")
    else
        error(
            "Could not figure out if model is classification or regression"
        )
    end
end
"""
    single_labelmulticlassdataframesvmclassifier_LIBSVM(feature_names, single_label_name, single_label_levels; kwargs...)

Construct a full LIBSVM classification pipeline: a DataFrame-to-array
transformer, the `LIBSVMModel` estimator, and packagers that wrap the
probability and prediction outputs under `single_label_name`. Most
keyword arguments are forwarded to `LIBSVMModel`.
"""
function single_labelmulticlassdataframesvmclassifier_LIBSVM(
    feature_names::AbstractVector,
    single_label_name::Symbol,
    single_label_levels::AbstractVector;
    name::AbstractString = "",
    svmtype::Type=LIBSVM.SVC,
    kernel::LIBSVM.Kernel.KERNEL = LIBSVM.Kernel.RadialBasis,
    degree::Integer = 3,
    gamma::AbstractFloat = 0.1,
    coef0::AbstractFloat = 0.0,
    cost::AbstractFloat = 1.0,
    nu::AbstractFloat = 0.5,
    epsilon::AbstractFloat = 0.1,
    tolerance::AbstractFloat = 0.001,
    shrinking::Bool = true,
    weights::Union{Dict, Nothing} = nothing,
    cachesize::AbstractFloat = 100.0,
    verbose::Bool = true,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} =
        nothing,
    )
    # DataFrame -> transposed numeric array (LIBSVM expects samples as
    # columns).
    dftransformer = DataFrame2LIBSVMTransformer(
        feature_names,
        single_label_name;
        levels = single_label_levels,
    )
    svmestimator = LIBSVMModel(
        ;
        name = name,
        single_label_levels = single_label_levels,
        isclassificationmodel = true,
        isregressionmodel = false,
        svmtype=svmtype,
        kernel = kernel,
        degree = degree,
        gamma = gamma,
        coef0 = coef0,
        cost = cost,
        nu = nu,
        epsilon = epsilon,
        tolerance = tolerance,
        shrinking = shrinking,
        weights = weights,
        cachesize = cachesize,
        verbose = verbose,
    )
    # Wrap the raw outputs under the single label name.
    probapackager = ImmutablePackageSingleLabelPredictProbaTransformer(
        single_label_name,
    )
    predpackager = ImmutablePackageSingleLabelPredictionTransformer(
        single_label_name,
    )
    # Chain the stages into one fittable pipeline.
    finalpipeline = dftransformer |>
        svmestimator |>
        probapackager |>
        predpackager
    finalpipeline.name = name
    if !is_nothing(feature_contrasts)
        set_feature_contrasts!(finalpipeline, feature_contrasts)
    end
    return finalpipeline
end
"""
    single_labelmulticlassdataframesvmclassifier(feature_names, single_label_name, single_label_levels; package = :none, kwargs...)

Dispatch on `package` to construct a single-label multiclass SVM
classifier pipeline. Currently only `:LIBSVM` is supported.
"""
function single_labelmulticlassdataframesvmclassifier(
    feature_names::AbstractVector,
    single_label_name::Symbol,
    single_label_levels::AbstractVector;
    package::Symbol = :none,
    name::AbstractString = "",
    svmtype::Type=LIBSVM.SVC,
    kernel::LIBSVM.Kernel.KERNEL = LIBSVM.Kernel.RadialBasis,
    degree::Integer = 3,
    gamma::AbstractFloat = 0.1,
    coef0::AbstractFloat = 0.0,
    cost::AbstractFloat = 1.0,
    nu::AbstractFloat = 0.5,
    epsilon::AbstractFloat = 0.1,
    tolerance::AbstractFloat = 0.001,
    shrinking::Bool = true,
    weights::Union{Dict, Nothing} = nothing,
    cachesize::AbstractFloat = 100.0,
    verbose::Bool = true,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} = nothing,
    )
    # Guard clause: reject unknown backends up front.
    if package != :LIBSVM
        error("$(package) is not a valid value for package")
    end
    return single_labelmulticlassdataframesvmclassifier_LIBSVM(
        feature_names,
        single_label_name,
        single_label_levels;
        name = name,
        svmtype=svmtype,
        kernel = kernel,
        degree = degree,
        gamma = gamma,
        coef0 = coef0,
        cost = cost,
        nu = nu,
        epsilon = epsilon,
        tolerance = tolerance,
        shrinking = shrinking,
        weights = weights,
        cachesize = cachesize,
        verbose = verbose,
        feature_contrasts = feature_contrasts
    )
end
"""
    single_labeldataframesvmregression_LIBSVM(feature_names, single_label_name; kwargs...)

Build a LIBSVM-backed pipeline for single-label regression on a data
frame: feature transformer -> SVM regressor -> prediction packager.
All SVM keyword arguments are forwarded to `LIBSVMModel` unchanged.
"""
function single_labeldataframesvmregression_LIBSVM(
    feature_names::AbstractVector,
    single_label_name::Symbol;
    name::AbstractString = "",
    svmtype::Type=LIBSVM.EpsilonSVR,
    kernel::LIBSVM.Kernel.KERNEL = LIBSVM.Kernel.RadialBasis,
    degree::Integer = 3,
    gamma::AbstractFloat = 0.1,
    coef0::AbstractFloat = 0.0,
    cost::AbstractFloat = 1.0,
    nu::AbstractFloat = 0.5,
    epsilon::AbstractFloat = 0.1,
    tolerance::AbstractFloat = 0.001,
    shrinking::Bool = true,
    weights::Union{Dict, Nothing} = nothing,
    cachesize::AbstractFloat = 100.0,
    verbose::Bool = true,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} =
        nothing,
    )
    # Converts the input data frame into the array format LIBSVM expects.
    feature_transformer = DataFrame2LIBSVMTransformer(
        feature_names,
        single_label_name,
    )
    # The underlying SVM regressor, with every hyperparameter forwarded.
    svm_regressor = LIBSVMModel(
        ;
        name = name,
        isclassificationmodel = false,
        isregressionmodel = true,
        svmtype=svmtype,
        kernel = kernel,
        degree = degree,
        gamma = gamma,
        coef0 = coef0,
        cost = cost,
        nu = nu,
        epsilon = epsilon,
        tolerance = tolerance,
        shrinking = shrinking,
        weights = weights,
        cachesize = cachesize,
        verbose = verbose,
    )
    prediction_packager = ImmutablePackageSingleLabelPredictionTransformer(
        single_label_name,
    )
    pipeline = feature_transformer |>
        svm_regressor |>
        prediction_packager
    pipeline.name = name
    if !is_nothing(feature_contrasts)
        set_feature_contrasts!(pipeline, feature_contrasts)
    end
    return pipeline
end
"""
    single_labeldataframesvmregression(feature_names, single_label_name; package = :none, kwargs...)

Dispatch on `package` and construct the corresponding single-label SVM
regression pipeline. Currently only `package = :LIBSVM` is supported;
any other value throws an error.
"""
function single_labeldataframesvmregression(
    feature_names::AbstractVector,
    single_label_name::Symbol,
    ;
    package::Symbol = :none,
    name::AbstractString = "",
    svmtype::Type=LIBSVM.EpsilonSVR,
    kernel::LIBSVM.Kernel.KERNEL = LIBSVM.Kernel.RadialBasis,
    degree::Integer = 3,
    gamma::AbstractFloat = 0.1,
    coef0::AbstractFloat = 0.0,
    cost::AbstractFloat = 1.0,
    nu::AbstractFloat = 0.5,
    epsilon::AbstractFloat = 0.1,
    tolerance::AbstractFloat = 0.001,
    shrinking::Bool = true,
    weights::Union{Dict, Nothing} = nothing,
    cachesize::AbstractFloat = 100.0,
    verbose::Bool = true,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} = nothing,
    )
    # Guard clause: reject unsupported backends up front.
    if package != :LIBSVM
        error("$(package) is not a valid value for package")
    end
    return single_labeldataframesvmregression_LIBSVM(
        feature_names,
        single_label_name;
        name = name,
        svmtype=svmtype,
        kernel = kernel,
        degree = degree,
        gamma = gamma,
        coef0 = coef0,
        cost = cost,
        nu = nu,
        epsilon = epsilon,
        tolerance = tolerance,
        shrinking = shrinking,
        weights = weights,
        cachesize = cachesize,
        verbose = verbose,
        feature_contrasts = feature_contrasts
    )
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 8749 | import DecisionTree
"""
    DecisionTreeModel(single_label_name; kwargs...)

Outer constructor: bundle the random-forest hyperparameters
(`nsubfeatures`, `ntrees`) into a type-tightened dict and return a
`DecisionTreeModel` whose underlying forest is a
`FitNotYetRunUnderlyingObject` placeholder until `fit!` is called.
"""
function DecisionTreeModel(
    single_label_name::Symbol;
    name::AbstractString = "",
    nsubfeatures::Integer = 2,
    ntrees::Integer = 20,
    isclassificationmodel::Bool = false,
    isregressionmodel::Bool = false,
    levels::AbstractVector = [],
    )
    hyperparameters = Dict()
    hyperparameters[:nsubfeatures] = nsubfeatures
    hyperparameters[:ntrees] = ntrees
    return DecisionTreeModel(
        name,
        isclassificationmodel,
        isregressionmodel,
        single_label_name,
        levels,
        fix_type(hyperparameters),
        FitNotYetRunUnderlyingObject(),
    )
end
"""
    get_underlying(x::DecisionTreeModel; saving = false, loading = false)

Return the object stored as the trained random forest inside `x` (this
is a placeholder object if `fit!` has not yet run, or failed). The
`saving` and `loading` keywords are accepted for interface
compatibility with other estimators and are ignored here.
"""
function get_underlying(
    x::DecisionTreeModel;
    saving::Bool = false,
    loading::Bool = false,
    )
    return x.underlyingrandomforest
end
"""
    fit!(estimator::DecisionTreeModel, featuresarray, labelsarray)

Train the underlying random forest on the given features and labels,
store it inside `estimator`, and return `estimator`. If training
throws, the error is logged as a warning and a
`FitFailedUnderlyingObject` placeholder is stored instead.
"""
function fit!(
    estimator::DecisionTreeModel,
    featuresarray::AbstractArray,
    labelsarray::AbstractArray,
    )
    @info(string("Starting to train DecisionTree model."))
    trained_forest = try
        DecisionTree.build_forest(
            labelsarray,
            featuresarray,
            estimator.hyperparameters[:nsubfeatures],
            estimator.hyperparameters[:ntrees],
        )
    catch e
        @warn(
            string(
                "While training DecisionTree model, ignored error: ",
                e,
            )
        )
        FitFailedUnderlyingObject()
    end
    @info(string("Finished training DecisionTree model."))
    estimator.underlyingrandomforest = trained_forest
    return estimator
end
"""
    predict(estimator::DecisionTreeModel, featuresarray)

For a classification model, return the predicted class label for each
row (the class with the highest predicted probability). For a
regression model, return the predicted continuous value per row.
"""
function predict(
    estimator::DecisionTreeModel,
    featuresarray::AbstractArray,
    )
    if estimator.isclassificationmodel && !estimator.isregressionmodel
        class_probabilities = predict_proba(estimator, featuresarray)
        return single_labelprobabilitiestopredictions(class_probabilities)
    elseif estimator.isregressionmodel && !estimator.isclassificationmodel
        # NOTE(review): `is_nothing` matches only `nothing`; a
        # `FitNotYetRun`/`FitFailed` placeholder falls through to
        # `apply_forest` — confirm this is intended.
        if is_nothing(estimator.underlyingrandomforest)
            return fill(Float64(0), size(featuresarray, 1))
        end
        return DecisionTree.apply_forest(
            estimator.underlyingrandomforest,
            featuresarray,
        )
    else
        error(
            "Could not figure out if model is classification or regression"
        )
    end
end
"""
    predict(estimator::DecisionTreeModel, featuresarray, positive_class, threshold)

Classification-only variant: convert predicted probabilities into hard
predictions using a decision `threshold` on `positive_class`. Throws
an error for non-classification models.
"""
function predict(
    estimator::DecisionTreeModel,
    featuresarray::AbstractArray,
    positive_class::AbstractString,
    threshold::AbstractFloat,
    )
    # Guard clause: thresholds only make sense for classifiers.
    if !(estimator.isclassificationmodel && !estimator.isregressionmodel)
        error(
            "Can only use the `threshold` argument with classification models"
        )
    end
    class_probabilities = predict_proba(estimator, featuresarray)
    return single_labelprobabilitiestopredictions(
        class_probabilities,
        positive_class,
        threshold,
    )
end
"""
    predict_proba(estimator::DecisionTreeModel, featuresarray)

Return a `Dict` mapping each class level to the vector of predicted
probabilities for that class (one entry per row of `featuresarray`).
Only defined for classification models; throws for regression models.
"""
function predict_proba(
    estimator::DecisionTreeModel,
    featuresarray::AbstractArray,
    )
    if estimator.isclassificationmodel && !estimator.isregressionmodel
        if is_nothing(estimator.underlyingrandomforest)
            # No trained forest: fall back to a degenerate distribution
            # putting all probability mass on the first level.
            predictedprobabilities = fill(
                Float64(0),
                size(featuresarray, 1),
                length(estimator.levels),
            )
            # BUGFIX: assigning a scalar to a slice requires
            # broadcasting (`.=`); plain `=` throws an ArgumentError
            # in Julia >= 1.0.
            predictedprobabilities[:, 1] .= 1
        else
            predictedprobabilities = DecisionTree.apply_forest_proba(
                estimator.underlyingrandomforest,
                featuresarray,
                estimator.levels,
            )
        end
        result = Dict()
        for i = 1:length(estimator.levels)
            result[estimator.levels[i]] = predictedprobabilities[:, i]
        end
        result = fix_type(result)
        return result
    elseif !estimator.isclassificationmodel && estimator.isregressionmodel
        error("predict_proba is not defined for regression models")
    else
        error(
            "Could not figure out if model is classification or regression"
        )
    end
end
"""
    single_labelmulticlassdfrandomforestclassifier_DecisionTree(feature_names, single_label_name, single_label_levels; kwargs...)

Build a DecisionTree.jl-backed random forest pipeline for single-label
multiclass classification of a data frame: feature transformer ->
random forest -> probability packager -> prediction packager.
"""
function single_labelmulticlassdfrandomforestclassifier_DecisionTree(
    feature_names::AbstractVector,
    single_label_name::Symbol,
    single_label_levels::AbstractVector;
    name::AbstractString = "",
    nsubfeatures::Integer = 2,
    ntrees::Integer = 10,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} = nothing,
    )
    feature_transformer = MutableDataFrame2DecisionTreeTransformer(
        feature_names,
        single_label_name;
        levels = single_label_levels,
    )
    forest_model = DecisionTreeModel(
        single_label_name;
        name = name,
        nsubfeatures = nsubfeatures,
        ntrees = ntrees,
        isclassificationmodel = true,
        isregressionmodel = false,
        levels = single_label_levels,
    )
    probability_packager = ImmutablePackageSingleLabelPredictProbaTransformer(
        single_label_name,
    )
    prediction_packager = ImmutablePackageSingleLabelPredictionTransformer(
        single_label_name,
    )
    pipeline = feature_transformer |>
        forest_model |>
        probability_packager |>
        prediction_packager
    pipeline.name = name
    if !is_nothing(feature_contrasts)
        set_feature_contrasts!(pipeline, feature_contrasts)
    end
    return pipeline
end
"""
    single_labelmulticlassdataframerandomforestclassifier(...; package = :none, kwargs...)

Dispatch on `package`; currently only `package = :DecisionTree` is
supported, and any other value throws an error.
"""
function single_labelmulticlassdataframerandomforestclassifier(
    feature_names::AbstractVector,
    single_label_name::Symbol,
    single_label_levels::AbstractVector;
    name::AbstractString = "",
    package::Symbol = :none,
    nsubfeatures::Integer = 2,
    ntrees::Integer = 10,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} = nothing,
    )
    # Guard clause: reject unsupported backends up front.
    if package != :DecisionTree
        error("$(package) is not a valid value for package")
    end
    return single_labelmulticlassdfrandomforestclassifier_DecisionTree(
        feature_names,
        single_label_name,
        single_label_levels;
        name = name,
        nsubfeatures = nsubfeatures,
        ntrees = ntrees,
        feature_contrasts = feature_contrasts
    )
end
"""
    single_labeldataframerandomforestregression_DecisionTree(feature_names, single_label_name; kwargs...)

Build a DecisionTree.jl-backed random forest pipeline for single-label
regression on a data frame: feature transformer -> random forest ->
prediction packager.
"""
function single_labeldataframerandomforestregression_DecisionTree(
    feature_names::AbstractVector,
    single_label_name::Symbol;
    name::AbstractString = "",
    nsubfeatures::Integer = 2,
    ntrees::Integer = 10,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} = nothing,
    )
    feature_transformer = MutableDataFrame2DecisionTreeTransformer(
        feature_names,
        single_label_name,
    )
    forest_model = DecisionTreeModel(
        single_label_name;
        name = name,
        nsubfeatures = nsubfeatures,
        ntrees = ntrees,
        isclassificationmodel = false,
        isregressionmodel = true,
    )
    prediction_packager = ImmutablePackageSingleLabelPredictionTransformer(
        single_label_name,
    )
    pipeline = feature_transformer |>
        forest_model |>
        prediction_packager
    pipeline.name = name
    if !is_nothing(feature_contrasts)
        set_feature_contrasts!(pipeline, feature_contrasts)
    end
    return pipeline
end
"""
    single_labeldataframerandomforestregression(feature_names, single_label_name; package = :none, kwargs...)

Dispatch on `package`; currently only `package = :DecisionTree` is
supported, and any other value throws an error.
"""
function single_labeldataframerandomforestregression(
    feature_names::AbstractVector,
    single_label_name::Symbol;
    name::AbstractString = "",
    package::Symbol = :none,
    nsubfeatures::Integer = 2,
    ntrees::Integer = 10,
    feature_contrasts::Union{Nothing, AbstractFeatureContrasts} = nothing,
    )
    # Guard clause: reject unsupported backends up front.
    if package != :DecisionTree
        error("$(package) is not a valid value for package")
    end
    return single_labeldataframerandomforestregression_DecisionTree(
        feature_names,
        single_label_name;
        name = name,
        nsubfeatures = nsubfeatures,
        ntrees = ntrees,
        feature_contrasts = feature_contrasts
    )
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1802 | import DataFrames
"""
    get_unique_values(itr; skip_missings = true)

Return the unique values of `itr`, in order of first appearance;
`missing`s are dropped unless `skip_missings` is `false`.
"""
function get_unique_values(itr; skip_missings::Bool = true)
    if skip_missings
        return get_unique_values_skip_missings(itr)
    else
        return get_unique_values_include_missings(itr)
    end
end

# Unique non-missing values, in order of first appearance.
function get_unique_values_skip_missings(itr)
    return unique(skipmissing(collect(itr)))
end

# Unique values with `missing` treated as a value of its own.
function get_unique_values_include_missings(itr)
    return unique(collect(itr))
end

"""
    get_number_of_unique_values(itr; skip_missings = true) -> Int

Count the unique values of `itr`; `missing`s are ignored unless
`skip_missings` is `false`.
"""
function get_number_of_unique_values(itr; skip_missings::Bool = true)::Int
    if skip_missings
        return get_number_of_unique_values_skip_missings(itr)
    else
        return get_number_of_unique_values_include_missings(itr)
    end
end

# Count of unique non-missing values.
function get_number_of_unique_values_skip_missings(itr)::Int
    return length(get_unique_values_skip_missings(itr))
end

# Count of unique values, `missing` included.
function get_number_of_unique_values_include_missings(itr)::Int
    return length(get_unique_values_include_missings(itr))
end
"""
    find_constant_columns(df) -> Vector{Symbol}

Return the names of the columns of `df` that have fewer than two
unique non-missing values (i.e. constant columns).
"""
function find_constant_columns(
    df::DataFrames.AbstractDataFrame,
    )::Vector{Symbol}
    return Symbol[
        column_name for column_name in DataFrames.names(df) if
            get_number_of_unique_values(df[column_name]) < 2
    ]
end
"""
    check_no_constant_columns(df) -> Bool

Return `true` if `df` has no constant columns; otherwise throw an
error naming the offending columns.
"""
function check_no_constant_columns(
    df::DataFrames.AbstractDataFrame,
    )::Bool
    list_of_constant_column_names = find_constant_columns(df)
    if length(list_of_constant_column_names) > 0
        # BUGFIX: removed a stray unmatched `"` character that was
        # previously appended to the end of this error message.
        error(
            string(
                "Data frame contains the following constant columns: ",
                list_of_constant_column_names,
            )
        )
    else
        return true
    end
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 5559 | import DataFrames
"""
    fix_column_types!(df; categorical_feature_names = Symbol[], ..., float_type = Float64)

Coerce the columns of `df` in place: columns listed as categorical
become string-valued (via `string`), columns listed as continuous are
converted with `float_type`, and `missing`s are preserved throughout.
Finally every column is passed through `fix_type` to tighten its
element type. Returns `nothing`.
"""
function fix_column_types!(
    df::DataFrames.AbstractDataFrame;
    categorical_feature_names::AbstractVector{Symbol} = Symbol[],
    continuous_feature_names::AbstractVector{Symbol} = Symbol[],
    categorical_label_names::AbstractVector{Symbol} = Symbol[],
    continuous_label_names::AbstractVector{Symbol} = Symbol[],
    float_type::Type{<:AbstractFloat} = Float64,
    )::Nothing
    # Build a column transformer that applies `f` to each non-missing
    # element and leaves `missing`s untouched.
    function missing_preserving_mapper(f)
        function mapper(old_column::AbstractVector)::Vector{Any}
            new_column = Vector{Any}(undef, length(old_column))
            for index = 1:length(old_column)
                element = old_column[index]
                if DataFrames.ismissing(element)
                    new_column[index] = DataFrames.missing
                else
                    new_column[index] = f(element)
                end
            end
            return new_column
        end
        return mapper
    end
    make_categorical = missing_preserving_mapper(string)
    make_continuous = missing_preserving_mapper(float_type)
    # Preserve the original transformation order: categorical features,
    # continuous features, categorical labels, continuous labels.
    for column_name in categorical_feature_names
        transform_columns!(df, make_categorical, column_name)
    end
    for column_name in continuous_feature_names
        transform_columns!(df, make_continuous, column_name)
    end
    for column_name in categorical_label_names
        transform_columns!(df, make_categorical, column_name)
    end
    for column_name in continuous_label_names
        transform_columns!(df, make_continuous, column_name)
    end
    # Tighten the element type of every column.
    for column_name in DataFrames.names(df)
        transform_columns!(df, fix_type, column_name)
    end
    return nothing
end
"""
    check_column_types(df; categorical_feature_names = Symbol[], ...)

Verify that every column of `df` listed as categorical has a string
eltype, every column listed as continuous has a float eltype, and all
remaining columns are one of the two. `missing`s are skipped when
computing a column's eltype. Throws an informative error on the first
violation; returns `nothing` on success.
"""
function check_column_types(
    df::DataFrames.AbstractDataFrame;
    categorical_feature_names::AbstractVector{Symbol} = Symbol[],
    continuous_feature_names::AbstractVector{Symbol} = Symbol[],
    categorical_label_names::AbstractVector{Symbol} = Symbol[],
    continuous_label_names::AbstractVector{Symbol} = Symbol[],
    )::Nothing
    for column_name in DataFrames.names(df)
        # The column's eltype once `missing`s are dropped.
        column_eltype=eltype(
            collect(
                DataFrames.skipmissing(
                    df[column_name]
                )
            )
        )
        # Categorical takes priority over continuous when a column is
        # (erroneously) listed in both, matching the original check order.
        is_categorical = (column_name in categorical_feature_names) ||
            (column_name in categorical_label_names)
        is_continuous = (column_name in continuous_feature_names) ||
            (column_name in continuous_label_names)
        if is_categorical
            if !(column_eltype<:AbstractString)
                error(
                    string(
                        "Column \"",
                        column_name,
                        "\" has eltype() \"",
                        column_eltype,
                        "\". However, this column is categorical,",
                        "and therefore its eltype() must be <: ",
                        "AbstractString.",
                    )
                )
            end
        elseif is_continuous
            if !(column_eltype<:AbstractFloat)
                error(
                    string(
                        "Column \"",
                        column_name,
                        "\" has eltype() \"",
                        column_eltype,
                        "\". However, this column is continuous,",
                        "and therefore its eltype() must be <:",
                        "AbstractFloat.",
                    )
                )
            end
        else
            # Unlisted columns must still be strings or floats.
            # BUGFIX: this error previously fired on AbstractFloat
            # columns (which are allowed) instead of on columns that
            # are neither strings nor floats (which are not).
            if column_eltype<:AbstractString
            elseif column_eltype<:AbstractFloat
            else
                error(
                    string(
                        "Column \"",
                        column_name,
                        "\" has eltype() \"",
                        column_eltype,
                        "\". However, we only allow AbstractStrings and ",
                        "AbstractFloats. Use T <: AbstractString if",
                        "it is a categorical column. Use T <: ",
                        "AbstractFloat if it is a continuous column."
                    )
                )
            end
        end
    end
    return nothing
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 145 | """
"""
"""
    filename_extension(filename::AbstractString)

Return the lowercased, whitespace-stripped extension of `filename`
(including the leading dot, e.g. `".csv"`); the empty string when
there is no extension.
"""
function filename_extension(filename::AbstractString)
    _, extension = splitext(strip(filename))
    return lowercase(strip(extension))
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 769 | """
"""
"""
    fix_type(x[, T])

Tighten the type of `x`: scalars pass through unchanged, arrays are
rebuilt with the narrowest common element type, and dicts are rebuilt
with narrowed key and value types. The two-argument form additionally
converts the tightened result to `T`.
"""
function fix_type
end

fix_type(x::Any) = x

fix_type(x::Any, T) = convert(T, fix_type(x))

function fix_type(x::AbstractArray)::Array
    fixed_elements = [fix_type(element) for element in x]
    # Splatting into a vector literal narrows the element type.
    return reshape([fixed_elements...], size(x))
end

function fix_type(
    x::AbstractDict;
    default_key_type::Type=Any,
    default_value_type::Type=Any,
    )::Dict
    if isempty(x)
        return Dict{default_key_type, default_value_type}()
    end
    key_type = eltype(fix_type(collect(keys(x))))
    value_type = eltype(fix_type(collect(values(x))))
    narrowed = Dict{key_type, value_type}()
    for key in keys(x)
        narrowed[key] = x[key]
    end
    return narrowed
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 3018 | import Combinatorics
import StatsModels
"""
    generate_formula(lhs, rhs)

Build a `StatsModels` formula whose left-hand side is the sum of the
terms in `lhs` and whose right-hand side is the sum of the terms in
`rhs`. Both inputs are deep-copied so the caller's terms are not
aliased by the returned formula.
"""
function generate_formula(lhs::AbstractVector{<:StatsModels.AbstractTerm},
                          rhs::AbstractVector{<:StatsModels.AbstractTerm})
    return sum(deepcopy(lhs)) ~ sum(deepcopy(rhs))
end
"""
    generate_formula(dependent_variable::Symbol, independent_variables; intercept = true, interactions = 1)

Convenience wrapper for a single dependent variable; see the vector
method for details.
"""
function generate_formula(
    dependent_variable::Symbol,
    independent_variables::AbstractVector{<:Symbol};
    intercept::Bool = true,
    interactions::Integer = 1,
    )
    return generate_formula(
        [dependent_variable],
        independent_variables;
        intercept = intercept,
        interactions = interactions,
    )
end
"""
    generate_formula(dependent_variables, independent_variables; intercept = true, interactions = 1)

Build a `StatsModels` formula `dep1 + dep2 + ... ~ rhs`, where the
right-hand side contains an intercept term (`1`, or `0` when
`intercept` is `false`) plus all interaction terms of the independent
variables up to order `interactions`.

Throws an error when either variable list is empty or when
`interactions` is outside `1:length(independent_variables)`.
"""
function generate_formula(
    dependent_variables::AbstractVector{<:Symbol},
    independent_variables::AbstractVector{<:Symbol};
    intercept::Bool = true,
    interactions::Integer = 1,
    )
    if length(dependent_variables) < 1
        error("length(dependent_variables) must be >= 1")
    end
    if length(independent_variables) < 1
        error("length(independent_variables) must be >= 1")
    end
    if interactions < 1
        error("interactions must be >= 1")
    end
    if interactions > length(independent_variables)
        # BUGFIX: grammar of this message ("variable" -> "variables").
        error("interactions must be <= the number of independent variables")
    end
    intercept_term = intercept ? StatsModels.term(1) : StatsModels.term(0)
    lhs_terms = Vector{StatsModels.AbstractTerm}(undef, 0)
    for dep_var in deepcopy(dependent_variables)
        push!(lhs_terms, StatsModels.term(dep_var))
    end
    rhs_terms = Vector{StatsModels.AbstractTerm}(undef, 0)
    push!(rhs_terms, intercept_term)
    for interaction_term in generate_interaction_terms_up_to_level(
            deepcopy(independent_variables),
            interactions)
        push!(rhs_terms, interaction_term)
    end
    return generate_formula(lhs_terms, rhs_terms)
end
# Return all distinct interaction terms of orders 1..`interactions`
# formed from the symbols in `xs`.
function generate_interaction_terms_up_to_level(
    xs::AbstractVector{<:Symbol},
    interactions::Integer,
    )
    terms = Vector{StatsModels.AbstractTerm}(undef, 0)
    for level in 1:interactions
        append!(terms, generate_interaction_terms_at_level(xs, level))
    end
    unique!(terms)
    return terms
end
# Return the distinct interaction terms (`a & b & ...`) formed from
# every combination of exactly `level` symbols drawn from `xs`.
function generate_interaction_terms_at_level(
    xs::AbstractVector{<:Symbol},
    level::Integer,
    )
    terms = Vector{StatsModels.AbstractTerm}(undef, 0)
    for combination in Combinatorics.combinations(xs, level)
        push!(terms, reduce(&, StatsModels.term.(combination)))
    end
    unique!(terms)
    return terms
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 835 | """
"""
"""
    is_one_to_one(x::AbstractDict) -> Bool

Return `true` when the mapping `x` is one-to-one, i.e. its keys and
its values each contain no duplicates.
"""
function is_one_to_one(x::AbstractDict)::Bool
    return allunique(collect(keys(x))) && allunique(collect(values(x)))
end
"""
    inverse(x::AbstractDict) -> Dict

Return the inverse mapping of `x` (values become keys, keys become
values), with tightened key/value types. Throws an error when `x` is
not one-to-one.
"""
function inverse(x::AbstractDict)::Dict
    if !is_one_to_one(x)
        # BUGFIX: message previously read "directory" instead of
        # "dictionary".
        error(
            string(
                "Input dictionary is not one-to-one."
            )
        )
    end
    keys_array = fix_type(collect(keys(x)))
    values_array = fix_type(collect(values(x)))
    key_type = eltype(keys_array)
    value_type = eltype(values_array)
    result = Dict{value_type, key_type}()
    for i = 1:length(keys_array)
        # BUGFIX: the lookup used `values_array[i[]]`; the spurious
        # `i[]` (zero-dimensional indexing of an integer) happened to
        # work but was clearly unintended.
        result[values_array[i]] = keys_array[i]
    end
    return fix_type(result)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 660 | """
"""
"""
    getlabelstring2intmap(levels, index)

Return a dict mapping each label in `levels` to consecutive integers
starting at `index`. Throws when `levels` is empty.
"""
function getlabelstring2intmap(
    levels::AbstractVector,
    index::Integer,
    )
    if length(levels) == 0
        error("length(levels) == 0")
    end
    mapping = Dict()
    for (offset, level) in enumerate(levels)
        mapping[level] = index + offset - 1
    end
    return fix_type(mapping)
end
"""
    getlabelint2stringmap(levels, index)

Return a dict mapping consecutive integers starting at `index` to the
labels in `levels` (the inverse of `getlabelstring2intmap`). Throws
when `levels` is empty.
"""
function getlabelint2stringmap(
    levels::AbstractVector,
    index::Integer,
    )
    if length(levels) == 0
        error("length(levels) == 0")
    end
    mapping = Dict()
    for (offset, level) in enumerate(levels)
        mapping[index + offset - 1] = level
    end
    return fix_type(mapping)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2466 | import DataFrames
import GLM
import StatsModels
"""
    get_unique_symbol_name(v, new_symbol = :x; append_string = "1") -> Symbol

Return a symbol not already present (as a stripped string) in `v`,
obtained by repeatedly appending `append_string` to `new_symbol` until
the candidate is unique.
"""
function get_unique_symbol_name(
    v::AbstractVector{<:Symbol},
    new_symbol::Symbol = :x;
    append_string::String = "1"
    )::Symbol
    taken_names = Set{String}(strip.(string.(v)))
    candidate = strip(string(new_symbol))
    suffix = strip(append_string)
    while candidate in taken_names
        candidate = string(candidate, suffix)
    end
    return Symbol(candidate)
end
"""
    columns_are_linearly_independent(df, columns = Symbol[]) -> Bool

Heuristically check whether the given columns of `df` (all columns by
default) are linearly independent, by fitting a throwaway linear model
against random noise and inspecting it for degeneracy. Uses the global
RNG, so results on borderline data are stochastic.
"""
function columns_are_linearly_independent(
    df::DataFrames.DataFrame,
    columns::AbstractVector{<:Symbol} = Symbol[],
    )::Bool
    working_df = deepcopy(df)
    selected_columns = length(columns) == 0 ?
        deepcopy(DataFrames.names(working_df)) : deepcopy(columns)
    # A response name guaranteed not to clash with the columns.
    response_name::Symbol = get_unique_symbol_name(
        selected_columns,
        :y,
    )
    model_formula = generate_formula(
        response_name,
        selected_columns;
        intercept = false,
        interactions = 1,
    )
    working_df[response_name] = randn(size(working_df, 1))
    fit_succeeded::Bool = try
        GLM.lm(model_formula, working_df, false)
        true
    catch ex
        @debug("caught exception: ", exception=ex,)
        false
    end
    no_dependent_columns = length(linearly_dependent_columns(df, columns)) == 0
    return fit_succeeded && no_dependent_columns
end
"""
    linearly_dependent_columns(df, columns = Symbol[]) -> Vector{Symbol}

Return the sorted, de-duplicated names of coefficient rows that come
out with `NaN` p-values when fitting a throwaway linear model against
random noise — an indicator of linear dependence among the columns.
"""
function linearly_dependent_columns(
    df::DataFrames.DataFrame,
    columns::AbstractVector{<:Symbol} = Symbol[])::Vector{Symbol}
    working_df = deepcopy(df)
    selected_columns = length(columns) == 0 ?
        deepcopy(DataFrames.names(working_df)) : deepcopy(columns)
    # A response name guaranteed not to clash with the columns.
    response_name::Symbol = get_unique_symbol_name(
        selected_columns,
        :y,
    )
    model_formula = generate_formula(
        response_name,
        selected_columns;
        intercept = false,
        interactions = 1,
    )
    working_df[response_name] = randn(size(working_df, 1))
    fitted_model = GLM.lm(model_formula, working_df, true)
    coefficient_table = StatsModels.coeftable(fitted_model)
    # Rows whose 4th column (the p-value) is NaN indicate dependence.
    dependent_names::Vector{String} = sort(
        unique(strip.(coefficient_table.rownms[isnan.(coefficient_table.cols[4])]))
    )
    return Symbol.(dependent_names)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 142 | function maketempdir()::String
path::String = mktempdir()
atexit(() -> rm(path; force = true, recursive = true,))
return path
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1229 | import DataFrames
"""
    disallowmissing(x::AbstractArray{Union{Missing, T}, N}) -> Array{T, N}

Convert an array that allows `missing` into one that does not; throws
if `x` actually contains any `missing` element.
"""
function disallowmissing(x::AbstractArray{Union{Missing, T}, N})::Array{T, N} where T where N
    return convert(Array{T, N}, x)
end
"""
    convert_value_to_missing!(df, value, column_names)

Replace every occurrence of `value` in the given column(s) of `df`
with `missing`, in place. Accepts either a single column `Symbol` or
an array of column `Symbol`s. Returns `nothing`.
"""
function convert_value_to_missing! end
"""
    convert_value_to_missing!(df, value, column_names::AbstractArray{Symbol})

Replace every element equal to `value` in the listed columns of `df`
with `missing`, in place (existing `missing`s are preserved). Returns
`nothing`.
"""
function convert_value_to_missing!(
    df::DataFrames.AbstractDataFrame,
    value,
    column_names::AbstractArray{Symbol},
    )::Nothing
    # Map one column: keep `missing`s, turn `value` into `missing`,
    # and pass every other element through unchanged.
    function f(old_vector::AbstractVector)::Vector{Any}
        # BUGFIX: `Vector{Any}(n)` was removed in Julia 1.0; the
        # uninitialized constructor requires the `undef` argument.
        new_vector = Vector{Any}(undef, length(old_vector))
        for i = 1:length(old_vector)
            old_vector_ith_element = old_vector[i]
            if DataFrames.ismissing(old_vector_ith_element)
                new_vector[i] = DataFrames.missing
            elseif old_vector_ith_element == value
                new_vector[i] = DataFrames.missing
            else
                new_vector[i] = old_vector_ith_element
            end
        end
        return new_vector
    end
    transform_columns!(df, f, column_names)
    return nothing
end
"""
    convert_value_to_missing!(df, value, column_name::Symbol)

Single-column convenience method; forwards to the array-of-columns
method. Returns `nothing`.
"""
function convert_value_to_missing!(
    df::DataFrames.AbstractDataFrame,
    value,
    column_name::Symbol,
    )::Nothing
    return convert_value_to_missing!(df, value, Symbol[column_name])
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 205 | """
"""
"""
    delete_nothings!(x::AbstractVector)

Remove every `nothing` element from `x` in place. Returns `nothing`.
"""
function delete_nothings!(x::AbstractVector)::Nothing
    # `≠ nothing` (rather than `!==`) is kept deliberately to match
    # the original equality-based semantics.
    filter!(element -> element ≠ nothing, x)
    return nothing
end
is_nothing(::Nothing)::Bool = true
is_nothing(::Any)::Bool = false
"""
    is_nothing(x) -> Bool

Return `true` when `x` is `nothing`, `false` otherwise (implemented
via dispatch rather than a runtime comparison).
"""
is_nothing
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 2536 | # Parts of this file are based on:
# 1. https://github.com/JuliaPlots/Plots.jl/blob/master/src/backends/web.jl
# 2. https://github.com/JuliaGraphics/Luxor.jl/blob/master/src/Luxor.jl
# 3. https://github.com/tpapp/DefaultApplication.jl/blob/master/src/DefaultApplication.jl
import FileIO
# """
# """
# function open_browser_window(filename::Nothing, a::AbstractDict = ENV)
# @warn("no filename to open")
# return filename
# end
# """
# """
# function open_browser_window(filename::AbstractString, a::AbstractDict = ENV)
# extension = filename_extension(filename)
# is_svg_file = extension == ".svg"
# is_png_file = extension == ".png"
# is_svg_or_png_file = is_svg_file || is_png_file
# is_ijulia = isdefined(Main, :IJulia) && Main.IJulia.inited
# if is_ijulia && is_svg_or_png_file
# Main.IJulia.clear_output(true)
# if is_svg_file
# open(filename) do f
# display(
# "image/svg+xml",
# readstring(f),
# )
# end
# elseif is_png_file
# # We use Base.invokelatest to avoid world age errors
# Base.invokelatest(
# display,
# "image/png",
# FileIO.load(filename),
# )
# end
# elseif (is_make_examples(a)) ||
# (is_make_docs(a)) ||
# (is_runtests(a) && !open_plots_during_tests(a))
# @debug(
# string(
# "Skipping opening file: ",
# filename,
# )
# )
# else
# @debug(string("Opening file ",filename,))
# if Sys.isapple()
# try
# run(`open $(filename)`)
# catch e
# @warn(string("ignoring error: "), e)
# end
# elseif Sys.islinux() || Sys.isbsd()
# try
# run(`xdg-open $(filename)`)
# catch e
# @warn(string("ignoring error: "), e)
# end
# elseif Sys.iswindows()
# try
# run(`$(ENV["COMSPEC"]) /c start "" "$(filename)"`)
# catch e
# @warn(string("ignoring error: "), e)
# end
# else
# @warn(
# string(
# "unknown operating system; could not open file ",
# filename,
# )
# )
# end
# end
# return filename
# end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 220 | """
"""
"""
    open_plots_during_tests(a::AbstractDict = ENV) -> Bool

Return `true` when the environment mapping `a` sets
`PREDICTMD_OPEN_PLOTS_DURING_TESTS` to "true" (case-insensitive,
surrounding whitespace ignored).
"""
function open_plots_during_tests(a::AbstractDict = ENV)
    raw_value = get(a, "PREDICTMD_OPEN_PLOTS_DURING_TESTS", "")
    return lowercase(strip(raw_value)) == "true"
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 438 | import DataFrames
"""
    predictionsassoctodataframe(probabilitiesassoc, label_names = [])

Assemble a `DataFrame` with one column per label. When `label_names`
is empty, all keys of `probabilitiesassoc` are used, sorted.
"""
function predictionsassoctodataframe(
    probabilitiesassoc::AbstractDict,
    label_names::AbstractVector = [],
    )
    columns = length(label_names) == 0 ?
        sort(unique(collect(keys(probabilitiesassoc)))) : label_names
    result = DataFrames.DataFrame()
    for column in columns
        result[column] = probabilitiesassoc[column]
    end
    return result
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 3911 | """
"""
"""
    multilabelprobabilitiestopredictions(probabilitiesassoc; float_type = Float64)

For each label in `probabilitiesassoc` (a dict mapping label name =>
class-probability dict), convert the class probabilities into hard
class predictions via arg-max. Returns a dict mapping label name =>
prediction vector.
"""
function multilabelprobabilitiestopredictions(
    probabilitiesassoc::AbstractDict;
    float_type::Type{<:AbstractFloat} = Float64,
    )
    result = Dict()
    label_names = sort(unique(collect(keys(probabilitiesassoc))))
    for j = 1:length(label_names)
        result[label_names[j]] = single_labelprobabilitiestopredictions(
            probabilitiesassoc[label_names[j]];
            float_type=float_type,
        )
    end
    # BUGFIX: this used to be `result = Dict()`, which discarded all
    # of the predictions just computed and returned an empty dict.
    result = fix_type(result)
    return result
end
"""
    multilabelprobabilitiestopredictions(probabilitiesassoc, thresholds; float_type = Float64)

As the single-argument method, but labels present in `thresholds`
(label name => `(positive_class, threshold)`) are converted with the
thresholded binary rule instead of the arg-max rule.
"""
function multilabelprobabilitiestopredictions(
    probabilitiesassoc::AbstractDict,
    thresholds::AbstractDict;
    float_type::Type{<:AbstractFloat} = Float64,
    )
    result = Dict()
    label_names = sort(unique(collect(keys(probabilitiesassoc))))
    for j = 1:length(label_names)
        if haskey(thresholds, label_names[j])
            (positive_class, threshold) = thresholds[label_names[j]]
            result[label_names[j]] = single_labelprobabilitiestopredictions(
                probabilitiesassoc[label_names[j]],
                positive_class,
                threshold;
                float_type=float_type,
            )
        else
            result[label_names[j]] = single_labelprobabilitiestopredictions(
                probabilitiesassoc[label_names[j]];
                float_type=float_type,
            )
        end
    end
    # BUGFIX: this used to be `result = Dict()`, which discarded all
    # of the predictions just computed and returned an empty dict.
    result = fix_type(result)
    return result
end
const probabilitiestopredictions = multilabelprobabilitiestopredictions
"""
    single_labelprobabilitiestopredictions(probabilitiesassoc; float_type = Float64)

Convert a dict of class => probability-vector into a vector of class
predictions: each row is assigned the class with the highest predicted
probability (ties go to the first class in sorted order).
"""
function single_labelprobabilitiestopredictions(
    probabilitiesassoc::AbstractDict;
    float_type::Type{<:AbstractFloat} = Float64,
    )
    classes = sort(unique(collect(keys(probabilitiesassoc))))
    num_rows = size(probabilitiesassoc[classes[1]], 1)
    probability_matrix = Matrix{float_type}(
        undef,
        num_rows,
        length(classes),
    )
    for (column, class) in enumerate(classes)
        probability_matrix[:, column] = float_type.(probabilitiesassoc[class])
    end
    return String[
        string(classes[argmax(probability_matrix[row, :])]) for row = 1:num_rows
    ]
end
"""
    single_labelprobabilitiestopredictions(probabilitiesassoc, positive_class, threshold; float_type = Float64)

Binary-classification variant: predict `positive_class` for a row
exactly when its predicted probability of `positive_class` exceeds
`threshold`; otherwise predict the other class. Throws an error when
there are not exactly two classes or when `positive_class` is not one
of them.
"""
function single_labelprobabilitiestopredictions(
    probabilitiesassoc::AbstractDict,
    positive_class,
    threshold::AbstractFloat;
    float_type::Type{<:AbstractFloat} = Float64,
    )
    classes = sort(unique(collect(keys(probabilitiesassoc))))
    numclasses = length(classes)
    numrows = size(probabilitiesassoc[classes[1]], 1)
    probabilitiesmatrix = Matrix{float_type}(
        undef,
        numrows,
        numclasses,
    )
    for j = 1:numclasses
        probabilitiesmatrix[:, j] = float_type.(probabilitiesassoc[classes[j]])
    end
    # Validate the binary setting up front. NOTE(review): the error
    # messages say "so ignoring threshold", but this function has
    # always thrown here — the `use_threshold = false` assignments
    # after each `error(...)` (and the arg-max fallback they fed) were
    # unreachable dead code and have been removed. The message text is
    # kept unchanged for compatibility.
    if numclasses != 2
        error("numclasses is not 2, so ignoring threshold")
    end
    if !(positive_class in classes)
        @error("", positive_class, classes)
        error("positive_class is not in the list of classes, so ignoring threshold")
    end
    positive_class_column::Int = findfirst(classes .== positive_class)
    negative_class = first(setdiff(classes, [positive_class]))
    predictionsvector = Vector{String}(undef, numrows,)
    for i = 1:numrows
        if probabilitiesmatrix[i, positive_class_column] > threshold
            predictionsvector[i] = string(positive_class)
        else
            predictionsvector[i] = string(negative_class)
        end
    end
    return predictionsvector
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 778 | """
"""
# Shared helper: true iff `a[key]` is (case-insensitively, with
# surrounding whitespace ignored) the string "true".
_predictmd_env_flag(a::AbstractDict, key::AbstractString) =
    lowercase(strip(get(a, key, ""))) == "true"
"""
    is_runtests(a::AbstractDict = ENV) -> Bool

True when `PREDICTMD_IS_RUNTESTS` is set to "true" in `a`.
"""
function is_runtests(a::AbstractDict = ENV)
    return _predictmd_env_flag(a, "PREDICTMD_IS_RUNTESTS")
end
"""
    is_make_examples(a::AbstractDict = ENV) -> Bool

True when `PREDICTMD_IS_MAKE_EXAMPLES` is set to "true" in `a`.
"""
function is_make_examples(a::AbstractDict = ENV)
    return _predictmd_env_flag(a, "PREDICTMD_IS_MAKE_EXAMPLES")
end
"""
    is_make_docs(a::AbstractDict = ENV) -> Bool

True when `PREDICTMD_IS_MAKE_DOCS` is set to "true" in `a`.
"""
function is_make_docs(a::AbstractDict = ENV)
    return _predictmd_env_flag(a, "PREDICTMD_IS_MAKE_DOCS")
end
"""
    is_deploy_docs(a::AbstractDict = ENV) -> Bool

True when `PREDICTMD_IS_DEPLOY_DOCS` is set to "true" in `a`.
"""
function is_deploy_docs(a::AbstractDict = ENV)
    return _predictmd_env_flag(a, "PREDICTMD_IS_DEPLOY_DOCS")
end
"""
    is_docs_or_examples(a::AbstractDict = ENV) -> Bool

True when any of the examples/docs/deploy-docs flags is set in `a`.
"""
is_docs_or_examples(a::AbstractDict = ENV) =
    is_make_examples(a) || is_make_docs(a) || is_deploy_docs(a)
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 616 | import DataFrames
import Random
import StatsBase
"""
    shuffle_rows!(dataframe)

Shuffle the rows of `dataframe` in place using the global RNG.
"""
function shuffle_rows!(
    dataframe::DataFrames.AbstractDataFrame,
    )
    return shuffle_rows!(Random.GLOBAL_RNG, dataframe)
end
"""
    shuffle_rows!(rng, dataframe)

Draw a single random row permutation from `rng` and apply it to every
column of `dataframe`, in place. Returns the data frame.
"""
function shuffle_rows!(
    rng::Random.AbstractRNG,
    dataframe::DataFrames.AbstractDataFrame,
    )
    number_of_rows = size(dataframe, 1)
    row_permutation = Random.shuffle!(rng, collect(1:number_of_rows))
    for column_index = 1:size(dataframe, 2)
        dataframe[:, column_index] = dataframe[row_permutation, column_index]
    end
    return dataframe
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
import Statistics
"""
    simple_moving_average(x::AbstractVector, window::Integer)

Return the centered simple moving average of `x`: element `i` is the mean of
`x[max(1, i - window):min(end, i + window)]`. Near the edges the window is
truncated rather than padded. Throws an error if `window` is negative.

The element type of the result is the type returned by `Statistics.mean`
(e.g. `Float64` for integer input).
"""
function simple_moving_average(
    x::AbstractVector,
    window::Integer,
    )
    # The original signature carried a spurious, unused `where T` static
    # parameter; it has been removed.
    if window < 0
        error("window must be >=0")
    end
    n = length(x)
    # Build the result with a comprehension so the element type follows
    # `mean`. The previous `similar(x)` buffer raised InexactError for
    # integer vectors whenever a window mean was fractional.
    return [
        Statistics.mean(view(x, max(1, i - window):min(n, i + window)))
        for i = 1:n
    ]
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
# import TikzPictures
# """
# """
# function save_plot(filename::AbstractString, tp::TikzPictures.TikzPicture)
# filename = strip(filename)
# if length(filename) == 0
# error("filename is an empty string")
# end
# extension = filename_extension(filename)
# if extension == ".pdf"
# save_result = save_plot_pdf(filename, tp)
# elseif extension == ".tex"
# save_result = save_plot_tex(filename, tp)
# elseif extension == ".tikz"
# save_result = save_plot_tikz(filename, tp)
# elseif extension == ".svg"
# save_result = save_plot_svg(filename, tp)
# else
# error("File extension must be one of: .pdf, .tex, .tikz, .svg")
# end
# return filename
# end
# """
# """
# function save_plot_pdf(
# filename::AbstractString,
# tp::TikzPictures.TikzPicture,
# )
# filename = strip(filename)
# if length(filename) == 0
# error("filename is an empty string")
# end
# extension = filename_extension(filename)
# if extension != ".pdf"
# error("filename must end in .pdf")
# end
# parent_directory = Base.Filesystem.dirname(filename)
# try
# Base.Filesystem.mkpath(parent_directory)
# catch
# end
# save_result = try
# TikzPictures.save(TikzPictures.PDF(filename), tp)
# catch e
# handle_plotting_error(e)
# end
# return filename
# end
# """
# """
# function save_plot_tex(
# filename::AbstractString,
# tp::TikzPictures.TikzPicture,
# )
# filename = strip(filename)
# if length(filename) == 0
# error("filename is an empty string")
# end
# extension = filename_extension(filename)
# if extension != ".tex"
# error("filename must end in .tex")
# end
# parent_directory = Base.Filesystem.dirname(filename)
# try
# Base.Filesystem.mkpath(parent_directory)
# catch
# end
# save_result = try
# TikzPictures.save(TikzPictures.TEX(filename), tp)
# catch e
# handle_plotting_error(e)
# end
# return filename
# end
# """
# """
# function save_plot_tikz(
# filename::AbstractString,
# tp::TikzPictures.TikzPicture,
# )
# filename = strip(filename)
# if length(filename) == 0
# error("filename is an empty string")
# end
#     extension = filename_extension(filename)
#     if extension != ".tikz"
#         error("filename must end in .tikz")
#     end
# parent_directory = Base.Filesystem.dirname(filename)
# try
# Base.Filesystem.mkpath(parent_directory)
# catch
# end
# save_result = try
# TikzPictures.save(TikzPictures.TIKZ(filename), tp)
# catch e
# handle_plotting_error(e)
# end
# return filename
# end
# """
# """
# function save_plot_svg(
# filename::AbstractString,
# tp::TikzPictures.TikzPicture,
# )
# filename = strip(filename)
# if length(filename) == 0
# error("filename is an empty string")
# end
# extension = filename_extension(filename)
# if extension != ".svg"
# error("filename must end in .svg")
# end
# parent_directory = Base.Filesystem.dirname(filename)
# try
# Base.Filesystem.mkpath(parent_directory)
# catch
# end
# save_result = try
# TikzPictures.save(TikzPictures.SVG(filename), tp)
# catch e
# handle_plotting_error(e)
# end
# return filename
# end
# """
# """
# function open_plot(tp::TikzPictures.TikzPicture)
# temp_svg_filename = string(tempname(), ".svg")
# save_result = open_plot(temp_svg_filename, tp)
# return temp_svg_filename
# end
# """
# """
# function open_plot(
# filename::AbstractString,
# tp::TikzPictures.TikzPicture,
# )
# filename = strip(filename)
# if length(filename) == 0
# error("filename is an empty string")
# end
# saveresult = save_plot(filename, tp)
# open_result = open_browser_window(filename)
# return filename
# end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
import DataFrames
"""
    transform_columns!(df, f, column_name::Symbol)
    transform_columns!(df, f, column_names::AbstractArray{Symbol})

Replace the named column(s) of `df` in place with `fix_type(f(column))`,
where `f` receives the whole column vector. The column is deleted and then
reassigned, so — per DataFrames assignment semantics — it ends up appended
at the end of the column order; TODO confirm no caller relies on column
position. Returns `nothing`.
"""
function transform_columns! end
function transform_columns!(
    df::DataFrames.AbstractDataFrame,
    f::Function,
    column_name::Symbol,
    )::Nothing
    old_column = df[column_name]
    # Delete first so the reassignment below is free to change the column's
    # element type.
    DataFrames.deletecols!(df, column_name,)
    # `fix_type` is defined elsewhere in this package; presumably it
    # normalizes the element type of the transformed column — verify there.
    new_column = fix_type(f(old_column))
    df[column_name] = new_column
    return nothing
end
function transform_columns!(
    df::DataFrames.AbstractDataFrame,
    f::Function,
    column_names::AbstractArray{Symbol},
    )::Nothing
    # Delegate to the single-column method for each requested column.
    foreach(name -> transform_columns!(df, f, name), column_names)
    return nothing
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
import NumericalIntegration
"""
trapz(x, y)
Compute the area under the curve of 2-dimensional points (x, y) using
the trapezoidal method.
"""
# Generic zero-method declaration; the docstring above documents the
# public contract, and the concrete methods follow.
function trapz end
# Promote `x` and `y` to a common element type before dispatching to the
# typed method below. NOTE(review): assumes `promote` can unify the two
# vectors to identical types; otherwise this line would recurse — confirm
# callers always pass numeric vectors.
trapz(x, y) = trapz(promote(x, y)...)
# Concrete trapezoidal-rule implementation for two same-typed vectors.
# `x` must be sorted ascending; the result is computed by
# NumericalIntegration and cross-checked against a hand-rolled sum.
function trapz(
    x::AbstractVector{T},
    y::AbstractVector{T},
    )::T where T
    if length(x) != length(y)
        error("length(x) != length(y)")
    end
    if length(x) == 0
        error("length(x) == 0")
    end
    # `issorted` checks the ascending-order precondition without allocating
    # a sorted copy (replaces `all(x .== sort(x; rev = false))`).
    if !issorted(x)
        error("x needs to be sorted in ascending order")
    end
    # Accumulate twice the trapezoidal sum: Σ (y[k] + y[k-1])(x[k] - x[k-1]).
    twoI::T = zero(T)
    for k = 2:length(x)
        twoI += ( y[k] + y[k-1] ) * ( x[k] - x[k-1] )
    end
    I_verify::T = NumericalIntegration.integrate(
        x,
        y,
        NumericalIntegration.Trapezoidal(),
    )
    # Internal sanity check against the library result. Include a relative
    # tolerance: the previous pure-absolute atol=1e-8 check would spuriously
    # fail for large-magnitude integrals.
    @assert isapprox(twoI / 2, I_verify; atol = 1e-8, rtol = 1e-8)
    return I_verify
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 1095 | """
"""
function is_travis_ci(a::AbstractDict = ENV)
    # Travis sets all three of these environment variables to "true";
    # require all of them, case-insensitively and ignoring whitespace.
    flag_is_true(key) = lowercase(strip(get(a, key, ""))) == "true"
    return all(flag_is_true, ("CI", "TRAVIS", "CONTINUOUS_INTEGRATION"))
end
"""
"""
function is_travis_ci_on_linux(a::AbstractDict = ENV)
    # A Travis CI build running on a Linux host.
    return is_travis_ci(a) && Sys.islinux()
end
"""
"""
# function is_travis_ci_on_apple(a::AbstractDict = ENV)
# result = is_travis_ci(a) && Sys.isapple()
# return result
# end
"""
"""
# function is_appveyor_ci(a::AbstractDict = ENV)
# result = ( lowercase(strip(get(a, "CI", ""))) == "true" ) &&
# ( lowercase(strip(get(a, "APPVEYOR", ""))) == "true" )
# return result
# end
"""
"""
function is_ci(a::AbstractDict = ENV)
    # Travis is currently the only recognized CI provider.
    return is_travis_ci(a)
end
# is_ci(a::AbstractDict = ENV) = is_travis_ci(a) || is_appveyor_ci(a)
"""
"""
function is_ci_or_runtests(a::AbstractDict = ENV)
    # True during CI builds or `Pkg.test` runs.
    return is_ci(a) || is_runtests(a)
end
"""
"""
function is_ci_or_runtests_or_docs_or_examples(a::AbstractDict = ENV)
    # Catch-all predicate: CI build, test run, or any docs/examples build.
    return is_ci_or_runtests(a) || is_docs_or_examples(a)
end
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 91 | """
"""
function tuplify end
tuplify(x::Any)::Tuple = (x,)
tuplify(x::Tuple)::Tuple = x
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 140 | """
"""
module PredictMD_MLJ # begin module PredictMD_MLJ
import ..MLJ
import ..MLJBase
import ..MLJModels
end # end module PredictMD_MLJ
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 9314 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "boston_housing", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin data preprocessing code
Random.seed!(999)
df = RDatasets.dataset("MASS", "Boston")
## PredictMD requires that you provide your data in a DataFrame.
## If your data are in a CSV file (e.g. "data.csv"), load them into
## a DataFrame named `df` with:
## df = DataFrames.DataFrame(CSVFiles.load("data.csv"; type_detect_rows = 10_000))
## If your data are in a gzipped CSV file (e.g. "data.csv.gz"), load them into
## a DataFrame named `df` with:
## df = DataFrames.DataFrame(CSVFiles.load(CSVFiles.File(CSVFiles.format"CSV", "data.csv.gz"); type_detect_rows = 10_000))
## If your data are in some other format, use the appropriate Julia package to
## load your data into a DataFrame named `df`.
# PREDICTMD IF INCLUDE TEST STATEMENTS
df1 = DataFrames.DataFrame()
df1[:x] = randn(5)
df1_filename = joinpath(PredictMD.maketempdir(), "df1.csv")
CSVFiles.save(df1_filename, df1)
df2 = DataFrames.DataFrame(CSVFiles.load(df1_filename; type_detect_rows = 10_000))
Test.@test( all(df1[:x] .== df2[:x]) )
df3 = DataFrames.DataFrame()
df3[:y] = randn(5)
df3_filename = joinpath(PredictMD.maketempdir(), "df3.csv.gz")
CSVFiles.save(CSVFiles.File(CSVFiles.format"CSV", df3_filename), df3)
df4 = DataFrames.DataFrame(CSVFiles.load(CSVFiles.File(CSVFiles.format"CSV", df3_filename); type_detect_rows = 10_000))
Test.@test( all(df3[:y] .== df4[:y]) )
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
categorical_feature_names = Symbol[]
continuous_feature_names = Symbol[
:Crim,
:Zn,
:Indus,
:Chas,
:NOx,
:Rm,
:Age,
:Dis,
:Rad,
:Tax,
:PTRatio,
:Black,
:LStat,
]
categorical_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"categorical_feature_names.jld2",
)
continuous_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"continuous_feature_names.jld2",
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("", categorical_feature_names_filename)
@debug("", isfile(categorical_feature_names_filename))
@debug("", dirname(categorical_feature_names_filename))
@debug("", isdir(dirname(categorical_feature_names_filename)))
@debug("", joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
@debug("", isdir(joinpath(PROJECT_OUTPUT_DIRECTORY, "data")))
@debug("", PROJECT_OUTPUT_DIRECTORY)
@debug("", isdir(PROJECT_OUTPUT_DIRECTORY))
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
FileIO.save(
categorical_feature_names_filename,
"categorical_feature_names",
categorical_feature_names,
)
FileIO.save(
continuous_feature_names_filename,
"continuous_feature_names",
continuous_feature_names,
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
single_label_name = :MedV
continuous_label_names = Symbol[single_label_name]
categorical_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
df = df[:, vcat(feature_names, label_names)]
DataFrames.dropmissing!(df; disallowmissing=true,)
PredictMD.shuffle_rows!(df)
PredictMD.fix_column_types!(
df;
categorical_feature_names = categorical_feature_names,
continuous_feature_names = continuous_feature_names,
categorical_label_names = categorical_label_names,
continuous_label_names = continuous_label_names,
)
PredictMD.check_column_types(
df;
categorical_feature_names = categorical_feature_names,
continuous_feature_names = continuous_feature_names,
categorical_label_names = categorical_label_names,
continuous_label_names = continuous_label_names,
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
Test.@test PredictMD.check_no_constant_columns(df)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
features_df = df[feature_names]
labels_df = df[label_names]
DataFrames.describe(logger_stream, labels_df[single_label_name])
(trainingandtuning_features_df,
trainingandtuning_labels_df,
testing_features_df,
testing_labels_df,) = PredictMD.split_data(
features_df,
labels_df,
0.75,
)
(training_features_df,
training_labels_df,
tuning_features_df,
tuning_labels_df,) = PredictMD.split_data(
trainingandtuning_features_df,
trainingandtuning_labels_df,
2/3,
)
trainingandtuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_features_df.csv",
)
trainingandtuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_labels_df.csv",
)
testing_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_features_df.csv",
)
testing_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_labels_df.csv",
)
training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_features_df.csv",
)
training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_labels_df.csv",
)
tuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_features_df.csv",
)
tuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_labels_df.csv",
)
FileIO.save(trainingandtuning_features_df_filename, trainingandtuning_features_df)
FileIO.save(trainingandtuning_labels_df_filename, trainingandtuning_labels_df)
FileIO.save(testing_features_df_filename, testing_features_df)
FileIO.save(testing_labels_df_filename, testing_labels_df)
FileIO.save(training_features_df_filename, training_features_df)
FileIO.save(training_labels_df_filename, training_labels_df)
FileIO.save(tuning_features_df_filename, tuning_features_df)
FileIO.save(tuning_labels_df_filename, tuning_labels_df)
# PREDICTMD IF INCLUDE TEST STATEMENTS
temp_dir = mktempdir()
atexit(() -> rm(temp_dir; force = true, recursive = true))
temp_gz_filename_testing_features_df = joinpath(temp_dir, "temp_filename_testing_features_df.csv.gz")
temp_gz_filename_testing_labels_df = joinpath(temp_dir, "temp_filename_testing_labels_df.csv.gz")
original_testing_features_df = deepcopy(testing_features_df)
original_testing_labels_df = deepcopy(testing_labels_df)
CSVFiles.save(CSVFiles.File(CSVFiles.format"CSV", temp_gz_filename_testing_features_df), original_testing_features_df)
CSVFiles.save(CSVFiles.File(CSVFiles.format"CSV", temp_gz_filename_testing_labels_df), original_testing_labels_df)
roundtrip_testing_features_df = DataFrames.DataFrame(CSVFiles.load(CSVFiles.File(CSVFiles.format"CSV", temp_gz_filename_testing_features_df); type_detect_rows = 10_000))
roundtrip_testing_labels_df = DataFrames.DataFrame(CSVFiles.load(CSVFiles.File(CSVFiles.format"CSV", temp_gz_filename_testing_labels_df); type_detect_rows = 10_000))
for column in names(roundtrip_testing_features_df)
for i = 1:size(roundtrip_testing_features_df, 1)
if ismissing(roundtrip_testing_features_df[i, column])
Test.@test ismissing(original_testing_features_df[i, column])
else
Test.@test roundtrip_testing_features_df[i, column] == original_testing_features_df[i, column]
end
end
end
for column in names(roundtrip_testing_labels_df)
for i = 1:size(roundtrip_testing_labels_df, 1)
if ismissing(roundtrip_testing_labels_df[i, column])
Test.@test ismissing(original_testing_labels_df[i, column])
else
Test.@test roundtrip_testing_labels_df[i, column] == original_testing_labels_df[i, column]
end
end
end
rm(temp_dir; force = true, recursive = true)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End data preprocessing code
# PREDICTMD IF INCLUDE TEST STATEMENTS
if PredictMD.is_travis_ci()
PredictMD.path_to_cache!(
;
to = ["cpu_examples", "boston_housing", "output",],
from = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 10942 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "boston_housing", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin linear regression code
Random.seed!(999)
trainingandtuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_features_df.csv",
)
trainingandtuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_labels_df.csv",
)
testing_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_features_df.csv",
)
testing_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_labels_df.csv",
)
training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_features_df.csv",
)
training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_labels_df.csv",
)
tuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_features_df.csv",
)
tuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_labels_df.csv",
)
trainingandtuning_features_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_features_df_filename;
type_detect_rows = 100,
)
)
trainingandtuning_labels_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_labels_df_filename;
type_detect_rows = 100,
)
)
testing_features_df = DataFrames.DataFrame(
FileIO.load(
testing_features_df_filename;
type_detect_rows = 100,
)
)
testing_labels_df = DataFrames.DataFrame(
FileIO.load(
testing_labels_df_filename;
type_detect_rows = 100,
)
)
training_features_df = DataFrames.DataFrame(
FileIO.load(
training_features_df_filename;
type_detect_rows = 100,
)
)
training_labels_df = DataFrames.DataFrame(
FileIO.load(
training_labels_df_filename;
type_detect_rows = 100,
)
)
tuning_features_df = DataFrames.DataFrame(
FileIO.load(
tuning_features_df_filename;
type_detect_rows = 100,
)
)
tuning_labels_df = DataFrames.DataFrame(
FileIO.load(
tuning_labels_df_filename;
type_detect_rows = 100,
)
)
categorical_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"categorical_feature_names.jld2",
)
continuous_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"continuous_feature_names.jld2",
)
categorical_feature_names = FileIO.load(
categorical_feature_names_filename,
"categorical_feature_names",
)
continuous_feature_names = FileIO.load(
continuous_feature_names_filename,
"continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
single_label_name = :MedV
continuous_label_names = Symbol[single_label_name]
categorical_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
# PREDICTMD IF INCLUDE TEST STATEMENTS
Test.@test(
PredictMD.columns_are_linearly_independent(training_features_df)
)
Test.@test(
PredictMD.columns_are_linearly_independent(
training_features_df,
feature_names,
)
)
Test.@test(
length(
PredictMD.linearly_dependent_columns(
training_features_df,
feature_names,
),
) == 0
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
show(
logger_stream, PredictMD.linearly_dependent_columns(
training_features_df,
feature_names,
)
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
Test.@test_throws ErrorException PredictMD.single_labeldataframelinearregression(
feature_names,
single_label_name;
package = :thispackagedoesnotexist,
intercept = true,
interactions = 1,
name = "Linear regression",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
linear_regression = PredictMD.single_labeldataframelinearregression(
feature_names,
single_label_name;
package = :GLM,
intercept = true,
interactions = 1,
name = "Linear regression",
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
DEEPCOPY_training_features_df = deepcopy(training_features_df)
DEEPCOPY_linear_regression = deepcopy(linear_regression)
DEEPCOPY_training_labels_df = deepcopy(training_labels_df)
double_DEEPCOPY_training_features_df = hcat(DEEPCOPY_training_features_df, DEEPCOPY_training_features_df; makeunique=true)
PredictMD.fit!(DEEPCOPY_linear_regression,double_DEEPCOPY_training_features_df,training_labels_df)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
# PREDICTMD IF INCLUDE TEST STATEMENTS
PredictMD.predict(linear_regression, training_features_df)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
PredictMD.fit!(linear_regression,training_features_df,training_labels_df)
PredictMD.get_underlying(linear_regression) # TODO: fix this error
linear_regression_plot_training =
PredictMD.plotsinglelabelregressiontrueversuspredicted(
linear_regression,
training_features_df,
training_labels_df,
single_label_name,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"linear_regression_plot_training",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, linear_regression_plot_training)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(linear_regression_plot_training)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"linear_regression_plot_training.pdf",
),
linear_regression_plot_training,
)
linear_regression_plot_testing =
PredictMD.plotsinglelabelregressiontrueversuspredicted(
linear_regression,
testing_features_df,
testing_labels_df,
single_label_name
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"linear_regression_plot_testing",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, linear_regression_plot_testing)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(linear_regression_plot_testing)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"linear_regression_plot_testing.pdf",
),
linear_regression_plot_testing,
)
show(
logger_stream, PredictMD.singlelabelregressionmetrics(
linear_regression,
training_features_df,
training_labels_df,
single_label_name,
);
allrows = true,
allcols = true,
splitcols = false,
)
show(
logger_stream, PredictMD.singlelabelregressionmetrics(
linear_regression,
testing_features_df,
testing_labels_df,
single_label_name,
);
allrows = true,
allcols = true,
splitcols = false,
)
linear_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"linear_regression.jld2",
)
PredictMD.save_model(linear_regression_filename, linear_regression)
# PREDICTMD IF INCLUDE TEST STATEMENTS
PredictMD.predict(linear_regression, training_features_df)
Test.@test_throws ErrorException PredictMD.predict_proba(linear_regression, training_features_df)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
# PREDICTMD IF INCLUDE TEST STATEMENTS
linear_regression_filename_bson = joinpath(
PredictMD.maketempdir(),
"linear_regression.bson",
)
PredictMD.save_model(
linear_regression_filename_bson,
linear_regression,
)
test_load_bson = PredictMD.load_model(
linear_regression_filename_bson,
)
Test.@test_throws(
ErrorException,
PredictMD.save_model("test.nonexistentextension", linear_regression)
)
Test.@test_throws(
ErrorException,
PredictMD.save_model_jld2("test.nonexistentextension", linear_regression)
)
Test.@test_throws(
ErrorException,
PredictMD.save_model_bson("test.nonexistentextension", linear_regression)
)
Test.@test_throws(
ErrorException,
PredictMD.load_model("test.nonexistentextension")
)
Test.@test_throws(
ErrorException,
PredictMD.load_model_jld2("test.nonexistentextension")
)
Test.@test_throws(
ErrorException,
PredictMD.load_model_bson("test.nonexistentextension")
)
linear_regression = nothing
Test.@test isnothing(linear_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End linear regression code
# PREDICTMD IF INCLUDE TEST STATEMENTS
if PredictMD.is_travis_ci()
PredictMD.path_to_cache!(
;
to = ["cpu_examples", "boston_housing", "output",],
from = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 8474 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "boston_housing", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin random forest regression code
Random.seed!(999)
trainingandtuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_features_df.csv",
)
trainingandtuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_labels_df.csv",
)
testing_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_features_df.csv",
)
testing_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_labels_df.csv",
)
training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_features_df.csv",
)
training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_labels_df.csv",
)
tuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_features_df.csv",
)
tuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_labels_df.csv",
)
trainingandtuning_features_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_features_df_filename;
type_detect_rows = 100,
)
)
trainingandtuning_labels_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_labels_df_filename;
type_detect_rows = 100,
)
)
testing_features_df = DataFrames.DataFrame(
FileIO.load(
testing_features_df_filename;
type_detect_rows = 100,
)
)
testing_labels_df = DataFrames.DataFrame(
FileIO.load(
testing_labels_df_filename;
type_detect_rows = 100,
)
)
training_features_df = DataFrames.DataFrame(
FileIO.load(
training_features_df_filename;
type_detect_rows = 100,
)
)
training_labels_df = DataFrames.DataFrame(
FileIO.load(
training_labels_df_filename;
type_detect_rows = 100,
)
)
tuning_features_df = DataFrames.DataFrame(
FileIO.load(
tuning_features_df_filename;
type_detect_rows = 100,
)
)
tuning_labels_df = DataFrames.DataFrame(
FileIO.load(
tuning_labels_df_filename;
type_detect_rows = 100,
)
)
categorical_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"categorical_feature_names.jld2",
)
continuous_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"continuous_feature_names.jld2",
)
categorical_feature_names = FileIO.load(
categorical_feature_names_filename,
"categorical_feature_names",
)
continuous_feature_names = FileIO.load(
continuous_feature_names_filename,
"continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
single_label_name = :MedV
continuous_label_names = Symbol[single_label_name]
categorical_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
feature_contrasts = PredictMD.generate_feature_contrasts(
training_features_df,
feature_names,
)
random_forest_regression =
PredictMD.single_labeldataframerandomforestregression(
feature_names,
single_label_name;
nsubfeatures = 2,
ntrees = 20,
package = :DecisionTree,
name = "Random forest",
feature_contrasts = feature_contrasts,
)
PredictMD.fit!(random_forest_regression,
training_features_df,
training_labels_df)
# True-versus-predicted scatter for the TRAINING split.
random_forest_regression_plot_training =
    PredictMD.plotsinglelabelregressiontrueversuspredicted(
        random_forest_regression,
        training_features_df,
        training_labels_df,
        single_label_name,
        );
# PREDICTMD IF INCLUDE TEST STATEMENTS
# (test-only) Round-trip the plot through a unique temp file to verify that
# plot serialization works; the real output is written further below.
filename = string(
    tempname(),
    "_",
    "random_forest_regression_plot_training",
    ".pdf",
    )
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, random_forest_regression_plot_training)
# Only assert the file exists when plot testing is forced; some CI
# environments cannot render plots.
if PredictMD.is_force_test_plots()
    @debug("Attempting to test that the file exists...", filename,)
    Test.@test(isfile(filename))
    @debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(random_forest_regression_plot_training)
# Persist the training-split plot under the project output directory.
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "random_forest_regression_plot_training.pdf",
    ),
    random_forest_regression_plot_training,
    )
# True-versus-predicted scatter for the held-out TESTING split.
random_forest_regression_plot_testing =
    PredictMD.plotsinglelabelregressiontrueversuspredicted(
        random_forest_regression,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        );
# PREDICTMD IF INCLUDE TEST STATEMENTS
# (test-only) Same temp-file serialization check as above, testing split.
filename = string(
    tempname(),
    "_",
    "random_forest_regression_plot_testing",
    ".pdf",
    )
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, random_forest_regression_plot_testing)
if PredictMD.is_force_test_plots()
    @debug("Attempting to test that the file exists...", filename,)
    Test.@test(isfile(filename))
    @debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(random_forest_regression_plot_testing)
# Persist the testing-split plot under the project output directory.
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "random_forest_regression_plot_testing.pdf",
    ),
    random_forest_regression_plot_testing,
    )
# Print regression metrics (R^2, MSE, RMSE, ...) for the TRAINING split.
show(
    logger_stream, PredictMD.singlelabelregressionmetrics(
        random_forest_regression,
        training_features_df,
        training_labels_df,
        single_label_name,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
# Print the same metrics table for the TESTING split.
show(
    logger_stream, PredictMD.singlelabelregressionmetrics(
        random_forest_regression,
        testing_features_df,
        testing_labels_df,
        single_label_name,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
# Serialize the fitted model so the comparison/output scripts can reload it.
random_forest_regression_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "models",
    "random_forest_regression.jld2",
    )
PredictMD.save_model(
    random_forest_regression_filename,
    random_forest_regression
    )
# PREDICTMD IF INCLUDE TEST STATEMENTS
# (test-only) Drop the in-memory model to prove later scripts rely solely
# on the serialized file.
random_forest_regression = nothing
Test.@test isnothing(random_forest_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End random forest regression code
# PREDICTMD IF INCLUDE TEST STATEMENTS
# On Travis CI, push the output directory back into the build cache.
if PredictMD.is_travis_ci()
    PredictMD.path_to_cache!(
        ;
        to = ["cpu_examples", "boston_housing", "output",],
        from = [PROJECT_OUTPUT_DIRECTORY],
        )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 17339 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Locate the active debug logger for this file/module; test output is routed
# to its stream, or silently discarded (devnull) when no logger is attached.
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
    logger_stream = devnull
else
    logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "boston_housing", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin Knet neural network regression code
# Deterministic RNG so weight initialization and shuffles are reproducible.
Random.seed!(999)
# Every dataset split produced earlier in the pipeline lives as a CSV file
# under "<output>/data/"; build each path through one small helper.
data_csv_path(basename) = joinpath(PROJECT_OUTPUT_DIRECTORY, "data", basename)
trainingandtuning_features_df_filename =
    data_csv_path("trainingandtuning_features_df.csv")
trainingandtuning_labels_df_filename =
    data_csv_path("trainingandtuning_labels_df.csv")
testing_features_df_filename = data_csv_path("testing_features_df.csv")
testing_labels_df_filename = data_csv_path("testing_labels_df.csv")
training_features_df_filename = data_csv_path("training_features_df.csv")
training_labels_df_filename = data_csv_path("training_labels_df.csv")
tuning_features_df_filename = data_csv_path("tuning_features_df.csv")
tuning_labels_df_filename = data_csv_path("tuning_labels_df.csv")
# Helper: read one of the CSV splits back into a DataFrame.
# `type_detect_rows = 100` mirrors the setting used when the files were written.
read_split_df(path) =
    DataFrames.DataFrame(FileIO.load(path; type_detect_rows = 100))
trainingandtuning_features_df = read_split_df(trainingandtuning_features_df_filename)
trainingandtuning_labels_df = read_split_df(trainingandtuning_labels_df_filename)
testing_features_df = read_split_df(testing_features_df_filename)
testing_labels_df = read_split_df(testing_labels_df_filename)
training_features_df = read_split_df(training_features_df_filename)
training_labels_df = read_split_df(training_labels_df_filename)
tuning_features_df = read_split_df(tuning_features_df_filename)
tuning_labels_df = read_split_df(tuning_labels_df_filename)
categorical_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"categorical_feature_names.jld2",
)
continuous_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"continuous_feature_names.jld2",
)
categorical_feature_names = FileIO.load(
categorical_feature_names_filename,
"categorical_feature_names",
)
continuous_feature_names = FileIO.load(
continuous_feature_names_filename,
"continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
single_label_name = :MedV
continuous_label_names = Symbol[single_label_name]
categorical_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
# Forward pass for a one-hidden-layer MLP, supplied to PredictMD as SOURCE
# TEXT (it is parsed later by `PredictMD.parse_functions!`, so the string
# content must stay exactly as written): w = [W1, b1, W2, b2],
# hidden layer uses ReLU, output layer is linear.
knet_mlp_predict_function_source = """
function knetmlp_predict(
    w,
    x0::AbstractArray,
    )
    x1 = Knet.relu.( w[1]*x0 .+ w[2] )
    x2 = w[3]*x1 .+ w[4]
    return x2
end
"""
# Mean-squared-error loss with optional L1/L2 penalties on the weight
# matrices only (`modelweights[1:2:end]` skips the bias vectors). Also
# supplied as source text for later parsing.
knet_mlp_loss_function_source = """
function knetmlp_loss(
    predict_function::Function,
    modelweights,
    x::AbstractArray,
    ytrue::AbstractArray;
    L1::Real = Float64(0),
    L2::Real = Float64(0),
    )
    loss = Statistics.mean(
        abs2,
        ytrue - predict_function(
            modelweights,
            x,
            ),
        )
    if L1 != 0
        loss += L1 * sum(sum(abs, w_i) for w_i in modelweights[1:2:end])
    end
    if L2 != 0
        loss += L2 * sum(sum(abs2, w_i) for w_i in modelweights[1:2:end])
    end
    return loss
end
"""
# Feature contrasts determine how many model-matrix columns the features
# expand into; the MLP input width must match that column count.
feature_contrasts =
    PredictMD.generate_feature_contrasts(training_features_df, feature_names)
# Initial MLP weights: one hidden layer of 10 units, i.e. [W1, b1, W2, b2].
# Fixes relative to the generated original: the `Float64.(...)` wrappers were
# identity conversions of arrays that are already Float64 and have been
# dropped; `zeros` replaces `fill(Float64(0), ...)`. The `0.1f0` scale factor
# is kept deliberately so the seeded initial values are bit-identical.
knetmlp_modelweights = Any[
    0.1f0*randn(Float64,10,feature_contrasts.num_array_columns_without_intercept), # W1
    zeros(Float64, 10, 1),                                                         # b1
    0.1f0*randn(Float64,1,10),                                                     # W2
    zeros(Float64, 1, 1),                                                          # b2
    ]
# L1/L2 regularization strengths; 0.0 disables both penalties. A concretely
# typed Dict replaces the bare `Dict()` (which builds a `Dict{Any,Any}`).
knetmlp_losshyperparameters = Dict{Symbol, Float64}(:L1 => 0.0, :L2 => 0.0)
knetmlp_optimizationalgorithm = :Adam
# No optimizer-specific overrides: Adam's defaults are used as-is.
knetmlp_optimizerhyperparameters = Dict{Symbol, Any}()
knetmlp_minibatchsize = 48
knet_mlp_regression = PredictMD.single_labeldataframeknetregression(
feature_names,
single_label_name;
package = :Knet,
name = "Knet MLP",
predict_function_source = knet_mlp_predict_function_source,
loss_function_source = knet_mlp_loss_function_source,
losshyperparameters = knetmlp_losshyperparameters,
optimizationalgorithm = knetmlp_optimizationalgorithm,
optimizerhyperparameters = knetmlp_optimizerhyperparameters,
minibatchsize = knetmlp_minibatchsize,
modelweights = knetmlp_modelweights,
maxepochs = 100,
printlosseverynepochs = 10,
feature_contrasts = feature_contrasts,
)
PredictMD.parse_functions!(knet_mlp_regression)
PredictMD.fit!(
knet_mlp_regression,
training_features_df,
training_labels_df,
tuning_features_df,
tuning_labels_df,
)
PredictMD.set_max_epochs!(knet_mlp_regression, 200)
PredictMD.fit!(
knet_mlp_regression,
training_features_df,
training_labels_df,
tuning_features_df,
tuning_labels_df,
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_knet_mlp_regression = PredictMD.single_labeldataframeknetregression(
feature_names,
single_label_name;
package = :Knet,
name = "Knet MLP",
predict_function_source = knet_mlp_predict_function_source,
loss_function_source = knet_mlp_loss_function_source,
losshyperparameters = knetmlp_losshyperparameters,
optimizationalgorithm = knetmlp_optimizationalgorithm,
optimizerhyperparameters = knetmlp_optimizerhyperparameters,
minibatchsize = knetmlp_minibatchsize,
modelweights = knetmlp_modelweights,
maxepochs = 5,
printlosseverynepochs = 1,
feature_contrasts = feature_contrasts,
)
# PredictMD.fit!(
# knet_mlp_regression,
# training_features_df,
# training_labels_df,
# nothing,
# nothing,
# )
test_knet_mlp_regression = PredictMD.single_labeldataframeknetregression(
feature_names,
single_label_name;
package = :Knet,
name = "Knet MLP",
predict_function_source = knet_mlp_predict_function_source,
loss_function_source = knet_mlp_loss_function_source,
losshyperparameters = knetmlp_losshyperparameters,
optimizationalgorithm = knetmlp_optimizationalgorithm,
optimizerhyperparameters = knetmlp_optimizerhyperparameters,
minibatchsize = knetmlp_minibatchsize,
modelweights = knetmlp_modelweights,
maxepochs = 5,
printlosseverynepochs = 1,
feature_contrasts = feature_contrasts,
)
# PredictMD.fit!(
# knet_mlp_regression,
# training_features_df,
# training_labels_df,
# nothing,
# tuning_labels_df,
# )
test_knet_mlp_regression = PredictMD.single_labeldataframeknetregression(
feature_names,
single_label_name;
package = :Knet,
name = "Knet MLP",
predict_function_source = knet_mlp_predict_function_source,
loss_function_source = knet_mlp_loss_function_source,
losshyperparameters = knetmlp_losshyperparameters,
optimizationalgorithm = knetmlp_optimizationalgorithm,
optimizerhyperparameters = knetmlp_optimizerhyperparameters,
minibatchsize = knetmlp_minibatchsize,
modelweights = knetmlp_modelweights,
maxepochs = 5,
printlosseverynepochs = 1,
feature_contrasts = feature_contrasts,
)
# PredictMD.fit!(
# knet_mlp_regression,
# training_features_df,
# training_labels_df,
# tuning_features_df,
# nothing,
# )
PredictMD.get_underlying(test_knet_mlp_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
knet_learningcurve_lossvsepoch = PredictMD.plotlearningcurve(
knet_mlp_regression,
:loss_vs_epoch;
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"knet_learningcurve_lossvsepoch",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, knet_learningcurve_lossvsepoch)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsepoch)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"knet_learningcurve_lossvsepoch.pdf",
),
knet_learningcurve_lossvsepoch,
)
knet_learningcurve_lossvsepoch_skip10epochs = PredictMD.plotlearningcurve(
knet_mlp_regression,
:loss_vs_epoch;
startat = 10,
endat = :end,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"knet_learningcurve_lossvsepoch_skip10epochs",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, knet_learningcurve_lossvsepoch_skip10epochs)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsepoch_skip10epochs)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"knet_learningcurve_lossvsepoch_skip10epochs.pdf",
),
knet_learningcurve_lossvsepoch_skip10epochs,
)
knet_learningcurve_lossvsiteration = PredictMD.plotlearningcurve(
knet_mlp_regression,
:loss_vs_iteration;
window = 50,
sampleevery = 10,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"knet_learningcurve_lossvsiteration",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, knet_learningcurve_lossvsiteration)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsiteration)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"knet_learningcurve_lossvsiteration.pdf",
),
knet_learningcurve_lossvsiteration,
)
knet_learningcurve_lossvsiteration_skip100iterations =
PredictMD.plotlearningcurve(
knet_mlp_regression,
:loss_vs_iteration;
window = 50,
sampleevery = 10,
startat = 100,
endat = :end,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"knet_learningcurve_lossvsiteration_skip100iterations",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, knet_learningcurve_lossvsiteration_skip100iterations)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsiteration_skip100iterations)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"knet_learningcurve_lossvsiteration_skip100iterations.pdf",
),
knet_learningcurve_lossvsiteration_skip100iterations,
)
knet_mlp_regression_plot_training =
PredictMD.plotsinglelabelregressiontrueversuspredicted(
knet_mlp_regression,
training_features_df,
training_labels_df,
single_label_name,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"knet_mlp_regression_plot_training",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, knet_mlp_regression_plot_training)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_mlp_regression_plot_training)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"knet_mlp_regression_plot_training.pdf",
),
knet_mlp_regression_plot_training,
)
knet_mlp_regression_plot_testing =
PredictMD.plotsinglelabelregressiontrueversuspredicted(
knet_mlp_regression,
testing_features_df,
testing_labels_df,
single_label_name,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"knet_mlp_regression_plot_testing",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, knet_mlp_regression_plot_testing)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_mlp_regression_plot_testing)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"knet_mlp_regression_plot_testing.pdf",
),
knet_mlp_regression_plot_testing,
)
show(
logger_stream, PredictMD.singlelabelregressionmetrics(
knet_mlp_regression,
training_features_df,
training_labels_df,
single_label_name,
);
allrows = true,
allcols = true,
splitcols = false,
)
show(
logger_stream, PredictMD.singlelabelregressionmetrics(
knet_mlp_regression,
testing_features_df,
testing_labels_df,
single_label_name,
);
allrows = true,
allcols = true,
splitcols = false,
)
knet_mlp_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"knet_mlp_regression.jld2",
)
PredictMD.save_model(knet_mlp_regression_filename, knet_mlp_regression)
# PREDICTMD IF INCLUDE TEST STATEMENTS
knet_mlp_regression = nothing
Test.@test isnothing(knet_mlp_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End Knet neural network regression code
# PREDICTMD IF INCLUDE TEST STATEMENTS
if PredictMD.is_travis_ci()
PredictMD.path_to_cache!(
;
to = ["cpu_examples", "boston_housing", "output",],
from = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 8441 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
using CSVFiles
using CategoricalArrays
using DataFrames
using DecisionTree
using Distributions
using FileIO
using GLM
using IterTools
using Knet
using LIBSVM
using LinearAlgebra
using PredictMD
using PredictMDAPI
using PredictMDExtra
using RDatasets
using Random
using StatsModels
using Test
using Unitful
const Schema = StatsModels.Schema
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "boston_housing", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin model comparison code
Random.seed!(999)
trainingandtuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_features_df.csv",
)
trainingandtuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_labels_df.csv",
)
testing_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_features_df.csv",
)
testing_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_labels_df.csv",
)
training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_features_df.csv",
)
training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_labels_df.csv",
)
tuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_features_df.csv",
)
tuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_labels_df.csv",
)
trainingandtuning_features_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_features_df_filename;
type_detect_rows = 100,
)
)
trainingandtuning_labels_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_labels_df_filename;
type_detect_rows = 100,
)
)
testing_features_df = DataFrames.DataFrame(
FileIO.load(
testing_features_df_filename;
type_detect_rows = 100,
)
)
testing_labels_df = DataFrames.DataFrame(
FileIO.load(
testing_labels_df_filename;
type_detect_rows = 100,
)
)
training_features_df = DataFrames.DataFrame(
FileIO.load(
training_features_df_filename;
type_detect_rows = 100,
)
)
training_labels_df = DataFrames.DataFrame(
FileIO.load(
training_labels_df_filename;
type_detect_rows = 100,
)
)
tuning_features_df = DataFrames.DataFrame(
FileIO.load(
tuning_features_df_filename;
type_detect_rows = 100,
)
)
tuning_labels_df = DataFrames.DataFrame(
FileIO.load(
tuning_labels_df_filename;
type_detect_rows = 100,
)
)
linear_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"linear_regression.jld2",
)
random_forest_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"random_forest_regression.jld2",
)
knet_mlp_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"knet_mlp_regression.jld2",
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
linear_regression = nothing
Test.@test isnothing(linear_regression)
random_forest_regression = nothing
Test.@test isnothing(random_forest_regression)
knet_mlp_regression = nothing
Test.@test isnothing(knet_mlp_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
linear_regression =
PredictMD.load_model(linear_regression_filename)
random_forest_regression =
PredictMD.load_model(random_forest_regression_filename)
knet_mlp_regression =
PredictMD.load_model(knet_mlp_regression_filename)
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
# PREDICTMD IF INCLUDE TEST STATEMENTS
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
all_models = PredictMD.AbstractFittable[
linear_regression,
random_forest_regression,
knet_mlp_regression,
]
single_label_name = :MedV
continuous_label_names = Symbol[single_label_name]
categorical_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
println(logger_stream, "Single label regression metrics, training set: ")
show(
logger_stream, PredictMD.singlelabelregressionmetrics(
all_models,
training_features_df,
training_labels_df,
single_label_name,
);
allrows = true,
allcols = true,
splitcols = false,
)
println(logger_stream, "Single label regression metrics, testing set: ")
show(
logger_stream, PredictMD.singlelabelregressionmetrics(
all_models,
testing_features_df,
testing_labels_df,
single_label_name,
);
allrows = true,
allcols = true,
splitcols = false,
)
### End model comparison code
# PREDICTMD IF INCLUDE TEST STATEMENTS
metrics = PredictMD.singlelabelregressionmetrics(all_models,
testing_features_df,
testing_labels_df,
single_label_name)
r2_row = first(
findall(
strip.(metrics[:metric]) .== "R^2 (coefficient of determination)"
)
)
Test.@test(
strip(metrics[r2_row, :metric]) == "R^2 (coefficient of determination)"
)
Test.@test(
metrics[r2_row, Symbol("Linear regression")] > 0.550
)
Test.@test(
metrics[r2_row, Symbol("Random forest")] > 0.550
)
Test.@test(
metrics[r2_row, Symbol("Knet MLP")] > 0.300
)
mse_row = first(
findall(
strip.(metrics[:metric]) .== "Mean squared error (MSE)"
)
)
Test.@test(
strip(metrics[mse_row, :metric]) == "Mean squared error (MSE)"
)
Test.@test(
metrics[mse_row, Symbol("Linear regression")] < 40.000
)
Test.@test(
metrics[mse_row, Symbol("Random forest")] < 40.000
)
Test.@test(
metrics[mse_row, Symbol("Knet MLP")] < 65.000
)
rmse_row = first(
findall(
strip.(metrics[:metric]) .== "Root mean square error (RMSE)"
)
)
Test.@test(
strip(metrics[rmse_row, :metric]) == "Root mean square error (RMSE)"
)
Test.@test(
metrics[rmse_row, Symbol("Linear regression")] < 6.500
)
Test.@test(
metrics[rmse_row, Symbol("Random forest")] < 6.500
)
Test.@test(
metrics[rmse_row, Symbol("Knet MLP")] < 8.000
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
# PREDICTMD IF INCLUDE TEST STATEMENTS
if PredictMD.is_travis_ci()
PredictMD.path_to_cache!(
;
to = ["cpu_examples", "boston_housing", "output",],
from = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 6220 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
using CSVFiles
using CategoricalArrays
using DataFrames
using DecisionTree
using Distributions
using FileIO
using GLM
using IterTools
using Knet
using LIBSVM
using LinearAlgebra
using PredictMD
using PredictMDAPI
using PredictMDExtra
using RDatasets
using Random
using StatsModels
using Test
using Unitful
const Schema = StatsModels.Schema
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "boston_housing", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin model output code
Random.seed!(999)
trainingandtuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_features_df.csv",
)
trainingandtuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_labels_df.csv",
)
testing_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_features_df.csv",
)
testing_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_labels_df.csv",
)
training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_features_df.csv",
)
training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_labels_df.csv",
)
tuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_features_df.csv",
)
tuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_labels_df.csv",
)
trainingandtuning_features_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_features_df_filename;
type_detect_rows = 100,
)
)
trainingandtuning_labels_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_labels_df_filename;
type_detect_rows = 100,
)
)
testing_features_df = DataFrames.DataFrame(
FileIO.load(
testing_features_df_filename;
type_detect_rows = 100,
)
)
testing_labels_df = DataFrames.DataFrame(
FileIO.load(
testing_labels_df_filename;
type_detect_rows = 100,
)
)
training_features_df = DataFrames.DataFrame(
FileIO.load(
training_features_df_filename;
type_detect_rows = 100,
)
)
training_labels_df = DataFrames.DataFrame(
FileIO.load(
training_labels_df_filename;
type_detect_rows = 100,
)
)
tuning_features_df = DataFrames.DataFrame(
FileIO.load(
tuning_features_df_filename;
type_detect_rows = 100,
)
)
tuning_labels_df = DataFrames.DataFrame(
FileIO.load(
tuning_labels_df_filename;
type_detect_rows = 100,
)
)
linear_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"linear_regression.jld2",
)
random_forest_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"random_forest_regression.jld2",
)
knet_mlp_regression_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"knet_mlp_regression.jld2",
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
linear_regression = nothing
Test.@test isnothing(linear_regression)
random_forest_regression = nothing
Test.@test isnothing(random_forest_regression)
knet_mlp_regression = nothing
Test.@test isnothing(knet_mlp_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
linear_regression =
PredictMD.load_model(linear_regression_filename)
random_forest_regression =
PredictMD.load_model(random_forest_regression_filename)
knet_mlp_regression =
PredictMD.load_model(knet_mlp_regression_filename)
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
# PREDICTMD IF INCLUDE TEST STATEMENTS
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
PredictMD.parse_functions!(linear_regression)
PredictMD.parse_functions!(random_forest_regression)
PredictMD.parse_functions!(knet_mlp_regression)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
# Smoke-test inference: run every reloaded model over the training split,
# then over the testing split (same order of calls as before). Return values
# are intentionally discarded; this only checks that prediction succeeds
# after deserialization.
regression_models = (
    linear_regression,
    random_forest_regression,
    knet_mlp_regression,
)
for m in regression_models
    PredictMD.predict(m, training_features_df,)
end
for m in regression_models
    PredictMD.predict(m, testing_features_df,)
end
### End model output code
# PREDICTMD IF INCLUDE TEST STATEMENTS
if PredictMD.is_travis_ci()
PredictMD.path_to_cache!(
;
to = ["cpu_examples", "boston_housing", "output",],
from = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 6914 | ## %PREDICTMD_GENERATED_BY%
## Generated PredictMD pipeline step: data preprocessing for the breast cancer
## biopsy example (MASS "biopsy" dataset, binary label :Class).  Loads the raw
## data, normalizes column types, splits the rows into training/tuning/testing
## sets, and saves everything under the project output directory so that each
## later pipeline step can be run standalone.
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
## Route test/debug output to the active debug-level logger's stream, or to
## `devnull` when no such logger is configured.
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
    logger_stream = devnull
else
    logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
## The project root is the parent of this file's directory; all artifacts are
## written beneath `<project>/output/{data,models,plots}`.
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
    joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
    )
PROJECT_OUTPUT_DIRECTORY = joinpath(
    PROJECT_DIRECTORY,
    "output",
    )
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
## On Travis CI, restore previously cached output (if any) into the project
## output directory before this step runs.
if PredictMD.is_travis_ci()
    PredictMD.cache_to_path!(
        ;
        from = ["cpu_examples", "breast_cancer_biopsy", "output",],
        to = [PROJECT_OUTPUT_DIRECTORY],
        )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin data preprocessing code
## Seed the global RNG so the row shuffle and the data splits below are
## reproducible.
Random.seed!(999)
df = RDatasets.dataset("MASS", "biopsy")
## PredictMD requires that you provide your data in a DataFrame.
## If your data are in a CSV file (e.g. "data.csv"), load them into
## a DataFrame named `df` with:
## df = DataFrames.DataFrame(CSVFiles.load("data.csv"; type_detect_rows = 10_000))
## If your data are in a gzipped CSV file (e.g. "data.csv.gz"), load them into
## a DataFrame named `df` with:
## df = DataFrames.DataFrame(CSVFiles.load(CSVFiles.File(CSVFiles.format"CSV", "data.csv.gz"); type_detect_rows = 10_000))
## If your data are in some other format, use the appropriate Julia package to
## load your data into a DataFrame named `df`.
# PREDICTMD IF INCLUDE TEST STATEMENTS
## Round-trip sanity checks: saving and reloading a DataFrame through plain
## and gzipped CSV must preserve the values exactly.
df1 = DataFrames.DataFrame()
df1[:x] = randn(5)
df1_filename = joinpath(PredictMD.maketempdir(), "df1.csv")
CSVFiles.save(df1_filename, df1)
df2 = DataFrames.DataFrame(CSVFiles.load(df1_filename; type_detect_rows = 10_000))
Test.@test( all(df1[:x] .== df2[:x]) )
df3 = DataFrames.DataFrame()
df3[:y] = randn(5)
df3_filename = joinpath(PredictMD.maketempdir(), "df3.csv.gz")
CSVFiles.save(CSVFiles.File(CSVFiles.format"CSV", df3_filename), df3)
df4 = DataFrames.DataFrame(CSVFiles.load(CSVFiles.File(CSVFiles.format"CSV", df3_filename); type_detect_rows = 10_000))
Test.@test( all(df3[:y] .== df4[:y]) )
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## This example has no categorical features; the nine biopsy measurements
## V1-V9 are all treated as continuous.
categorical_feature_names = Symbol[]
continuous_feature_names = Symbol[
    :V1,
    :V2,
    :V3,
    :V4,
    :V5,
    :V6,
    :V7,
    :V8,
    :V9,
    ]
## Persist the feature-name lists (JLD2) so downstream steps can reload them.
categorical_feature_names_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "categorical_feature_names.jld2",
    )
continuous_feature_names_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "continuous_feature_names.jld2",
    )
FileIO.save(
    categorical_feature_names_filename,
    "categorical_feature_names",
    categorical_feature_names,
    )
FileIO.save(
    continuous_feature_names_filename,
    "continuous_feature_names",
    continuous_feature_names,
    )
feature_names = vcat(categorical_feature_names, continuous_feature_names)
## Binary classification target :Class; "malignant" is the positive class.
single_label_name = :Class
negative_class = "benign"
positive_class = "malignant"
single_label_levels = [negative_class, positive_class]
categorical_label_names = Symbol[single_label_name]
continuous_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
## Keep only feature and label columns, drop rows with missing values, and
## shuffle the rows (RNG seeded above).
df = df[:, vcat(feature_names, label_names)]
DataFrames.dropmissing!(df; disallowmissing=true,)
PredictMD.shuffle_rows!(df)
PredictMD.fix_column_types!(
    df;
    categorical_feature_names = categorical_feature_names,
    continuous_feature_names = continuous_feature_names,
    categorical_label_names = categorical_label_names,
    continuous_label_names = continuous_label_names,
    )
PredictMD.check_column_types(
    df;
    categorical_feature_names = categorical_feature_names,
    continuous_feature_names = continuous_feature_names,
    categorical_label_names = categorical_label_names,
    continuous_label_names = continuous_label_names,
    )
# PREDICTMD IF INCLUDE TEST STATEMENTS
Test.@test PredictMD.check_no_constant_columns(df)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
features_df = df[feature_names]
labels_df = df[label_names]
## First split: 75% training+tuning, 25% testing.
(trainingandtuning_features_df,
    trainingandtuning_labels_df,
    testing_features_df,
    testing_labels_df,) = PredictMD.split_data(
        features_df,
        labels_df,
        0.75,
        )
## Second split: 2/3 of training+tuning for training, the rest for tuning.
(training_features_df,
    training_labels_df,
    tuning_features_df,
    tuning_labels_df,) = PredictMD.split_data(
        trainingandtuning_features_df,
        trainingandtuning_labels_df,
        2/3,
        )
trainingandtuning_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "trainingandtuning_features_df.csv",
    )
trainingandtuning_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "trainingandtuning_labels_df.csv",
    )
testing_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "testing_features_df.csv",
    )
testing_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "testing_labels_df.csv",
    )
training_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "training_features_df.csv",
    )
training_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "training_labels_df.csv",
    )
tuning_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "tuning_features_df.csv",
    )
tuning_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "tuning_labels_df.csv",
    )
## Save every split to CSV so each later pipeline step can run standalone.
FileIO.save(trainingandtuning_features_df_filename, trainingandtuning_features_df)
FileIO.save(trainingandtuning_labels_df_filename, trainingandtuning_labels_df)
FileIO.save(testing_features_df_filename, testing_features_df)
FileIO.save(testing_labels_df_filename, testing_labels_df)
FileIO.save(training_features_df_filename, training_features_df)
FileIO.save(training_labels_df_filename, training_labels_df)
FileIO.save(tuning_features_df_filename, tuning_features_df)
FileIO.save(tuning_labels_df_filename, tuning_labels_df)
### End data preprocessing code
# PREDICTMD IF INCLUDE TEST STATEMENTS
## On Travis CI, push the freshly written output back into the build cache.
if PredictMD.is_travis_ci()
    PredictMD.path_to_cache!(
        ;
        to = ["cpu_examples", "breast_cancer_biopsy", "output",],
        from = [PROJECT_OUTPUT_DIRECTORY],
        )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 6727 | ## %PREDICTMD_GENERATED_BY%
## Generated PredictMD pipeline step: SMOTE class balancing for the breast
## cancer biopsy example.  Reloads the data splits written by the
## preprocessing step, oversamples the minority class ("malignant") of the
## training set with SMOTE, and saves the balanced training set for the
## model-fitting steps.
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
## Route test/debug output to the active debug-level logger's stream, or to
## `devnull` when no such logger is configured.
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
    logger_stream = devnull
else
    logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
## The project root is the parent of this file's directory; all artifacts are
## written beneath `<project>/output/{data,models,plots}`.
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
    joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
    )
PROJECT_OUTPUT_DIRECTORY = joinpath(
    PROJECT_DIRECTORY,
    "output",
    )
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
## On Travis CI, restore the output written by earlier pipeline steps.
if PredictMD.is_travis_ci()
    PredictMD.cache_to_path!(
        ;
        from = ["cpu_examples", "breast_cancer_biopsy", "output",],
        to = [PROJECT_OUTPUT_DIRECTORY],
        )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin SMOTE class-balancing code
## Seed the global RNG so SMOTE's random neighbor selection is reproducible.
Random.seed!(999)
trainingandtuning_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "trainingandtuning_features_df.csv",
    )
trainingandtuning_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "trainingandtuning_labels_df.csv",
    )
testing_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "testing_features_df.csv",
    )
testing_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "testing_labels_df.csv",
    )
training_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "training_features_df.csv",
    )
training_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "training_labels_df.csv",
    )
tuning_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "tuning_features_df.csv",
    )
tuning_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "tuning_labels_df.csv",
    )
## Reload every data split saved by the preprocessing step.
trainingandtuning_features_df = DataFrames.DataFrame(
    FileIO.load(
        trainingandtuning_features_df_filename;
        type_detect_rows = 100,
        )
    )
trainingandtuning_labels_df = DataFrames.DataFrame(
    FileIO.load(
        trainingandtuning_labels_df_filename;
        type_detect_rows = 100,
        )
    )
testing_features_df = DataFrames.DataFrame(
    FileIO.load(
        testing_features_df_filename;
        type_detect_rows = 100,
        )
    )
testing_labels_df = DataFrames.DataFrame(
    FileIO.load(
        testing_labels_df_filename;
        type_detect_rows = 100,
        )
    )
training_features_df = DataFrames.DataFrame(
    FileIO.load(
        training_features_df_filename;
        type_detect_rows = 100,
        )
    )
training_labels_df = DataFrames.DataFrame(
    FileIO.load(
        training_labels_df_filename;
        type_detect_rows = 100,
        )
    )
tuning_features_df = DataFrames.DataFrame(
    FileIO.load(
        tuning_features_df_filename;
        type_detect_rows = 100,
        )
    )
tuning_labels_df = DataFrames.DataFrame(
    FileIO.load(
        tuning_labels_df_filename;
        type_detect_rows = 100,
        )
    )
categorical_feature_names_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "categorical_feature_names.jld2",
    )
continuous_feature_names_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "continuous_feature_names.jld2",
    )
categorical_feature_names = FileIO.load(
    categorical_feature_names_filename,
    "categorical_feature_names",
    )
continuous_feature_names = FileIO.load(
    continuous_feature_names_filename,
    "continuous_feature_names",
    )
feature_names = vcat(categorical_feature_names, continuous_feature_names)
## Same label definitions as the preprocessing step.
single_label_name = :Class
negative_class = "benign"
positive_class = "malignant"
single_label_levels = [negative_class, positive_class]
categorical_label_names = Symbol[single_label_name]
continuous_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
## Log the pre-SMOTE class distribution of the training labels.
DataFrames.describe(logger_stream, training_labels_df[single_label_name])
show(logger_stream, StatsBase.countmap(training_labels_df[single_label_name]))
majorityclass = "benign"
minorityclass = "malignant"
## Oversample the minority class with SMOTE (k = 5 nearest neighbors) until
## the minority-to-majority ratio reaches 1.0.
(smoted_training_features_df, smoted_training_labels_df,) = PredictMD.smote(
    training_features_df,
    training_labels_df,
    feature_names,
    single_label_name;
    majorityclass = majorityclass,
    minorityclass = minorityclass,
    pct_over = 100,
    minority_to_majority_ratio = 1.0,
    k = 5,
    )
PredictMD.check_column_types(
    smoted_training_features_df;
    categorical_feature_names = categorical_feature_names,
    continuous_feature_names = continuous_feature_names,
    categorical_label_names = categorical_label_names,
    continuous_label_names = continuous_label_names,
    )
PredictMD.check_column_types(
    smoted_training_labels_df;
    categorical_feature_names = categorical_feature_names,
    continuous_feature_names = continuous_feature_names,
    categorical_label_names = categorical_label_names,
    continuous_label_names = continuous_label_names,
    )
# PREDICTMD IF INCLUDE TEST STATEMENTS
Test.@test PredictMD.check_no_constant_columns(smoted_training_features_df)
Test.@test PredictMD.check_no_constant_columns(smoted_training_labels_df)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## Log the post-SMOTE class distribution.
DataFrames.describe(logger_stream, smoted_training_labels_df[single_label_name])
show(logger_stream, StatsBase.countmap(smoted_training_labels_df[single_label_name]))
smoted_training_features_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "smoted_training_features_df.csv",
    )
smoted_training_labels_df_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "data",
    "smoted_training_labels_df.csv",
    )
FileIO.save(smoted_training_features_df_filename, smoted_training_features_df)
FileIO.save(smoted_training_labels_df_filename, smoted_training_labels_df)
### End SMOTE class-balancing code
# PREDICTMD IF INCLUDE TEST STATEMENTS
## On Travis CI, push the freshly written output back into the build cache.
if PredictMD.is_travis_ci()
    PredictMD.path_to_cache!(
        ;
        to = ["cpu_examples", "breast_cancer_biopsy", "output",],
        from = [PROJECT_OUTPUT_DIRECTORY],
        )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 13132 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "breast_cancer_biopsy", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin logistic classifier code
Random.seed!(999)
trainingandtuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_features_df.csv",
)
trainingandtuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_labels_df.csv",
)
testing_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_features_df.csv",
)
testing_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_labels_df.csv",
)
training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_features_df.csv",
)
training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_labels_df.csv",
)
tuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_features_df.csv",
)
tuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_labels_df.csv",
)
trainingandtuning_features_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_features_df_filename;
type_detect_rows = 100,
)
)
trainingandtuning_labels_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_labels_df_filename;
type_detect_rows = 100,
)
)
testing_features_df = DataFrames.DataFrame(
FileIO.load(
testing_features_df_filename;
type_detect_rows = 100,
)
)
testing_labels_df = DataFrames.DataFrame(
FileIO.load(
testing_labels_df_filename;
type_detect_rows = 100,
)
)
training_features_df = DataFrames.DataFrame(
FileIO.load(
training_features_df_filename;
type_detect_rows = 100,
)
)
training_labels_df = DataFrames.DataFrame(
FileIO.load(
training_labels_df_filename;
type_detect_rows = 100,
)
)
tuning_features_df = DataFrames.DataFrame(
FileIO.load(
tuning_features_df_filename;
type_detect_rows = 100,
)
)
tuning_labels_df = DataFrames.DataFrame(
FileIO.load(
tuning_labels_df_filename;
type_detect_rows = 100,
)
)
smoted_training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"smoted_training_features_df.csv",
)
smoted_training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"smoted_training_labels_df.csv",
)
smoted_training_features_df = DataFrames.DataFrame(
FileIO.load(
smoted_training_features_df_filename;
type_detect_rows = 100,
)
)
smoted_training_labels_df = DataFrames.DataFrame(
FileIO.load(
smoted_training_labels_df_filename;
type_detect_rows = 100,
)
)
categorical_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"categorical_feature_names.jld2",
)
continuous_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"continuous_feature_names.jld2",
)
categorical_feature_names = FileIO.load(
categorical_feature_names_filename,
"categorical_feature_names",
)
continuous_feature_names = FileIO.load(
continuous_feature_names_filename,
"continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
single_label_name = :Class
negative_class = "benign"
positive_class = "malignant"
single_label_levels = [negative_class, positive_class]
categorical_label_names = Symbol[single_label_name]
continuous_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
feature_contrasts = PredictMD.generate_feature_contrasts(
smoted_training_features_df,
feature_names,
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
Test.@test(
PredictMD.columns_are_linearly_independent(training_features_df)
)
Test.@test(
PredictMD.columns_are_linearly_independent(
training_features_df,
feature_names,
)
)
Test.@test(
length(
PredictMD.linearly_dependent_columns(
training_features_df,
feature_names,
),
) == 0
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
show(
logger_stream, PredictMD.linearly_dependent_columns(
training_features_df,
feature_names,
)
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
Test.@test_throws ErrorException PredictMD.singlelabelbinaryclassdataframelogisticclassifier(
feature_names,
single_label_name,
single_label_levels;
package = :thispackagedoesnotexist,
intercept = true,
interactions = 1,
name = "Logistic regression",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
logistic_classifier =
PredictMD.singlelabelbinaryclassdataframelogisticclassifier(
feature_names,
single_label_name,
single_label_levels;
package = :GLM,
intercept = true,
interactions = 1,
name = "Logistic regression",
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
PredictMD.predict(logistic_classifier, training_features_df)
PredictMD.predict_proba(logistic_classifier, training_features_df)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
PredictMD.fit!(logistic_classifier,
smoted_training_features_df,
smoted_training_labels_df) # TODO: fix this error
PredictMD.get_underlying(logistic_classifier) # TODO: fix this error
logistic_hist_training =
PredictMD.plotsinglelabelbinaryclassifierhistogram( # TODO: fix this error
logistic_classifier,
smoted_training_features_df,
smoted_training_labels_df,
single_label_name,
single_label_levels,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"logistic_hist_training",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, logistic_hist_training)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(logistic_hist_training)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"logistic_hist_training.pdf",
),
logistic_hist_training,
)
logistic_hist_testing =
PredictMD.plotsinglelabelbinaryclassifierhistogram(
logistic_classifier,
testing_features_df,
testing_labels_df,
single_label_name,
single_label_levels,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
tempname(),
"_",
"logistic_hist_testing",
".pdf",
)
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, logistic_hist_testing)
if PredictMD.is_force_test_plots()
@debug("Attempting to test that the file exists...", filename,)
Test.@test(isfile(filename))
@debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(logistic_hist_testing)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"logistic_hist_testing.pdf",
),
logistic_hist_testing,
)
show(
logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
logistic_classifier,
smoted_training_features_df,
smoted_training_labels_df,
single_label_name,
positive_class;
sensitivity = 0.95,
);
allrows = true,
allcols = true,
splitcols = false,
)
show(
logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
logistic_classifier,
testing_features_df,
testing_labels_df,
single_label_name,
positive_class;
sensitivity = 0.95,
);
allrows = true,
allcols = true,
splitcols = false,
)
logistic_calibration_curve =
PredictMD.plot_probability_calibration_curve(
logistic_classifier,
smoted_training_features_df,
smoted_training_labels_df,
single_label_name,
positive_class;
window = 0.2,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(logistic_calibration_curve)
PredictMD.save_plot(
joinpath(
PROJECT_OUTPUT_DIRECTORY,
"plots",
"logistic_calibration_curve.pdf",
),
logistic_calibration_curve,
)
show(
logger_stream, PredictMD.probability_calibration_metrics(
logistic_classifier,
testing_features_df,
testing_labels_df,
single_label_name,
positive_class;
window = 0.1,
);
allrows = true,
allcols = true,
splitcols = false,
)
logistic_cutoffs, logistic_risk_group_prevalences =
PredictMD.risk_score_cutoff_values(
logistic_classifier,
testing_features_df,
testing_labels_df,
single_label_name,
positive_class;
average_function = Statistics.mean,
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug(
string(
"Low risk: 0 to $(logistic_cutoffs[1]).",
" Medium risk: $(logistic_cutoffs[1]) to $(logistic_cutoffs[2]).",
" High risk: $(logistic_cutoffs[2]) to 1.",
)
)
@debug(logistic_risk_group_prevalences)
# PREDICTMD ELSE
@info(
string(
"Low risk: 0 to $(logistic_cutoffs[1]).",
" Medium risk: $(logistic_cutoffs[1]) to $(logistic_cutoffs[2]).",
" High risk: $(logistic_cutoffs[2]) to 1.",
)
)
@info(logistic_risk_group_prevalences)
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
logistic_cutoffs, logistic_risk_group_prevalences =
PredictMD.risk_score_cutoff_values(
logistic_classifier,
testing_features_df,
testing_labels_df,
single_label_name,
positive_class;
average_function = Statistics.median,
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug(
string(
"Low risk: 0 to $(logistic_cutoffs[1]).",
" Medium risk: $(logistic_cutoffs[1]) to $(logistic_cutoffs[2]).",
" High risk: $(logistic_cutoffs[2]) to 1.",
)
)
@debug(logistic_risk_group_prevalences)
# PREDICTMD ELSE
@info(
string(
"Low risk: 0 to $(logistic_cutoffs[1]).",
" Medium risk: $(logistic_cutoffs[1]) to $(logistic_cutoffs[2]).",
" High risk: $(logistic_cutoffs[2]) to 1.",
)
)
@info(logistic_risk_group_prevalences)
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
logistic_classifier_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"models",
"logistic_classifier.jld2",
)
PredictMD.save_model(logistic_classifier_filename, logistic_classifier)
# PREDICTMD IF INCLUDE TEST STATEMENTS
logistic_classifier = nothing
Test.@test isnothing(logistic_classifier)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End logistic classifier code
# PREDICTMD IF INCLUDE TEST STATEMENTS
if PredictMD.is_travis_ci()
PredictMD.path_to_cache!(
;
to = ["cpu_examples", "breast_cancer_biopsy", "output",],
from = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 9441 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY = dirname(
joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...)
)
PROJECT_OUTPUT_DIRECTORY = joinpath(
PROJECT_DIRECTORY,
"output",
)
mkpath(PROJECT_OUTPUT_DIRECTORY)
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "data"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "models"))
mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, "plots"))
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
PredictMD.cache_to_path!(
;
from = ["cpu_examples", "breast_cancer_biopsy", "output",],
to = [PROJECT_OUTPUT_DIRECTORY],
)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin random forest classifier code
Random.seed!(999)
trainingandtuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_features_df.csv",
)
trainingandtuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"trainingandtuning_labels_df.csv",
)
testing_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_features_df.csv",
)
testing_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"testing_labels_df.csv",
)
training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_features_df.csv",
)
training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"training_labels_df.csv",
)
tuning_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_features_df.csv",
)
tuning_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"tuning_labels_df.csv",
)
trainingandtuning_features_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_features_df_filename;
type_detect_rows = 100,
)
)
trainingandtuning_labels_df = DataFrames.DataFrame(
FileIO.load(
trainingandtuning_labels_df_filename;
type_detect_rows = 100,
)
)
testing_features_df = DataFrames.DataFrame(
FileIO.load(
testing_features_df_filename;
type_detect_rows = 100,
)
)
testing_labels_df = DataFrames.DataFrame(
FileIO.load(
testing_labels_df_filename;
type_detect_rows = 100,
)
)
training_features_df = DataFrames.DataFrame(
FileIO.load(
training_features_df_filename;
type_detect_rows = 100,
)
)
training_labels_df = DataFrames.DataFrame(
FileIO.load(
training_labels_df_filename;
type_detect_rows = 100,
)
)
tuning_features_df = DataFrames.DataFrame(
FileIO.load(
tuning_features_df_filename;
type_detect_rows = 100,
)
)
tuning_labels_df = DataFrames.DataFrame(
FileIO.load(
tuning_labels_df_filename;
type_detect_rows = 100,
)
)
smoted_training_features_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"smoted_training_features_df.csv",
)
smoted_training_labels_df_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"smoted_training_labels_df.csv",
)
smoted_training_features_df = DataFrames.DataFrame(
FileIO.load(
smoted_training_features_df_filename;
type_detect_rows = 100,
)
)
smoted_training_labels_df = DataFrames.DataFrame(
FileIO.load(
smoted_training_labels_df_filename;
type_detect_rows = 100,
)
)
categorical_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"categorical_feature_names.jld2",
)
continuous_feature_names_filename = joinpath(
PROJECT_OUTPUT_DIRECTORY,
"data",
"continuous_feature_names.jld2",
)
categorical_feature_names = FileIO.load(
categorical_feature_names_filename,
"categorical_feature_names",
)
continuous_feature_names = FileIO.load(
continuous_feature_names_filename,
"continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
single_label_name = :Class
negative_class = "benign"
positive_class = "malignant"
single_label_levels = [negative_class, positive_class]
categorical_label_names = Symbol[single_label_name]
continuous_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
feature_contrasts = PredictMD.generate_feature_contrasts(
smoted_training_features_df,
feature_names,
)
## Construct the (unfitted) DecisionTree-backed random forest classifier:
## 200 trees, 4 candidate features per split.
## Fix: the generated code called
## `PredictMD.single_labelmulticlassdataframerandomforestclassifier` (stray
## underscore), which is not a name PredictMD exports and would raise an
## UndefVarError; the correct constructor — consistent with the other
## `singlelabel...` calls in this file — has no underscore.
random_forest_classifier =
    PredictMD.singlelabelmulticlassdataframerandomforestclassifier(
        feature_names,
        single_label_name,
        single_label_levels;
        nsubfeatures = 4,
        ntrees = 200,
        package = :DecisionTree,
        name = "Random forest",
        feature_contrasts = feature_contrasts,
        )
## Fit the random forest on the SMOTE-balanced training set.
PredictMD.fit!(
    random_forest_classifier,
    smoted_training_features_df,
    smoted_training_labels_df,
    )
## Histogram of predicted probabilities on the (SMOTE-balanced) training set.
random_forest_classifier_hist_training =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        random_forest_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        single_label_levels,
        );
# PREDICTMD IF INCLUDE TEST STATEMENTS
## Verify the plot can be written to a fresh temporary PDF file.
filename = string(
    tempname(),
    "_",
    "random_forest_classifier_hist_training",
    ".pdf",
    )
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, random_forest_classifier_hist_training)
if PredictMD.is_force_test_plots()
    @debug("Attempting to test that the file exists...", filename,)
    Test.@test(isfile(filename))
    @debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(random_forest_classifier_hist_training)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "random_forest_classifier_hist_training.pdf",
        ),
    random_forest_classifier_hist_training,
    )
## Histogram of predicted probabilities on the held-out testing set.
random_forest_classifier_hist_testing =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        random_forest_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        single_label_levels,
        );
# PREDICTMD IF INCLUDE TEST STATEMENTS
filename = string(
    tempname(),
    "_",
    "random_forest_classifier_hist_testing",
    ".pdf",
    )
rm(filename; force = true, recursive = true,)
@debug("Attempting to test that the file does not exist...", filename,)
Test.@test(!isfile(filename))
@debug("The file does not exist.", filename, isfile(filename),)
PredictMD.save_plot(filename, random_forest_classifier_hist_testing)
if PredictMD.is_force_test_plots()
    @debug("Attempting to test that the file exists...", filename,)
    Test.@test(isfile(filename))
    @debug("The file does exist.", filename, isfile(filename),)
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(random_forest_classifier_hist_testing)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "random_forest_classifier_hist_testing.pdf",
        ),
    random_forest_classifier_hist_testing,
    )
## Classification metrics at a fixed sensitivity of 0.95, on training and
## testing data.
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        random_forest_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
        );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        random_forest_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
        );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
## Save the fitted model (JLD2) for the comparison/prediction steps.
random_forest_classifier_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "models",
    "random_forest_classifier.jld2",
    )
PredictMD.save_model(
    random_forest_classifier_filename,
    random_forest_classifier,
    )
# PREDICTMD IF INCLUDE TEST STATEMENTS
## Release the in-memory model; later steps reload it from disk.
random_forest_classifier = nothing
Test.@test isnothing(random_forest_classifier)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End random forest classifier code
# PREDICTMD IF INCLUDE TEST STATEMENTS
## On Travis CI, push the freshly written output back into the build cache.
if PredictMD.is_travis_ci()
    PredictMD.path_to_cache!(
        ;
        to = ["cpu_examples", "breast_cancer_biopsy", "output",],
        from = [PROJECT_OUTPUT_DIRECTORY],
        )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 9550 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
logger_stream = devnull
else
logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
# Resolve the project root from this file's location (splitpath/joinpath
# round-trip normalises the path), then anchor all artifacts under "output".
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY =
    dirname(joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...))
PROJECT_OUTPUT_DIRECTORY = joinpath(PROJECT_DIRECTORY, "output")
# Ensure the output directory tree exists; mkpath also creates parents.
mkpath(PROJECT_OUTPUT_DIRECTORY)
for output_subdirectory in ("data", "models", "plots")
    mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, output_subdirectory))
end
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
    PredictMD.cache_to_path!(
        ;
        from = ["cpu_examples", "breast_cancer_biopsy", "output",],
        to = [PROJECT_OUTPUT_DIRECTORY],
    )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin C-SVC code
Kernel = LIBSVM.Kernel
Random.seed!(999)

# Helper: full path of a data artifact stored under "<output>/data".
data_artifact_path(file_name) = joinpath(PROJECT_OUTPUT_DIRECTORY, "data", file_name)

# Helper: read a CSV artifact into a DataFrame.  The first 100 rows drive
# column type detection, matching how the artifacts were written.  Extracted
# from twelve copy-pasted load blocks.
load_csv_artifact(filename) = DataFrames.DataFrame(
    FileIO.load(filename; type_detect_rows = 100)
)

trainingandtuning_features_df_filename = data_artifact_path("trainingandtuning_features_df.csv")
trainingandtuning_labels_df_filename = data_artifact_path("trainingandtuning_labels_df.csv")
testing_features_df_filename = data_artifact_path("testing_features_df.csv")
testing_labels_df_filename = data_artifact_path("testing_labels_df.csv")
training_features_df_filename = data_artifact_path("training_features_df.csv")
training_labels_df_filename = data_artifact_path("training_labels_df.csv")
tuning_features_df_filename = data_artifact_path("tuning_features_df.csv")
tuning_labels_df_filename = data_artifact_path("tuning_labels_df.csv")
trainingandtuning_features_df = load_csv_artifact(trainingandtuning_features_df_filename)
trainingandtuning_labels_df = load_csv_artifact(trainingandtuning_labels_df_filename)
testing_features_df = load_csv_artifact(testing_features_df_filename)
testing_labels_df = load_csv_artifact(testing_labels_df_filename)
training_features_df = load_csv_artifact(training_features_df_filename)
training_labels_df = load_csv_artifact(training_labels_df_filename)
tuning_features_df = load_csv_artifact(tuning_features_df_filename)
tuning_labels_df = load_csv_artifact(tuning_labels_df_filename)
smoted_training_features_df_filename = data_artifact_path("smoted_training_features_df.csv")
smoted_training_labels_df_filename = data_artifact_path("smoted_training_labels_df.csv")
smoted_training_features_df = load_csv_artifact(smoted_training_features_df_filename)
smoted_training_labels_df = load_csv_artifact(smoted_training_labels_df_filename)
# Feature name lists were saved as JLD2 key/value artifacts.
categorical_feature_names_filename = data_artifact_path("categorical_feature_names.jld2")
continuous_feature_names_filename = data_artifact_path("continuous_feature_names.jld2")
categorical_feature_names = FileIO.load(
    categorical_feature_names_filename,
    "categorical_feature_names",
)
continuous_feature_names = FileIO.load(
    continuous_feature_names_filename,
    "continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
# Binary classification target: biopsy class, "malignant" is the positive class.
single_label_name = :Class
negative_class = "benign"
positive_class = "malignant"
single_label_levels = [negative_class, positive_class]
categorical_label_names = Symbol[single_label_name]
continuous_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
# Contrasts (categorical encodings) are generated from the SMOTE-balanced
# training data so the model sees every categorical level.
feature_contrasts = PredictMD.generate_feature_contrasts(
    smoted_training_features_df,
    feature_names,
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Test-only: requesting an unknown backend package must raise an error.
Test.@test_throws ErrorException PredictMD.single_labelmulticlassdataframesvmclassifier(
    feature_names,
    single_label_name,
    single_label_levels;
    package = :packagethatdoesnotexist,
    svmtype = LIBSVM.SVC,
    name = "SVM (C-SVC)",
    verbose = false,
    feature_contrasts = feature_contrasts,
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
# Build a C-SVC support vector machine classifier backed by LIBSVM.
c_svc_svm_classifier =
    PredictMD.single_labelmulticlassdataframesvmclassifier(
        feature_names,
        single_label_name,
        single_label_levels;
        package = :LIBSVM,
        svmtype = LIBSVM.SVC,
        name = "SVM (C-SVC)",
        verbose = false,
        feature_contrasts = feature_contrasts,
    )
# Train on the SMOTE-balanced training set.
PredictMD.fit!(
    c_svc_svm_classifier,
    smoted_training_features_df,
    smoted_training_labels_df,
)
# Histogram of predicted class probabilities on the SMOTE-balanced training set.
c_svc_svm_classifier_hist_training =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        c_svc_svm_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        single_label_levels,
    );
# PREDICTMD IF INCLUDE TEST STATEMENTS
"""
    test_plot_save_roundtrip(plot_object, plot_name)

Test-only helper: save `plot_object` to a unique temporary PDF and check the
round-trip — the file must not exist before saving and (when plot testing is
forced) must exist afterwards.  Extracted from repeated per-plot scaffolding.
"""
function test_plot_save_roundtrip(plot_object, plot_name)
    filename = string(tempname(), "_", plot_name, ".pdf")
    rm(filename; force = true, recursive = true,)
    @debug("Attempting to test that the file does not exist...", filename,)
    Test.@test(!isfile(filename))
    @debug("The file does not exist.", filename, isfile(filename),)
    PredictMD.save_plot(filename, plot_object)
    if PredictMD.is_force_test_plots()
        @debug("Attempting to test that the file exists...", filename,)
        Test.@test(isfile(filename))
        @debug("The file does exist.", filename, isfile(filename),)
    end
    return nothing
end
test_plot_save_roundtrip(
    c_svc_svm_classifier_hist_training,
    "c_svc_svm_classifier_hist_training",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(c_svc_svm_classifier_hist_training)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "c_svc_svm_classifier_hist_training.pdf",
    ),
    c_svc_svm_classifier_hist_training,
)
# Same histogram on the held-out testing set.
c_svc_svm_classifier_hist_testing =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        c_svc_svm_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        single_label_levels,
    );
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_plot_save_roundtrip(
    c_svc_svm_classifier_hist_testing,
    "c_svc_svm_classifier_hist_testing",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(c_svc_svm_classifier_hist_testing)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "c_svc_svm_classifier_hist_testing.pdf",
    ),
    c_svc_svm_classifier_hist_testing,
)
# Report binary classification metrics (at a fixed sensitivity of 0.95) on
# the SMOTE-balanced training set and on the held-out testing set.
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        c_svc_svm_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        c_svc_svm_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
# Persist the trained model under "<output>/models".
c_svc_svm_classifier_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "models",
    "c_svc_svm_classifier.jld2",
)
PredictMD.save_model(c_svc_svm_classifier_filename, c_svc_svm_classifier)
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Consistency fix: the sibling generated scripts (random forest, nu-SVC,
# Knet MLP) free the model after saving and assert it is unbound; this
# script was missing that test-only cleanup.
c_svc_svm_classifier = nothing
Test.@test isnothing(c_svc_svm_classifier)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End C-SVC code
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Test-only: on Travis CI, copy the output directory back into the cache
# path so later example scripts can reuse the generated artifacts.
if PredictMD.is_travis_ci()
    PredictMD.path_to_cache!(
        ;
        to = ["cpu_examples", "breast_cancer_biopsy", "output",],
        from = [PROJECT_OUTPUT_DIRECTORY],
    )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 9350 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Capture the debug logger's stream for this script (fall back to `devnull`
# when no debug logger is configured) so metric tables can be `show`n to it.
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
logger_stream = isnothing(logger) ? devnull : logger.stream
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
# Resolve the project root from this file's location (splitpath/joinpath
# round-trip normalises the path), then anchor all artifacts under "output".
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY =
    dirname(joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...))
PROJECT_OUTPUT_DIRECTORY = joinpath(PROJECT_DIRECTORY, "output")
# Ensure the output directory tree exists; mkpath also creates parents.
mkpath(PROJECT_OUTPUT_DIRECTORY)
for output_subdirectory in ("data", "models", "plots")
    mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, output_subdirectory))
end
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
    PredictMD.cache_to_path!(
        ;
        from = ["cpu_examples", "breast_cancer_biopsy", "output",],
        to = [PROJECT_OUTPUT_DIRECTORY],
    )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin nu-SVC code
Kernel = LIBSVM.Kernel
Random.seed!(999)

# Helper: full path of a data artifact stored under "<output>/data".
data_artifact_path(file_name) = joinpath(PROJECT_OUTPUT_DIRECTORY, "data", file_name)

# Helper: read a CSV artifact into a DataFrame.  The first 100 rows drive
# column type detection, matching how the artifacts were written.  Extracted
# from twelve copy-pasted load blocks.
load_csv_artifact(filename) = DataFrames.DataFrame(
    FileIO.load(filename; type_detect_rows = 100)
)

trainingandtuning_features_df_filename = data_artifact_path("trainingandtuning_features_df.csv")
trainingandtuning_labels_df_filename = data_artifact_path("trainingandtuning_labels_df.csv")
testing_features_df_filename = data_artifact_path("testing_features_df.csv")
testing_labels_df_filename = data_artifact_path("testing_labels_df.csv")
training_features_df_filename = data_artifact_path("training_features_df.csv")
training_labels_df_filename = data_artifact_path("training_labels_df.csv")
tuning_features_df_filename = data_artifact_path("tuning_features_df.csv")
tuning_labels_df_filename = data_artifact_path("tuning_labels_df.csv")
trainingandtuning_features_df = load_csv_artifact(trainingandtuning_features_df_filename)
trainingandtuning_labels_df = load_csv_artifact(trainingandtuning_labels_df_filename)
testing_features_df = load_csv_artifact(testing_features_df_filename)
testing_labels_df = load_csv_artifact(testing_labels_df_filename)
training_features_df = load_csv_artifact(training_features_df_filename)
training_labels_df = load_csv_artifact(training_labels_df_filename)
tuning_features_df = load_csv_artifact(tuning_features_df_filename)
tuning_labels_df = load_csv_artifact(tuning_labels_df_filename)
smoted_training_features_df_filename = data_artifact_path("smoted_training_features_df.csv")
smoted_training_labels_df_filename = data_artifact_path("smoted_training_labels_df.csv")
smoted_training_features_df = load_csv_artifact(smoted_training_features_df_filename)
smoted_training_labels_df = load_csv_artifact(smoted_training_labels_df_filename)
# Feature name lists were saved as JLD2 key/value artifacts.
categorical_feature_names_filename = data_artifact_path("categorical_feature_names.jld2")
continuous_feature_names_filename = data_artifact_path("continuous_feature_names.jld2")
categorical_feature_names = FileIO.load(
    categorical_feature_names_filename,
    "categorical_feature_names",
)
continuous_feature_names = FileIO.load(
    continuous_feature_names_filename,
    "continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
# Binary classification target: biopsy class, "malignant" is the positive class.
single_label_name = :Class
negative_class = "benign"
positive_class = "malignant"
single_label_levels = [negative_class, positive_class]
categorical_label_names = Symbol[single_label_name]
continuous_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
# Contrasts (categorical encodings) are generated from the SMOTE-balanced
# training data so the model sees every categorical level.
feature_contrasts = PredictMD.generate_feature_contrasts(
    smoted_training_features_df,
    feature_names,
)
# Build a nu-SVC support vector machine classifier backed by LIBSVM.
nu_svc_svm_classifier =
    PredictMD.single_labelmulticlassdataframesvmclassifier(
        feature_names,
        single_label_name,
        single_label_levels;
        package = :LIBSVM,
        svmtype = LIBSVM.NuSVC,
        name = "SVM (nu-SVC)",
        verbose = false,
        feature_contrasts = feature_contrasts,
    )
# Train on the SMOTE-balanced training set.
PredictMD.fit!(
    nu_svc_svm_classifier,
    smoted_training_features_df,
    smoted_training_labels_df,
)
# Histogram of predicted class probabilities on the SMOTE-balanced training set.
nu_svc_svm_classifier_hist_training =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        nu_svc_svm_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        single_label_levels,
    );
# PREDICTMD IF INCLUDE TEST STATEMENTS
"""
    test_plot_save_roundtrip(plot_object, plot_name)

Test-only helper: save `plot_object` to a unique temporary PDF and check the
round-trip — the file must not exist before saving and (when plot testing is
forced) must exist afterwards.  Extracted from repeated per-plot scaffolding.
"""
function test_plot_save_roundtrip(plot_object, plot_name)
    filename = string(tempname(), "_", plot_name, ".pdf")
    rm(filename; force = true, recursive = true,)
    @debug("Attempting to test that the file does not exist...", filename,)
    Test.@test(!isfile(filename))
    @debug("The file does not exist.", filename, isfile(filename),)
    PredictMD.save_plot(filename, plot_object)
    if PredictMD.is_force_test_plots()
        @debug("Attempting to test that the file exists...", filename,)
        Test.@test(isfile(filename))
        @debug("The file does exist.", filename, isfile(filename),)
    end
    return nothing
end
test_plot_save_roundtrip(
    nu_svc_svm_classifier_hist_training,
    "nu_svc_svm_classifier_hist_training",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(nu_svc_svm_classifier_hist_training)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "nu_svc_svm_classifier_hist_training.pdf",
    ),
    nu_svc_svm_classifier_hist_training,
)
# Same histogram on the held-out testing set.
nu_svc_svm_classifier_hist_testing =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        nu_svc_svm_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        single_label_levels,
    );
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_plot_save_roundtrip(
    nu_svc_svm_classifier_hist_testing,
    "nu_svc_svm_classifier_hist_testing",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(nu_svc_svm_classifier_hist_testing)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "nu_svc_svm_classifier_hist_testing.pdf",
    ),
    nu_svc_svm_classifier_hist_testing,
)
# Report binary classification metrics (at a fixed sensitivity of 0.95) on
# the SMOTE-balanced training set ...
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        nu_svc_svm_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
# ... and on the held-out testing set.
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        nu_svc_svm_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
# Persist the trained model under "<output>/models".
nu_svc_svm_classifier_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "models",
    "nu_svc_svm_classifier.jld2",
)
PredictMD.save_model(
    nu_svc_svm_classifier_filename,
    nu_svc_svm_classifier,
)
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Test-only cleanup: release the saved model and confirm it is unbound.
nu_svc_svm_classifier = nothing
Test.@test isnothing(nu_svc_svm_classifier)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End nu-SVC code
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Test-only: on Travis CI, copy the output directory back into the cache
# path so later example scripts can reuse the generated artifacts.
if PredictMD.is_travis_ci()
    PredictMD.path_to_cache!(
        ;
        to = ["cpu_examples", "breast_cancer_biopsy", "output",],
        from = [PROJECT_OUTPUT_DIRECTORY],
    )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
|
[
"MIT"
] | 0.34.21 | 6af1dc255a34ea50e2ea16f11dfe941a2c3965ad | code | 16347 | ## %PREDICTMD_GENERATED_BY%
using PredictMDExtra
PredictMDExtra.import_all()
using PredictMD
PredictMD.import_all()
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Test-only: capture the stream of the current debug logger (keyed by this
# file's name) so metric tables can be `show`n into the debug log; fall back
# to `devnull` when no debug logger is configured.
# NOTE(review): `logger_stream` is consumed by `show(logger_stream, ...)`
# calls that sit OUTSIDE these test markers later in this script; confirm the
# generated non-test variant also defines it.
logger = Base.CoreLogging.current_logger_for_env(Base.CoreLogging.Debug, Symbol(splitext(basename(something(@__FILE__, "nothing")))[1]), something(@__MODULE__, "nothing"))
if isnothing(logger)
    logger_stream = devnull
else
    logger_stream = logger.stream
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### Begin project-specific settings
# Resolve the project root from this file's location (splitpath/joinpath
# round-trip normalises the path), then anchor all artifacts under "output".
DIRECTORY_CONTAINING_THIS_FILE = @__DIR__
PROJECT_DIRECTORY =
    dirname(joinpath(splitpath(DIRECTORY_CONTAINING_THIS_FILE)...))
PROJECT_OUTPUT_DIRECTORY = joinpath(PROJECT_DIRECTORY, "output")
# Ensure the output directory tree exists; mkpath also creates parents.
mkpath(PROJECT_OUTPUT_DIRECTORY)
for output_subdirectory in ("data", "models", "plots")
    mkpath(joinpath(PROJECT_OUTPUT_DIRECTORY, output_subdirectory))
end
# PREDICTMD IF INCLUDE TEST STATEMENTS
@debug("PROJECT_OUTPUT_DIRECTORY: ", PROJECT_OUTPUT_DIRECTORY,)
if PredictMD.is_travis_ci()
    PredictMD.cache_to_path!(
        ;
        from = ["cpu_examples", "breast_cancer_biopsy", "output",],
        to = [PROJECT_OUTPUT_DIRECTORY],
    )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End project-specific settings
### Begin Knet neural network classifier code
Random.seed!(999)

# Helper: full path of a data artifact stored under "<output>/data".
data_artifact_path(file_name) = joinpath(PROJECT_OUTPUT_DIRECTORY, "data", file_name)

# Helper: read a CSV artifact into a DataFrame.  The first 100 rows drive
# column type detection, matching how the artifacts were written.  Extracted
# from twelve copy-pasted load blocks.
load_csv_artifact(filename) = DataFrames.DataFrame(
    FileIO.load(filename; type_detect_rows = 100)
)

trainingandtuning_features_df_filename = data_artifact_path("trainingandtuning_features_df.csv")
trainingandtuning_labels_df_filename = data_artifact_path("trainingandtuning_labels_df.csv")
testing_features_df_filename = data_artifact_path("testing_features_df.csv")
testing_labels_df_filename = data_artifact_path("testing_labels_df.csv")
training_features_df_filename = data_artifact_path("training_features_df.csv")
training_labels_df_filename = data_artifact_path("training_labels_df.csv")
tuning_features_df_filename = data_artifact_path("tuning_features_df.csv")
tuning_labels_df_filename = data_artifact_path("tuning_labels_df.csv")
trainingandtuning_features_df = load_csv_artifact(trainingandtuning_features_df_filename)
trainingandtuning_labels_df = load_csv_artifact(trainingandtuning_labels_df_filename)
testing_features_df = load_csv_artifact(testing_features_df_filename)
testing_labels_df = load_csv_artifact(testing_labels_df_filename)
training_features_df = load_csv_artifact(training_features_df_filename)
training_labels_df = load_csv_artifact(training_labels_df_filename)
tuning_features_df = load_csv_artifact(tuning_features_df_filename)
tuning_labels_df = load_csv_artifact(tuning_labels_df_filename)
smoted_training_features_df_filename = data_artifact_path("smoted_training_features_df.csv")
smoted_training_labels_df_filename = data_artifact_path("smoted_training_labels_df.csv")
smoted_training_features_df = load_csv_artifact(smoted_training_features_df_filename)
smoted_training_labels_df = load_csv_artifact(smoted_training_labels_df_filename)
# Feature name lists were saved as JLD2 key/value artifacts.
categorical_feature_names_filename = data_artifact_path("categorical_feature_names.jld2")
continuous_feature_names_filename = data_artifact_path("continuous_feature_names.jld2")
categorical_feature_names = FileIO.load(
    categorical_feature_names_filename,
    "categorical_feature_names",
)
continuous_feature_names = FileIO.load(
    continuous_feature_names_filename,
    "continuous_feature_names",
)
feature_names = vcat(categorical_feature_names, continuous_feature_names)
# Binary classification target: biopsy class, "malignant" is the positive class.
single_label_name = :Class
negative_class = "benign"
positive_class = "malignant"
single_label_levels = [negative_class, positive_class]
categorical_label_names = Symbol[single_label_name]
continuous_label_names = Symbol[]
label_names = vcat(categorical_label_names, continuous_label_names)
# Source code (a string, parsed later by `PredictMD.parse_functions!`) for the
# MLP forward pass: two ReLU hidden layers and a linear output layer; when
# `probabilities` is true the unnormalized scores are converted via
# log-softmax (`Knet.logp`) and exponentiated.  The string contents are
# runtime data — do not edit them as if they were dead code.
knet_mlp_predict_function_source = """
function knetmlp_predict(
        w,
        x0::AbstractArray;
        probabilities::Bool = true,
        )
    x1 = Knet.relu.( w[1]*x0 .+ w[2] )
    x2 = Knet.relu.( w[3]*x1 .+ w[4] )
    x3 = w[5]*x2 .+ w[6]
    unnormalizedlogprobs = x3
    if probabilities
        normalizedlogprobs = Knet.logp(unnormalizedlogprobs; dims = 1)
        normalizedprobs = exp.(normalizedlogprobs)
        return normalizedprobs
    else
        return unnormalizedlogprobs
    end
end
"""
# Source for the negative-log-likelihood loss (`Knet.nll`) with optional L1
# and L2 penalties; the penalties apply to `modelweights[1:2:end]`, i.e. the
# weight matrices only (biases sit at the even indices).
knet_mlp_loss_function_source = """
function knetmlp_loss(
        predict::Function,
        modelweights,
        x::AbstractArray,
        ytrue::AbstractArray;
        L1::Real = Float64(0),
        L2::Real = Float64(0),
        )
    loss = Knet.nll(
        predict(
            modelweights,
            x;
            probabilities = false,
            ),
        ytrue;
        dims = 1,
        )
    if L1 != 0
        loss += L1 * sum(sum(abs, w_i) for w_i in modelweights[1:2:end])
    end
    if L2 != 0
        loss += L2 * sum(sum(abs2, w_i) for w_i in modelweights[1:2:end])
    end
    return loss
end
"""
# Contrasts (categorical encodings) are generated from the SMOTE-balanced
# training data so the model sees every categorical level.
feature_contrasts = PredictMD.generate_feature_contrasts(
    smoted_training_features_df,
    feature_names,
)
# Initial MLP weights: layer sizes input -> 64 -> 32 -> 2, alternating
# (weight matrix, bias column) pairs; small random Gaussian weights and zero
# biases.
# NOTE(review): the scale literal is `0.1f0` (Float32) multiplied into a
# Float64 array, so the effective scale is Float64(0.1f0), not exactly 0.1 —
# presumably intentional in the generated code; confirm before changing.
knetmlp_modelweights = Any[
    Float64.(
        0.1f0*randn(Float64,64,feature_contrasts.num_array_columns_without_intercept)
    ),
    Float64.(
        fill(Float64(0),64,1)
    ),
    Float64.(
        0.1f0*randn(Float64,32,64)
    ),
    Float64.(
        fill(Float64(0),32,1)
    ),
    Float64.(
        0.1f0*randn(Float64,2,32)
    ),
    Float64.(
        fill(Float64(0),2,1)
    ),
]
# Loss hyperparameters: L1 and L2 regularization both disabled (0.0).
knetmlp_losshyperparameters = Dict()
knetmlp_losshyperparameters[:L1] = Float64(0.0)
knetmlp_losshyperparameters[:L2] = Float64(0.0)
# Momentum optimizer; the empty hyperparameter dict presumably falls back to
# the backend's defaults (TODO confirm against PredictMD's Knet backend).
knetmlp_optimizationalgorithm = :Momentum
knetmlp_optimizerhyperparameters = Dict()
knetmlp_minibatchsize = 48
# Build the Knet-backed MLP classifier; training logs the loss every epoch
# and initially caps training at 50 epochs (raised to 100 further below).
knet_mlp_classifier =
    PredictMD.single_labelmulticlassdataframeknetclassifier(
        feature_names,
        single_label_name,
        single_label_levels;
        package = :Knet,
        name = "Knet MLP",
        predict_function_source = knet_mlp_predict_function_source,
        loss_function_source = knet_mlp_loss_function_source,
        losshyperparameters = knetmlp_losshyperparameters,
        optimizationalgorithm = knetmlp_optimizationalgorithm,
        optimizerhyperparameters = knetmlp_optimizerhyperparameters,
        minibatchsize = knetmlp_minibatchsize,
        modelweights = knetmlp_modelweights,
        printlosseverynepochs = 1,
        maxepochs = 50,
        feature_contrasts = feature_contrasts,
    )
# Compile the predict/loss source strings above into callable functions.
PredictMD.parse_functions!(knet_mlp_classifier)
# First training pass: up to the initial `maxepochs` (50), monitoring the
# tuning set.
PredictMD.fit!(
    knet_mlp_classifier,
    smoted_training_features_df,
    smoted_training_labels_df,
    tuning_features_df,
    tuning_labels_df,
)
# Raise the epoch cap and resume training up to 100 total epochs.
PredictMD.set_max_epochs!(knet_mlp_classifier, 100)
PredictMD.fit!(
    knet_mlp_classifier,
    smoted_training_features_df,
    smoted_training_labels_df,
    tuning_features_df,
    tuning_labels_df,
)
# Learning curve: loss vs. epoch over the whole training run.
knet_learningcurve_lossvsepoch = PredictMD.plotlearningcurve(
    knet_mlp_classifier,
    :loss_vs_epoch;
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
"""
    test_plot_save_roundtrip(plot_object, plot_name)

Test-only helper: save `plot_object` to a unique temporary PDF and check the
round-trip — the file must not exist before saving and (when plot testing is
forced) must exist afterwards.  Extracted from six copies of identical
per-plot test scaffolding.
"""
function test_plot_save_roundtrip(plot_object, plot_name)
    filename = string(tempname(), "_", plot_name, ".pdf")
    rm(filename; force = true, recursive = true,)
    @debug("Attempting to test that the file does not exist...", filename,)
    Test.@test(!isfile(filename))
    @debug("The file does not exist.", filename, isfile(filename),)
    PredictMD.save_plot(filename, plot_object)
    if PredictMD.is_force_test_plots()
        @debug("Attempting to test that the file exists...", filename,)
        Test.@test(isfile(filename))
        @debug("The file does exist.", filename, isfile(filename),)
    end
    return nothing
end
test_plot_save_roundtrip(
    knet_learningcurve_lossvsepoch,
    "knet_learningcurve_lossvsepoch",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsepoch)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "knet_learningcurve_lossvsepoch.pdf",
    ),
    knet_learningcurve_lossvsepoch,
)
# Same curve, skipping the first 10 epochs (early loss dominates the scale).
knet_learningcurve_lossvsepoch_skip10epochs = PredictMD.plotlearningcurve(
    knet_mlp_classifier,
    :loss_vs_epoch;
    startat = 10,
    endat = :end,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_plot_save_roundtrip(
    knet_learningcurve_lossvsepoch_skip10epochs,
    "knet_learningcurve_lossvsepoch_skip10epochs",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsepoch_skip10epochs)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "knet_learningcurve_lossvsepoch_skip10epochs.pdf",
    ),
    knet_learningcurve_lossvsepoch_skip10epochs,
)
# Loss vs. iteration, smoothed over a 50-iteration window, sampled every 10.
knet_learningcurve_lossvsiteration = PredictMD.plotlearningcurve(
    knet_mlp_classifier,
    :loss_vs_iteration;
    window = 50,
    sampleevery = 10,
);
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_plot_save_roundtrip(
    knet_learningcurve_lossvsiteration,
    "knet_learningcurve_lossvsiteration",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsiteration)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "knet_learningcurve_lossvsiteration.pdf",
    ),
    knet_learningcurve_lossvsiteration,
)
# Same curve, skipping the first 100 iterations.
knet_learningcurve_lossvsiteration_skip100iterations =
    PredictMD.plotlearningcurve(
        knet_mlp_classifier,
        :loss_vs_iteration;
        window = 50,
        sampleevery = 10,
        startat = 100,
        endat = :end,
    );
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_plot_save_roundtrip(
    knet_learningcurve_lossvsiteration_skip100iterations,
    "knet_learningcurve_lossvsiteration_skip100iterations",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_learningcurve_lossvsiteration_skip100iterations)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "knet_learningcurve_lossvsiteration_skip100iterations.pdf",
    ),
    knet_learningcurve_lossvsiteration_skip100iterations,
)
# Histogram of predicted class probabilities on the SMOTE-balanced training set.
knet_mlp_classifier_hist_training =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        knet_mlp_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        single_label_levels,
    );
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_plot_save_roundtrip(
    knet_mlp_classifier_hist_training,
    "knet_mlp_classifier_hist_training",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_mlp_classifier_hist_training)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "knet_mlp_classifier_hist_training.pdf",
    ),
    knet_mlp_classifier_hist_training,
)
# Same histogram on the held-out testing set.
knet_mlp_classifier_hist_testing =
    PredictMD.plotsinglelabelbinaryclassifierhistogram(
        knet_mlp_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        single_label_levels,
    );
# PREDICTMD IF INCLUDE TEST STATEMENTS
test_plot_save_roundtrip(
    knet_mlp_classifier_hist_testing,
    "knet_mlp_classifier_hist_testing",
)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
display(knet_mlp_classifier_hist_testing)
PredictMD.save_plot(
    joinpath(
        PROJECT_OUTPUT_DIRECTORY,
        "plots",
        "knet_mlp_classifier_hist_testing.pdf",
    ),
    knet_mlp_classifier_hist_testing,
)
# Report binary classification metrics (at a fixed sensitivity of 0.95) on
# the SMOTE-balanced training set ...
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        knet_mlp_classifier,
        smoted_training_features_df,
        smoted_training_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
# ... and on the held-out testing set.
show(
    logger_stream, PredictMD.singlelabelbinaryclassificationmetrics(
        knet_mlp_classifier,
        testing_features_df,
        testing_labels_df,
        single_label_name,
        positive_class;
        sensitivity = 0.95,
    );
    allrows = true,
    allcols = true,
    splitcols = false,
    )
# Persist the trained model under "<output>/models".
knet_mlp_classifier_filename = joinpath(
    PROJECT_OUTPUT_DIRECTORY,
    "models",
    "knet_mlp_classifier.jld2",
)
PredictMD.save_model(knet_mlp_classifier_filename, knet_mlp_classifier)
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Test-only cleanup: release the saved model and confirm it is unbound.
knet_mlp_classifier = nothing
Test.@test isnothing(knet_mlp_classifier)
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
### End Knet neural network classifier code
# PREDICTMD IF INCLUDE TEST STATEMENTS
# Test-only: on Travis CI, copy the output directory back into the cache
# path so later example scripts can reuse the generated artifacts.
if PredictMD.is_travis_ci()
    PredictMD.path_to_cache!(
        ;
        to = ["cpu_examples", "breast_cancer_biopsy", "output",],
        from = [PROJECT_OUTPUT_DIRECTORY],
    )
end
# PREDICTMD ELSE
# PREDICTMD ENDIF INCLUDE TEST STATEMENTS
## %PREDICTMD_GENERATED_BY%
| PredictMD | https://github.com/bcbi/PredictMD.jl.git |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.