library(shiny)
library(shinyjs)
library(bslib)
library(dplyr)
library(ggplot2)
library(tm)
library(SnowballC)
library(plotly)
library(text2vec)
library(tokenizers)
library(dplyr)
library(tidyr)
library(igraph)
library(ggraph)
library(reshape2)
library(SnowballC)
library(RColorBrewer)
library(syuzhet)
library(cluster)
library(Rtsne)
library(umap)
library(MASS)
library(koRpus)
library(openxlsx)
library(tools)
library(shinyWidgets)
library(readxl)
library(scales)
library(caret)
library(BBmisc)
library(glmnet)
library(pROC)
library(ROCR)
library(car)
library(ResourceSelection)
library(tree)
library(ggplotify)
library(lmtest)
library(gridExtra)
library(patchwork)
library(caret)
library(randomForest)
library(gbm)
library(earth)
library(broom)
library(rlang)
library(ggdendro)
library(pastecs)
# Console/output formatting: wide lines, 4 significant digits, and a very
# large scipen penalty so numbers print in fixed (not scientific) notation.
options(width = 150)
options(digits = 4, scipen = 1000000000)
# Raise Shiny's file-upload limit (default 5 MB) to 30 MB for data files.
options(shiny.maxRequestSize=30*1024^2)
# Update word counts with the words found in a chunk of text lines.
#
# The counts are stored in an environment used as a hash map. Environments
# have reference semantics, so `word_counts` is mutated in place; it is also
# returned for convenience.
#
# @param chunk Character vector of raw text lines.
# @param word_counts Environment mapping word -> numeric count.
# @return The updated `word_counts` environment.
process_chunk <- function(chunk, word_counts) {
  max_word_length <- 1000 # Guard against pathological tokens (e.g. binary junk)
  # Tokenize all lines at once on runs of whitespace (vectorized).
  words <- as.character(unlist(strsplit(chunk, "\\s+")))
  # Drop empty tokens (e.g. from leading whitespace) and over-long ones.
  lens <- nchar(words)
  words <- words[lens > 0 & lens <= max_word_length]
  if (length(words) == 0) {
    return(word_counts)
  }
  # Tally once per unique word instead of one env lookup per token:
  # O(unique words) environment operations rather than O(total tokens).
  tallies <- table(words)
  for (word in names(tallies)) {
    if (!is.null(word_counts[[word]])) {
      current_count <- word_counts[[word]]
    } else {
      current_count <- 0
    }
    # as.numeric keeps stored counts double, matching the original behavior.
    word_counts[[word]] <- current_count + as.numeric(tallies[[word]])
  }
  return(word_counts)
}
# Count word frequencies across an entire text file, reading in fixed-size
# chunks so arbitrarily large files can be processed with bounded memory.
#
# @param file_path Path to a plain-text file.
# @return Named list mapping each word to its occurrence count.
count_word_frequencies <- function(file_path) {
  con <- file(file_path, "r") # Open file connection
  # Guarantee the connection is released even if reading or processing
  # errors out mid-loop (the original explicit close() leaked on error).
  on.exit(close(con), add = TRUE)
  # Pre-size the hash environment for a large vocabulary.
  word_counts <- new.env(hash = TRUE, size = 600000)
  repeat {
    lines <- readLines(con, n = 5000) # Chunk size; tune to system capability
    if (length(lines) == 0) {
      break
    }
    word_counts <- process_chunk(lines, word_counts)
  }
  # Convert environment to a plain named list for easy downstream access.
  as.list(word_counts)
}
ui <- fluidPage(
theme = bs_theme(version = 5, bootswatch = "spacelab"),
useShinyjs(), # Initialize shinyjs
titlePanel("PtteM Data Science"),
tags$head(tags$link(rel = "stylesheet", href="https://fonts.googleapis.com/css?family=Montserrat:100,300,400,700&display=swap"),
tags$style(HTML("
body, h1, h2, h3, h4, h5, h6, .nav, p, a, .shiny-input-container {
font-family: 'Montserrat'; /* Font type for the title attribute */
font-weight: 385;
color: #007c9e !important;
}
* {
font-family: 'Montserrat', sans-serif;
font-weight: 385;
color: #195576; /* Blue color */
}
body {
background-color: #f7f7f7; /* Light gray background */
}
.icon-btn {
border: 1px solid #0d6efd; /* Example border: solid, 2 pixels, #555 color */
border-radius: 15%; /* Circular border */
color: #00969e; /* Icon color */
font-family: 'Montserrat'; /* Font type for the title attribute */
font-weight: 385;
background-color: #f7f7f7;
padding: 125px; /* Space around the icon */
margin: 25px; /* Space around the button */
font-size: 24px; /* Icon size */
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
}
.icon-btn:hover {
color: #00969e; /* Icon color on hover */
border-color: #007c9e;
background-color: #ebfbfd;/* Border color on hover */
}
/* Add custom styles here */
.shiny-input-container {
margin-bottom: 15px;
}
.box {
border: 1px solid #ddd;
padding: 20px;
border-radius: 50px;
margin-bottom: 200px;
gap: 200px;
align-items: center;
}
#statsTable_wrapper {
margin: 0 auto;
}
.shiny-output-error {
border: 1px solid #FF0000; /* Red border on error */
}
/* If you want to change the font size of the tooltip, you can add custom CSS for the 'title' attribute's default styling. */
"))),
tags$head(
# Include JavaScript to reload the page
tags$script(HTML("
document.addEventListener('DOMContentLoaded', function() {
document.getElementById('myElement').style.color = '#0d6efd'; // Change to your desired color
});
"))
),
tags$head(
tags$script(HTML("
function reloadPage() {
window.location.reload();
}
"))
),
# Refresh button that calls the JavaScript function
actionButton("refresh", "Refresh Analysis", onclick = "reloadPage();"),
# Help Text or Information for the user
helpText("Bu uygulama ile metin analizi başlığı altındaki veri bilimi fonksiyonlarına erişebilirsiniz."),
#Supervised Learning
h2("Supervised Learning Section"),
tabsetPanel(
tabPanel("Simple Linear Regression",
sidebarLayout(
sidebarPanel(
fileInput("slrinput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadslr", "Load Data"),
selectInput("targetslr", "Select Target Column", choices = NULL),
selectizeInput("independentVar", "Select Independent Variable", choices = NULL, multiple = FALSE),
sliderInput("dataSplitslr",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
actionButton("slrassumption", "Run Assumption"),
actionButton("slrmodel", "Run SLR Model"),
HTML("
Basit Doğrusal Regresyon Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, bir hedef değişken ile bir veya birden fazla bağımsız değişken arasındaki ilişkiyi modellemek için basit doğrusal regresyon (SLR) analizi yapar. SLR, iki değişken arasındaki ilişkinin doğasını ve gücünü anlamak için kullanılır.
Kullanım Adımları:
- Veri Dosyası Yükleme: SLR analizi için bir CSV veya XLSX dosyasını
fileInput
aracılığıyla yükleyin.
- Hedef ve Bağımsız Değişken Seçimi: Analiz için hedef değişkeni ve bağımsız değişkeni seçin.
- Analizi Çalıştırma:
actionButton
butonlarına tıklayarak SLR modelini ve varsayım kontrollerini çalıştırın.
Kullanıcı Etkileşimi:
Kullanıcılar, dosya yükledikten ve gerekli değişkenleri seçtikten sonra analizi başlatır ve sonuçlar ana panelde görselleştirilir.
Veri Bilimi Alanındaki Kullanımı:
Basit doğrusal regresyon, özellikle iki değişken arasındaki ilişkiyi keşfetmek ve bu ilişkinin gücünü ve yönünü belirlemek için önemli bir yöntemdir. SLR, tahmin modelleri oluşturma, trend analizi ve değişkenler arasındaki ilişkilerin değerlendirilmesi gibi çeşitli alanlarda kullanılır.
Desteklenen Dosya Tipleri:
Kullanıcılar, analiz için CSV (.csv) veya Excel (.xlsx) formatında dosyalar yükleyebilirler.
Sonuçların Yorumlanması:
Elde edilen model özeti, regresyon katsayıları, p-değerleri, R-kare gibi istatistiklerle modelin anlamlılığını ve açıklayıcılığını değerlendirir. Ayrıca, varsayım testleri ve diyagnostik grafikler modelin varsayımlara uygunluğunu kontrol etmek için kullanılır.
- Model Özeti: Modelin istatistiksel anlamlılığını ve açıklayıcılığını değerlendirir.
- Varsayım Testleri: Modelin normal dağılım, homoskedastisite, bağımsızlık ve doğrusallık gibi temel varsayımlara uygunluğunu test eder.
- Regresyon Çizgisi Grafiği: Hedef ve bağımsız değişken arasındaki ilişkiyi görsel olarak gösterir.
Bu özellikler, basit doğrusal regresyon analizinin, veri setinden önemli içgörüler elde etmek ve değişkenler arasındaki ilişkileri anlamak için nasıl kullanılabileceğini gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("slrsummary")),
tabPanel("Assumptions for SLR Model",
tabsetPanel(
tabPanel("Diagnostics",
plotlyOutput("residualsFittedPlot"),
plotlyOutput("qqPlot"),
plotlyOutput("scaleLocationPlot"),
plotlyOutput("residualsLeveragePlot")
),
tabPanel("Shapiro-Wilk Test", verbatimTextOutput("shapiroTest")),
tabPanel("Breusch-Pagan Test", verbatimTextOutput("ncvTest")),
tabPanel("Linearity Plot", plotlyOutput("linearityPlotOutput", width = "100%", height = "700px")),
tabPanel("Durbin-Watson Test", verbatimTextOutput("durbinWatsonTest"))
)
),
tabPanel("SLR Model Evaluation",
tabsetPanel(
tabPanel("Model Summary", verbatimTextOutput("slrmodeleva")),
tabPanel("Correlation Coefficient Between the Variables", verbatimTextOutput("corcoefslr")),
tabPanel("Confidence Interval", verbatimTextOutput("confintslr")),
tabPanel("Regression Line Plot", plotlyOutput("slrregressPlot", width = "100%", height = "625px"))
)
)
)
)
)
),
tabPanel("Multiple Linear Regression",
sidebarLayout(
sidebarPanel(
# Fix: accept filter was ".xslx" (typo), so Excel files could not be
# selected in the MLR panel's file picker.
fileInput("mlrinput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadmlr", "Load Data"),
selectInput("targetmlr", "Select Target Column", choices = NULL),
selectizeInput("independentVarmlr", "Select Independent Variable", choices = NULL, multiple = TRUE),
sliderInput("dataSplitmlr",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
actionButton("mlrassumption", "Run Assumption"),
actionButton("mlrmodel", "Run MLR Model"),
HTML("
Çoklu Doğrusal Regresyon Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, bir hedef değişken ile birden fazla bağımsız değişken arasındaki ilişkiyi modellemek için çoklu doğrusal regresyon (MLR) analizi yapar. MLR, değişkenler arasındaki ilişkilerin karmaşıklığını anlamak ve birden çok bağımsız değişkenin hedef değişken üzerindeki etkisini keşfetmek için kullanılır.
Kullanım Adımları:
- Veri Dosyası Yükleme: MLR analizi için bir CSV veya XLSX dosyasını
fileInput
aracılığıyla yükleyin.
- Hedef ve Bağımsız Değişkenlerin Seçimi: Analiz için hedef değişkeni ve birden fazla bağımsız değişkeni seçin.
- Analizi Çalıştırma:
actionButton
butonlarına tıklayarak MLR modelini ve varsayım kontrollerini çalıştırın.
Kullanıcı Etkileşimi:
Kullanıcılar, dosya yükledikten ve gerekli değişkenleri seçtikten sonra analizi başlatır ve sonuçlar ana panelde görselleştirilir.
Veri Bilimi Alanındaki Kullanımı:
Çoklu doğrusal regresyon, tahmin modelleri oluşturma, çok faktörlü etki analizi ve değişkenler arasındaki ilişkilerin değerlendirilmesi gibi çeşitli alanlarda kullanılır. MLR, birden çok bağımsız değişkenin hedef değişken üzerindeki etkisini ve ilişkilerin yapısını anlamak için tercih edilen bir yöntemdir.
Desteklenen Dosya Tipleri:
Kullanıcılar, analiz için CSV (.csv) veya Excel (.xlsx) formatında dosyalar yükleyebilirler.
Sonuçların Yorumlanması:
Elde edilen model özeti, regresyon katsayıları, p-değerleri, R-kare gibi istatistiklerle modelin anlamlılığını ve açıklayıcılığını değerlendirir. Ayrıca, varsayım testleri ve diyagnostik grafikler modelin varsayımlara uygunluğunu kontrol etmek için kullanılır.
- Model Özeti: Modelin istatistiksel anlamlılığını ve açıklayıcılığını değerlendirir.
- Varsayım Testleri: Modelin normal dağılım, homoskedastisite, bağımsızlık, doğrusallık ve çoklu bağlantı gibi temel varsayımlara uygunluğunu test eder.
- Regresyon Çizgisi Grafiği: Hedef ve bağımsız değişkenler arasındaki ilişkiyi görsel olarak gösterir.
Bu özellikler, çoklu doğrusal regresyon analizinin, veri setinden derinlemesine içgörüler elde etmek ve değişkenler arasındaki ilişkileri anlamak için nasıl kullanılabileceğini gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("mlrsummary")),
tabPanel("Assumptions for MLR Model",
tabsetPanel(
tabPanel("Diagnostics",
plotlyOutput("resFitmlrPlot"),
plotlyOutput("qqPlotmlr"),
plotlyOutput("scaleLocmlrPlot"),
plotlyOutput("resLevmlrPlot")
),
tabPanel("Shapiro-Wilk Test", verbatimTextOutput("shapTestmlr")),
tabPanel("Breusch-Pagan Test", verbatimTextOutput("ncvTestmlr")),
tabPanel("Linearity Plot", plotlyOutput("linPlotmlr", width = "100%", height = "725px")),
tabPanel("Durbin-Watson Test", verbatimTextOutput("dWTestmlr")),
tabPanel("Variance Inflation Factor", verbatimTextOutput("vifmlr"))
)
),
tabPanel("MLR Model Evaluation",
tabsetPanel(
tabPanel("Model Summary", verbatimTextOutput("mlrmodeleva")),
tabPanel("Correlation Coefficient Between the Variables", verbatimTextOutput("corcoefmlr")),
tabPanel("Confidence Interval", verbatimTextOutput("confintmlr")),
tabPanel("Model Evaluation Metrics", verbatimTextOutput("modelevamet")),
tabPanel("Regression Line Plot", plotlyOutput("mlrregressPlot", width = "100%", height = "625px"))
)
),
),
)
)
),
tabPanel("Logistic Regression",
sidebarLayout(
sidebarPanel(
fileInput("glmfile", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadData", "Load Data"),
selectInput("targetglm", "Select Target Column", choices = NULL),
selectizeInput("independentVars", "Select Independent Variables", choices = NULL, multiple = TRUE),
sliderInput("dataSplit",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
actionButton("glmassumption", "Run Assumption"),
actionButton("runLogisticRegression", "Run Logistic Regression"),
HTML("
Lojistik Regresyon Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, bir veya birden fazla bağımsız değişken ile kategorik bir hedef değişken arasındaki ilişkiyi modellemek için lojistik regresyon analizi yapar. Lojistik regresyon, özellikle ikili (binary) sonuçlar için tercih edilen bir yöntemdir ve olasılıkların tahmin edilmesinde kullanılır.
Kullanım Adımları:
- Veri Dosyası Yükleme: Lojistik regresyon analizi için bir CSV veya XLSX dosyasını
fileInput
aracılığıyla yükleyin.
- Hedef ve Bağımsız Değişkenlerin Seçimi: Analiz için kategorik hedef değişkeni ve bir veya birden fazla bağımsız değişkeni seçin.
- Analizi Çalıştırma:
actionButton
butonlarına tıklayarak lojistik regresyon modelini ve varsayım kontrollerini çalıştırın.
Kullanıcı Etkileşimi:
Kullanıcılar, dosya yükledikten ve gerekli değişkenleri seçtikten sonra analizi başlatır ve sonuçlar ana panelde görselleştirilir.
Veri Bilimi Alanındaki Kullanımı:
Lojistik regresyon, sınıflandırma, olasılık tahmini ve risk faktörlerinin incelenmesi gibi çeşitli alanlarda kullanılır. Özellikle, kategorik sonuçların (örneğin, evet/hayır, başarılı/başarısız) olasılıklarının tahmin edilmesinde tercih edilen bir yöntemdir.
Desteklenen Dosya Tipleri:
Kullanıcılar, analiz için CSV (.csv) veya Excel (.xlsx) formatında dosyalar yükleyebilirler.
Sonuçların Yorumlanması:
Elde edilen model özeti, regresyon katsayıları, p-değerleri, ROC eğrisi gibi istatistiklerle modelin anlamlılığını ve performansını değerlendirir. Ayrıca, varsayım testleri modelin varsayımlara uygunluğunu kontrol etmek için kullanılır.
- Model Özeti: Modelin istatistiksel anlamlılığını ve performansını değerlendirir.
- Varsayım Testleri: Modelin normal dağılım, homoskedastisite, bağımsızlık gibi temel varsayımlara uygunluğunu test eder.
- ROC Eğrisi: Modelin sınıflandırma performansını değerlendirir ve AUC (Alan Altında Kalan Alan) değeri ile modelin ayırt edici gücünü gösterir.
Bu özellikler, lojistik regresyon analizinin, veri setinden derinlemesine içgörüler elde etmek ve kategorik sonuçların olasılıklarını tahmin etmek için nasıl kullanılabileceğini gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("dataSummary")),
tabPanel("Assumptions for Model", verbatimTextOutput("glmassumption")),
tabPanel("Logistic Regression Output", verbatimTextOutput("logisticOutput")),
tabPanel("Cross Validation GLM Output", plotlyOutput("glmcvplot")),
tabPanel("Area Under the Curve Plot", plotlyOutput("glmaucplot"))
)
)
)
),
tabPanel("Decision Tree",
sidebarLayout(
sidebarPanel(
fileInput("treedecfile", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
selectInput("targetdectree", "Select Target Column", choices = NULL),
sliderInput("dataSplittree",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
actionButton("rundectree", "Run Prior Steps"),
HTML("
Karar Ağacı Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, sınıflandırma ve regresyon problemleri için karar ağacı modellemesi yapar. Karar ağacı, veri setindeki özelliklerin farklı kombinasyonlarını kullanarak sonuçları tahmin eder ve bu tahminlerin nasıl yapıldığını açıklayabilir bir şekilde görselleştirir.
Kullanım Adımları:
- Veri Dosyası Yükleme: Karar ağacı modellemesi için bir CSV veya XLSX dosyasını
fileInput
aracılığıyla yükleyin.
- Hedef Değişken Seçimi: Modelde tahmin edilecek hedef değişkeni seçin.
- Modellemeyi Çalıştırma:
actionButton
butonuna tıklayarak karar ağacı modellemesini ve ilgili ön adımları çalıştırın.
Kullanıcı Etkileşimi:
Kullanıcılar, dosya yükledikten ve hedef değişkeni seçtikten sonra modellemeyi başlatır ve sonuçlar ana panelde görselleştirilir.
Veri Bilimi Alanındaki Kullanımı:
Karar ağacı, sınıflandırma ve regresyon problemlerinde yaygın olarak kullanılan bir yöntemdir. Ağaç yapısı, modelin kararlarını ve tahminlerini açıklayıcı bir şekilde sunar, bu da modelin yorumlanabilirliğini artırır.
Desteklenen Dosya Tipleri:
Kullanıcılar, analiz için CSV (.csv) veya Excel (.xlsx) formatında dosyalar yükleyebilirler.
Sonuçların Yorumlanması:
Elde edilen karar ağacı modeli, veri setindeki özelliklerin nasıl birleştirildiğini ve sonuçların nasıl tahmin edildiğini gösterir. Model özeti, karar ağacının performansını ve doğruluğunu değerlendirir. Ayrıca, ağacın budanması ve çapraz doğrulama gibi tekniklerle modelin genelleştirilmesi incelenir.
- Model Özeti: Modelin performansını ve doğruluğunu değerlendirir.
- Karar Ağacı Görselleştirme: Karar ağacının nasıl yapılandırıldığını ve tahminlerin nasıl yapıldığını görsel olarak sunar.
- Çapraz Doğrulama: Modelin farklı veri setleri üzerindeki performansını test eder ve genelleştirme yeteneğini değerlendirir.
- Karışıklık Matrisi: Modelin sınıflandırma performansını detaylı bir şekilde gösterir.
Bu özellikler, karar ağacı modellemesinin, veri setinden derinlemesine içgörüler elde etmek ve tahminler yapmak için nasıl kullanılabileceğini gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Finding Right Model",
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("dataSummarydt")),
tabPanel("Run First Model", verbatimTextOutput("rundectree")),
tabPanel("Before Pruning Model's Plot", plotOutput("dectreeplot", width = "100%", height = "750px")),
tabPanel("Cross-validation Plot", plotOutput("cvplot", width = "100%", height = "750px")),
tabPanel("Confusion Matrix", verbatimTextOutput("confMatrix")),
tabPanel("Pruned Tree", plotOutput("pruneddtree", width = "100%", height = "725px")),
)
),
tabPanel("Decision Tree Model Evaluation",
tabsetPanel(
tabPanel("Model Evaluation", verbatimTextOutput("cfdtpteva")),
))
)
),
)
),
tabPanel("Random Forest",
sidebarLayout(
sidebarPanel(
fileInput("rfinput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadrf", "Load Data"),
selectInput("targetrf", "Select Target Column", choices = NULL),
selectizeInput("independentVarrf", "Select Independent Variables", choices = NULL, multiple = TRUE),
sliderInput("dataSplitrf",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
numericInput("mtryInput", "Mtry Value", value = 3, min = 1),
numericInput("ntreeInput", "Ntree Value", value = 14, min = 1),
actionButton("runrf", "Run Prior Steps"),
actionButton("predictBtn", "Predict"),
HTML("
Rastgele Orman Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, sınıflandırma ve regresyon problemleri için rastgele orman modellemesi yapar. Rastgele orman, birden fazla karar ağacını birleştirerek oluşturulan bir topluluk öğrenme yöntemidir. Bu yöntem, modelin genel hatası üzerindeki varyansı azaltır ve aşırı uyuma karşı dirençli olmasını sağlar.
Kullanım Adımları:
- Veri Dosyası Yükleme: Rastgele orman modellemesi için bir CSV veya XLSX dosyasını
fileInput
aracılığıyla yükleyin.
- Hedef Değişken ve Bağımsız Değişkenler Seçimi: Modelde tahmin edilecek hedef değişkeni ve kullanılacak bağımsız değişkenleri seçin.
- Model Parametreleri Ayarlama: Modelin
mtry
ve ntree
değerlerini ayarlayın.
- Modellemeyi Çalıştırma:
actionButton
butonlarına tıklayarak rastgele orman modellemesini ve tahmin işlemini çalıştırın.
Kullanıcı Etkileşimi:
Kullanıcılar, dosya yükledikten, değişkenleri seçtikten ve model parametrelerini ayarladıktan sonra modellemeyi başlatır ve sonuçlar ana panelde görselleştirilir.
Veri Bilimi Alanındaki Kullanımı:
Rastgele orman, sınıflandırma ve regresyon gibi çeşitli makine öğrenmesi problemlerinde kullanılır. Modelin oluşturduğu birden fazla karar ağacının sonuçlarını birleştirerek daha doğru tahminler yapılmasını sağlar.
Desteklenen Dosya Tipleri:
Kullanıcılar, analiz için CSV (.csv) veya Excel (.xlsx) formatında dosyalar yükleyebilirler.
Sonuçların Yorumlanması:
Elde edilen rastgele orman modeli, bağımsız değişkenlerin hedef değişken üzerindeki etkilerini ve önem derecelerini gösterir. Model özeti, rastgele ormanın performansını ve doğruluğunu değerlendirir. Ayrıca, modelin tahminlerini ve bu tahminlerin gerçek değerlerle karşılaştırılmasını içeren görselleştirmeler sunar.
- Model Özeti: Modelin performansını ve doğruluğunu değerlendirir.
- Özellik Önemi: Bağımsız değişkenlerin model üzerindeki etkisinin ve öneminin değerlendirilmesi.
- Model Tahminleri: Modelin tahmin ettiği sonuçlar ve bu tahminlerin gerçek değerlerle karşılaştırılması.
Bu özellikler, rastgele orman modellemesinin, veri setinden derinlemesine içgörüler elde etmek ve tahminler yapmak için nasıl kullanılabileceğini gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("dataSummaryrf")),
tabPanel("Run First Model", verbatimTextOutput("runrf")),
tabPanel("Feature Importance Plot", plotlyOutput("importancePlot", width = "100%", height = "625px")),
tabPanel("Model Prediction",
tabsetPanel(
tabPanel("Predicted Result", verbatimTextOutput("predictionOutput")),
tabPanel("Predicted Plot", plotlyOutput("performancePlot", width = "100%", height = "625px"))
)
)
)
)
)
),
tabPanel("Bagging",
sidebarLayout(
sidebarPanel(
fileInput("bginput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadbg", "Load Data"),
selectInput("targetbg", "Select Target Column", choices = NULL),
selectizeInput("independentVarbg", "Select Independent Variables", choices = NULL, multiple = TRUE),
sliderInput("dataSplitbg",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
numericInput("nbaggInput", "Nbagg Value", value = 14, min = 1),
actionButton("runbg", "Run Prior Steps"),
actionButton("baggingBtn", "Predict"),
HTML("
Çanta (Bagging) Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, çok sayıda karar ağacı modelini birleştirerek güçlü bir makine öğrenimi modeli oluşturan çanta (bagging) yöntemini kullanır. Çanta yöntemi, modelin genel hatasını azaltarak ve aşırı uyum (overfitting) riskini minimize ederek tahminlerin doğruluğunu artırır.
Kullanım Adımları:
- Veri Dosyası Yükleme: CSV veya XLSX formatında bir dosya yükleyerek analize başlayın.
- Hedef ve Bağımsız Değişkenlerin Seçilmesi: Modelde kullanılacak hedef ve bağımsız değişkenleri seçin.
- Model Parametrelerinin Ayarlanması: Modelin performansını etkileyen parametreleri (örneğin, çanta iterasyon sayısı) ayarlayın.
- Modelin Eğitilmesi ve Tahmin Yapılması: Modeli eğitin ve test veri seti üzerinde tahminlerde bulunun.
Kullanıcı Etkileşimi:
Kullanıcılar, veri setini yükledikten ve gerekli seçimleri yaptıktan sonra, modeli eğitmek ve tahminler yapmak için belirtilen butonlara tıklar. Sonuçlar, ana panelde görselleştirilir ve detaylı analizler sunulur.
Veri Bilimi ve Makine Öğrenmesindeki Uygulamaları:
Çanta yöntemi, hem sınıflandırma hem de regresyon problemleri için yaygın olarak kullanılan bir topluluk öğrenme yöntemidir. Bu yöntem, veri setlerinden elde edilen bilgiyi maksimize eder ve modelin genel performansını iyileştirir.
Desteklenen Dosya Tipleri ve Seçenekler:
Panel, kullanıcıların CSV (.csv) veya Excel (.xlsx) formatındaki dosyaları yüklemesine izin verir. Bu, veri bilimcilerin ve analistlerin çeşitli veri setleri üzerinde çalışabilmesi için esneklik sağlar.
Sonuçların Yorumlanması:
Modelin performansı, çeşitli metrikler kullanılarak değerlendirilir. Özellik önem dereceleri, hangi değişkenlerin model tahminlerini en çok etkilediğini gösterir. Ayrıca, tahmin edilen sonuçlar ve modelin genel doğruluğu hakkında bilgiler sunulur.
- Model Özeti: Modelin performans metrikleri ve ayarlanan parametreler hakkında bilgi verir.
- Özellik Önemi: Model tahminlerinde hangi bağımsız değişkenlerin önemli olduğunu gösterir.
- Tahmin Sonuçları: Modelin test veri seti üzerinde yaptığı tahminler ve bu tahminlerin gerçek değerlerle karşılaştırılması.
Bu özellikler, çanta yönteminin veri setlerinden maksimum bilgiyi çıkararak tahminlerin doğruluğunu artırma potansiyelini gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("dataSummarybg")),
tabPanel("Run First Model", verbatimTextOutput("runbg")),
tabPanel("Feature Importance Plot", plotlyOutput("importancePlotbg", width = "100%", height = "625px")),
tabPanel("Model Prediction",
tabsetPanel(
tabPanel("Predicted Result", verbatimTextOutput("predictionOutputbg")),
tabPanel("Predicted Plot", plotlyOutput("performancePlotbg", width = "100%", height = "625px"))
)
)
)
)
)
),
tabPanel("Boosting",
sidebarLayout(
sidebarPanel(
fileInput("bsinput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadbs", "Load Data"),
selectInput("targetbs", "Select Target Column", choices = NULL),
selectizeInput("independentVarbs", "Select Independent Variables", choices = NULL, multiple = TRUE),
sliderInput("dataSplitbs",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
numericInput("nbsInput", "N Trees Value", value = 14, min = 1),
numericInput("nbsdepth", "Interaction Depth", value = 4, min = 1),
numericInput("nbshr", "Shrinkage", value = 0.03, min = 0.0001),
actionButton("runbs", "Run Prior Steps"),
actionButton("boostingBtn", "Predict"),
HTML("
Artırma (Boosting) Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, zayıf öğrenicileri güçlü bir model oluşturmak üzere birleştiren artırma (boosting) yöntemini kullanır. Artırma, bir dizi zayıf modeli sıralı olarak eğitir ve her birini öncekinin hatalarını düzeltmeye odaklanır, böylece modelin genel performansı artar.
Kullanım Adımları:
- Veri Dosyası Yükleme: CSV veya XLSX formatında bir dosya yükleyerek analize başlayın.
- Hedef ve Bağımsız Değişkenlerin Seçilmesi: Modelde kullanılacak hedef ve bağımsız değişkenleri seçin.
- Model Parametrelerinin Ayarlanması: Modelin performansını etkileyen parametreleri (örneğin, ağaç sayısı, etkileşim derinliği, küçültme) ayarlayın.
- Modelin Eğitilmesi ve Tahmin Yapılması: Modeli eğitin ve test veri seti üzerinde tahminlerde bulunun.
Kullanıcı Etkileşimi:
Kullanıcılar, veri setini yükledikten ve gerekli seçimleri yaptıktan sonra, modeli eğitmek ve tahminler yapmak için belirtilen butonlara tıklar. Sonuçlar, ana panelde görselleştirilir ve detaylı analizler sunulur.
Veri Bilimi ve Makine Öğrenmesindeki Uygulamaları:
Artırma yöntemi, genellikle sınıflandırma ve regresyon problemlerinde kullanılır ve modelin aşırı uyuma (overfitting) eğilimini azaltırken tahmin doğruluğunu artırır.
Desteklenen Dosya Tipleri ve Seçenekler:
Panel, kullanıcıların CSV (.csv) veya Excel (.xlsx) formatındaki dosyaları yüklemesine izin verir. Bu, veri bilimcilerin ve analistlerin çeşitli veri setleri üzerinde çalışabilmesi için esneklik sağlar.
Sonuçların Yorumlanması:
Modelin performansı, çeşitli metrikler kullanılarak değerlendirilir. Özellik önem dereceleri, hangi değişkenlerin model tahminlerini en çok etkilediğini gösterir. Ayrıca, tahmin edilen sonuçlar ve modelin genel doğruluğu hakkında bilgiler sunulur.
- Model Özeti: Modelin performans metrikleri ve ayarlanan parametreler hakkında bilgi verir.
- Özellik Önemi: Model tahminlerinde hangi bağımsız değişkenlerin önemli olduğunu gösterir.
- Tahmin Sonuçları: Modelin test veri seti üzerinde yaptığı tahminler ve bu tahminlerin gerçek değerlerle karşılaştırılması.
Bu özellikler, artırma yönteminin veri setlerinden maksimum bilgiyi çıkararak tahminlerin doğruluğunu artırma potansiyelini gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("dataSummarybs")),
tabPanel("Run First Model", verbatimTextOutput("runbs")),
tabPanel("Feature Importance Plot", plotlyOutput("importancePlotbs", width = "100%", height = "625px")),
tabPanel("Model Prediction",
tabsetPanel(
tabPanel("Predicted Result", verbatimTextOutput("predictionOutputbs")),
tabPanel("Predicted Plot", plotlyOutput("performancePlotbs", width = "100%", height = "625px"))
)
)
)
)
)
),
tabPanel("MARS",
sidebarLayout(
sidebarPanel(
fileInput("msinput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadms", "Load Data"),
selectInput("targetms", "Select Target Column", choices = NULL),
selectizeInput("independentVarms", "Select Independent Variables", choices = NULL, multiple = TRUE),
sliderInput("dataSplitms",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
actionButton("runms", "Run Prior Steps"),
actionButton("marsBtn", "Predict"),
HTML("
MARS Paneli
Çıktı Nedir ve Neden Kullanılır?
Bu panel, Çoklu Adaptif Regresyon Spline'ları (MARS) kullanarak veri setinden karmaşık ilişkileri ve etkileşimleri yakalayabilen bir model oluşturur. MARS, lineer olmayan ilişkileri ve değişkenler arasındaki etkileşimleri otomatik olarak tanımlayabilen esnek bir yöntemdir.
Kullanım Adımları:
- Veri Dosyası Yükleme: Analize başlamak için CSV veya XLSX formatında bir dosya yükleyin.
- Hedef ve Bağımsız Değişkenlerin Seçilmesi: Modelde kullanılacak hedef ve bağımsız değişkenleri belirleyin.
- Veri Bölme Oranının Ayarlanması: Modelin eğitim ve test veri setlerine bölünme oranını ayarlayın.
- Modelin Eğitilmesi ve Değerlendirilmesi: Modeli eğitip performansını değerlendirin. Modelin doğruluğunu ve bağımsız değişkenlerin önemini inceleyin.
Kullanıcı Etkileşimi:
Kullanıcılar, analiz için gerekli veri ve parametreleri girer ve modeli eğitmek için belirtilen adımları takip eder. Sonuçlar, kullanıcıya modelin performansı ve değişkenlerin önemi hakkında detaylı bilgiler sunar.
Veri Bilimi ve Makine Öğrenmesindeki Uygulamaları:
MARS yöntemi, özellikle karmaşık ilişkilerin ve değişken etkileşimlerinin olduğu durumlarda tercih edilen bir modelleme tekniğidir. Hem sınıflandırma hem de regresyon problemleri için uygundur.
Desteklenen Dosya Tipleri ve Seçenekler:
Panel, kullanıcıların analiz için CSV (.csv) veya Excel (.xlsx) formatlarında veri dosyaları yüklemesine olanak tanır, bu da farklı veri setleri üzerinde çalışmayı kolaylaştırır.
Sonuçların Yorumlanması:
Modelin performansı, çeşitli metrikler kullanılarak değerlendirilir. Bağımsız değişkenlerin model üzerindeki etkisi, özellik önem dereceleriyle gösterilir. Ayrıca, modelin genel doğruluğu ve tahmin edilen sonuçlar hakkında bilgiler sağlanır.
- Model Özeti: Modelin performans metrikleri ve ayarlanan parametreler hakkında bilgi sağlar.
- Özellik Önemi: Hangi bağımsız değişkenlerin model tahminlerini en çok etkilediğini gösterir.
- Tahmin Sonuçları: Modelin test veri seti üzerinde yaptığı tahminler ve bu tahminlerin gerçek değerlerle karşılaştırılması.
Bu panel, MARS modelinin veri setlerinden karmaşık ilişkileri çıkararak tahminlerin doğruluğunu artırma potansiyeline sahip olduğunu gösterir.
")
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("dataSummaryms")),
tabPanel("Run First Model", verbatimTextOutput("runms")),
tabPanel("Feature Importance Plot", plotlyOutput("importancePlotms", width = "100%", height = "625px")),
tabPanel("Model Prediction",
tabsetPanel(
tabPanel("Predicted Result", verbatimTextOutput("predictionOutputms")),
tabPanel("Predicted Plot", plotlyOutput("performancePlotms", width = "100%", height = "625px"))
)
)
)
)
)
),
# "Ridge Regression" tab: upload a CSV/XLSX file, pick the target column and
# independent variables, choose a train/test split, fit the model ("Run Prior
# Steps") and predict ("Predict"); results appear in the main-panel tabs.
# NOTE(review): actionButton id "runrr" and verbatimTextOutput id "runrr"
# collide -- Shiny ids must be unique across inputs and outputs. Renaming
# either requires a coordinated change in the server code; flagged only.
# FIX: the word "dogrulugunu" in the help text below was split across two
# source lines, which rendered as "dog rulugunu" in the browser; rejoined.
tabPanel("Ridge Regression",
  sidebarLayout(
    sidebarPanel(
      fileInput("rrinput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
      actionButton("loadrr", "Load Data"),
      selectInput("targetrr", "Select Target Column", choices = NULL),
      selectizeInput("independentVarrr", "Select Independent Variables", choices = NULL, multiple = TRUE),
      sliderInput("dataSplitrr",
        "Data Split Ratio",
        min = 0.1,
        max = 0.9,
        value = 0.7, # Default value, for instance, 70% for training and 30% for testing
        step = 0.05,
        ticks = FALSE,
        animate = TRUE),
      actionButton("runrr", "Run Prior Steps"),
      actionButton("RidgeBtn", "Predict"),
      # Turkish help text, rendered verbatim in the sidebar.
      HTML("
Ridge Regresyon Paneli
Çıktı Nedir ve Neden Kullanılır?
Ridge Regresyonu, çoklu doğrusal regresyon modellerinde aşırı uyumun önlenmesine yardımcı olan bir düzenlileştirme tekniğidir. Bu panel, yüksek boyutlu veri setlerinde bile değişkenler arası ilişkileri daha iyi anlamak için Ridge Regresyon modelini kullanır.
Kullanım Adımları:
- Veri Dosyası Yükleme: Analize başlamak için uygun bir CSV veya XLSX dosyası yükleyin.
- Hedef ve Bağımsız Değişkenlerin Seçimi: Modelde hedef değişken olarak neyin tahmin edileceğini ve hangi bağımsız değişkenlerin kullanılacağını seçin.
- Veri Bölme Oranı Ayarlama: Veri setinin eğitim ve test seti olarak nasıl bölüneceğini belirleyin.
- Model Eğitimi ve Değerlendirme: Modeli eğitin ve performansını değerlendirin. Modelin nasıl performans gösterdiğini ve hangi değişkenlerin önemli olduğunu öğrenin.
Kullanıcı Etkileşimi:
Kullanıcılar, panel aracılığıyla veri setlerini yükleyebilir, model parametrelerini ayarlayabilir ve modelin performansını değerlendirme sonuçlarını görüntüleyebilir. Bu süreç, veri bilimi projelerinde kritik öneme sahip olan modelleme ve analiz işlemlerini kolaylaştırır.
Veri Bilimi ve Makine Öğrenmesindeki Uygulamaları:
Ridge Regresyonu, özellikle çok sayıda özellik içeren veri setlerinde ve değişkenler arasında yüksek korelasyon olduğunda kullanışlıdır. Model karmaşıklığını kontrol ederek aşırı uyumu azaltır ve modelin genelleştirme yeteneğini artırır.
Desteklenen Dosya Tipleri ve Seçenekler:
Panel, CSV (.csv) ve Excel (.xlsx) formatlarındaki veri dosyalarını destekler, bu da kullanıcıların çeşitli veri kaynaklarından kolayca veri yüklemesine olanak tanır.
Sonuçların Yorumlanması:
Modelin başarısı, RMSE, R-kare gibi metriklerle değerlendirilir. Ayrıca, değişkenlerin model üzerindeki etkisi ve önemi hakkında bilgiler sunulur, bu da hangi özelliklerin hedef değişkeni en çok etkilediğini anlamaya yardımcı olur.
- Model Özeti: Modelin ayar parametreleri, performans metrikleri ve doğrulama sonuçları hakkında ayrıntılı bilgi sağlar.
- Özellik Önemi: Model tahminlerinde hangi bağımsız değişkenlerin daha etkili olduğunu gösterir.
- Tahmin Sonuçları: Modelin test veri seti üzerindeki tahminleri ve bu tahminlerin gerçek değerlerle olan karşılaştırması.
Ridge Regresyon paneli, modelin veri setindeki özellikler arasındaki karmaşık ilişkileri nasıl yakaladığını ve tahminlerin doğruluğunu nasıl artırdığını gösterir.
")
    ),
    # Result tabs; the server populates the matching output ids.
    mainPanel(
      tabsetPanel(
        tabPanel("Data Summary", verbatimTextOutput("dataSummaryrr")),
        tabPanel("Run First Model", verbatimTextOutput("runrr")),
        tabPanel("Feature Importance Plot", plotlyOutput("importancePlotrr", width = "100%", height = "625px")),
        tabPanel("Model Prediction",
          tabsetPanel(
            tabPanel("Predicted Result", verbatimTextOutput("predictionOutputrr")),
            tabPanel("Predicted Plot", plotlyOutput("performancePlotrr", width = "100%", height = "625px"))
          )
        )
      )
    )
  )
),
# "LASSO Regression" tab: upload a CSV/XLSX file, pick the target and
# independent variables, set the train/test split, then fit and predict.
# NOTE(review): actionButton id "runls" and verbatimTextOutput id "runls"
# (below) collide; Shiny ids should be unique across inputs and outputs.
# Renaming requires a coordinated server-side change -- flagged only.
tabPanel("LASSO Regression",
sidebarLayout(
sidebarPanel(
fileInput("lsinput", "Choose a CSV or XLSX file", accept = c(".csv", ".xlsx")),
actionButton("loadls", "Load Data"),
selectInput("targetls", "Select Target Column", choices = NULL),
selectizeInput("independentVarls", "Select Independent Variables", choices = NULL, multiple = TRUE),
sliderInput("dataSplitls",
"Data Split Ratio",
min = 0.1,
max = 0.9,
value = 0.7, # Default value, for instance, 70% for training and 30% for testing
step = 0.05,
ticks = FALSE,
animate = TRUE),
actionButton("runls", "Run Prior Steps"),
actionButton("LassoBtn", "Predict"),
# Turkish help text, rendered verbatim in the sidebar.
HTML("
LASSO Regresyon Paneli
Çıktı Nedir ve Neden Kullanılır?
LASSO Regresyonu, özellik seçimi ve düzenlileştirme sağlayarak modelin karmaşıklığını azaltır ve genelleştirme kabiliyetini artırır. Bu panel, veri setlerindeki değişkenler arasındaki ilişkileri anlamak ve önemli özellikleri belirlemek için LASSO Regresyon modelini kullanır.
Kullanım Adımları:
- Veri Dosyası Yükleme: Analize başlamak için bir CSV veya XLSX dosyası yükleyin.
- Hedef ve Bağımsız Değişkenlerin Seçimi: Modelin neyi tahmin edeceğini ve hangi bağımsız değişkenlerin kullanılacağını seçin.
- Veri Bölme Oranı Ayarlama: Eğitim ve test setlerinin nasıl bölüneceğini belirleyin.
- Model Eğitimi ve Değerlendirme: Modeli eğitin, performansını değerlendirin ve önemli özellikleri keşfedin.
Kullanıcı Etkileşimi:
Kullanıcılar, veri setlerini yükleyebilir, model parametrelerini ayarlayabilir ve modelin nasıl performans gösterdiğini anlayabilir. Panel, modelleme ve analiz işlemlerini kolaylaştırarak veri bilimi projelerinde değerli bir araç sunar.
Veri Bilimi ve Makine Öğrenmesindeki Uygulamaları:
LASSO Regresyonu, özellikle çok sayıda özelliği olan ve değişkenler arasında yüksek korelasyon bulunan veri setlerinde kullanışlıdır. Model, önemsiz özellikleri sıfıra yaklaştırarak özellik seçimi yapar ve daha anlamlı tahminler sağlar.
Desteklenen Dosya Tipleri ve Seçenekler:
Panel, CSV (.csv) ve Excel (.xlsx) formatlarını destekler, bu da kullanıcıların farklı veri kaynaklarından kolaylıkla veri yüklemesine olanak tanır.
Sonuçların Yorumlanması:
Modelin başarısı, RMSE, R-kare gibi metriklerle değerlendirilir. Ayrıca, modelin hangi değişkenleri önemli bulduğu ve tahminlerin doğruluğu hakkında bilgi sunulur.
- Model Özeti: Modelin performans metrikleri, ayar parametreleri ve doğrulama sonuçları hakkında detaylı bilgiler sağlar.
- Özellik Önemi: Model tahminlerinde hangi bağımsız değişkenlerin daha etkili olduğunu belirler.
- Tahmin Sonuçları: Modelin test veri seti üzerindeki tahmin sonuçları ve bu tahminlerin gerçek değerlerle karşılaştırılması.
LASSO Regresyon paneli, modelin veri setindeki özellikler arasındaki ilişkileri nasıl çözümlendiğini ve tahminlerin doğruluğunu nasıl artırdığını gösterir.
")
),
# Result tabs; the server populates the matching output ids.
mainPanel(
tabsetPanel(
tabPanel("Data Summary", verbatimTextOutput("dataSummaryls")),
tabPanel("Run First Model", verbatimTextOutput("runls")),
tabPanel("Feature Importance Plot", plotlyOutput("importancePlotls", width = "100%", height = "625px")),
tabPanel("Model Prediction",
tabsetPanel(
tabPanel("Predicted Result", verbatimTextOutput("predictionOutputls")),
tabPanel("Predicted Plot", plotlyOutput("performancePlotls", width = "100%", height = "625px"))
)
)
)
)
)
)
)
)
server <- function(input, output, session) {
##Supervised Learning
###Simple Linear Regression
# Define reactive values for each assumption test
shapiroTestResult <- reactiveVal()  # Shapiro-Wilk result holder; appears unused in this section -- the output refits directly
ncvTestResult <- reactiveVal()  # Breusch-Pagan result holder; appears unused in this section
linearityPlot <- reactiveVal()  # linearity plot holder; appears unused in this section
durbinWatsonTestResult <- reactiveVal()  # Durbin-Watson result holder; appears unused in this section
modelslr <- reactiveVal()  # fitted simple linear regression model (assumption tab)
dataslr <- reactiveVal(NULL)  # loaded SLR dataset
modelslreva <- reactiveVal(NULL)  # evaluation-tab model; NOTE(review): re-declared again further down
dataslreva <- reactiveVal(NULL)  # evaluation-tab dataset; NOTE(review): re-declared again further down
# Load the SLR dataset when "Load Data" is clicked: read the uploaded file,
# sanitize its column names, cache it, and refresh the variable pickers.
observeEvent(input$loadslr, {
  uploaded <- input$slrinput
  if (is.null(uploaded)) {
    return(invisible(NULL))  # nothing uploaded yet
  }
  cleaned <- clean_column_names(read_data(uploaded$datapath))
  dataslr(cleaned)
  available_cols <- colnames(cleaned)
  updateSelectInput(session, "targetslr", choices = available_cols)
  updateSelectizeInput(session, "independentVar",
                       choices = setdiff(available_cols, input$targetslr))
})
# Print a base-R summary() of the loaded SLR dataset.
output$slrsummary <- renderPrint({
  ds <- dataslr()
  req(ds)  # wait until data has been loaded
  summary(ds)
})
# Fit the simple linear regression when the assumption-test button is clicked
# and cache it in modelslr(); the diagnostic outputs below read that reactive.
# FIX: the original ended with a bare `summary(fitted_model)` whose value is
# discarded by observeEvent (dead code); removed, along with the unused
# test-split assignment.
observeEvent(input$slrassumption, {
  req(dataslr(), input$targetslr, input$independentVar)
  data_slr <- dataslr()
  target_col <- input$targetslr
  independent_var <- input$independentVar
  # Guard: both columns must exist. NOTE(review): the returned string is
  # silently discarded by observeEvent, so the user gets no feedback here;
  # consider showNotification() instead.
  if (is.null(data_slr[[target_col]]) || is.null(data_slr[[independent_var]])) {
    return("Target or independent variable not found in the data.")
  }
  # Keep only the two modeling columns and drop incomplete rows.
  data_slr <- data_slr %>%
    dplyr::select(all_of(target_col), all_of(independent_var)) %>%
    na.omit()
  # Deterministic split; only the training partition is used for fitting.
  set.seed(123)
  training.samples <- createDataPartition(data_slr[[target_col]],
                                          p = input$dataSplitslr, list = FALSE)
  train_data <- data_slr[training.samples, ]
  # Fit target ~ predictor and publish the model for the diagnostic outputs.
  fitted_model <- lm(reformulate(independent_var, target_col), data = train_data)
  modelslr(fitted_model)
})
# Shapiro-Wilk normality test on the residuals of a freshly fitted SLR model.
# The model is re-fit from current inputs (rather than read from modelslr())
# so this output reacts directly to input changes; the shared reactive is
# updated as a side effect to stay in sync.
# FIX: removed a mid-block `summary(fitted_model)` whose value was never
# printed inside renderPrint (only print()/cat() output is captured), plus
# the unused test-split assignment.
output$shapiroTest <- renderPrint({
  req(dataslr(), input$targetslr, input$independentVar)
  data_slr <- dataslr()
  target_col <- input$targetslr
  independent_var <- input$independentVar
  # Guard: both columns must exist (string is printed as the output).
  if (is.null(data_slr[[target_col]]) || is.null(data_slr[[independent_var]])) {
    return("Target or independent variable not found in the data.")
  }
  # Keep only the modeling columns and drop incomplete rows.
  data_slr <- data_slr %>%
    dplyr::select(all_of(target_col), all_of(independent_var)) %>%
    na.omit()
  # Same deterministic split as the assumption-test observer.
  set.seed(123)
  training.samples <- createDataPartition(data_slr[[target_col]],
                                          p = input$dataSplitslr, list = FALSE)
  train_data <- data_slr[training.samples, ]
  fitted_model <- lm(reformulate(independent_var, target_col), data = train_data)
  modelslr(fitted_model)  # side effect: keep the shared model reactive current
  # Shapiro-Wilk Test for Normality of residuals, with a plain-language verdict.
  cat("\nShapiro-Wilk Test for Normality of Residuals:\n")
  shap_Test <- shapiro.test(fitted_model$residuals)
  print(shap_Test)
  if (shap_Test$p.value > 0.05) {
    cat("Result: Residuals appear to be normally distributed.\n")
  } else {
    cat("Result: Residuals may not be normally distributed.\n")
  }
})
# Breusch-Pagan heteroscedasticity test on the cached SLR model, with a
# plain-language verdict; errors from bptest() are caught and reported.
output$ncvTest <- renderPrint({
  req(modelslr())
  mdl <- modelslr()
  cat("\nBreusch-Pagan Test for Heteroscedasticity:\n")
  bp <- tryCatch(
    bptest(mdl),
    error = function(e) {
      cat("Error in conducting Breusch-Pagan test: ", e$message, "\n")
      NULL
    }
  )
  if (is.null(bp) || is.na(bp$p.value)) {
    cat("Result: Breusch-Pagan test could not be conducted.\n")
  } else {
    print(bp)
    verdict <- if (bp$p.value > 0.05) {
      "Result: No evidence of heteroscedasticity.\n"
    } else {
      "Result: There may be heteroscedasticity.\n"
    }
    cat(verdict)
  }
})
# Interactive linearity check: scatter of target vs predictor over the
# training partition with an lm smoother.
# FIX: the original returned a bare character string from renderPlotly on the
# error path (plotly cannot render a string, so this raised an error);
# replaced with validate(need(...)), which shows the message in the plot
# area. Also removed an unused model fit and test-split assignment.
output$linearityPlotOutput <- renderPlotly({
  req(dataslr(), input$targetslr, input$independentVar)
  data_slr <- dataslr()
  target_col <- input$targetslr
  independent_var <- input$independentVar
  validate(need(
    !is.null(data_slr[[target_col]]) && !is.null(data_slr[[independent_var]]),
    "Target or independent variable not found in the data."
  ))
  # Keep only the modeling columns and drop incomplete rows.
  data_slr <- data_slr %>%
    dplyr::select(all_of(target_col), all_of(independent_var)) %>%
    na.omit()
  # Plot the same training partition the model is fit on (same seed/split).
  set.seed(123)
  training.samples <- createDataPartition(data_slr[[target_col]],
                                          p = input$dataSplitslr, list = FALSE)
  train_data <- data_slr[training.samples, ]
  ggplot_object <- ggplot(train_data, aes_string(x = independent_var, y = target_col)) +
    geom_point(color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    ggtitle("Linearity") +
    scale_x_continuous(name = independent_var) +
    scale_y_continuous(name = target_col)
  ggplotly(ggplot_object)
})
# Durbin-Watson autocorrelation test on the cached SLR model; the header is
# printed only when the test itself succeeds, matching the original flow.
output$durbinWatsonTest <- renderPrint({
  req(modelslr())
  mdl <- modelslr()
  dw <- tryCatch(
    lmtest::dwtest(mdl),
    error = function(e) {
      cat("Error in conducting Durbin-Watson test: ", e$message, "\n")
      NULL
    }
  )
  if (is.null(dw) || is.na(dw$p.value)) {
    cat("Result: Durbin-Watson test could not be conducted.\n")
  } else {
    cat("\nDurbin-Watson Test for Autocorrelation:\n")
    print(dw)
    if (dw$p.value > 0.05) {
      cat("Result: No evidence of autocorrelation.\n")
    } else {
      cat("Result: There may be autocorrelation in the residuals.\n")
    }
  }
})
# Diagnostic plot: residuals vs fitted values for the cached SLR model.
output$residualsFittedPlot <- renderPlotly({
  req(modelslr())
  diag_plot <- ggplot(modelslr(), aes(.fitted, .resid)) +
    geom_point(color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    labs(title = "Residuals vs Fitted", x = "Fitted Values", y = "Residuals")
  ggplotly(diag_plot)
})
# Diagnostic plot: normal Q-Q of standardized residuals for the SLR model.
output$qqPlot <- renderPlotly({
  req(modelslr())
  qq <- ggplot(modelslr(), aes(sample = .stdresid)) +
    stat_qq(color = "darkorange") +
    stat_qq_line(color = "dodgerblue") +
    labs(title = "Normal Q-Q")
  ggplotly(qq)
})
# Diagnostic plot: scale-location (sqrt of |residuals| vs fitted) for the SLR model.
output$scaleLocationPlot <- renderPlotly({
  req(modelslr())
  sl_plot <- ggplot(modelslr(), aes(.fitted, sqrt(abs(.resid)))) +
    geom_point(color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    labs(title = "Scale-Location", x = "Fitted Values", y = "Sqrt(|Residuals|)")
  ggplotly(sl_plot)
})
# Diagnostic plot: standardized residuals vs leverage, point size = Cook's distance.
output$residualsLeveragePlot <- renderPlotly({
  req(modelslr())
  lev_plot <- ggplot(modelslr(), aes(.hat, .stdresid)) +
    geom_point(aes(size = .cooksd), shape = 1, color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    labs(title = "Residuals vs Leverage", x = "Leverage", y = "Standardized Residuals")
  ggplotly(lev_plot)
})
# Define the reactive value for the data and model at the top of the server function
# NOTE(review): dataslreva/modelslreva were already initialized earlier; these
# lines create fresh reactiveVal objects that supersede the earlier ones.
dataslreva <- reactiveVal(NULL)
modelslreva <- reactiveVal(NULL)
test_data_slr <- reactiveVal(NULL)  # held-out test split for the SLR evaluation outputs
# Load and clean data
# NOTE(review): this duplicates the earlier input$loadslr observer; both fire
# on the same click and re-read the uploaded file -- consider merging them.
observeEvent(input$loadslr, {
req(input$slrinput)
file <- input$slrinput
# req() above already guarantees a non-NULL upload; this check is redundant.
if (!is.null(file)) {
data_slreva <- read_data(file$datapath)
data_slreva <- clean_column_names(data_slreva)
dataslreva(data_slreva) # Update the reactive value correctly
updateSelectInput(session, "targetslr", choices = colnames(data_slreva))
updateSelectizeInput(session, "independentVar", choices = setdiff(colnames(data_slreva), input$targetslr))
}
})
# Fit the evaluation model on button click: keep the two modeling columns,
# drop NAs, split deterministically, fit lm on the training part, and stash
# both the fitted model and the held-out test split in their reactives.
observeEvent(input$slrmodel, {
  req(dataslreva(), input$targetslr, input$independentVar)
  df <- dataslreva()
  target_col <- input$targetslr
  independent_var <- input$independentVar
  # Guard: both columns must exist (return value is discarded by observeEvent).
  if (is.null(df[[target_col]]) || is.null(df[[independent_var]])) {
    return("Target or independent variable not found in the data.")
  }
  df <- na.omit(df[, c(target_col, independent_var)])
  set.seed(123)
  idx <- createDataPartition(df[[target_col]], p = input$dataSplitslr, list = FALSE)
  fit <- lm(reformulate(independent_var, target_col), data = df[idx, ])
  modelslreva(fit)
  test_data_slr(df[-idx, ])  # the evaluation outputs predict on this split
})
# Print the evaluation model's summary followed by a fixed interpretation guide.
output$slrmodeleva <- renderPrint({
  req(modelslreva())
  fit <- modelslreva()
  if (!inherits(fit, "lm")) {
    # Printed as-is by renderPrint when the reactive holds something else.
    return("Model has not been run or is not a linear model.")
  }
  cat("Model Summary:\n")
  print(summary(fit))
  cat("\nInterpretation:\n")
  cat("1. Coefficients: Estimates of the model parameters.\n")
  cat(" - Intercept: Represents the predicted value of the dependent variable when all independent variables are zero.\n")
  cat(" - Slope: Represents the change in the dependent variable for a one-unit change in the independent variable.\n")
  cat("2. Residual standard error: Measures the quality of the linear regression fit.\n")
  cat(" - Lower values indicate a better fit.\n")
  cat("3. Multiple R-squared: Indicates the proportion of variance in the dependent variable explained by the model.\n")
  cat(" - Values closer to 1 suggest a better explanatory power of the model.\n")
  cat("4. F-statistic and p-value: Test the overall significance of the model.\n")
  cat(" - A low p-value (< 0.05) indicates that the model is statistically significant.\n")
})
# Correlation between actual and predicted values on the held-out test set,
# followed by a banded plain-language interpretation.
output$corcoefslr <- renderPrint({
  req(modelslreva(), test_data_slr())
  fit <- modelslreva()
  holdout <- test_data_slr()
  predictions <- predict(fit, newdata = holdout)
  cor_accuracy <- cor(holdout[[input$targetslr]], predictions)
  cat("Correlation Coefficient between Actual and Predicted Values:\n")
  cat(cor_accuracy, "\n\n")
  cat("Interpretation:\n")
  verdict <- if (cor_accuracy > 0.75) {
    "The model has a strong positive correlation between actual and predicted values.\n"
  } else if (cor_accuracy > 0.5) {
    "The model has a moderate positive correlation between actual and predicted values.\n"
  } else if (cor_accuracy > 0.25) {
    "The model has a weak positive correlation between actual and predicted values.\n"
  } else {
    "The model shows little to no correlation between actual and predicted values.\n"
  }
  cat(verdict)
  cat("Note: A correlation coefficient close to 1 indicates a strong positive relationship, while values closer to 0 indicate weaker relationships.\n")
})
# Print 95% confidence intervals for the model coefficients and for the
# predictions on the first 10 held-out observations, with explanations.
output$confintslr <- renderPrint({
  req(modelslreva(), test_data_slr())
  fit <- modelslreva()
  holdout <- test_data_slr()
  cat("Model Confidence Intervals (95% Level):\n")
  print(confint(fit, level=0.95))
  cat("\nInterpretation of Model Confidence Intervals:\n")
  cat("The intervals represent the range within which the true model coefficients are likely to fall with 95% confidence.\n")
  cat("For each coefficient, the lower and upper bounds indicate the plausible range of values.\n")
  cat("\nPredicted Confidence Intervals for Test Data (First 10 Observations):\n")
  pred_ci <- predict(fit, newdata = holdout, interval = 'confidence')
  print(head(pred_ci, n=10))
  cat("\nInterpretation of Predicted Confidence Intervals:\n")
  cat("These intervals provide a range within which the true value of the dependent variable is expected to fall for each observation, with 95% confidence.\n")
  cat("The 'fit' column represents the predicted value, while 'lwr' and 'upr' represent the lower and upper bounds of the confidence interval, respectively.\n")
})
# Interactive scatter plot with the fitted regression line over the FULL
# evaluation dataset (not just the training split).
# FIX: the original returned a bare character string from renderPlotly on the
# error path, which plotly cannot render (it errors); replaced with
# validate(need(...)) so the message is shown in the plot area instead.
output$slrregressPlot <- renderPlotly({
  req(modelslreva(), dataslreva())
  data_for_plot <- dataslreva()
  target_col <- input$targetslr
  independent_var <- input$independentVar
  validate(need(
    !is.null(data_for_plot[[target_col]]) && !is.null(data_for_plot[[independent_var]]),
    "Target or independent variable not found in the data."
  ))
  p <- ggplot(data_for_plot, aes_string(x = independent_var, y = target_col)) +
    geom_point(color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    ggtitle("Regression Line Plot") +
    xlab(independent_var) +
    ylab(target_col) +
    theme_minimal() +
    theme(legend.position = "none")
  # Convert to an interactive plotly widget.
  ggplotly(p)
})
###Multiple Linear Regression
# Define reactive values for each assumption test
shapTestmlr <- reactiveVal()  # Shapiro-Wilk result holder; appears unused in this section
ncvTestmlrmlr <- reactiveVal()  # Breusch-Pagan holder; NOTE(review): name has a doubled "mlr" suffix
linPlotmlr <- reactiveVal()  # linearity plot holder; appears unused in this section
dWTestmlr <- reactiveVal()  # Durbin-Watson result holder; appears unused in this section
vifmlr <- reactiveVal()  # VIF result holder; appears unused in this section
modelmlr <- reactiveVal()  # fitted MLR model (assumption tab)
datamlr <- reactiveVal(NULL)  # loaded MLR dataset
modelmlreva <- reactiveVal(NULL)  # fitted MLR model (evaluation tab)
datamlreva <- reactiveVal(NULL)  # evaluation-tab dataset; NOTE(review): re-declared again further down
# Load the MLR dataset when "Load Data" is clicked: read the uploaded file,
# sanitize its column names, cache it, and refresh the variable pickers.
observeEvent(input$loadmlr, {
  uploaded <- input$mlrinput
  if (is.null(uploaded)) {
    return(invisible(NULL))  # nothing uploaded yet
  }
  cleaned <- clean_column_names(read_data(uploaded$datapath))
  datamlr(cleaned)
  available_cols <- colnames(cleaned)
  updateSelectInput(session, "targetmlr", choices = available_cols)
  updateSelectizeInput(session, "independentVarmlr",
                       choices = setdiff(available_cols, input$targetmlr))
})
# Print a base-R summary() of the loaded MLR dataset.
output$mlrsummary <- renderPrint({
  ds <- datamlr()
  req(ds)  # wait until data has been loaded
  summary(ds)
})
# Fit the multiple linear regression when the assumption-test button is
# clicked and cache it in modelmlr(). The guard returns are discarded by
# observeEvent but still abort the handler early.
observeEvent(input$mlrassumption, {
  req(datamlr(), input$targetmlr, input$independentVarmlr)
  df <- datamlr()
  target_col <- input$targetmlr
  predictors <- input$independentVarmlr
  # At least one predictor must be selected.
  if (length(predictors) == 0) {
    return("Please select independent variables.")
  }
  # Drop rows with NAs in the modeling columns.
  df <- na.omit(df[, c(target_col, predictors)])
  if (nrow(df) < 10) {
    return("Dataset is too small after removing NA values.")
  }
  # Sanity-check the split ratio from the slider.
  split_ratio <- input$dataSplitmlr
  if (split_ratio <= 0 || split_ratio >= 1) {
    return("Invalid split ratio. Please choose a value between 0 and 1.")
  }
  # Deterministic partition; only the training rows are used for fitting.
  set.seed(123)
  idx <- createDataPartition(df[[target_col]], p = split_ratio, list = FALSE)
  mlr_formula <- as.formula(paste(target_col, "~", paste(predictors, collapse = "+")))
  modelmlr(lm(mlr_formula, data = df[idx, ]))
})
# Shapiro-Wilk normality check for MLR residuals. Re-fits the model from the
# current inputs so this output is reactive on its own, updates modelmlr() as
# a side effect, then prints the test plus a plain-language interpretation.
output$shapTestmlr <- renderPrint({
req(datamlr(), input$targetmlr, input$independentVarmlr)
data_mlr <- datamlr()
target_col <- input$targetmlr
independent_vars <- input$independentVarmlr
# Ensure that independent variables are selected
if (length(independent_vars) == 0) {
return("Please select independent variables.")
}
# Concatenate independent variables into a formula string
independent_vars_str <- paste(independent_vars, collapse = "+")
# Filter out rows with NAs in relevant columns
data_mlr_filtered <- na.omit(data_mlr[, c(target_col, independent_vars)])
# Check if the dataset is sufficient after NA removal
if (nrow(data_mlr_filtered) < 10) {
return("Dataset is too small after removing NA values.")
}
# Split the data
set.seed(123)
split_ratio <- input$dataSplitmlr
training.samples <- createDataPartition(data_mlr_filtered[[target_col]], p = split_ratio, list = FALSE)
train_data <- data_mlr_filtered[training.samples, ]
# NOTE(review): test_data below is assigned but never used in this renderer.
test_data <- data_mlr_filtered[-training.samples, ]
# Fit the multiple linear regression model
formula_mlr <- as.formula(paste(target_col, "~", independent_vars_str))
fitted_model_mlr <- lm(formula_mlr, data = train_data)
modelmlr(fitted_model_mlr) # Update the reactive value with the fitted model
# Perform Shapiro-Wilk Test for Normality
cat("\nShapiro-Wilk Test for Normality of Residuals:\n")
shap_Test <- shapiro.test(residuals(fitted_model_mlr)) # Perform the test on the model's residuals
print(shap_Test)
# Interpret the test results for the user
# (the multi-line strings below print with their embedded newlines/indentation)
if (shap_Test$p.value > 0.05) {
cat("Interpretation: With a p-value greater than 0.05, there is no statistical evidence to reject the null hypothesis that the residuals are normally distributed.
This suggests that the residuals of the model do not deviate significantly from a normal distribution, meeting one of the key assumptions of linear regression.\n")
} else {
cat("Interpretation: A p-value less than or equal to 0.05 suggests that the residuals are not normally distributed.
This could potentially violate the normality assumption of linear regression. In such cases, consider transforming the dependent variable,
adding polynomial terms or interaction effects, or using a non-linear modeling approach.\n")
}
})
# Breusch-Pagan heteroscedasticity test on the cached MLR model, with an
# interpretation for the user; bptest() errors are caught and reported.
output$ncvTestmlr <- renderPrint({
req(modelmlr()) # Ensure the MLR model is available for the test
fitted_model <- modelmlr() # Retrieve the fitted model
# Execute the Breusch-Pagan Test for Heteroscedasticity
cat("\nBreusch-Pagan Test for Heteroscedasticity:\n")
bp_test_result <- tryCatch({
bptest(fitted_model) # bptest() function from the lmtest package
}, error = function(e) {
cat("Error in conducting Breusch-Pagan test: ", e$message, "\n")
NULL # Return NULL if there's an error to handle it gracefully
})
# Interpret the test results for the user
# (the multi-line strings below print with their embedded newlines/indentation)
if (!is.null(bp_test_result) && !is.na(bp_test_result$p.value)) {
print(bp_test_result)
if (bp_test_result$p.value > 0.05) {
cat("Interpretation: With a p-value greater than 0.05, there is no statistical evidence of heteroscedasticity.
This suggests that the variance of residuals is constant across the levels of the independent variables,
which is a desirable property in regression models.\n")
} else {
cat("Interpretation: A p-value less than or equal to 0.05 indicates the presence of heteroscedasticity.
It suggests that the variance of residuals varies across levels of the independent variables.
This can impact the reliability of the regression coefficients' standard errors and the model's predictions.
Consider using weighted least squares or other forms of heteroscedasticity-consistent standard errors.\n")
}
} else {
cat("Result: The Breusch-Pagan test could not be conducted. Check if the model is correctly specified, or consider other diagnostic tests for heteroscedasticity.\n")
}
})
# Render plot for each independent variable
# One linearity panel per selected predictor (scatter + lm smoother); a
# single panel is returned directly, multiple panels are stacked via subplot().
output$linPlotmlr <- renderPlotly({
  req(datamlr(), modelmlr(), input$targetmlr, input$independentVarmlr)
  df <- datamlr()
  target_col <- input$targetmlr
  predictors <- input$independentVarmlr
  # Build one plotly panel for a single predictor.
  panel_for <- function(var) {
    g <- ggplot(df, aes_string(x = var, y = target_col)) +
      geom_point() +
      geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
      labs(title = paste("Linearity with", var), x = var, y = target_col) +
      theme(
        plot.title = element_text(size = 12, hjust = 0.5),
        plot.margin = margin(5, 5, 5, 5)
      ) +
      geom_text(aes(label = var), hjust = 0, vjust = 1, size = 2.35, color = "darkorange")
    ggplotly(g) %>% layout(title = paste("Linearity with", var))
  }
  panels <- lapply(predictors, panel_for)
  if (length(panels) == 1) {
    return(panels[[1]])
  }
  subplot(
    panels,
    nrows = length(panels),
    shareX = TRUE,
    titleX = FALSE,
    margin = 0.05
  ) %>% layout(title = "Linearity Plots", margin = list(t = 40, b = 80, l = 40, r = 40))
})
# Durbin-Watson autocorrelation test on the cached MLR model, with an
# interpretation for the user; dwtest() errors are caught and reported.
output$dWTestmlr <- renderPrint({
req(modelmlr()) # Ensure modelmlr is available
fitted_model <- modelmlr() # Access the model
# Perform Durbin-Watson Test for Autocorrelation
cat("\nDurbin-Watson Test for Autocorrelation:\n")
dw_test_result <- tryCatch({
lmtest::dwtest(fitted_model)
}, error = function(e) {
cat("Error in conducting Durbin-Watson test: ", e$message, "\n")
NULL # Return NULL in case of error
})
# Check the test results and provide interpretation
# (the multi-line strings below print with their embedded newlines/indentation)
if (!is.null(dw_test_result) && !is.na(dw_test_result$p.value)) {
print(dw_test_result)
if (dw_test_result$p.value > 0.05) {
cat("Interpretation: With a p-value greater than 0.05, there is no statistical evidence of autocorrelation in the residuals.
This suggests that the residuals are independent of each other, which is an assumption of the linear regression model.\n")
} else {
cat("Interpretation: A p-value less than or equal to 0.05 suggests that there is statistical evidence of autocorrelation in the residuals.
This could mean that the model is missing important predictors, there is a time series structure not accounted for, or the data is not being captured by the model adequately.
Consider investigating time series models, adding lagged variables, or exploring other model specifications.\n")
}
} else {
cat("Result: The Durbin-Watson test could not be conducted. This could be due to computational issues or other data-related problems.\n")
}
})
# Variance Inflation Factors for the cached MLR model, flagging potential
# multicollinearity (VIF > 10) with advice for the user.
output$vifmlr <- renderPrint({
  req(modelmlr())
  fit <- modelmlr()
  cat("Variance Inflation Factor (VIF) Results:\n")
  vif_values <- vif(fit)
  print(vif_values)
  if (!any(vif_values > 10)) {
    cat("VIF values less than 10 are generally considered acceptable, indicating no severe multicollinearity between the predictors.\n")
  } else {
    cat("Note: High VIF values (greater than 10) indicate potential multicollinearity issues among predictors.\n")
    cat("This can affect the reliability of the regression coefficients. Consider removing or combining variables, or using dimensionality reduction techniques like PCA.\n")
  }
})
# Diagnostic plot: residuals vs fitted values for the cached MLR model.
output$resFitmlrPlot <- renderPlotly({
  req(modelmlr())
  diag_plot <- ggplot(modelmlr(), aes(.fitted, .resid)) +
    geom_point(color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    labs(title = "Residuals vs Fitted", x = "Fitted Values", y = "Residuals")
  ggplotly(diag_plot)
})
####Diagnostic Plots
# Diagnostic plot: normal Q-Q of standardized residuals for the MLR model.
output$qqPlotmlr <- renderPlotly({
  req(modelmlr())
  qq <- ggplot(modelmlr(), aes(sample = .stdresid)) +
    stat_qq(color = "darkorange") +
    stat_qq_line(color = "dodgerblue") +
    labs(title = "Normal Q-Q")
  ggplotly(qq)
})
# Diagnostic plot: scale-location (sqrt of |residuals| vs fitted) for the MLR model.
output$scaleLocmlrPlot <- renderPlotly({
  req(modelmlr())
  sl_plot <- ggplot(modelmlr(), aes(.fitted, sqrt(abs(.resid)))) +
    geom_point(color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    labs(title = "Scale-Location", x = "Fitted Values", y = "Sqrt(|Residuals|)")
  ggplotly(sl_plot)
})
# Diagnostic plot: standardized residuals vs leverage, point size = Cook's distance.
output$resLevmlrPlot <- renderPlotly({
  req(modelmlr())
  lev_plot <- ggplot(modelmlr(), aes(.hat, .stdresid)) +
    geom_point(aes(size = .cooksd), shape = 1, color = "darkorange") +
    geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
    labs(title = "Residuals vs Leverage", x = "Leverage", y = "Standardized Residuals")
  ggplotly(lev_plot)
})
# Define the reactive value for the data and model at the top of the server function
datamlreva <- reactiveVal(NULL)  # re-declares the reactive created earlier; later code sees this one
mlrmodeleva <- reactiveVal(NULL)  # NOTE(review): appears unused in this section -- the model observer stores into modelmlreva instead
test_data_mlr <- reactiveVal(NULL)  # held-out test split for the MLR evaluation outputs
# Load and clean data
# NOTE(review): this duplicates the earlier input$loadmlr observer; both fire
# on the same click and re-read the uploaded file -- consider merging them.
observeEvent(input$loadmlr, {
req(input$mlrinput)
file <- input$mlrinput
# req() above already guarantees a non-NULL upload; this check is redundant.
if (!is.null(file)) {
data_mlreva <- read_data(file$datapath)
data_mlreva <- clean_column_names(data_mlreva)
datamlreva(data_mlreva) # Update the reactive value correctly
updateSelectInput(session, "targetmlr", choices = colnames(data_mlreva))
updateSelectizeInput(session, "independentVarmlr", choices = setdiff(colnames(data_mlreva), input$targetmlr))
}
})
# Fit the MLR evaluation model on button click: keep the modeling columns,
# drop NAs, split deterministically, fit lm on the training part, and stash
# both the fitted model (modelmlreva) and the held-out test split.
observeEvent(input$mlrmodel, {
  req(datamlreva(), input$targetmlr, input$independentVarmlr)
  df <- datamlreva()
  target_col <- input$targetmlr
  predictors <- input$independentVarmlr
  # At least one predictor must be selected (return value is discarded by
  # observeEvent but aborts the handler early).
  if (length(predictors) == 0) {
    return("Please select independent variables.")
  }
  # Drop rows with NAs in the modeling columns.
  df <- na.omit(df[, c(target_col, predictors)])
  set.seed(123)
  idx <- createDataPartition(df[[target_col]], p = input$dataSplitmlr, list = FALSE)
  mlr_formula <- as.formula(paste(target_col, "~", paste(predictors, collapse = "+")))
  fit <- lm(mlr_formula, data = df[idx, ])
  modelmlreva(fit)
  test_data_mlr(df[-idx, ])  # the evaluation outputs predict on this split
})
# Print the lm() summary together with a plain-English guide to reading it.
output$mlrmodeleva <- renderPrint({
req(modelmlreva())
fitted_modelmlr <- modelmlreva()
# Only genuine lm objects get the full summary treatment.
if (inherits(fitted_modelmlr, "lm")) {
# Model Summary
cat("Model Summary:\n")
print(summary(fitted_modelmlr))
# Interpretation of the key components
cat("\nInterpretation:\n")
cat("1. Coefficients: Estimates of the model parameters.\n")
cat(" - Intercept: Represents the predicted value of the dependent variable when all independent variables are zero.\n")
cat(" - Slope: Represents the change in the dependent variable for a one-unit change in the independent variable.\n")
cat("2. Residual standard error: Measures the quality of the linear regression fit.\n")
cat(" - Lower values indicate a better fit.\n")
cat("3. Multiple R-squared: Indicates the proportion of variance in the dependent variable explained by the model.\n")
cat(" - Values closer to 1 suggest a better explanatory power of the model.\n")
cat("4. F-statistic and p-value: Test the overall significance of the model.\n")
cat(" - A low p-value (< 0.05) indicates that the model is statistically significant.\n")
# Additional specific interpretations can be added here
} else {
# Fallback message; renderPrint shows this string as the output value.
"Model has not been run or is not a linear model."
}
})
# Render print for correlation coefficient
# Correlation between held-out actuals and model predictions, with a
# qualitative interpretation of the coefficient's strength.
output$corcoefmlr <- renderPrint({
  req(modelmlreva(), test_data_mlr()) # Ensure model and test data are available
  fitted_modelmlr <- modelmlreva()
  test_data <- test_data_mlr() # Access the test data
  target_col <- input$targetmlr
  # FIX: removed an unused `independent_vars <- input$independentVar`
  # assignment; it also referenced a non-existent input id
  # (`independentVar` instead of `independentVarmlr`).
  # Prediction and calculation of correlation coefficient
  lm_predict <- predict(fitted_modelmlr, newdata = test_data)
  actual_pred <- data.frame(actuals = test_data[[target_col]], predicted = lm_predict)
  cor_accuracy <- cor(actual_pred$actuals, actual_pred$predicted)
  # Output with interpretation
  cat("Correlation Coefficient between Actual and Predicted Values:\n")
  cat(cor_accuracy, "\n\n")
  # Bucket the coefficient into a qualitative verdict.
  cat("Interpretation:\n")
  if (cor_accuracy > 0.75) {
    cat("The model has a strong positive correlation between actual and predicted values.\n")
  } else if (cor_accuracy > 0.5) {
    cat("The model has a moderate positive correlation between actual and predicted values.\n")
  } else if (cor_accuracy > 0.25) {
    cat("The model has a weak positive correlation between actual and predicted values.\n")
  } else {
    cat("The model shows little to no correlation between actual and predicted values.\n")
  }
  cat("Note: A correlation coefficient close to 1 indicates a strong positive relationship, while values closer to 0 indicate weaker relationships.\n")
})
# Print 95% confidence intervals for the model coefficients and for the
# predictions on the first rows of the held-out test set.
output$confintmlr <- renderPrint({
  req(modelmlreva(), test_data_mlr()) # Ensure model and test data are available
  fitted_modelmlr <- modelmlreva()
  test_data <- test_data_mlr() # Access the test data
  target_col <- input$targetmlr
  # FIX: removed an unused `independent_vars` assignment (never read below).
  # Model Confidence Intervals
  cat("Model Confidence Intervals (95% Level):\n")
  conf_intervals <- confint(fitted_modelmlr, level = 0.95)
  print(conf_intervals)
  cat("\nInterpretation of Model Confidence Intervals:\n")
  cat("The intervals represent the range within which the true model coefficients are likely to fall with 95% confidence.\n")
  cat("For each coefficient, the lower and upper bounds indicate the plausible range of values.\n")
  # Predicted Confidence Intervals for Test Data
  cat("\nPredicted Confidence Intervals for Test Data (First 10 Observations):\n")
  conf_int_predictions <- predict(fitted_modelmlr, newdata = test_data, interval = 'confidence')
  print(head(conf_int_predictions, n = 10))
  cat("\nInterpretation of Predicted Confidence Intervals:\n")
  cat("These intervals provide a range within which the true value of the dependent variable is expected to fall for each observation, with 95% confidence.\n")
  cat("The 'fit' column represents the predicted value, while 'lwr' and 'upr' represent the lower and upper bounds of the confidence interval, respectively.\n")
})
# Held-out evaluation metrics for the fitted MLR model: adjusted R^2 from
# the training fit plus MSE / RMSE / MAE / MAPE computed on the test set.
output$modelevamet <- renderPrint({
req(modelmlreva(), test_data_mlr()) # Ensure model and test data are available
fitted_modelmlr <- modelmlreva()
test_data <- test_data_mlr() # Access the test data
target_col_name <- input$targetmlr
# Check if the target column exists in test_data
if (!target_col_name %in% names(test_data)) {
cat("Target column not found in test data.\n")
return()
}
# Predictions
predictions <- predict(fitted_modelmlr, newdata = test_data)
# Check for NA values in predictions
if (any(is.na(predictions))) {
cat("NA values found in predictions.\n")
return()
}
# Calculate metrics
R2_adj <- summary(fitted_modelmlr)$adj.r.squared # Adjusted R-squared
MSE <- mean((test_data[[target_col_name]] - predictions)^2, na.rm = TRUE) # Mean Squared Error
RMSE <- sqrt(MSE) # Root Mean Squared Error
MAE <- mean(abs(test_data[[target_col_name]] - predictions), na.rm = TRUE) # Mean Absolute Error
# MAPE function with check for zero values; returns NA (after printing a
# note) because division by a zero actual would be undefined.
mape <- function(actual, predicted){
if (any(actual == 0)) {
cat("MAPE calculation: Actual values contain zero.\n")
return(NA)
}
mean(abs((actual - predicted) / actual), na.rm = TRUE) * 100
}
MAPE <- mape(test_data[[target_col_name]], predictions) # Mean Absolute Percentage Error
# Output with interpretation
cat("Multiple Linear Regression Evaluation Metrics:\n\n")
cat("Adjusted R-squared:\n")
cat("Value: ", R2_adj, "\n")
cat("Interpretation: Adjusted R-squared accounts for the number of predictors in the model. A higher value closer to 1 indicates a strong explanatory power of the model.\n\n")
cat("Mean Squared Error (MSE):\n")
cat("Value: ", MSE, "\n")
cat("Interpretation: MSE represents the average of the squares of the errors. Lower values indicate that the model's predictions are more accurate.\n\n")
cat("Root Mean Squared Error (RMSE):\n")
cat("Value: ", RMSE, "\n")
cat("Interpretation: RMSE is the square root of MSE and gives an estimate of the error magnitude in the same units as the response variable. Lower values suggest a closer fit of the model to the data.\n\n")
cat("Mean Absolute Error (MAE):\n")
cat("Value: ", MAE, "\n")
cat("Interpretation: MAE measures the average magnitude of the errors in the predictions. A lower MAE value suggests a better fit of the model to the observed data.\n\n")
cat("Mean Absolute Percentage Error (MAPE):\n")
cat("Value: ", MAPE, "\n")
cat("Interpretation: MAPE indicates the prediction accuracy as a percentage. Lower values close to 0% indicate high predictive accuracy of the model.\n")
cat("\nNote: While evaluating these metrics, it's crucial to contextualize them within the specific domain and objectives of your model. Statistical significance may not always equate to practical significance.")
})
# One scatter-plus-fit panel per selected predictor, stacked vertically
# when several predictors are chosen.
output$mlrregressPlot <- renderPlotly({
  req(modelmlreva(), datamlreva(), input$targetmlr, input$independentVarmlr)
  data_for_plot <- datamlreva()
  target_col <- input$targetmlr
  independent_vars <- input$independentVarmlr
  # Check if variables are selected and dataset is valid
  if (is.null(data_for_plot) || is.null(data_for_plot[[target_col]]) || length(independent_vars) == 0) {
    return("Please ensure target and independent variables are selected and the dataset is valid.")
  }
  # Create a list of plots, one for each independent variable
  plots_list <- lapply(independent_vars, function(var) {
    # FIX: aes_string() is deprecated since ggplot2 3.4; map columns by
    # name through the .data pronoun instead.
    p <- ggplot(data_for_plot, aes(x = .data[[var]], y = .data[[target_col]])) +
      geom_point(color = "darkorange") +
      geom_smooth(method = "lm", se = FALSE, color = "dodgerblue") +
      ggtitle(paste("Regression Line with", var)) +
      xlab(var) +
      ylab(target_col) +
      theme_minimal() +
      theme(legend.position = "none")
    ggplotly(p)
  })
  # Combine plots if there are multiple independent variables
  if (length(plots_list) > 1) {
    combined_plot <- subplot(plots_list, nrows = length(plots_list), shareX = TRUE, titleX = FALSE)
    return(combined_plot)
  } else {
    return(plots_list[[1]])
  }
})
### Logistic Regression
# Reactive holder for the logistic-regression dataset.
# NOTE(review): the name `data` shadows base::data(); inside the server it
# resolves to this reactiveVal, but a more descriptive name would be safer.
data <- reactiveVal(NULL)
# Load the uploaded file, sanitize column names, and refresh the pickers.
observeEvent(input$loadData, {
file <- input$glmfile
if (!is.null(file)) {
data_df <- read_data(file$datapath)
data_df <- clean_column_names(data_df)
data(data_df)
updateSelectInput(session, "targetglm", choices = colnames(data_df))
updateSelectizeInput(session, "independentVars", choices = setdiff(colnames(data_df), input$targetglm))
}
})
# Sanitize column names: make them syntactically valid and unique, then
# strip every character that is not alphanumeric or an underscore.
clean_column_names <- function(dataframe) {
  safe_names <- make.names(colnames(dataframe), unique = TRUE)
  colnames(dataframe) <- gsub("[^[:alnum:]_]", "", safe_names)
  return(dataframe)
}
# Column-wise summary of the uploaded logistic-regression dataset.
output$dataSummary <- renderPrint({
req(data())
summary(data())
})
# Assumption checks for the logistic regression: fits a plain glm on the
# (standardized) training split, then prints VIF (multicollinearity) and
# the Hosmer-Lemeshow goodness-of-fit test.
output$glmassumption <- renderPrint({
  req(data())
  target_col <- input$targetglm
  independent_vars <- input$independentVars
  data_df <- data() # Original data
  # Binarize a numeric target at its median ("No" below, "Yes" at/above).
  if (is.numeric(data_df[[target_col]])) {
    median_val <- median(data_df[[target_col]], na.rm = TRUE)
    data_df[[target_col]] <- as.factor(ifelse(data_df[[target_col]] < median_val, "No", "Yes"))
  }
  # Keep only the modelling columns and drop incomplete rows.
  data_df <- data_df %>% dplyr::select(all_of(target_col), all_of(independent_vars)) %>%
    na.omit()
  # Seeded train/test split, then standardization (BBmisc::normalize).
  set.seed(123)
  split_ratio <- input$dataSplit
  training.samples <- createDataPartition(data_df[[target_col]], p = split_ratio, list = FALSE)
  train_data <- data_df[training.samples, ]
  test_data <- data_df[-training.samples, ]
  train_data <- normalize(train_data, method = "standardize", range = c(0, 1), margin = 1L, on.constant = "quiet")
  test_data <- normalize(test_data, method = "standardize", range = c(0, 1), margin = 1L, on.constant = "quiet")
  formula <- as.formula(paste(target_col, "~", paste(independent_vars, collapse = "+")))
  # FIX: removed unused `x`/`y` design-matrix construction — this output
  # fits a plain glm from the formula; the glmnet matrices are built in
  # the runLogisticRegression observer instead.
  # Fit a standard logistic regression model with increased max iterations.
  glm_model <- glm(formula, data = train_data, family = binomial(),
                   control = glm.control(maxit = 50))
  # Surface non-convergence in the printed report.
  if (!glm_model$converged) {
    cat("Warning: The logistic regression model did not converge.\n")
  }
  # VIF - to check for multicollinearity
  cat("Variance Inflation Factor (VIF) Results:\n")
  vif_results <- vif(glm_model)
  print(vif_results)
  if (any(vif_results > 10)) {
    cat("Note: High VIF values indicate potential multicollinearity issues among predictors.\n")
  }
  # Hosmer-Lemeshow test - to check goodness of fit
  cat("\nHosmer-Lemeshow Goodness of Fit Test:\n")
  tryCatch({
    hl_test <- hoslem.test(glm_model$y, fitted(glm_model), g = 5) # Adjusted g value
    print(hl_test)
  }, error = function(e) {
    cat("Note: Hosmer-Lemeshow test could not be conducted. This may indicate issues with model fit or data.\n")
  })
})
# Fit a LASSO-penalized logistic regression (glmnet) with cross-validated
# lambda, evaluate it on a held-out split, and wire up the text report,
# CV-curve plot, and ROC plot.
observeEvent(input$runLogisticRegression, {
  req(data())
  target_col <- input$targetglm
  independent_vars <- input$independentVars
  data_df <- data() # Original data
  # Binarize a numeric target at its median ("No" below, "Yes" at/above).
  if (is.numeric(data_df[[target_col]])) {
    median_val <- median(data_df[[target_col]], na.rm = TRUE)
    data_df[[target_col]] <- as.factor(ifelse(data_df[[target_col]] < median_val, "No", "Yes"))
  }
  # Keep only the modelling columns and drop incomplete rows.
  data_df <- data_df %>% dplyr::select(all_of(target_col), all_of(independent_vars)) %>%
    na.omit()
  # Seeded train/test split, then standardization of both partitions.
  set.seed(123)
  split_ratio <- input$dataSplit
  training.samples <- createDataPartition(data_df[[target_col]], p = split_ratio, list = FALSE)
  train_data <- data_df[training.samples, ]
  test_data <- data_df[-training.samples, ]
  train_data <- normalize(train_data, method = "standardize", range = c(0, 1), margin = 1L, on.constant = "quiet")
  test_data <- normalize(test_data, method = "standardize", range = c(0, 1), margin = 1L, on.constant = "quiet")
  # FIX: removed an unused `formula` object — this observer fits through
  # glmnet's x/y interface, not glm().
  x <- as.matrix(train_data[, setdiff(names(train_data), target_col)])
  y <- train_data[[target_col]]
  # Cross-validation picks the regularization strength (lambda.min).
  cv_model <- cv.glmnet(x, y, family = "binomial", alpha = 1)
  optimal_lambda <- cv_model$lambda.min
  # Extract coefficients at the optimal lambda.
  coefficients <- coef(cv_model, s = optimal_lambda)
  # Convert the sparse matrix to a regular matrix and then to a dataframe.
  coef_matrix <- as.matrix(coefficients)
  coef_df <- as.data.frame(coef_matrix)
  names(coef_df) <- c("Coefficients")
  # FIX: was `names(coefficients)`, which is NULL for a sparse dgCMatrix
  # and silently wiped the predictor labels from the printed table.
  rownames(coef_df) <- rownames(coef_matrix)
  # Score the held-out test set.
  x_test <- as.matrix(test_data[, independent_vars]) # independent variables
  y_test <- as.factor(test_data[[target_col]]) # actual outcomes
  # FIX: the predicted probabilities were computed twice with identical
  # predict() calls; compute once and reuse everywhere below.
  predicted_probs <- predict(cv_model, newx = x_test, type = "response", s = optimal_lambda)
  # Threshold at 0.5 to obtain class labels.
  predicted_class <- ifelse(predicted_probs > 0.5, "Yes", "No")
  # Calculate accuracy
  accuracy <- mean(predicted_class == y_test)
  # Align factor levels before building the confusion matrix.
  predicted_class <- factor(predicted_class, levels = c("No", "Yes"))
  y_test <- factor(y_test, levels = c("No", "Yes"))
  conf_matrix <- confusionMatrix(predicted_class, y_test)
  # ROC curve and AUC via ROCR. (FIX: a duplicate pROC-based AUC was
  # computed here and immediately overwritten; it has been removed.)
  pred <- ROCR::prediction(predicted_probs, y_test)
  perf <- ROCR::performance(pred, "tpr", "fpr")
  auc_value <- ROCR::performance(pred, "auc")@y.values[[1]]
  # Text report: lambda, coefficients, and classification metrics.
  output$logisticOutput <- renderPrint({
    cat("Optimal Lambda Value:", optimal_lambda, "\n")
    cat("This is the value of the regularization parameter lambda that minimizes the cross-validated error.\n\n")
    cat("Coefficients:\n")
    print(coef_df)
    cat("Each coefficient represents the change in the log odds of the outcome for a one-unit change in the predictor variable.\n\n")
    cat("Accuracy:", accuracy, "\n")
    cat("This is the proportion of correctly predicted instances out of the total instances in the dataset.\n\n")
    cat("AUC (Area Under the Curve):", auc_value, "\n")
    cat("AUC ranges from 0 to 1 with higher values indicating better model performance. A model with an AUC close to 0.5 has no discriminative ability.\n\n")
    cat("Confusion Matrix:\n")
    print(conf_matrix$table)
    cat("The confusion matrix shows the number of correct and incorrect predictions compared with the actual outcomes.\n")
    cat("- True Positives (TP): Actual Yes predicted as Yes\n")
    cat("- False Positives (FP): Actual No predicted as Yes\n")
    cat("- True Negatives (TN): Actual No predicted as No\n")
    cat("- False Negatives (FN): Actual Yes predicted as No\n\n")
    cat("Additional Metrics:\n")
    cat("- Sensitivity (True Positive Rate): ", conf_matrix$byClass['Sensitivity'], "\n")
    cat(" The proportion of actual positives that were correctly identified.\n")
    cat("- Specificity (True Negative Rate): ", conf_matrix$byClass['Specificity'], "\n")
    cat(" The proportion of actual negatives that were correctly identified.\n")
    cat("- Positive Predictive Value (Precision): ", conf_matrix$byClass['Positive Predictive Value'], "\n")
    cat(" The proportion of positive identifications that were actually correct.\n")
    cat("- Negative Predictive Value: ", conf_matrix$byClass['Negative Predictive Value'], "\n")
    cat(" The proportion of negative identifications that were actually correct.\n")
    cat("- F1 Score: ", conf_matrix$byClass['F1'], "\n")
    cat(" The harmonic mean of Precision and Sensitivity, useful for unbalanced classes.\n")
  })
  # Cross-validated error as a function of lambda (log axis) with
  # +/- one-standard-error bands.
  output$glmcvplot <- renderPlotly({
    req(cv_model) # Ensure cv_model is available
    lambda <- cv_model$lambda
    cvm <- cv_model$cvm
    cvsd <- cv_model$cvsd
    plot_ly(x = lambda, y = cvm, type = 'scatter', mode = 'lines') %>%
      add_trace(y = cvm + cvsd, name = 'Upper CI', mode = 'lines', line = list(dash = 'dash')) %>%
      add_trace(y = cvm - cvsd, name = 'Lower CI', mode = 'lines', line = list(dash = 'dash')) %>%
      layout(
        xaxis = list(type = 'log', title = 'Lambda'),
        yaxis = list(title = 'Cross-Validated Error'),
        title = 'Cross-Validation Plot for GLMNET Model'
      )
  })
  # ROC curve with the chance diagonal; AUC is shown in the title.
  output$glmaucplot <- renderPlotly({
    req(perf, auc_value)
    data <- data.frame(
      FPR = perf@x.values[[1]],
      TPR = perf@y.values[[1]]
    )
    p <- plot_ly(data, x = ~FPR, y = ~TPR, type = 'scatter', mode = 'lines',
                 line = list(color = 'blue'), name = 'ROC Curve') %>%
      add_trace(x = 0:1, y = 0:1, type = 'scatter', mode = 'lines',
                line = list(color = 'red', dash = 'dash'), name = 'Chance') %>%
      layout(title = paste("ROC Curve (AUC =", round(auc_value, 2), ")"),
             xaxis = list(title = "False Positive Rate"),
             yaxis = list(title = "True Positive Rate"))
    return(p)
  })
})
# Read a tabular file by extension; only CSV and XLSX are accepted,
# anything else raises an error.
read_data <- function(filepath) {
  ext <- tools::file_ext(filepath)
  switch(ext,
    csv = read.csv(filepath, stringsAsFactors = FALSE),
    xlsx = read_excel(filepath),
    stop("Invalid file format. Please select a CSV or XLSX file.")
  )
}
###Decision Tree
# Reactive values for the decision tree model and training data.
treedecision <- reactiveVal(NULL)
train_data_rv <- reactiveVal(NULL)
target_col_rv <- reactiveVal(NULL)
# FIX: `train_data_reactive` and `test_data_reactive` were also declared
# here as reactiveVal()s, but both are unconditionally rebound to
# reactive() expressions further below; the dead declarations are removed.
# Define the function to clean column names
# Sanitize column names (same contract as clean_column_names above):
# valid, unique names containing only alphanumerics and underscores.
cl_column_names <- function(dataframe) {
  cleaned <- gsub("[^[:alnum:]_]", "", make.names(names(dataframe), unique = TRUE))
  names(dataframe) <- cleaned
  return(dataframe)
}
# Reactive expression to read and process the data
# Read and sanitize the uploaded decision-tree dataset (CSV or XLSX).
datadectree <- reactive({
  req(input$treedecfile)
  inFile <- input$treedecfile
  ext <- tools::file_ext(inFile$name)
  if (ext == "csv") {
    df <- read.csv(inFile$datapath, stringsAsFactors = FALSE, na.strings = c("", "NA", "na"))
  } else if (ext == "xlsx") {
    df <- readxl::read_xlsx(inFile$datapath, na = c("", "NA", "na"))
  } else {
    # FIX: any other extension previously fell through with `df` undefined
    # and crashed with an opaque "object 'df' not found" error.
    stop("Invalid file format. Please select a CSV or XLSX file.")
  }
  # Sanitize column names so downstream formulas are valid.
  cl_column_names(df)
})
# Observer to update the select inputs
# Keep the target-column selector in sync with the uploaded columns.
observe({
  req(datadectree())
  updateSelectInput(session, "targetdectree", choices = names(datadectree()))
})
# Mirror the chosen target column into its reactive value so non-reactive
# helpers can read it via target_col_rv().
observe({
  req(input$targetdectree)
  target_col_rv(input$targetdectree)
})
# Column-wise summary of the decision-tree dataset.
output$dataSummarydt <- renderPrint({
req(datadectree())
summary(datadectree())
})
# Define the reactive expression for train data
# Build the training split: binarize a numeric target at its median,
# drop rows with a missing target, and take a seeded caret partition.
train_data_reactive <- reactive({
req(datadectree())
data_df <- datadectree()
target_col <- target_col_rv() # Use the reactive value
# Numeric to factor conversion (if necessary)
if (is.numeric(data_df[[target_col]])) {
median_val <- median(data_df[[target_col]], na.rm = TRUE)
data_df[[target_col]] <- as.factor(ifelse(data_df[[target_col]] < median_val, "No", "Yes"))
}
# Filter out rows with NA and split the data
data_df <- data_df %>% filter(!is.na(.[[target_col]]))
set.seed(123)
split_ratio <- input$dataSplittree
training.samples <- createDataPartition(data_df[[target_col]], p = split_ratio, list = FALSE)
train_data <- data_df[training.samples, ]
# NOTE(review): setting a reactiveVal inside a reactive() is a side effect
# that only runs when this reactive is evaluated; consumers of
# train_data_rv() must make sure train_data_reactive() has run first.
train_data_rv(train_data) # This line was missing
return(train_data)
})
# Trigger to fit the decision tree model
# Fit the classification tree when the user presses the run button.
observeEvent(input$rundectree, {
  req(train_data_reactive())
  fit_data <- train_data_rv() # populated as a side effect of train_data_reactive()
  model_formula <- as.formula(paste(target_col_rv(), "~ ."))
  treedecision(tree(model_formula, data = fit_data))
})
# Summarize the decision tree model
# Print the fitted tree's summary plus a reading guide.
# NOTE(review): the output id "rundectree" matches the action button's
# input id above; Shiny ids should be unique across inputs and outputs —
# confirm the UI does not collide.
output$rundectree <- renderPrint({
req(treedecision())
decision_tree_model <- treedecision() # Retrieve the decision tree model
# Check if the decision tree model is correctly retrieved
if(is.null(decision_tree_model)) {
cat("Decision tree model is not available.\n")
return()
}
# Print model summary
cat("Decision Tree Model Summary:\n\n")
tryCatch({
print(summary(decision_tree_model))
}, error = function(e) {
cat("Error in printing model summary: ", e$message, "\n")
})
cat("\nInterpretation:\n")
cat("1. Node Number: Each number represents a node in the tree.\n")
cat("2. Split Variable: The variable used to split the node. If 'leaf', it indicates the node is a terminal node (leaf).\n")
cat("3. Split Point: The value of the split variable that divides the node.\n")
cat("4. n: The number of observations in the node.\n")
cat("5. Deviance: Measures the variability of the response variable within the node. Lower values indicate better model fit.\n")
cat("6. Prediction: The predicted class (or value for regression trees) for each node.\n")
cat("\nHow to Read the Tree:\n")
cat("- Start at the top node (Root Node) and make decisions based on the split variables and points.\n")
cat("- Follow the branches to reach the leaf nodes, which contain the final predictions.\n")
cat("- Each path from the root to a leaf represents a decision rule.\n")
cat("\nNote:\n")
cat("- A simpler tree (fewer nodes) with good predictive accuracy is generally preferable to avoid overfitting.\n")
cat("- Decision trees are intuitive but can become complex with many splits (consider pruning if needed).\n")
})
# Tree Plot
# Draw the unpruned tree with its split labels.
output$dectreeplot <- renderPlot({
  req(treedecision())
  fitted_tree <- treedecision()
  plot(fitted_tree)
  text(fitted_tree, pretty = 0)
})
# Cross-validation Plot
# Define cv_errors as a reactive expression
# Manual 10-fold cross-validation of a classification tree grown on the
# training data; returns the per-fold misclassification rate.
cv_errors <- reactive({
  local_train_data <- train_data_rv() # Your dataset
  req(local_train_data)
  target_column <- target_col_rv() # Your target column
  req(target_column)
  folds <- createFolds(local_train_data[[target_column]], k = 10, list = TRUE)
  errors <- numeric(length(folds))
  for (i in seq_along(folds)) {
    training_set <- local_train_data[-folds[[i]], ]
    testing_set <- local_train_data[folds[[i]], ]
    # FIX: pass a real formula object — tree() is not guaranteed to accept
    # a bare character string the way some modelling functions do.
    tree_model <- tree(as.formula(paste(target_column, "~ .")), data = training_set)
    # type = "class" assumes a factor target (guaranteed by the median
    # binarization in train_data_reactive for numeric targets).
    predictions <- predict(tree_model, testing_set, type = "class")
    errors[i] <- mean(predictions != testing_set[[target_column]])
  }
  return(errors) # Vector of per-fold misclassification errors
})
# Plot for cross-validation errors
# Line-and-point plot of the per-fold CV misclassification error.
output$cvplot <- renderPlot({
  fold_errors <- cv_errors()
  req(fold_errors)
  plot(seq_along(fold_errors), fold_errors, type = "b",
       xlab = "Fold Number", ylab = "Misclassification Error",
       main = "Cross-Validation Error by Fold")
})
#Pruned Tree
# Define pruned_dtree as a reactive expression
# Prune the fitted tree using the cross-validation results.
pruned_dtree <- reactive({
decision_tree_model <- treedecision()
req(decision_tree_model)
req(cv_errors()) # Access cv_errors as a reactive expression
# NOTE(review): which.min(cv_errors()) is the INDEX of the fold with the
# lowest error (1..10), not a tree size, while prune.tree(best = ...)
# expects a number of terminal nodes. Consider tree::cv.tree() to choose
# the size — TODO confirm the intended behavior.
optimal_size <- which.min(cv_errors())
prune.tree(decision_tree_model, best = optimal_size)
})
# Pruned Tree Plot
# Draw the pruned tree with its split labels.
output$pruneddtree <- renderPlot({
  req(pruned_dtree())
  pruned_fit <- pruned_dtree()
  plot(pruned_fit)
  text(pruned_fit, pretty = 0)
})
# Confusion Matrix
# In-sample confusion matrix for the (unpruned) tree on the training data.
output$confMatrix <- renderPrint({
req(treedecision(), train_data_rv())
local_train_data <- train_data_rv()
# Prediction
tree_pred <- tryCatch({
predict(treedecision(), local_train_data, type = "class")
}, error = function(e) {
cat("Error in prediction:", e$message, "\n")
return(NULL)
})
# Ensure tree_pred and the target column have the same length
# (this also catches the NULL returned by the error handler above).
if (length(tree_pred) != nrow(local_train_data)) {
cat("Error: Prediction length does not match the number of rows in training data.\n")
return()
}
# Compute the confusion matrix (rows = predicted, columns = actual).
cm <- table(tree_pred, local_train_data[[target_col_rv()]])
# Print the confusion matrix
cat("Confusion Matrix:\n")
print(cm)
cat("The confusion matrix shows the number of correct and incorrect predictions compared with the actual outcomes.\n")
cat("- True Positives (TP): Actual Yes predicted as Yes\n")
cat("- False Positives (FP): Actual No predicted as Yes\n")
cat("- True Negatives (TN): Actual No predicted as No\n")
cat("- False Negatives (FN): Actual Yes predicted as No\n\n")
# NOTE(review): the metric lines below print EMPTY values — sensitivity,
# specificity, etc. are never computed from `cm`; consider
# caret::confusionMatrix to fill them in.
cat("Additional Metrics:\n")
cat("- Sensitivity (True Positive Rate): ", "\n")
cat(" The proportion of actual positives that were correctly identified.\n")
cat("- Specificity (True Negative Rate): ", "\n")
cat(" The proportion of actual negatives that were correctly identified.\n")
cat("- Positive Predictive Value (Precision): ", "\n")
cat(" The proportion of positive identifications that were actually correct.\n")
cat("- Negative Predictive Value: ", "\n")
cat(" The proportion of negative identifications that were actually correct.\n")
cat("- F1 Score: ", "\n")
cat(" The harmonic mean of Precision and Sensitivity, useful for unbalanced classes.\n")
# Interpretation of the confusion matrix
cat("\nInterpretation:\n")
cat("- Each row of the matrix represents the instances in a predicted class.\n")
cat("- Each column represents the instances in an actual class.\n")
cat("- The diagonal elements (top left to bottom right) represent the number of correct classifications.\n")
cat("- Off-diagonal elements are those that were misclassified by the model.\n")
cat("- Higher values on the diagonal indicate better performance.\n")
# Additional specific interpretations can be added here based on the context and the data
})
# Define the reactive expression for train data
# Rebuild the identical median-binarization and seeded caret partition used
# by train_data_reactive, returning the complementary (held-out) rows.
test_data_reactive <- reactive({
req(datadectree(), input$targetdectree, input$dataSplittree)
data_df <- datadectree()
target_col <- target_col_rv() # Use the reactive value
# Numeric to factor conversion (if necessary)
if (is.numeric(data_df[[target_col]])) {
median_val <- median(data_df[[target_col]], na.rm = TRUE)
data_df[[target_col]] <- as.factor(ifelse(data_df[[target_col]] < median_val, "No", "Yes"))
}
data_df <- data_df %>% filter(!is.na(.[[target_col]]))
# Split the data into training and testing sets
# Same seed as train_data_reactive, so the partitions are complementary.
set.seed(123) # It's good you're setting a seed for reproducibility
split_ratio <- input$dataSplittree # The ratio for splitting, e.g., 0.7 for 70% training data
training.samples <- createDataPartition(data_df[[target_col]], p = split_ratio, list = FALSE)
# Assuming that training.samples is a vector of indices for the training set
test_data <- data_df[-training.samples, ] # Use negative indexing to get the test set
return(test_data)
})
# Assuming 'predictions' and 'local_eval_data' are available as they were in the previous step
# Evaluate the pruned tree on the held-out test set: confusion matrix plus
# accuracy, precision, recall, and F1, with interpretation text.
output$cfdtpteva <- renderPrint({
  # Retrieve the pruned decision tree model
  pruned_model <- pruned_dtree()
  req(pruned_model)
  # Retrieve the evaluation dataset from the reactive expression
  local_eval_data <- test_data_reactive()
  req(local_eval_data)
  # Make predictions using the pruned tree model
  predictions <- predict(pruned_model, local_eval_data, type = "class")
  # Rows are the true classes, columns the predicted classes.
  cm <- table(True = local_eval_data[[target_col_rv()]], Predicted = predictions)
  # Calculate metrics
  accuracy <- sum(diag(cm)) / sum(cm)
  # FIX: precision and recall were swapped. With True on rows and Predicted
  # on columns: precision = TP / predicted positives (column sum) and
  # recall = TP / actual positives (row sum).
  precision <- cm[2, 2] / sum(cm[, 2])
  recall <- cm[2, 2] / sum(cm[2, ])
  F1_score <- 2 * precision * recall / (precision + recall)
  # Print the evaluation metrics
  cat("Confusion Matrix:\n")
  print(cm)
  cat("\n")
  cat("Accuracy:", accuracy, "\n")
  cat("Precision:", precision, "\n")
  cat("Recall:", recall, "\n")
  cat("F1 Score:", F1_score, "\n")
  # Enhanced interpretation
  cat("\nDetailed Interpretation and Decision Insights:\n")
  cat("1. Accuracy reflects the overall correctness of the model and is a good initial indicator of performance. However, it may not fully capture the nuances in imbalanced datasets.\n")
  cat("2. Precision measures the reliability of the model's positive predictions. High precision indicates that when the model predicts a positive outcome, it is likely correct. This is critical in scenarios where false positives carry a high cost.\n")
  cat("3. Recall assesses the model's ability to detect all relevant cases. High recall means the model is effective at capturing the majority of positive instances, which is crucial in situations where missing a positive case is detrimental.\n")
  cat("4. F1 Score provides a balance between precision and recall. A high F1 score suggests the model effectively balances the trade-off between not missing positive cases and maintaining high accuracy in its positive predictions.\n")
  cat("\n")
  cat("Decision-Making Insights:\n")
  cat("- The model's performance should be considered in the context of your specific application. For instance, if missing a positive case has serious consequences, prioritize improving recall.\n")
  cat("- If your focus is on the accuracy of the positive predictions (to avoid false alarms or unnecessary actions), aim to improve precision.\n")
  cat("- The F1 Score is particularly informative when you need a single metric to assess the model's balance between precision and recall, especially in cases of class imbalance.\n")
  cat("- Consider the model's limitations and strengths in the context of your dataset, and use these insights to guide your decision-making process or further model refinement.\n")
  cat("\n")
  cat("Remember, no model is perfect. It's crucial to continuously evaluate the model's performance in real-world scenarios and update it as new data becomes available to ensure its ongoing effectiveness.\n")
})
###Random Forest
# Reactive holders for the random-forest workflow.
datarf <- reactiveVal(NULL)
rf_model_reactive <- reactiveVal()
# NOTE(review): the two holders below are never referenced in this section;
# confirm they are used elsewhere in the file before removing them.
pred_rf_reactive <- reactiveVal()
results_reactive <- reactiveVal()
# Load and clean data
observeEvent(input$loadrf, {
  req(input$rfinput)
  upload <- input$rfinput
  if (!is.null(upload)) {
    # Read the uploaded file and sanitize its column names.
    df <- read_data(upload$datapath)
    df <- clean_column_names(df)
    datarf(df)
    # Refresh the target/predictor pickers from the loaded columns.
    updateSelectInput(session, "targetrf", choices = colnames(df))
    updateSelectizeInput(session, "independentVarrf",
                         choices = setdiff(colnames(df), input$targetrf))
  }
})
# Column-wise summary of the random-forest dataset.
output$dataSummaryrf <- renderPrint({
req(datarf())
summary(datarf())
})
# Train the random forest on a sampled split of the cleaned data, with a
# progress bar, then publish the model and its text summary.
observeEvent(input$runrf, {
req(datarf(), input$targetrf, input$independentVarrf)
data_rf <- datarf() %>%
dplyr::select(all_of(c(input$targetrf, input$independentVarrf))) %>%
na.omit()
withProgress(message = 'Model is being trained...', value = 0, {
# Increment progress
incProgress(0.1) # Initial progress
# Early return if conditions are not met
if (length(input$independentVarrf) == 0) {
output$modelOutputrf <- renderPrint({ "Please select independent variables." })
return()
}
if (nrow(data_rf) < 10) {
output$modelOutputrf <- renderPrint({ "Dataset is too small after removing NA values." })
return()
}
split_ratio <- input$dataSplitrf
if (split_ratio <= 0 || split_ratio >= 1) {
output$modelOutputrf <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
return()
}
incProgress(0.3) # Increment progress
# Partition the data
set.seed(123)
train <- data_rf %>% sample_frac(split_ratio)
# NOTE(review): dplyr::setdiff is a SET operation — duplicate rows in the
# data are dropped from `test`; an index-based split would keep them.
# Confirm whether duplicates matter for this dataset.
test <- data_rf %>% setdiff(train)
incProgress(0.6) # Increment progress
formula_rf <- as.formula(paste(input$targetrf, "~", paste(input$independentVarrf, collapse = "+")))
# Fit the Random Forest model
rf_model <- randomForest(formula_rf,
data = train,
mtry = input$mtryInput,
ntree = input$ntreeInput)
rf_model_reactive(rf_model)
# Model summary
# NOTE(review): the output id "runrf" matches the action button's input
# id; Shiny ids should be unique — verify against the UI definition.
output$runrf <- renderPrint({
print(rf_model)
# Feature Importance
cat("Feature Importance:\n")
importance_vals <- importance(rf_model)
print(importance_vals)
})
# Finalize progress
incProgress(1.0) # Complete the progress
})
})
# Interactive bar chart of Random Forest feature importance.
output$importancePlot <- renderPlotly({
req(datarf(), input$targetrf, input$independentVarrf)
# Access the model from the reactive value
rf_model <- rf_model_reactive()
req(rf_model) # Ensure the model is available
# Extracting feature importance
importance_vals <- importance(rf_model)
# Converting to a data frame for plotting
importance_df <- as.data.frame(importance_vals)
importance_df$Feature <- rownames(importance_df)
# NOTE(review): "IncNodePurity" is only produced for regression forests;
# classification forests report "MeanDecreaseGini" instead — confirm the
# target is always numeric here, otherwise this aes() will fail.
ggplot(importance_df, aes(x = reorder(Feature, IncNodePurity), y = IncNodePurity)) +
geom_bar(stat = "identity", fill = "dodgerblue") +
theme_minimal() +
coord_flip() + # Flipping coordinates for horizontal bars
labs(title = "Feature Importance", x = "Features", y = "Importance")
})
# Mean Squared Error between observed and predicted values.
#
# @param actual    Numeric vector of observed values.
# @param predicted Numeric vector of predictions, same length as `actual`.
# @param na.rm     If TRUE, drop NA residuals before averaging.  Default
#                  FALSE preserves the original behavior (NA propagates).
# @return A single numeric: the mean of the squared residuals.
mse <- function(actual, predicted, na.rm = FALSE) {
  mean((actual - predicted) ^ 2, na.rm = na.rm)
}
# Mean Absolute Error between observed and predicted values.
#
# @param actual    Numeric vector of observed values.
# @param predicted Numeric vector of predictions, same length as `actual`.
# @param na.rm     If TRUE, drop NA residuals before averaging.  Default
#                  FALSE preserves the original behavior (NA propagates).
# @return A single numeric: the mean of the absolute residuals.
mae <- function(actual, predicted, na.rm = FALSE) {
  mean(abs(actual - predicted), na.rm = na.rm)
}
# Evaluate the Random Forest on button press: refit on a training split
# and report MSE / R-squared / MAE through output$predictionOutput.
# Validation failures are reported via output$modelOutputrf.
observeEvent(input$predictBtn, {
  req(datarf(), input$targetrf, input$independentVarrf)
  # Keep only the selected columns and drop incomplete rows.
  data_rf <- datarf() %>%
    dplyr::select(all_of(c(input$targetrf, input$independentVarrf))) %>%
    na.omit()
  withProgress(message = 'Model is being trained...', value = 0, {
    incProgress(0.1) # Initial progress
    # Early return if conditions are not met.
    if (length(input$independentVarrf) == 0) {
      output$modelOutputrf <- renderPrint({ "Please select independent variables." })
      return()
    }
    if (nrow(data_rf) < 10) {
      output$modelOutputrf <- renderPrint({ "Dataset is too small after removing NA values." })
      return()
    }
    split_ratio <- input$dataSplitrf
    if (split_ratio <= 0 || split_ratio >= 1) {
      output$modelOutputrf <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
      return()
    }
    incProgress(0.3) # Increment progress
    # Partition the data.  NOTE: setdiff() drops test rows identical to
    # training rows, so duplicated observations never reach the test set.
    set.seed(123)
    train <- data_rf %>% sample_frac(split_ratio)
    test <- data_rf %>% setdiff(train)
    set.seed(2)
    formula_rf <- as.formula(paste(input$targetrf, "~", paste(input$independentVarrf, collapse = "+")))
    # BUGFIX: the previous call passed caret-only arguments (metric,
    # tuneGrid, trControl) to randomForest(), which ignores them, and the
    # user's mtry choice never reached the model.  Pass mtry/ntree
    # directly, consistent with the runrf handler above.
    rf_model <- randomForest(formula_rf,
                             data = train,
                             mtry = input$mtryInput,
                             ntree = input$ntreeInput,
                             importance = TRUE)
    # In-sample predictions (training split): the metrics below are
    # optimistic relative to held-out performance.
    pred_rf <- predict(rf_model, newdata = train)
    MSE_rf <- mse(actual = train[[input$targetrf]], predicted = pred_rf)
    R_square_rf <- R2(pred_rf, train[[input$targetrf]]) # caret::R2
    MAE_rf <- mae(actual = train[[input$targetrf]], predicted = pred_rf)
    output$predictionOutput <- renderPrint({
      cat("Mean Squared Error (MSE):\n\n", MSE_rf, "\n\n", "Interpretation: MSE measures the average squared difference between actual and predicted values.
Lower values indicate better model performance.
A value of 0 means perfect predictions.")
      cat("\n\nR-squared (R²):\n\n", R_square_rf, "\n\n", "Interpretation: R² represents the proportion of variance in the dependent variable that's predictable from the independent variables.
It ranges from 0 to 1, with higher values indicating better model fit.")
      cat("\n\nMean Absolute Error (MAE):\n\n", MAE_rf, "\n\n", "Interpretation: MAE measures the average absolute difference between actual and predicted values.
Like MSE, lower MAE values indicate better model performance.")
    })
    incProgress(1.0) # Complete the progress
    # Cache actual/predicted pairs for the performance scatter plot.
    results_reactive(list(actual = train[[input$targetrf]], predicted = pred_rf))
  })
})
# Scatter of actual vs. predicted values with a dashed y = x reference line.
output$performancePlot <- renderPlotly({
  results <- results_reactive()
  req(results)
  scatter_df <- data.frame(Actual = results$actual,
                           Predicted = results$predicted)
  fig <- ggplot(scatter_df, aes(x = Actual, y = Predicted)) +
    geom_point(alpha = 0.5) +
    geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
    theme_minimal() +
    labs(x = "Actual Values", y = "Predicted Values",
         title = "Actual vs. Predicted Values")
  ggplotly(fig)
})
# NOTE(review): everything from here through the second performancePlot
# is a near-verbatim duplicate of the Random Forest section above.
# Re-assigning these reactiveVals discards any state held by the earlier
# ones, and the duplicated observers below fire twice per event.  This
# looks like an accidental copy/paste; consider removing the whole
# duplicated section after confirming nothing depends on it.
datarf <- reactiveVal(NULL)
rf_model_reactive <- reactiveVal()
pred_rf_reactive <- reactiveVal()
results_reactive <- reactiveVal()
# Load and clean data
# NOTE(review): duplicate of the input$loadrf handler registered earlier
# in this file — with both present each upload is processed twice.
observeEvent(input$loadrf, {
req(input$rfinput)
file <- input$rfinput
if (!is.null(file)) {
# Reading and cleaning data
data_df <- read_data(file$datapath)
data_df <- clean_column_names(data_df)
# Setting the reactive value
datarf(data_df)
# Updating UI elements
updateSelectInput(session, "targetrf", choices = colnames(data_df))
updateSelectizeInput(session, "independentVarrf", choices = setdiff(colnames(data_df), input$targetrf))
}
})
# NOTE(review): duplicate definition of output$dataSummaryrf; the later
# assignment wins, making the earlier identical one redundant.
output$dataSummaryrf <- renderPrint({
req(datarf())
summary(datarf())
})
# NOTE(review): near-duplicate of the input$runrf handler earlier in this
# file (only the ordering of incProgress calls differs); both observers
# run on every click, training the forest twice.
observeEvent(input$runrf, {
req(datarf(), input$targetrf, input$independentVarrf)
data_rf <- datarf() %>%
dplyr::select(all_of(c(input$targetrf, input$independentVarrf))) %>%
na.omit()
withProgress(message = 'Model is being trained...', value = 0, {
# Increment progress
incProgress(0.1) # Initial progress
# Early return if conditions are not met
if (length(input$independentVarrf) == 0) {
output$modelOutputrf <- renderPrint({ "Please select independent variables." })
return()
}
if (nrow(data_rf) < 10) {
output$modelOutputrf <- renderPrint({ "Dataset is too small after removing NA values." })
return()
}
incProgress(0.3) # Increment progress
split_ratio <- input$dataSplitrf
if (split_ratio <= 0 || split_ratio >= 1) {
output$modelOutputrf <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
return()
}
# Partition the data
set.seed(123)
train <- data_rf %>% sample_frac(split_ratio)
test <- data_rf %>% setdiff(train)
# Build the model formula from the selected target and predictors
formula_rf <- as.formula(paste(input$targetrf, "~", paste(input$independentVarrf, collapse = "+")))
incProgress(0.6) # Increment progress
# Fit the Random Forest model
rf_model <- randomForest(formula_rf,
data = train,
mtry = input$mtryInput,
ntree = input$ntreeInput)
rf_model_reactive(rf_model)
# Model summary
output$runrf <- renderPrint({
print(rf_model)
# Feature Importance
cat("Feature Importance:\n")
importance_vals <- importance(rf_model)
print(importance_vals)
})
# Finalize progress
incProgress(1.0) # Complete the progress
})
})
# NOTE(review): duplicate definition of output$importancePlot; the later
# assignment wins, making the earlier identical one redundant.
output$importancePlot <- renderPlotly({
req(datarf(), input$targetrf, input$independentVarrf)
# Access the model from the reactive value
rf_model <- rf_model_reactive()
req(rf_model) # Ensure the model is available
# Extracting feature importance
importance_vals <- importance(rf_model)
# Converting to a data frame for plotting
importance_df <- as.data.frame(importance_vals)
importance_df$Feature <- rownames(importance_df)
# NOTE(review): "IncNodePurity" exists only for regression forests —
# confirm the target is always numeric here.
ggplot(importance_df, aes(x = reorder(Feature, IncNodePurity), y = IncNodePurity)) +
geom_bar(stat = "identity", fill = "dodgerblue") +
theme_minimal() +
coord_flip() + # Flipping coordinates for horizontal bars
labs(title = "Feature Importance", x = "Features", y = "Importance")
})
# Mean Squared Error: average of the squared residuals.
# (Re-definition — an identical mse() already exists earlier in the file.)
mse <- function(actual, predicted) {
  squared_error <- (actual - predicted) ^ 2
  mean(squared_error)
}
# Mean Absolute Error: average of the absolute residuals.
# (Re-definition — an identical mae() already exists earlier in the file.)
mae <- function(actual, predicted) {
  absolute_error <- abs(actual - predicted)
  mean(absolute_error)
}
# NOTE(review): near-duplicate of the input$predictBtn handler earlier in
# this file; both run on every click.  This variant trains via
# caret::train(method = "rf") with 10-fold CV, unlike the earlier one.
observeEvent(input$predictBtn, {
req(datarf(), input$targetrf, input$independentVarrf)
data_rf <- datarf() %>%
dplyr::select(all_of(c(input$targetrf, input$independentVarrf))) %>%
na.omit()
withProgress(message = 'Model is being trained...', value = 0, {
# Increment progress
incProgress(0.1) # Initial progress
# Early return if conditions are not met
if (length(input$independentVarrf) == 0) {
output$modelOutputrf <- renderPrint({ "Please select independent variables." })
return()
}
incProgress(0.3) # Increment progress
if (nrow(data_rf) < 10) {
output$modelOutputrf <- renderPrint({ "Dataset is too small after removing NA values." })
return()
}
split_ratio <- input$dataSplitrf
if (split_ratio <= 0 || split_ratio >= 1) {
output$modelOutputrf <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
return()
}
# 10-fold CV control and a single-value mtry grid for caret::train
control <- trainControl(method="cv", number=10)
tunegrid <- expand.grid(.mtry = input$mtryInput)
# Partition the data
set.seed(2)
train <- data_rf %>% sample_frac(split_ratio)
test <- data_rf %>% setdiff(train)
incProgress(0.6) # Increment progress
formula_rf <- as.formula(paste(input$targetrf, "~", paste(input$independentVarrf, collapse = "+")))
# Fit the Random Forest model (ntree is forwarded to randomForest via ...)
rf_model <- train(formula_rf,
data=train, method="rf",
metric="RMSE",
tuneGrid=tunegrid,
ntree = input$ntreeInput,
trControl=control,
importance = TRUE)
# In-sample predictions on the training split (not the entire dataset);
# the metrics below are optimistic relative to held-out performance.
pred_rf <- predict(rf_model, newdata = train)
# Calculate metrics
MSE_rf <- mse(actual = train[[input$targetrf]], predicted = pred_rf)
# R2 comes from caret
R_square_rf <- R2(pred_rf, train[[input$targetrf]])
# mae is the helper defined in this file
MAE_rf <- mae(actual = train[[input$targetrf]], predicted = pred_rf)
output$predictionOutput <- renderPrint({
cat("Mean Squared Error (MSE):\n\n", MSE_rf, "\n\n", "Interpretation: MSE measures the average squared difference between actual and predicted values.
Lower values indicate better model performance.
A value of 0 means perfect predictions.")
cat("\n\nR-squared (R²):\n\n", R_square_rf, "\n\n", "Interpretation: R² represents the proportion of variance in the dependent variable that's predictable from the independent variables.
It ranges from 0 to 1, with higher values indicating better model fit.")
cat("\n\nMean Absolute Error (MAE):\n\n", MAE_rf, "\n\n", "Interpretation: MAE measures the average absolute difference between actual and predicted values.
Like MSE, lower MAE values indicate better model performance.")
})
# Finalize progress
incProgress(1.0) # Complete the progress
# Store results in a reactive value
results_reactive(list(actual = train[[input$targetrf]], predicted = pred_rf))
})
})
# NOTE(review): duplicate definition of output$performancePlot; the later
# assignment wins, making the earlier identical one redundant.
output$performancePlot <- renderPlotly({
# Access the stored results
results <- results_reactive()
req(results) # Ensure results are available
# Creating the plot
plot_data <- data.frame(Actual = results$actual, Predicted = results$predicted)
p <- ggplot(plot_data, aes(x = Actual, y = Predicted)) +
geom_point(alpha = 0.5) +
geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
labs(x = "Actual Values", y = "Predicted Values", title = "Actual vs. Predicted Values") +
theme_minimal()
ggplotly(p)
})
###Bagging
# Reactive state for the Bagging tab: uploaded dataset, fitted model,
# predictions, and actual/predicted pairs for the performance plot.
databg <- reactiveVal(NULL)
bg_model_reactive <- reactiveVal()
pred_bg_reactive <- reactiveVal()
results_reactive_bg <- reactiveVal()
# Load and clean data
# Load the uploaded file for the Bagging tab, clean its column names,
# cache it, and refresh the variable-selection inputs.
observeEvent(input$loadbg, {
  req(input$bginput)
  # req() above guarantees the upload exists, so no extra NULL check is needed.
  uploaded <- input$bginput
  cleaned <- clean_column_names(read_data(uploaded$datapath))
  databg(cleaned)
  # Offer every column as a candidate target; exclude the current target
  # from the independent-variable choices.
  cols <- colnames(cleaned)
  updateSelectInput(session, "targetbg", choices = cols)
  updateSelectizeInput(session, "independentVarbg",
                       choices = setdiff(cols, input$targetbg))
})
# Print a base-R summary of the loaded Bagging dataset.
output$dataSummarybg <- renderPrint({
  df <- databg()
  req(df)
  summary(df)
})
# Train a bagged-tree (caret "treebag") model when the run button is
# pressed.  Validation failures go to output$modelOutputbg.
observeEvent(input$runbg, {
req(databg(), input$targetbg, input$independentVarbg)
# Restrict to the chosen target + predictors and drop incomplete rows.
data_bg <- databg() %>%
dplyr::select(all_of(c(input$targetbg, input$independentVarbg))) %>%
na.omit()
# Early return if conditions are not met
if (length(input$independentVarbg) == 0) {
output$modelOutputbg <- renderPrint({ "Please select independent variables." })
return()
}
if (nrow(data_bg) < 10) {
output$modelOutputbg <- renderPrint({ "Dataset is too small after removing NA values." })
return()
}
split_ratio <- input$dataSplitbg
if (split_ratio <= 0 || split_ratio >= 1) {
output$modelOutputbg <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
return()
}
# Partition the data
# NOTE(review): setdiff() drops test rows identical to training rows.
set.seed(123)
train <- data_bg %>% sample_frac(split_ratio)
test <- data_bg %>% setdiff(train)
# Build the model formula
formula_bg <- as.formula(paste(input$targetbg, "~", paste(input$independentVarbg, collapse = "+")))
# Fit the Bagging model with 10-fold cross-validation
ctrl <- trainControl(method = "cv", number = 10)
bg_model <- caret::train(formula_bg,
data = train,
method = "treebag",
nbagg = input$nbaggInput,
trControl = ctrl,
importance = TRUE)
bg_model_reactive(bg_model)
# Model summary
output$runbg <- renderPrint({
print(bg_model)
# Add interpretations
cat("\nModel Interpretation:\n")
cat("1. Best-Tuned Parameters: These parameters, such as the number of bagging iterations (nbagg), were found to be most effective during the training process.\n")
cat("2. Performance Metrics: These numbers indicate how well the model predicts the target variable. For a regression model, metrics like RMSE or MAE are common, where lower values are better. For a classification model, metrics like Accuracy or AUC are used, where higher values indicate better performance.\n")
cat("3. Resampling Results: The cross-validation results show how the model's performance varied across different subsets of the training data. Consistent performance across folds suggests a robust model.\n")
# NOTE(review): a caret train object's names() do not normally include
# "importance", so this branch likely never runs — verify the intent.
if ("importance" %in% names(bg_model)) {
cat("4. Variable Importance: This shows which predictors are most influential in the model. Higher values indicate more important predictors.\n")
}
})
})
# Interactive feature-importance chart for the bagged-tree model.
output$importancePlotbg <- renderPlotly({
  # BUGFIX: the guard previously was req(bg_model_reactive) — the
  # reactiveVal *function*, which is always truthy — so it never blocked
  # rendering before a model existed.  Guard on the stored value instead.
  bg_model <- bg_model_reactive()
  req(bg_model)
  # Extracting feature importance using varImp from caret
  importance_vals <- varImp(bg_model, scale = FALSE)
  # Converting to a data frame for plotting
  importance_df <- as.data.frame(importance_vals$importance)
  importance_df$Feature <- rownames(importance_df)
  # Horizontal bar chart, most important feature on top
  p <- ggplot(importance_df, aes(x = reorder(Feature, Overall), y = Overall)) +
    geom_bar(stat = "identity", fill = "dodgerblue") +
    theme_minimal() +
    coord_flip() +
    labs(title = "Feature Importance", x = "Features", y = "Importance")
  # Convert to Plotly for an interactive plot
  ggplotly(p)
})
# Evaluate the bagged-tree model on button press and report MSE /
# R-squared / MAE through output$predictionOutputbg.
observeEvent(input$baggingBtn, {
  req(databg(), input$targetbg, input$independentVarbg)
  data_bg <- databg() %>%
    dplyr::select(all_of(c(input$targetbg, input$independentVarbg))) %>%
    na.omit()
  withProgress(message = 'Model is being trained...', value = 0, {
    incProgress(0.1) # Initial progress
    # Early return if conditions are not met.
    if (length(input$independentVarbg) == 0) {
      output$modelOutputbg <- renderPrint({ "Please select independent variables." })
      return()
    }
    incProgress(0.3) # Increment progress
    if (nrow(data_bg) < 10) {
      output$modelOutputbg <- renderPrint({ "Dataset is too small after removing NA values." })
      return()
    }
    split_ratio <- input$dataSplitbg
    if (split_ratio <= 0 || split_ratio >= 1) {
      output$modelOutputbg <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
      return()
    }
    # 10-fold cross-validation during training.
    control <- trainControl(method="cv", number=10)
    # Partition the data; setdiff() drops test rows identical to training rows.
    set.seed(2)
    train <- data_bg %>% sample_frac(split_ratio)
    test <- data_bg %>% setdiff(train)
    formula_bg <- as.formula(paste(input$targetbg, "~", paste(input$independentVarbg, collapse = "+")))
    # BUGFIX: the bag count was previously passed as `ntree`, which the
    # treebag method does not use; pass it as `nbagg`, consistent with
    # the runbg handler above.
    bg_model <- caret::train(formula_bg,
                             data=train,
                             method="treebag",
                             nbagg = input$nbaggInput,
                             trControl=control,
                             importance = TRUE)
    incProgress(0.6) # Increment progress
    # In-sample predictions (training split): the metrics below are
    # optimistic relative to held-out performance.
    pred_bg <- predict(bg_model, newdata = train)
    MSE_bg <- mse(actual = train[[input$targetbg]], predicted = pred_bg)
    R_square_bg <- R2(pred_bg, train[[input$targetbg]]) # caret::R2
    MAE_bg <- mae(actual = train[[input$targetbg]], predicted = pred_bg)
    output$predictionOutputbg <- renderPrint({
      cat("Mean Squared Error (MSE):\n\n", MSE_bg, "\n\n", "Interpretation: MSE measures the average squared difference between actual and predicted values.
Lower values indicate better model performance.
A value of 0 means perfect predictions.")
      cat("\n\nR-squared (R²):\n\n", R_square_bg, "\n\n", "Interpretation: R² represents the proportion of variance in the dependent variable that's predictable from the independent variables.
It ranges from 0 to 1, with higher values indicating better model fit.")
      cat("\n\nMean Absolute Error (MAE):\n\n", MAE_bg, "\n\n", "Interpretation: MAE measures the average absolute difference between actual and predicted values.
Like MSE, lower MAE values indicate better model performance.")
    })
    incProgress(1.0) # Complete the progress
    # Cache actual/predicted pairs for the performance scatter plot.
    results_reactive_bg(list(actual = train[[input$targetbg]], predicted = pred_bg))
  })
})
# Scatter of actual vs. predicted values for the bagged-tree model.
output$performancePlotbg <- renderPlotly({
  results <- results_reactive_bg()
  req(results)
  scatter_df <- data.frame(Actual = results$actual,
                           Predicted = results$predicted)
  fig <- ggplot(scatter_df, aes(x = Actual, y = Predicted)) +
    geom_point(alpha = 0.5) +
    geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
    theme_minimal() +
    labs(x = "Actual Values", y = "Predicted Values",
         title = "Actual vs. Predicted Values")
  ggplotly(fig)
})
###Boosting
# Reactive state for the Boosting tab: uploaded dataset, fitted model,
# predictions, and actual/predicted pairs for the performance plot.
databs <- reactiveVal(NULL)
bs_model_reactive <- reactiveVal()
pred_bs_reactive <- reactiveVal()
results_reactive_bs <- reactiveVal()
# Load and clean data
# Load the uploaded file for the Boosting tab, clean its column names,
# cache it, and refresh the variable-selection inputs.
observeEvent(input$loadbs, {
  req(input$bsinput)
  # req() above guarantees the upload exists, so no extra NULL check is needed.
  uploaded <- input$bsinput
  cleaned <- clean_column_names(read_data(uploaded$datapath))
  databs(cleaned)
  # Offer every column as a candidate target; exclude the current target
  # from the independent-variable choices.
  cols <- colnames(cleaned)
  updateSelectInput(session, "targetbs", choices = cols)
  updateSelectizeInput(session, "independentVarbs",
                       choices = setdiff(cols, input$targetbs))
})
# Print a base-R summary of the loaded Boosting dataset.
output$dataSummarybs <- renderPrint({
  df <- databs()
  req(df)
  summary(df)
})
# Train a gradient-boosting (gbm) model when the run button is pressed.
# Validation failures go to output$modelOutputbs.
observeEvent(input$runbs, {
  req(databs(), input$targetbs, input$independentVarbs)
  withProgress(message = 'Model is being trained...', value = 0, {
    incProgress(0.1) # Initial progress
    data_bs <- databs() %>%
      dplyr::select(all_of(c(input$targetbs, input$independentVarbs))) %>%
      na.omit()
    # Early return if conditions are not met.
    if (length(input$independentVarbs) == 0) {
      output$modelOutputbs <- renderPrint({ "Please select independent variables." })
      return()
    }
    incProgress(0.3) # Increment progress
    if (nrow(data_bs) < 10) {
      output$modelOutputbs <- renderPrint({ "Dataset is too small after removing NA values." })
      return()
    }
    split_ratio <- input$dataSplitbs
    if (split_ratio <= 0 || split_ratio >= 1) {
      output$modelOutputbs <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
      return()
    }
    # Partition the data; setdiff() drops test rows identical to training rows.
    set.seed(123)
    train <- data_bs %>% sample_frac(split_ratio)
    test <- data_bs %>% setdiff(train)
    incProgress(0.5) # Increment progress
    formula_bs <- as.formula(paste(input$targetbs, "~", paste(input$independentVarbs, collapse = "+")))
    # Fit the Boosting model.  Cross-validation is done by gbm itself via
    # cv.folds; the previous unused trainControl() object was removed.
    # FIX: `verbose = F` -> `verbose = FALSE` (T/F are reassignable).
    bs_model <- gbm(formula_bs,
                    data = train,
                    distribution = "gaussian",
                    n.trees = input$nbsInput,
                    interaction.depth = input$nbsdepth,
                    cv.folds = 10,
                    shrinkage = input$nbshr,
                    verbose = FALSE)
    bs_model_reactive(bs_model)
    # Model summary: relative influence of each predictor.
    output$runbs <- renderPrint({
      # Renamed local so it no longer shadows base::summary.
      rel_influence <- summary(bs_model)
      print(rel_influence)
      # Add interpretations
      cat("\nModel Interpretation:\n")
      cat("1. Variable Importance: The summary shows the relative influence of each predictor variable in the model.
Variables with higher values have more influence on the model's predictions.\n")
    })
    incProgress(1.0) # Complete the progress
  })
})
# Interactive feature-importance (relative influence) chart for the gbm model.
output$importancePlotbs <- renderPlotly({
  # BUGFIX: the guard previously was req(bs_model_reactive) — the
  # reactiveVal *function*, which is always truthy — so it never blocked
  # rendering before a model existed.  Guard on the stored value instead.
  bs_model <- bs_model_reactive()
  req(bs_model)
  # Extracting feature importance.
  # Note: 'n.trees' should match the number of trees used in the model.
  importance_vals <- summary(bs_model, n.trees = input$nbsInput, plot = FALSE)
  # Preparing the data frame for ggplot
  importance_df <- data.frame(
    Feature = rownames(importance_vals),
    Overall = importance_vals$rel.inf
  )
  # Horizontal bar chart, most influential feature on top
  p <- ggplot(importance_df, aes(x = reorder(Feature, Overall), y = Overall)) +
    geom_bar(stat = "identity", fill = "dodgerblue") +
    theme_minimal() +
    coord_flip() +
    labs(title = "Feature Importance", x = "Features", y = "Relative Influence")
  # Convert to Plotly for an interactive plot
  ggplotly(p)
})
# Evaluate the gbm model on button press and report MSE / R-squared /
# MAE through output$predictionOutputbs.
observeEvent(input$boostingBtn, {
req(databs(), input$targetbs, input$independentVarbs)
# Start the progress bar
withProgress(message = 'Model is being trained...', value = 0, {
# Increment progress
incProgress(0.1) # Initial progress
df <- databs() %>%
dplyr::select(all_of(c(input$targetbs, input$independentVarbs))) %>%
na.omit()
# Early return if conditions are not met
if (length(input$independentVarbs) == 0) {
output$modelOutputbs <- renderPrint({ "Please select independent variables." })
return()
}
if (nrow(df) < 10) {
output$modelOutputbs <- renderPrint({ "Dataset is too small after removing NA values." })
return()
}
split_ratio <- input$dataSplitbs
if (split_ratio <= 0 || split_ratio >= 1) {
output$modelOutputbs <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
return()
}
incProgress(0.3) # Increment progress
# Partition the data
# NOTE(review): setdiff() drops test rows identical to training rows.
set.seed(123) # For reproducibility
train <- df %>% sample_frac(split_ratio)
test <- df %>% setdiff(train)
# Fit the Boosting model
formula_bs <- as.formula(paste(input$targetbs, "~", paste(input$independentVarbs, collapse = "+")))
bs_model <- gbm(formula_bs,
data = train,
distribution = "gaussian",
n.trees = input$nbsInput,
interaction.depth = input$nbsdepth,
cv.folds = 10,
shrinkage = input$nbshr,
verbose = F)
incProgress(0.7) # Increment progress
# In-sample predictions on the training split — metrics are optimistic.
# NOTE(review): predict() is called without n.trees; predict.gbm then
# falls back to a default tree count (with a warning) — presumably all
# trees were intended; confirm and pass n.trees explicitly.
pred_bs <- predict(bs_model, newdata = train)
# Calculate metrics
MSE_bs <- mse(actual = train[[input$targetbs]], predicted = pred_bs)
# R2 comes from caret
R_square_bs <- R2(pred_bs, train[[input$targetbs]])
# mae is the helper defined in this file
MAE_bs <- mae(actual = train[[input$targetbs]], predicted = pred_bs)
# Finalize progress
incProgress(1.0) # Complete the progress
output$predictionOutputbs <- renderPrint({
cat("Mean Squared Error (MSE):\n\n", MSE_bs, "\n\n", "Interpretation: MSE measures the average squared difference between actual and predicted values.
Lower values indicate better model performance.
A value of 0 means perfect predictions.")
cat("\n\nR-squared (R²):\n\n", R_square_bs, "\n\n", "Interpretation: R² represents the proportion of variance in the dependent variable that's predictable from the independent variables.
It ranges from 0 to 1, with higher values indicating better model fit.")
cat("\n\nMean Absolute Error (MAE):\n\n", MAE_bs, "\n\n", "Interpretation: MAE measures the average absolute difference between actual and predicted values.
Like MSE, lower MAE values indicate better model performance.")
})
# Store results in a reactive value
results_reactive_bs(list(actual = train[[input$targetbs]], predicted = pred_bs))
})
})
# Scatter of actual vs. predicted values for the boosting model.
output$performancePlotbs <- renderPlotly({
  results <- results_reactive_bs()
  req(results)
  scatter_df <- data.frame(Actual = results$actual,
                           Predicted = results$predicted)
  fig <- ggplot(scatter_df, aes(x = Actual, y = Predicted)) +
    geom_point(alpha = 0.5) +
    geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
    theme_minimal() +
    labs(x = "Actual Values", y = "Predicted Values",
         title = "Actual vs. Predicted Values")
  ggplotly(fig)
})
###MARS
# Reactive state for the MARS tab: uploaded dataset, fitted model,
# predictions, and actual/predicted pairs for the performance plot.
datams <- reactiveVal(NULL)
ms_model_reactive <- reactiveVal()
pred_ms_reactive <- reactiveVal()
results_reactive_ms <- reactiveVal()
# Load and clean data
# Load the uploaded file for the MARS tab, clean its column names,
# cache it, and refresh the variable-selection inputs.
observeEvent(input$loadms, {
  req(input$msinput)
  # req() above guarantees the upload exists, so no extra NULL check is needed.
  uploaded <- input$msinput
  cleaned <- clean_column_names(read_data(uploaded$datapath))
  datams(cleaned)
  # Offer every column as a candidate target; exclude the current target
  # from the independent-variable choices.
  cols <- colnames(cleaned)
  updateSelectInput(session, "targetms", choices = cols)
  updateSelectizeInput(session, "independentVarms",
                       choices = setdiff(cols, input$targetms))
})
# Print a base-R summary of the loaded MARS dataset.
output$dataSummaryms <- renderPrint({
  df <- datams()
  req(df)
  summary(df)
})
# Train a MARS (earth) model via caret when the run button is pressed,
# tuning degree/nprune by cross-validation.  Validation failures go to
# output$modelOutputms.
observeEvent(input$runms, {
req(datams(), input$targetms, input$independentVarms)
withProgress(message = 'Model is being trained...', value = 0, {
# Increment progress
incProgress(0.1) # Initial progress
data_ms <- datams() %>%
dplyr::select(all_of(c(input$targetms, input$independentVarms))) %>%
na.omit()
# Early return if conditions are not met
if (length(input$independentVarms) == 0) {
output$modelOutputms <- renderPrint({ "Please select independent variables." })
return()
}
incProgress(0.3) # Increment progress
if (nrow(data_ms) < 10) {
output$modelOutputms <- renderPrint({ "Dataset is too small after removing NA values." })
return()
}
split_ratio <- input$dataSplitms
if (split_ratio <= 0 || split_ratio >= 1) {
output$modelOutputms <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
return()
}
incProgress(0.6) # Increment progress
# Partition the data
# NOTE(review): setdiff() drops test rows identical to training rows.
set.seed(123)
train <- data_ms %>% sample_frac(split_ratio)
test <- data_ms %>% setdiff(train)
# Build the model formula
formula_ms <- as.formula(paste(input$targetms, "~", paste(input$independentVarms, collapse = "+")))
# Fit the MARS model via caret::train (method = "earth")
# Tuning grid: interaction degree 1-2, 2-20 retained terms
marsGrid <- expand.grid(.degree = 1:2, .nprune = 2:20)
ms_model <- train(formula_ms, data = train, method = "earth", tuneGrid = marsGrid,
trControl = trainControl(method = "cv", verboseIter = T))
ms_model_reactive(ms_model)
# Model summary
output$runms <- renderPrint({
print(ms_model)
cat("\nModel Summary Interpretation:\n")
cat("\n1. Tuned Parameters: This section shows the best parameters found during the training process,
such as the degree of interactions and the number of terms/pruning in the MARS model.
These parameters are crucial for the model's ability to capture complex relationships in the data.\n")
cat("\n2. Model Performance: The summary will also include performance metrics.
For regression tasks, look for metrics like RMSE or R-squared, where a lower RMSE or a higher R-squared indicates better performance.
For classification, metrics like Accuracy or AUC are common.\n")
cat("\n3. Cross-Validation Results: If cross-validation was used,
the summary may show how the model performed across different subsets of the data, which can be an indicator of the model's robustness.\n")
# Feature Importance
cat("Feature Importance:\n")
importance_vals <- varImp(ms_model, scale = FALSE)
print(importance_vals)
cat("\nFeature Importance Interpretation:\n")
cat("\nThis table shows the importance of each predictor variable in the model.
Variables with higher values have more influence on the model's predictions.
In the context of MARS, this importance can be seen as how much each variable contributes to the model's ability to fit the data and make accurate predictions.
High-importance variables are key drivers of the target variable, while low-importance variables have less impact.\n")
})
# Finalize progress
incProgress(1.0) # Complete the progress
})
})
# Interactive feature-importance chart for the MARS model.
output$importancePlotms <- renderPlotly({
  # BUGFIX: the guard previously was req(ms_model_reactive) — the
  # reactiveVal *function*, which is always truthy — so it never blocked
  # rendering before a model existed.  Guard on the stored value instead.
  ms_model <- ms_model_reactive()
  req(ms_model)
  # Extracting feature importance using caret's varImp function
  importance_vals <- varImp(ms_model, scale = FALSE)
  # Preparing the data frame for ggplot
  importance_df <- as.data.frame(importance_vals$importance)
  importance_df$Feature <- rownames(importance_df)
  # Horizontal bar chart, most important feature on top
  p <- ggplot(importance_df, aes(x = reorder(Feature, Overall), y = Overall)) +
    geom_bar(stat = "identity", fill = "dodgerblue") +
    theme_minimal() +
    coord_flip() +
    labs(title = "Feature Importance", x = "Features", y = "Relative Importance")
  # Convert to Plotly for an interactive plot
  ggplotly(p)
})
# Mean Squared Error helper (third definition of mse() in this file).
mse <- function(actual, predicted) {
  residuals <- actual - predicted
  mean(residuals ^ 2)
}
# Mean Absolute Error helper (third definition of mae() in this file).
mae <- function(actual, predicted) {
  residuals <- actual - predicted
  mean(abs(residuals))
}
# Evaluate the MARS model on button press and report MSE / R-squared /
# MAE through output$predictionOutputms.
observeEvent(input$marsBtn, {
req(datams(), input$targetms, input$independentVarms)
data_ms <- datams() %>%
dplyr::select(all_of(c(input$targetms, input$independentVarms))) %>%
na.omit()
withProgress(message = 'Model is being trained...', value = 0, {
# Increment progress
incProgress(0.1) # Initial progress
# Early return if conditions are not met
if (length(input$independentVarms) == 0) {
output$modelOutputms <- renderPrint({ "Please select independent variables." })
return()
}
if (nrow(data_ms) < 10) {
output$modelOutputms <- renderPrint({ "Dataset is too small after removing NA values." })
return()
}
incProgress(0.3) # Increment progress
split_ratio <- input$dataSplitms
if (split_ratio <= 0 || split_ratio >= 1) {
output$modelOutputms <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
return()
}
# Partition the data
# NOTE(review): setdiff() drops test rows identical to training rows.
set.seed(123)
train <- data_ms %>% sample_frac(split_ratio)
test <- data_ms %>% setdiff(train)
# Build the model formula
formula_ms <- as.formula(paste(input$targetms, "~", paste(input$independentVarms, collapse = "+")))
# Fit the MARS model via caret::train (method = "earth")
# Tuning grid: interaction degree 1-2, 2-20 retained terms
set.seed(2)
marsGrid <- expand.grid(.degree = 1:2, .nprune = 2:20)
ms_model <- train(formula_ms, data = train, method = "earth", tuneGrid = marsGrid,
trControl = trainControl(method = "cv", verboseIter = T))
incProgress(0.7) # Increment progress
# In-sample predictions on the training split — metrics are optimistic.
# NOTE(review): predict() on an earth fit returns a one-column matrix,
# which mse()/mae() handle via vector recycling of the subtraction.
pred_ms <- predict(ms_model, newdata = train)
# Calculate metrics
MSE_ms <- mse(actual = train[[input$targetms]], predicted = pred_ms)
# R2 comes from caret
R_square_ms <- R2(pred_ms, train[[input$targetms]])
# mae is the helper defined in this file
MAE_ms <- mae(actual = train[[input$targetms]], predicted = pred_ms)
# Finalize progress
incProgress(1.0) # Complete the progress
output$predictionOutputms <- renderPrint({
cat("Mean Squared Error (MSE):\n\n", MSE_ms, "\n\n", "Interpretation: MSE measures the average squared difference between actual and predicted values.
Lower values indicate better model performance.
A value of 0 means perfect predictions.")
cat("\n\nR-squared (R²):\n\n", R_square_ms, "\n\n", "Interpretation: R² represents the proportion of variance in the dependent variable that's predictable from the independent variables.
It ranges from 0 to 1, with higher values indicating better model fit.")
cat("\n\nMean Absolute Error (MAE):\n\n", MAE_ms, "\n\n", "Interpretation: MAE measures the average absolute difference between actual and predicted values.
Like MSE, lower MAE values indicate better model performance.")
})
# Store results in a reactive value
results_reactive_ms(list(actual = train[[input$targetms]], predicted = pred_ms))
})
})
output$performancePlotms <- renderPlotly({
  # Retrieve the cached actual/predicted pairs; render nothing until they exist
  results <- results_reactive_ms()
  req(results)
  # predict() on an earth (MARS) model returns a one-column matrix named "y";
  # extract that column as a plain numeric vector
  predicted_vector <- results$predicted[, "y"]
  plot_data <- data.frame(
    Actual = results$actual,
    Predicted = predicted_vector
  )
  # Scatter of predictions against observations with a y = x reference line
  scatter <- ggplot(plot_data, aes(x = Actual, y = Predicted)) +
    geom_point(alpha = 0.5) +
    geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
    labs(x = "Actual Values", y = "Predicted Values",
         title = "Actual vs. Predicted Values") +
    theme_minimal()
  ggplotly(scatter)
})
###Ridge Regression
# Holds the uploaded and cleaned dataset for the Ridge Regression tab
datarr <- reactiveVal(NULL)
# Holds the fitted caret/glmnet ridge model once training has completed
rr_model_reactive <- reactiveVal()
# Declared for predictions; appears unset in this section — TODO confirm whether
# it is used elsewhere in the file or can be removed
pred_rr_reactive <- reactiveVal()
# Holds the actual/predicted pairs consumed by the performance plot
results_reactive_rr <- reactiveVal()
# Load and clean data when the Ridge tab's load button is pressed
observeEvent(input$loadrr, {
  req(input$rrinput)
  file <- input$rrinput
  if (is.null(file)) {
    return()
  }
  # Read the uploaded file and normalise its column names
  data_df <- clean_column_names(read_data(file$datapath))
  # Publish the cleaned data to the rest of the Ridge tab
  datarr(data_df)
  # Refresh the variable pickers with the new column set
  updateSelectInput(session, "targetrr", choices = colnames(data_df))
  updateSelectizeInput(session, "independentVarrr",
                       choices = setdiff(colnames(data_df), input$targetrr))
})
output$dataSummaryrr <- renderPrint({
  # Show per-column summaries once a dataset has been loaded
  df <- datarr()
  req(df)
  summary(df)
})
observeEvent(input$runrr, {
  req(datarr(), input$targetrr, input$independentVarrr)
  withProgress(message = 'Model is being trained...', value = 0, {
    incProgress(0.1) # Initial progress
    # Restrict to the chosen columns and drop rows with missing values
    data_rr <- datarr() %>%
      dplyr::select(all_of(c(input$targetrr, input$independentVarrr))) %>%
      na.omit()
    # Validate the inputs before doing any expensive work
    if (length(input$independentVarrr) == 0) {
      output$modelOutputrr <- renderPrint({ "Please select independent variables." })
      return()
    }
    incProgress(0.3)
    if (nrow(data_rr) < 10) {
      output$modelOutputrr <- renderPrint({ "Dataset is too small after removing NA values." })
      return()
    }
    split_ratio <- input$dataSplitrr
    if (split_ratio <= 0 || split_ratio >= 1) {
      output$modelOutputrr <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
      return()
    }
    incProgress(0.6)
    # Partition the data (seeded so the split is reproducible)
    set.seed(123)
    train <- data_rr %>% sample_frac(split_ratio)
    test <- data_rr %>% setdiff(train)
    # Tuning grid for ridge regression: alpha = 0 selects the ridge penalty;
    # lambda is searched on a log scale from 1e-3 to 1e3
    ridgeGrid <- expand.grid(.lambda = 10^seq(-3, 3, length = 100),
                             .alpha = 0)
    formula_rr <- as.formula(paste(input$targetrr, "~", paste(input$independentVarrr, collapse = "+")))
    # Fit the ridge regression model with 10-fold cross-validation
    rr_model <- train(
      formula_rr,
      data = train,
      method = "glmnet",
      tuneGrid = ridgeGrid,
      trControl = trainControl(method = "cv", number = 10, verboseIter = TRUE)
    )
    rr_model_reactive(rr_model)
    # Model summary.
    # BUG FIX: this was previously assigned to output$runrr, which collides
    # with the "runrr" actionButton input id (Shiny inputs and outputs share
    # one HTML id namespace). Route it to modelOutputrr, the output the
    # validation messages above already use.
    output$modelOutputrr <- renderPrint({
      print(rr_model)
      cat("\nModel Summary Interpretation:\n")
      cat("\n1. Tuned Parameters: This section shows the best parameters found during the training process,
such as the regularization strength (lambda) selected for the ridge model.
These parameters are crucial for controlling overfitting while still capturing relationships in the data.\n")
      cat("\n2. Model Performance: The summary will also include performance metrics.
For regression tasks, look for metrics like RMSE or R-squared, where a lower RMSE or a higher R-squared indicates better performance.
For classification, metrics like Accuracy or AUC are common.\n")
      cat("\n3. Cross-Validation Results: If cross-validation was used,
the summary may show how the model performed across different subsets of the data, which can be an indicator of the model's robustness.\n")
      # Feature Importance
      cat("Feature Importance:\n")
      importance_vals <- varImp(rr_model, scale = FALSE)
      print(importance_vals)
      cat("\nFeature Importance Interpretation:\n")
      cat("\nThis table shows the importance of each predictor variable in the model.
Variables with higher values have more influence on the model's predictions.
In the context of ridge regression, this importance reflects how strongly each variable contributes to the fitted model's predictions.
High-importance variables are key drivers of the target variable, while low-importance variables have less impact.\n")
    })
    # Finalize progress
    incProgress(1.0) # Complete the progress
  })
})
output$importancePlotrr <- renderPlotly({
  # BUG FIX: req() was called on the reactiveVal object itself
  # (rr_model_reactive), which is always truthy, so the plot tried to render
  # before a model existed. Call the reactive to wait for an actual model.
  req(rr_model_reactive())
  # Access the trained model from the reactive value
  rr_model <- rr_model_reactive()
  # caret::varImp returns per-feature importance scores in $importance
  importance_vals <- varImp(rr_model, scale = FALSE)
  # Prepare a plain data frame for ggplot, carrying feature names as a column
  importance_df <- as.data.frame(importance_vals$importance)
  importance_df$Feature <- rownames(importance_df)
  # Horizontal bar chart with features ordered by importance
  p <- ggplot(importance_df, aes(x = reorder(Feature, Overall), y = Overall)) +
    geom_bar(stat = "identity", fill = "dodgerblue") +
    theme_minimal() +
    coord_flip() + # Flipping coordinates for horizontal bars
    labs(title = "Feature Importance", x = "Features", y = "Relative Importance")
  # Convert to Plotly for an interactive plot
  ggplotly(p)
})
# Mean Squared Error helper: mean of the squared residuals between the
# observed values and the model's predictions
mse <- function(actual, predicted) {
  squared_errors <- (actual - predicted)^2
  mean(squared_errors)
}
# Mean Absolute Error helper: mean magnitude of the residuals between the
# observed values and the model's predictions
mae <- function(actual, predicted) {
  abs_errors <- abs(predicted - actual)
  mean(abs_errors)
}
observeEvent(input$RidgeBtn, {
  req(datarr(), input$targetrr, input$independentVarrr)
  # Restrict to the chosen columns and drop rows with missing values
  data_rr <- datarr() %>%
    dplyr::select(all_of(c(input$targetrr, input$independentVarrr))) %>%
    na.omit()
  withProgress(message = 'Model is being trained...', value = 0, {
    incProgress(0.1) # Initial progress
    # Validate the inputs before doing any expensive work
    if (length(input$independentVarrr) == 0) {
      output$modelOutputrr <- renderPrint({ "Please select independent variables." })
      return()
    }
    if (nrow(data_rr) < 10) {
      output$modelOutputrr <- renderPrint({ "Dataset is too small after removing NA values." })
      return()
    }
    incProgress(0.3)
    split_ratio <- input$dataSplitrr
    if (split_ratio <= 0 || split_ratio >= 1) {
      output$modelOutputrr <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
      return()
    }
    # Draw the training partition (seeded for reproducibility).
    # NOTE: the held-out rows are not used here — every metric below is
    # computed in-sample on the training partition.
    set.seed(123)
    train <- data_rr %>% sample_frac(split_ratio)
    # Tuning grid for ridge regression: alpha = 0 selects the ridge penalty;
    # lambda is searched on a log scale from 1e-3 to 1e3
    ridgeGrid <- expand.grid(.lambda = 10^seq(-3, 3, length = 100),
                             .alpha = 0)
    formula_rr <- as.formula(paste(input$targetrr, "~", paste(input$independentVarrr, collapse = "+")))
    # Fit the ridge regression model with 10-fold cross-validation
    rr_model <- train(
      formula_rr,
      data = train,
      method = "glmnet",
      tuneGrid = ridgeGrid,
      trControl = trainControl(method = "cv", number = 10, verboseIter = TRUE)
    )
    incProgress(0.7)
    # In-sample predictions on the training partition
    pred_rr <- predict(rr_model, newdata = train)
    # Error metrics: mse/mae are the local helpers defined in this file,
    # R2 comes from caret
    MSE_rr <- mse(actual = train[[input$targetrr]], predicted = pred_rr)
    R_square_rr <- R2(pred_rr, train[[input$targetrr]])
    MAE_rr <- mae(actual = train[[input$targetrr]], predicted = pred_rr)
    # Finalize progress
    incProgress(1.0) # Complete the progress
    output$predictionOutputrr <- renderPrint({
      cat("Mean Squared Error (MSE):\n\n", MSE_rr, "\n\n", "Interpretation: MSE measures the average squared difference between actual and predicted values.
Lower values indicate better model performance.
A value of 0 means perfect predictions.")
      cat("\n\nR-squared (R²):\n\n", R_square_rr, "\n\n", "Interpretation: R² represents the proportion of variance in the dependent variable that's predictable from the independent variables.
It ranges from 0 to 1, with higher values indicating better model fit.")
      cat("\n\nMean Absolute Error (MAE):\n\n", MAE_rr, "\n\n", "Interpretation: MAE measures the average absolute difference between actual and predicted values.
Like MSE, lower MAE values indicate better model performance.")
    })
    # Cache the pairs for the actual-vs-predicted plot
    results_reactive_rr(list(actual = train[[input$targetrr]], predicted = pred_rr))
  })
})
output$performancePlotrr <- renderPlotly({
  # Retrieve the cached actual/predicted pairs; render nothing until they exist
  results <- results_reactive_rr()
  req(results)
  # glmnet predictions are already a plain numeric vector, so no reshaping
  plot_data <- data.frame(
    Actual = results$actual,
    Predicted = results$predicted
  )
  # Scatter of predictions against observations with a y = x reference line
  fig <- ggplot(plot_data, aes(x = Actual, y = Predicted)) +
    geom_point(alpha = 0.5) +
    geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
    labs(x = "Actual Values", y = "Predicted Values",
         title = "Actual vs. Predicted Values") +
    theme_minimal()
  ggplotly(fig)
})
###LASSO Regression
# Holds the uploaded and cleaned dataset for the LASSO Regression tab
datals <- reactiveVal(NULL)
# Holds the fitted caret/glmnet lasso model once training has completed
ls_model_reactive <- reactiveVal()
# Declared for predictions; appears unset in this section — TODO confirm whether
# it is used elsewhere in the file or can be removed
pred_ls_reactive <- reactiveVal()
# Holds the actual/predicted pairs consumed by the performance plot
results_reactive_ls <- reactiveVal()
# Load and clean data when the LASSO tab's load button is pressed
observeEvent(input$loadls, {
  req(input$lsinput)
  file <- input$lsinput
  if (is.null(file)) {
    return()
  }
  # Read the uploaded file and normalise its column names
  data_df <- clean_column_names(read_data(file$datapath))
  # Publish the cleaned data to the rest of the LASSO tab
  datals(data_df)
  # Refresh the variable pickers with the new column set
  updateSelectInput(session, "targetls", choices = colnames(data_df))
  updateSelectizeInput(session, "independentVarls",
                       choices = setdiff(colnames(data_df), input$targetls))
})
output$dataSummaryls <- renderPrint({
  # Show per-column summaries once a dataset has been loaded
  df <- datals()
  req(df)
  summary(df)
})
observeEvent(input$runls, {
  req(datals(), input$targetls, input$independentVarls)
  withProgress(message = 'Model is being trained...', value = 0, {
    incProgress(0.1) # Initial progress
    # Restrict to the chosen columns and drop rows with missing values
    data_ls <- datals() %>%
      dplyr::select(all_of(c(input$targetls, input$independentVarls))) %>%
      na.omit()
    # Validate the inputs before doing any expensive work
    if (length(input$independentVarls) == 0) {
      output$modelOutputls <- renderPrint({ "Please select independent variables." })
      return()
    }
    incProgress(0.3)
    if (nrow(data_ls) < 10) {
      output$modelOutputls <- renderPrint({ "Dataset is too small after removing NA values." })
      return()
    }
    split_ratio <- input$dataSplitls
    if (split_ratio <= 0 || split_ratio >= 1) {
      output$modelOutputls <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
      return()
    }
    incProgress(0.6)
    # Partition the data (seeded so the split is reproducible)
    set.seed(123)
    train <- data_ls %>% sample_frac(split_ratio)
    test <- data_ls %>% setdiff(train)
    # Tuning grid for LASSO regression: alpha = 1 selects the lasso penalty
    # (the original comment claimed alpha = 0 / ridge, which was wrong);
    # lambda is searched on a log scale from 1e-3 to 1e3
    lassoGrid <- expand.grid(.lambda = 10^seq(-3, 3, length = 100),
                             .alpha = 1)
    formula_ls <- as.formula(paste(input$targetls, "~", paste(input$independentVarls, collapse = "+")))
    # Fit the lasso regression model with 10-fold cross-validation
    ls_model <- train(
      formula_ls,
      data = train,
      method = "glmnet",
      tuneGrid = lassoGrid,
      trControl = trainControl(method = "cv", number = 10, verboseIter = TRUE)
    )
    ls_model_reactive(ls_model)
    # Model summary.
    # BUG FIX: this was previously assigned to output$runls, which collides
    # with the "runls" actionButton input id (Shiny inputs and outputs share
    # one HTML id namespace). Route it to modelOutputls, the output the
    # validation messages above already use.
    output$modelOutputls <- renderPrint({
      print(ls_model)
      cat("\nModel Summary Interpretation:\n")
      cat("\n1. Tuned Parameters: This section shows the best parameters found during the training process,
such as the regularization strength (lambda) selected for the lasso model.
These parameters are crucial for controlling overfitting while still capturing relationships in the data.\n")
      cat("\n2. Model Performance: The summary will also include performance metrics.
For regression tasks, look for metrics like RMSE or R-squared, where a lower RMSE or a higher R-squared indicates better performance.
For classification, metrics like Accuracy or AUC are common.\n")
      cat("\n3. Cross-Validation Results: If cross-validation was used,
the summary may show how the model performed across different subsets of the data, which can be an indicator of the model's robustness.\n")
      # Feature Importance
      cat("Feature Importance:\n")
      importance_vals <- varImp(ls_model, scale = FALSE)
      print(importance_vals)
      cat("\nFeature Importance Interpretation:\n")
      cat("\nThis table shows the importance of each predictor variable in the model.
Variables with higher values have more influence on the model's predictions.
In the context of lasso regression, this importance reflects how strongly each variable contributes to the fitted model's predictions; lasso can shrink unimportant coefficients exactly to zero.
High-importance variables are key drivers of the target variable, while low-importance variables have less impact.\n")
    })
    # Finalize progress
    incProgress(1.0) # Complete the progress
  })
})
output$importancePlotls <- renderPlotly({
  # BUG FIX: req() was called on the reactiveVal object itself
  # (ls_model_reactive), which is always truthy, so the plot tried to render
  # before a model existed. Call the reactive to wait for an actual model.
  req(ls_model_reactive())
  # Access the trained model from the reactive value
  ls_model <- ls_model_reactive()
  # caret::varImp returns per-feature importance scores in $importance
  importance_vals <- varImp(ls_model, scale = FALSE)
  # Prepare a plain data frame for ggplot, carrying feature names as a column
  importance_df <- as.data.frame(importance_vals$importance)
  importance_df$Feature <- rownames(importance_df)
  # Horizontal bar chart with features ordered by importance
  p <- ggplot(importance_df, aes(x = reorder(Feature, Overall), y = Overall)) +
    geom_bar(stat = "identity", fill = "dodgerblue") +
    theme_minimal() +
    coord_flip() + # Flipping coordinates for horizontal bars
    labs(title = "Feature Importance", x = "Features", y = "Relative Importance")
  # Convert to Plotly for an interactive plot
  ggplotly(p)
})
# Mean Squared Error helper (redefinition — an identical version appears
# earlier in this file; the later binding wins)
mse <- function(actual, predicted) {
  mean((predicted - actual)^2)
}
# Mean Absolute Error helper (redefinition — an identical version appears
# earlier in this file; the later binding wins)
mae <- function(actual, predicted) {
  mean(abs(predicted - actual))
}
observeEvent(input$LassoBtn, {
  req(datals(), input$targetls, input$independentVarls)
  # Restrict to the chosen columns and drop rows with missing values
  data_ls <- datals() %>%
    dplyr::select(all_of(c(input$targetls, input$independentVarls))) %>%
    na.omit()
  withProgress(message = 'Model is being trained...', value = 0, {
    incProgress(0.1) # Initial progress
    # Validate the inputs before doing any expensive work
    if (length(input$independentVarls) == 0) {
      output$modelOutputls <- renderPrint({ "Please select independent variables." })
      return()
    }
    if (nrow(data_ls) < 10) {
      output$modelOutputls <- renderPrint({ "Dataset is too small after removing NA values." })
      return()
    }
    incProgress(0.3)
    split_ratio <- input$dataSplitls
    if (split_ratio <= 0 || split_ratio >= 1) {
      output$modelOutputls <- renderPrint({ "Invalid split ratio. Please choose a value between 0 and 1." })
      return()
    }
    # Draw the training partition (seeded for reproducibility).
    # NOTE: the held-out rows are not used here — every metric below is
    # computed in-sample on the training partition.
    set.seed(123)
    train <- data_ls %>% sample_frac(split_ratio)
    # Tuning grid for LASSO regression: alpha = 1 selects the lasso penalty
    # (the original comment claimed alpha = 0 / ridge, which was wrong)
    lassoGrid <- expand.grid(.lambda = 10^seq(-3, 3, length = 100),
                             .alpha = 1)
    formula_ls <- as.formula(paste(input$targetls, "~", paste(input$independentVarls, collapse = "+")))
    # Fit the lasso regression model with 10-fold cross-validation
    ls_model <- train(
      formula_ls,
      data = train,
      method = "glmnet",
      tuneGrid = lassoGrid,
      trControl = trainControl(method = "cv", number = 10, verboseIter = TRUE)
    )
    incProgress(0.7)
    # In-sample predictions on the training partition
    pred_ls <- predict(ls_model, newdata = train)
    # Error metrics: mse/mae are the local helpers defined in this file,
    # R2 comes from caret
    MSE_ls <- mse(actual = train[[input$targetls]], predicted = pred_ls)
    R_square_ls <- R2(pred_ls, train[[input$targetls]])
    MAE_ls <- mae(actual = train[[input$targetls]], predicted = pred_ls)
    # Finalize progress
    incProgress(1.0) # Complete the progress
    output$predictionOutputls <- renderPrint({
      cat("Mean Squared Error (MSE):\n\n", MSE_ls, "\n\n", "Interpretation: MSE measures the average squared difference between actual and predicted values.
Lower values indicate better model performance.
A value of 0 means perfect predictions.")
      cat("\n\nR-squared (R²):\n\n", R_square_ls, "\n\n", "Interpretation: R² represents the proportion of variance in the dependent variable that's predictable from the independent variables.
It ranges from 0 to 1, with higher values indicating better model fit.")
      cat("\n\nMean Absolute Error (MAE):\n\n", MAE_ls, "\n\n", "Interpretation: MAE measures the average absolute difference between actual and predicted values.
Like MSE, lower MAE values indicate better model performance.")
    })
    # Cache the pairs for the actual-vs-predicted plot
    results_reactive_ls(list(actual = train[[input$targetls]], predicted = pred_ls))
  })
})
output$performancePlotls <- renderPlotly({
  # Retrieve the cached actual/predicted pairs; render nothing until they exist
  results <- results_reactive_ls()
  req(results)
  # glmnet predictions are already a plain numeric vector, so no reshaping
  plot_data <- data.frame(
    Actual = results$actual,
    Predicted = results$predicted
  )
  # Scatter of predictions against observations with a y = x reference line
  fig <- ggplot(plot_data, aes(x = Actual, y = Predicted)) +
    geom_point(alpha = 0.5) +
    geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
    labs(x = "Actual Values", y = "Predicted Values",
         title = "Actual vs. Predicted Values") +
    theme_minimal()
  ggplotly(fig)
})
}
# Launch the application; ui and server are presumably defined earlier in this
# file (the server definition closes just above) — confirm ui exists above
shinyApp(ui, server)