# Hugging Face Space scrape residue, kept as a comment so the file parses:
# uploaded by kritsadaK, commit "add app.py" (1e5960d)
import streamlit as st
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
import numpy as np
# Function to load and combine data from the provided URLs
def load_data(urls):
    """Download every CSV in *urls*, concatenate them, and return one frame.

    Parameters
    ----------
    urls : list[str]
        Locations readable by ``pd.read_csv`` (URLs or file paths).

    Returns
    -------
    pd.DataFrame
        All rows from the sources that loaded successfully, with the
        'Date' column parsed to datetime (unparseable dates become NaT)
        and rows sorted chronologically. Sources that fail to load are
        skipped and the error is reported via ``st.write``.
    """
    dataframes = []
    for url in urls:
        try:
            # latin1 maps every byte to a character, so decoding never aborts
            df = pd.read_csv(url, encoding='latin1')
            dataframes.append(df)
        except Exception as e:
            st.write(f"Error reading {url}: {e}")
    if not dataframes:
        # Every source failed: return an empty frame instead of letting
        # pd.concat raise "No objects to concatenate" and crash the app.
        return pd.DataFrame(columns=['Date'])
    combined_df = pd.concat(dataframes, ignore_index=True)
    combined_df['Date'] = pd.to_datetime(combined_df['Date'], errors='coerce')
    combined_df = combined_df.sort_values(by='Date')
    return combined_df
# URLs to the CSV files
# Annual PM10 exports from the Pollution Control Department open-data catalog.
# Every resource lives under the same dataset, so the URLs are assembled from
# (resource-id, filename) pairs instead of repeating the long prefix 13 times.
_PCD_DATASET_BASE = (
    "https://pcd.gdcatalog.go.th/dataset/"
    "d86c62ec-b6e5-4577-82e3-6dee5e423634/resource"
)
_PCD_RESOURCES = [
    ("f5f7f32c-b56b-40f1-a513-d2ac6cd07d3a", "aqpm10thailand2554.csv"),
    ("279cdb5a-f881-4b42-953a-c626bcec95b7", "aqpm10thailand2555.csv"),
    ("15a0e00f-ba2f-477f-a5d5-11479c86e76f", "aqpm10thailand2556.csv"),
    ("5d19474f-8062-4321-a0f8-d34ecd92f19d", "aqpm10thailand2557.csv"),
    ("81ef4d84-920d-4e30-a008-605156b34ffc", "aqpm10thailand2558.csv"),
    ("c2cb7e5c-3990-4c2b-bd9b-69e82ea35c96", "aqpm10thailand2559.csv"),
    ("a9aa40f0-9ad0-4b2e-97c9-d1c92de8766a", "aqpm10thailand2560.csv"),
    ("cd40ac24-c553-476f-bf16-111046fdd3da", "aqpm10thailand2561.csv"),
    ("9e44b0dc-d9ee-4844-b9da-e8efb8002e11", "aqpm10thailand2562.csv"),
    ("85d1c2a5-4098-4829-86f5-282063bb07fe", "aqpm10thailand2563.csv"),
    ("d2895cde-77a1-4b62-b2c8-deafcee91dda", "aqpm10thailand2564.csv"),
    ("77407f63-3059-40f9-9411-c74b26a63e16", "pm102022-65.csv"),
    ("c4aa6dfb-e2ea-4b0c-a953-619e2bcb43af", "pm102023-66.csv"),
]
urls = [
    f"{_PCD_DATASET_BASE}/{resource_id}/download/{filename}"
    for resource_id, filename in _PCD_RESOURCES
]
# Load the data
# ---- Page header -----------------------------------------------------------
st.title("Thailand PM10 Prediction App by using Basic ARIMA model")
# Thai caption: "Air-quality measurements from automatic monitoring stations
# (PM10) covering the whole country" — string kept verbatim for the UI.
st.write("ข้อมูลตรวจวัดคุณภาพอากาศจากสถานีตรวจวัดคุณภาพอากาศอัตโนมัติ (PM10) พื้นที่ทั่วประเทศ")
st.write("source of dataset: https://data.go.th/dataset/pm10")
# Fixed the grammar of the original project note (it was garbled English).
st.write("MLflow is configured for this project; I tried doing MLOps on DagsHub, see the link below")
st.write("https://dagshub.com/kkowenn/OpendatathaiMLflow")

# ---- Load and preview the yearly PM10 files --------------------------------
combined_df = load_data(urls)
st.write("Data Loaded")
st.write("Sample Data (2011-2021)")
st.write(combined_df.head())

# ---- Station metadata ------------------------------------------------------
# Separate catalog resource listing the monitoring stations (Thai text,
# hence utf-8-sig so the BOM is stripped and Thai characters render).
csv_url = "https://pcd.gdcatalog.go.th/dataset/d86c62ec-b6e5-4577-82e3-6dee5e423634/resource/9677d250-4d5d-40a4-a070-33182ffbec00/download/-2564.csv"
st.write("List of air quality monitoring stations:")
station_info_df = pd.read_csv(csv_url, encoding='utf-8-sig')
st.write(station_info_df)
# Allow the user to select a column for prediction
column_to_predict = st.selectbox("Select a Time Series Column for Prediction and press ENTER", combined_df.columns[1:])
# Prepare the data for the selected column
series = combined_df[['Date', column_to_predict]].dropna()
# Convert the selected column to numeric, coercing any errors
series[column_to_predict] = pd.to_numeric(series[column_to_predict], errors='coerce')
# Drop any rows with NaN values that might have been introduced by the conversion
series.dropna(inplace=True)
# Set the date as the index
series.set_index('Date', inplace=True)
# Split the data into training and testing sets
train_size = int(len(series) * 0.8)
train, test = series[:train_size], series[train_size:]
# Fit ARIMA model
model = ARIMA(train, order=(5,1,0))
model_fit = model.fit()
# Make predictions
predictions = model_fit.forecast(steps=len(test))
predictions = pd.DataFrame(predictions)
predictions.columns = ['Predicted_PM']
predictions.index = test.index
# Evaluate the model
mse = mean_squared_error(test, predictions)
rmse = np.sqrt(mse)
st.write(f"Root Mean Squared Error (RMSE): {rmse}")
# Display the results with Date and PM values
st.write("Predicted PM values with Dates:")
predictions = predictions.reset_index() # Reset index to make it a column
predictions.rename(columns={'index': 'Date'}, inplace=True) # Rename the index column to 'Date'
predictions['Date'] = predictions['Date'].dt.date # Convert datetime to date
st.write(predictions)