import asyncio
import datetime
import json
import os

import numpy as np
import pandas as pd
import requests
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from joblib import dump, load
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import resample
from xgboost import XGBClassifier

# Print the installed packages (handy for debugging the Space's environment)
try:
    from pip._internal.operations import freeze
except ImportError:  # pip < 10.0
    from pip.operations import freeze

pkgs = freeze.freeze()
for pkg in pkgs:
    print(pkg)

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

def train_the_model():
    # The raw file is read only to report its size; training itself uses
    # the balanced file produced by the data fetcher below.
    data = pd.read_csv("trainer_data.csv")
    print(data["customer_name"].count())

    data = pd.read_csv("trainer_data_balanced.csv")
    print(data["customer_name"].count())

    # Select columns
    selected_columns = ['customer_name', 'customer_address', 'customer_phone_no',
                        'weight', 'cod', 'pickup_address', 'client_number',
                        'destination_city', 'status_name']

    # Handle missing values
    # data_filled = data[selected_columns].fillna('Missing')
    data_filled = data[selected_columns].dropna()

    # Encode categorical variables
    encoders = {col: LabelEncoder() for col in selected_columns
                if data_filled[col].dtype == 'object'}
    for col, encoder in encoders.items():
        data_filled[col] = encoder.fit_transform(data_filled[col])

    # Split the dataset
    X = data_filled.drop('status_name', axis=1)
    y = data_filled['status_name']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    # Previous parameter set, kept for reference:
    # params = {
    #     'colsample_bytree': 0.3,
    #     'learning_rate': 0.6,
    #     'max_depth': 6,
    #     'n_estimators': 100,
    #     'subsample': 0.9,
    #     'use_label_encoder': False,
    #     'eval_metric': 'logloss'
    # }

    # Parameters to use for the model
    params = {
        'colsample_bytree': 0.9,
        'learning_rate': 0.1,
        'max_depth': 30,
        'n_estimators': 500,
        'subsample': 0.9,
        'use_label_encoder': False,  # deprecated in recent XGBoost releases; may emit a warning there
        'eval_metric': 'logloss'
    }
    # Initialize the classifier with the specified parameters
    xgb = XGBClassifier(**params)

    # Train the model
    xgb.fit(X_train, y_train)

    # Predict on the test set
    y_pred = xgb.predict(X_test)
    y_pred_proba = xgb.predict_proba(X_test)

    # Evaluate the model
    accuracy = accuracy_score(y_test, y_pred)
    classification_rep = classification_report(y_test, y_pred)

    # Save the model
    model_filename = 'transexpress_xgb_model.joblib'
    dump(xgb, model_filename)

    # Save the encoders
    encoders_filename = 'transexpress_encoders.joblib'
    dump(encoders, encoders_filename)

    return accuracy, classification_rep, "Model trained with new data"
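
# Minimal local smoke test for train_the_model (a sketch; it assumes
# trainer_data.csv and trainer_data_balanced.csv already exist on disk):
#
#   acc, report, msg = train_the_model()
#   print(msg, acc)
#   print(report)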

async def your_continuous_function(page: str, paginate: str):
    print("data fetcher running.....")

    # Fetch one page of return-to-client orders
    # (note: requests is blocking, so this call will block the event loop)
    url = ("https://report.transexpress.lk/api/orders/delivery-success-rate/"
           "return-to-client-orders?page=" + page + "&per_page=" + paginate)
    headers = {
        'Cookie': 'development_trans_express_session=NaFDGzh5WQCFwiortxA6WEFuBjsAG9GHIQrbKZ8B'
    }
    response = requests.get(url, headers=headers)
    json_response = response.json()

    # Extract 'data' for conversion
    data = json_response["return_to_client_orders"]['data']
    data_count = len(data)
    df = pd.json_normalize(data)

    # Collapse near-duplicate statuses into the two target classes
    df['status_name'] = df['status_name'].replace('Partially Delivered', 'Delivered')
    df['status_name'] = df['status_name'].replace('Received by Client', 'Returned to Client')
    print("data collected from page : " + page)

    # Append the new page to the accumulated training data,
    # creating the file on the first run
    try:
        source_csv = pd.read_csv('trainer_data.csv')
        combined_df_final = pd.concat([source_csv, df], ignore_index=True)
        # index=False keeps stray "Unnamed: 0" columns from piling up on reruns
        combined_df_final.to_csv("trainer_data.csv", index=False)
        print("data added")
    except FileNotFoundError:
        df.to_csv("trainer_data.csv", index=False)
        print("data created")
    # Reload the accumulated dataset
    data = pd.read_csv('trainer_data.csv')

    # Analyze class distribution
    class_distribution = data['status_name'].value_counts()
    print("Class Distribution before balancing:\n", class_distribution)

    # Size of the largest class, used as the target size for every class
    max_class_size = class_distribution.max()

    # Oversample each class up to the majority class size
    oversampled_data = pd.DataFrame()
    for class_name, group in data.groupby('status_name'):
        oversampled_group = resample(group,
                                     replace=True,              # sample with replacement
                                     n_samples=max_class_size,  # to match the majority class
                                     random_state=123)          # for reproducibility
        oversampled_data = pd.concat([oversampled_data, oversampled_group], axis=0)

    # Verify the new class distribution
    print("Class Distribution after oversampling:\n", oversampled_data['status_name'].value_counts())

    # Save the balanced dataset
    oversampled_data.to_csv('trainer_data_balanced.csv', index=False)

    # Retrain on the refreshed, balanced data
    accuracy, classification_rep, message = train_the_model()

    return {"message": message,
            "page_number": page,
            "data_count": data_count,
            "accuracy": accuracy,
            "classification_rep": classification_rep}
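
# Sketch of driving the fetcher from a plain script: it is a coroutine, so it
# needs an event loop (the page/per_page values here are illustrative only):
#
#   result = asyncio.run(your_continuous_function("1", "50"))
#   print(result["accuracy"])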

async def model_updated_time():
    try:
        m_time_encoder = os.path.getmtime('transexpress_encoders.joblib')
        m_time_model = os.path.getmtime('transexpress_xgb_model.joblib')
        return {"base model created time": datetime.datetime.fromtimestamp(m_time_encoder),
                "last model updated time": datetime.datetime.fromtimestamp(m_time_model)}
    except FileNotFoundError:
        return {"message": "no model found, so first train the model using the data fetcher"}

# Endpoint for making predictions
# (the "/predict" path and GET method are an assumption; the decorator was
# missing in the source)
@app.get("/predict")
def predict(
    date: str,
    customer_name: str,
    customer_address: str,
    customer_phone: str,
    weight: float,
    cod: int,
    pickup_address: str,
    client_number: str,
    destination_city: str
):
    try:
        # Load the trained model and encoders
        xgb_model = load('transexpress_xgb_model.joblib')
        encoders = load('transexpress_encoders.joblib')
    except FileNotFoundError:
        return {"message": "no model found, so first train the model using the data fetcher"}
    # Map labels unseen at training time to -1 instead of raising
    def safe_transform(encoder, column):
        classes = encoder.classes_
        return [encoder.transform([x])[0] if x in classes else -1 for x in column]

    # Convert the input data to a DataFrame
    input_data = {
        'customer_name': customer_name,
        'customer_address': customer_address,
        'customer_phone_no': customer_phone,
        'weight': float(weight),
        'cod': int(cod),
        'pickup_address': pickup_address,
        'client_number': client_number,
        'destination_city': destination_city
    }
    input_df = pd.DataFrame([input_data])

    # Encode categorical variables using the same encoders used during training
    for col in input_df.columns:
        if col in encoders:
            input_df[col] = safe_transform(encoders[col], input_df[col])

    # Predict and obtain probabilities
    pred = xgb_model.predict(input_df)
    pred_proba = xgb_model.predict_proba(input_df)

    # Decode the prediction (pred is already a 1-D array, so it goes to
    # inverse_transform directly)
    predicted_status = "Unknown" if pred[0] == -1 else encoders['status_name'].inverse_transform(pred)[0]
    probability = pred_proba[0][pred[0]] * 100 if pred[0] != -1 else "Unknown"
    print(predicted_status)

    # Report the probability of successful delivery; for "Returned to Client"
    # predictions the complementary probability is used
    if predicted_status == "Returned to Client":
        probability = 100 - probability

    return {"Probability": round(probability, 2)}
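
# Usage sketch (assumptions: this file is saved as app.py and the assumed
# "/predict" route above is in place; all field values are illustrative):
#
#   uvicorn app:app --host 0.0.0.0 --port 7860
#
#   import requests
#   resp = requests.get("http://localhost:7860/predict", params={
#       "date": "2024-01-01",
#       "customer_name": "John Doe",
#       "customer_address": "12 Main St, Colombo",
#       "customer_phone": "0771234567",
#       "weight": 1.5,
#       "cod": 2500,
#       "pickup_address": "Warehouse A",
#       "client_number": "101",
#       "destination_city": "Kandy",
#   })
#   print(resp.json())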