# FastAPI service that classifies short texts as disaster-related or not.

import string  # needed for string.punctuation in improved_preprocess()

import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import joblib

# Download the NLTK data needed at runtime (quiet to keep startup logs clean).
nltk.download('wordnet', quiet=True)
nltk.download('stopwords', quiet=True)

lemmatizer = WordNetLemmatizer()

# Build the stopword set once at import time; calling stopwords.words('english')
# for every token inside the preprocessing loop would re-read the list each time.
stop_words = set(stopwords.words('english'))

# Load the trained classifier. It is assumed to be a full scikit-learn
# pipeline (vectorizer + estimator), since predict() below receives raw text.
model = joblib.load('disaster_classification_model.joblib')


def improved_preprocess(text):
    """Lowercase, strip punctuation, drop stopwords, and lemmatize."""
    text = text.lower()
    text = ''.join(char for char in text if char not in string.punctuation)
    words = text.split()
    words = [lemmatizer.lemmatize(word) for word in words if word not in stop_words]
    return ' '.join(words)
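
# Illustration: with NLTK's English stopwords loaded above,
#   improved_preprocess("Buildings are on fire!")
# should return "building fire" ("are"/"on" dropped, "buildings" lemmatized).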


app = FastAPI()


# Request schema: the /predict endpoint expects a JSON body like {"text": "..."}.
class TextRequest(BaseModel):
    text: str


@app.post("/predict")
async def predict(request: TextRequest):
    text = request.text
    # Apply the same preprocessing used at training time; the model is
    # expected to take an iterable of documents, hence the one-element list.
    new_text_processed = [improved_preprocess(text)]
    prediction = model.predict(new_text_processed)
    # predict() returns an array, so index the first (only) element rather
    # than comparing the whole array to 1.
    result = "disaster" if prediction[0] == 1 else "not"
    return JSONResponse(content={"output": result})
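
# Example request (assuming the server is running on localhost:7860):
#   curl -X POST http://localhost:7860/predict \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Forest fire near the highway"}'
# Expected response shape: {"output": "disaster"} or {"output": "not"}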


@app.get("/")
async def root():
    return {"message": "Welcome to the Disaster Classification API"}


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
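
# Alternatively, if this file is saved as main.py, the server can be started
# from the shell with the uvicorn CLI:
#   uvicorn main:app --host 0.0.0.0 --port 7860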
|