File size: 1,308 Bytes
4ea700c
afcca73
 
88d9793
 
 
4ea700c
 
 
 
 
 
afcca73
 
 
4ea700c
 
 
afcca73
 
 
 
 
 
 
4ea700c
 
88d9793
 
 
 
 
afcca73
 
 
 
 
4ea700c
 
 
 
 
 
 
88d9793
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import string

import joblib
import nltk
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from pydantic import BaseModel

# Download necessary NLTK resources
# wordnet is needed for lemmatization, stopwords for token filtering in
# improved_preprocess(); quiet=True suppresses download progress output.
nltk.download('wordnet', quiet=True)
nltk.download('stopwords', quiet=True)

# Initialize lemmatizer
# Shared module-level instance used by improved_preprocess().
lemmatizer = WordNetLemmatizer()

# Load the trained model
# Expects the joblib artifact in the working directory; import of this
# module fails (FileNotFoundError) if the file is missing.
model = joblib.load('disaster_classification_model.joblib')

def improved_preprocess(text: str) -> str:
    """Normalize raw text for the classifier.

    Lowercases, strips punctuation, removes English stopwords, and
    lemmatizes the remaining tokens.

    Args:
        text: Raw input string.

    Returns:
        A single space-joined string of cleaned, lemmatized tokens.
    """
    # NOTE: the original referenced string.punctuation without importing
    # `string`, which raised NameError at runtime; `import string` is now
    # at the top of the file.
    text = text.lower()
    # One C-level pass instead of a per-character comprehension.
    text = text.translate(str.maketrans('', '', string.punctuation))
    # Build the stopword set once: the original called
    # stopwords.words('english') for every word, re-creating the list and
    # doing an O(n) scan per token.
    stop_words = set(stopwords.words('english'))
    words = [lemmatizer.lemmatize(w) for w in text.split() if w not in stop_words]
    return ' '.join(words)

# FastAPI application instance; the route handlers below register on it.
app = FastAPI()

class TextRequest(BaseModel):
    """Request body schema for the /predict endpoint."""

    # Raw text to classify.
    text: str

@app.post("/predict")
async def predict(request: TextRequest):
    """Classify the submitted text as disaster-related or not.

    Args:
        request: Body containing a single ``text`` field.

    Returns:
        JSONResponse of ``{"output": "disaster"}`` or ``{"output": "not"}``.
    """
    processed = [improved_preprocess(request.text)]
    prediction = model.predict(processed)
    # Index the first element explicitly: model.predict returns an
    # array-like, and comparing the whole array (`prediction == 1`) only
    # happens to work because it has length 1.
    result = "disaster" if prediction[0] == 1 else "not"
    return JSONResponse(content={"output": result})

@app.get("/")
async def root():
    """Landing endpoint: returns a static welcome payload."""
    welcome = {"message": "Welcome to the Disaster Classification API"}
    return welcome

if __name__ == "__main__":
    # uvicorn is imported lazily so importing this module elsewhere
    # (e.g. by a test runner or an external ASGI server) does not require it.
    import uvicorn
    # Binds on all interfaces at port 7860 — presumably a hosted-container
    # convention; confirm before deploying elsewhere.
    uvicorn.run(app, host="0.0.0.0", port=7860)