File size: 1,471 Bytes
95fc527
25b797a
20712aa
 
25b797a
20712aa
25b797a
 
 
 
 
95fc527
20712aa
 
b147674
 
 
20712aa
 
 
 
 
b147674
 
 
 
 
25b797a
b147674
25b797a
b147674
 
20712aa
 
 
 
 
b147674
 
 
 
25b797a
b147674
 
 
 
 
25b797a
b147674
 
 
25b797a
 
b147674
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import os

# Set the Hugging Face cache directory BEFORE importing transformers.
# HF_HOME is read once at import time by huggingface_hub, so assigning it
# after `from transformers import pipeline` (as the code previously did)
# had no effect on where models were cached.
os.environ["HF_HOME"] = "/app/cache"

from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline
from langdetect import detect, DetectorFactory

# Seed langdetect so language detection is deterministic across calls.
DetectorFactory.seed = 0

app = FastAPI()

# Load both sentiment pipelines once at module import (startup), not per request.
multilingual_model = pipeline("sentiment-analysis", model="tabularisai/multilingual-sentiment-analysis")
english_model = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")

class SentimentRequest(BaseModel):
    """Request body for POST /analyze/: the text to score for sentiment."""
    # Raw input text; may be in any language (language is auto-detected).
    text: str

class SentimentResponse(BaseModel):
    """Response body for POST /analyze/."""
    # Echo of the submitted text.
    original_text: str
    # Detected language code (e.g. "en"), or "unknown" if detection failed.
    language_detected: str
    # Lowercased model label, e.g. "positive" / "negative".
    sentiment: str
    # Model confidence for the predicted label, in [0, 1].
    confidence_score: float

def detect_language(text: str) -> str:
    """Return the language code langdetect infers for *text*.

    Returns "unknown" when detection fails (langdetect raises on empty or
    non-linguistic input). Narrowed from a bare ``except:`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        return detect(text)
    except Exception:
        return "unknown"

@app.get("/")
def home():
    """Health-check endpoint confirming the API is up."""
    status_payload = {"message": "Sentiment Analysis API is running!"}
    return status_payload

@app.post("/analyze/", response_model=SentimentResponse)
def analyze_sentiment(request: SentimentRequest):
    """Detect the language of the submitted text and score its sentiment.

    English text is routed to the dedicated English model; any other
    detected language (including "unknown") goes to the multilingual model.
    """
    input_text = request.text
    lang = detect_language(input_text)

    # Route to the per-language pipeline; each returns
    # [{"label": ..., "score": ...}] for a single input.
    model = english_model if lang == "en" else multilingual_model
    prediction = model(input_text)[0]

    return SentimentResponse(
        original_text=input_text,
        language_detected=lang,
        sentiment=prediction["label"].lower(),
        confidence_score=prediction["score"],
    )