# Sentiment Analysis API — FastAPI service (Hugging Face Spaces deployment).
# NOTE(review): the original paste carried Spaces UI chrome ("Spaces: / Running")
# and " | |" line residue; both are extraction artifacts, not code.
import os

# Set the model cache directory BEFORE importing transformers: the hub cache
# location is typically resolved at import time, so setting it after the
# import (as the original code did) may have no effect — TODO confirm against
# the installed transformers version. Custom path avoids permission issues
# in the container.
os.environ["TRANSFORMERS_CACHE"] = "/app/cache"

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import pipeline
import langdetect

app = FastAPI()

# Load both sentiment models once at module import so they are shared across
# requests: a dedicated English model (presumably more accurate for English)
# and a multilingual fallback for everything else.
multilingual_model = pipeline("sentiment-analysis", model="tabularisai/multilingual-sentiment-analysis")
english_model = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")
class SentimentRequest(BaseModel):
    """Request body for sentiment analysis: the raw text to classify."""

    text: str
class SentimentResponse(BaseModel):
    """Response body: echoes the input text plus the classification result."""

    original_text: str       # the text that was analyzed, unchanged
    language_detected: str   # ISO 639-1 code from langdetect, or "unknown"
    sentiment: str           # lower-cased model label
    confidence_score: float  # model score for the predicted label
def detect_language(text: str) -> str:
    """Return the ISO 639-1 language code for *text*, or "unknown".

    langdetect raises LangDetectException for input it cannot classify
    (e.g. empty or non-linguistic text); that case maps to "unknown".
    The original bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to the library's own exception type.
    """
    try:
        return langdetect.detect(text)
    except langdetect.LangDetectException:
        return "unknown"
# NOTE(review): no route decorator appears anywhere in the source, so this
# handler was never registered with the app and the endpoint was unreachable.
# "/" is the conventional root health-check path — confirm against clients.
@app.get("/")
def home():
    """Health-check endpoint: confirms the service is up."""
    return {"message": "Sentiment Analysis API is running!"}
# NOTE(review): as with ``home``, the original had no route decorator, so the
# endpoint was never exposed. "/analyze" is an assumed path — verify against
# API consumers before deploying.
@app.post("/analyze", response_model=SentimentResponse)
def analyze_sentiment(request: SentimentRequest):
    """Classify the sentiment of the submitted text.

    Picks the English-specific model when langdetect reports "en", otherwise
    falls back to the multilingual model (also used when detection fails and
    the language comes back "unknown").

    Raises:
        HTTPException: 400 when the request carries an empty text field.
    """
    if not request.text:
        raise HTTPException(status_code=400, detail="No text provided")

    text = request.text
    language = detect_language(text)

    # Route to the model best suited for the detected language.
    if language == "en":
        result = english_model(text)
    else:
        result = multilingual_model(text)

    # pipeline() returns a list with one {"label": ..., "score": ...} dict
    # per input; we pass a single string, so take the first entry.
    sentiment = result[0]["label"].lower()
    score = result[0]["score"]

    return SentimentResponse(
        original_text=text,
        language_detected=language,
        sentiment=sentiment,
        confidence_score=score,
    )