import os
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
from langdetect import detect, DetectorFactory
# Ensure consistent language detection results
DetectorFactory.seed = 0
# Set Hugging Face cache directory to a writable location
os.environ["HF_HOME"] = "/tmp/huggingface"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface"
os.makedirs(os.environ["HF_HOME"], exist_ok=True)
app = FastAPI()
# Load the original tokenizer from the base model
original_tokenizer = AutoTokenizer.from_pretrained("tabularisai/multilingual-sentiment-analysis")
model_name = "johndoee/sentiment"  # or "./sentiment"
# Load the fine-tuned model (johndoee/sentiment) and pass the tokenizer explicitly
multilingual_model = pipeline(
    "sentiment-analysis",
    model=AutoModelForSequenceClassification.from_pretrained(model_name),
    tokenizer=original_tokenizer,
)
# Load the English sentiment model
english_model = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")
class SentimentRequest(BaseModel):
    text: str

class SentimentResponse(BaseModel):
    original_text: str
    language_detected: str
    sentiment: str
    confidence_score: float
def detect_language(text):
    try:
        return detect(text)
    except Exception:
        return "unknown"
@app.get("/")
def home():
    return {"message": "Sentiment Analysis API is running!"}
@app.post("/analyze/", response_model=SentimentResponse)
def analyze_sentiment(request: SentimentRequest):
    text = request.text
    language = detect_language(text)
    # Choose the appropriate model based on the detected language
    if language == "en":
        result = english_model(text)
    else:
        result = multilingual_model(text)
    return SentimentResponse(
        original_text=text,
        language_detected=language,
        sentiment=result[0]["label"].lower(),
        confidence_score=result[0]["score"],
    )
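
# --- Optional local run (a sketch, not part of the original Space setup) ---
# Assumes uvicorn is installed; port 7860 is the Hugging Face Spaces default.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request against the /analyze/ endpoint (hypothetical input text):
#   curl -X POST http://localhost:7860/analyze/ \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Este producto es excelente"}'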