Agamrampal committed on
Commit 35d30d1 · 1 Parent(s): 19625c8
Files changed (3)
  1. Dockerfile +18 -0
  2. app.py +136 -0
  3. requirements.txt +6 -0
Dockerfile ADDED
@@ -0,0 +1,18 @@
+ FROM python:3.10-slim
+
+ WORKDIR /code
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Copy requirements and install Python packages
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy application code
+ COPY app.py .
+
+ # Command to run the application
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
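For local testing (Hugging Face Spaces builds and runs the image automatically), the usual flow would be `docker build -t text-tools .` followed by `docker run -p 7860:7860 text-tools`; the tag `text-tools` is just an illustrative name. Port 7860 matches the CMD above and the port Spaces expects a container to serve on.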
app.py ADDED
@@ -0,0 +1,136 @@
+ from fastapi import FastAPI, HTTPException
+ from fastapi.middleware.cors import CORSMiddleware
+ from pydantic import BaseModel
+ from typing import Any, Dict, Optional
+ from transformers import pipeline
+ import logging
+ from functools import lru_cache
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Initialize FastAPI app
+ app = FastAPI()
+
+ # Configure CORS - adjust the origins based on your needs
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],  # Adjust this in production
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Model cache
+ MODEL_CACHE: Dict[str, Any] = {}
+
+ @lru_cache()
+ def get_model(model_type: str):
+     """Get or initialize model with caching."""
+     if model_type not in MODEL_CACHE:
+         logger.info(f"Initializing {model_type} model...")
+         try:
+             if model_type == "summarizer":
+                 MODEL_CACHE[model_type] = pipeline(
+                     "summarization",
+                     model="facebook/bart-large-cnn",
+                     device="cpu"
+                 )
+             elif model_type == "detector":
+                 MODEL_CACHE[model_type] = pipeline(
+                     "text-classification",
+                     model="roberta-base-openai-detector",
+                     device="cpu"
+                 )
+             else:
+                 raise ValueError(f"Unknown model type: {model_type}")
+             logger.info(f"Successfully initialized {model_type} model")
+         except Exception as e:
+             logger.error(f"Error initializing {model_type} model: {str(e)}")
+             raise RuntimeError(f"Failed to initialize {model_type} model")
+     return MODEL_CACHE[model_type]
+
+ class TextRequest(BaseModel):
+     text: str
+     max_length: Optional[int] = 130
+     min_length: Optional[int] = 30
+
+ def validate_text(text: str, min_words: int = 10) -> bool:
+     """Validate text input."""
+     return len(text.split()) >= min_words
+
+ @app.get("/")
+ async def root():
+     """Health check endpoint."""
+     return {"status": "healthy", "message": "API is running"}
+
+ @app.post("/api/summarize")
+ async def summarize_text(request: TextRequest):
+     """Endpoint to summarize text."""
+     try:
+         if not validate_text(request.text):
+             raise HTTPException(
+                 status_code=400,
+                 detail="Text is too short to summarize (minimum 10 words required)"
+             )
+
+         summarizer = get_model("summarizer")
+         summary = summarizer(
+             request.text,
+             max_length=request.max_length,
+             min_length=request.min_length,
+             do_sample=False
+         )
+
+         return {"summary": summary[0]["summary_text"]}
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error(f"Error in summarization: {str(e)}")
+         raise HTTPException(
+             status_code=500,
+             detail="An error occurred during summarization"
+         )
+
+ @app.post("/api/detect")
+ async def detect_ai(request: TextRequest):
+     """Endpoint to detect if text is AI-generated."""
+     try:
+         if not validate_text(request.text, min_words=5):
+             raise HTTPException(
+                 status_code=400,
+                 detail="Text is too short for AI detection (minimum 5 words required)"
+             )
+
+         detector = get_model("detector")
+         result = detector(request.text)[0]
+
+         # The pipeline score is already a softmax probability in [0, 1],
+         # so use it directly rather than squashing it through a second
+         # sigmoid; the "Fake" label marks AI-generated text for this model.
+         prob_ai = result["score"] if result["label"] == "Fake" else 1 - result["score"]
+         score = prob_ai * 100
+
+         confidence = (
+             "high" if abs(score - 50) > 25
+             else "medium" if abs(score - 50) > 10
+             else "low"
+         )
+
+         return {
+             "score": round(score, 2),
+             "likely_ai": score > 70,
+             "confidence": confidence
+         }
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error(f"Error in AI detection: {str(e)}")
+         raise HTTPException(
+             status_code=500,
+             detail="An error occurred during AI detection"
+         )
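A minimal Python client sketch for the two endpoints, assuming the server is reachable at http://localhost:7860 (e.g. after the docker run above) and that the `requests` package is installed on the client side; the sample text and printed outputs are illustrative only:

    import requests

    BASE_URL = "http://localhost:7860"  # assumed local deployment; adjust for a hosted Space

    SAMPLE = (
        "Large language models are increasingly used to draft articles, "
        "emails, and code, which has created demand for tools that can "
        "both condense long passages and flag machine-generated text."
    )

    # /api/summarize requires at least 10 words; max_length/min_length are optional
    resp = requests.post(
        f"{BASE_URL}/api/summarize",
        json={"text": SAMPLE, "max_length": 60, "min_length": 20},
    )
    print(resp.json())  # e.g. {"summary": "..."}

    # /api/detect requires at least 5 words
    resp = requests.post(f"{BASE_URL}/api/detect", json={"text": SAMPLE})
    print(resp.json())  # e.g. {"score": 81.3, "likely_ai": True, "confidence": "high"}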
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ fastapi==0.110.0
+ uvicorn==0.27.1
+ pydantic==2.6.3
+ transformers==4.38.2
+ torch==2.2.1
+ python-multipart==0.0.9
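One packaging note: on Linux the default torch==2.2.1 wheel bundles CUDA libraries, so for this CPU-only service installing torch from the CPU wheel index (`pip install torch==2.2.1 --index-url https://download.pytorch.org/whl/cpu`) would shrink the image considerably.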