Commit 280c0fd
Parent(s): 8083700
deepfake services
Files changed:
- README.md +12 -10
- app/api/forgery_routes.py +151 -0
- app/api/routes.py +1 -9
- app/main.py +2 -0
- app/services/audio_deepfake_service.py +55 -0
- app/services/face_manipulation_service.py +66 -0
- app/services/gan_detection_service.py +30 -0
- app/services/image_manipulation_service.py +57 -0
- app/services/video_service.py +2 -2
- app/utils/file_utils.py +11 -0
- app/utils/forgery_image_utils.py +47 -0
- app/utils/forgery_video_utils.py +138 -0
- models/DeepFakeVoiceDetector_V1.h5 +3 -0
- models/deepfake_image_params.json +1 -0
- models/deepfake_label_mappings.json +1 -0
- models/deepfake_model.pth +3 -0
- models/deepfake_model/config.json +32 -0
- models/deepfake_model/model.safetensors +3 -0
- models/deepfake_model_config.json +1 -0
- models/gan_model.h5 +3 -0
- models/image_manipulation_detection_model.h5 +3 -0
- models/img_man_preprocessing_params.json +1 -0
- models/img_manipulation_class_names.json +1 -0
- requirements.txt +13 -9
README.md
CHANGED
@@ -1,10 +1,12 @@
+Use Python 3.10 for this project, as the audio extraction library only works with this version.
+
+Create and activate a virtual environment: .\venv\Scripts\activate
+
+Install the required libraries using: pip install -r requirements.txt
+
+Run the app using: uvicorn app.main:app --reload
+
+The path error that appears when re-uploading the same image or audio is due to a missing ffmpeg installation on your device. To fix it:
+- open cmd and type: winget install ffmpeg
+- after the installation has completed successfully, add the bin path to the System environment variables
+- if you fail to find the path, just type "where ffmpeg" in cmd and you will get the path
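A quick way to confirm the ffmpeg requirement from Python before starting the app (a minimal sketch using only the standard library; not part of the commit):

import shutil
import subprocess

# shutil.which returns the full path to ffmpeg, or None if it is not on PATH
ffmpeg_path = shutil.which("ffmpeg")
if ffmpeg_path is None:
    raise RuntimeError("ffmpeg not found: run 'winget install ffmpeg' and add its bin directory to PATH")

# Print the version banner as a sanity check
subprocess.run([ffmpeg_path, "-version"], check=True)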
app/api/forgery_routes.py
ADDED
@@ -0,0 +1,151 @@
from fastapi import APIRouter, HTTPException
from app.services.image_manipulation_service import ImageManipulationService
from app.services.face_manipulation_service import FaceManipulationService
from app.services.audio_deepfake_service import AudioDeepfakeService
from app.services.gan_detection_service import GANDetectionService
from app.utils.file_utils import download_file, remove_temp_file, get_file_content
from app.utils.forgery_image_utils import detect_face
from app.utils.forgery_video_utils import extract_audio, extract_frames, compress_and_process_video
import os
import logging
import traceback
from pydantic import BaseModel

router = APIRouter()

class DetectForgeryRequest(BaseModel):
    file_url: str

# Initialize services
image_manipulation_service = ImageManipulationService()
face_manipulation_service = FaceManipulationService()
audio_deepfake_service = AudioDeepfakeService()
gan_detection_service = GANDetectionService()

def parse_confidence(value):
    if isinstance(value, str):
        return float(value.rstrip('%')) / 100
    return float(value)

def get_file_extension(url: str) -> str:
    _, ext = os.path.splitext(url)
    return ext.lstrip('.').lower()

@router.post("/detect_forgery")
async def detect_forgery(request: DetectForgeryRequest):
    file_url = request.file_url
    logging.info(f"Received forgery detection request for file: {file_url}")

    firebase_filename = None

    try:
        file_extension = get_file_extension(file_url)
        logging.info(f"Detected file extension: {file_extension}")

        firebase_filename = await download_file(file_url)
        logging.info(f"File downloaded and saved as: {firebase_filename}")

        if file_extension in ['jpg', 'jpeg', 'png', 'gif', 'bmp']:
            logging.info(f"Processing image file: {firebase_filename}")
            return await process_image(firebase_filename)
        elif file_extension in ['mp4', 'avi', 'mov', 'flv', 'wmv']:
            logging.info(f"Processing video file: {firebase_filename}")
            return await process_video(firebase_filename)
        else:
            logging.error(f"Unsupported file type: {file_extension} (URL: {file_url})")
            raise HTTPException(status_code=400, detail=f"Unsupported file type: {file_extension}")

    except Exception as e:
        logging.error(f"Error processing file: {e}")
        logging.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail="An error occurred while processing the file.")

    finally:
        if firebase_filename:
            logging.info(f"Removing temporary file: {firebase_filename}")
            await remove_temp_file(firebase_filename)

async def process_image(firebase_filename: str):
    logging.info(f"Starting image processing for: {firebase_filename}")
    image_content = get_file_content(firebase_filename)
    has_face = detect_face(image_content)
    logging.info(f"Face detection result for {firebase_filename}: {'Face detected' if has_face else 'No face detected'}")
    results = {
        "image_manipulation": image_manipulation_service.detect_manipulation(firebase_filename),
        "gan_detection": gan_detection_service.detect_gan(firebase_filename)
    }
    logging.info(f"Image manipulation detection result: {results['image_manipulation']}")
    logging.info(f"GAN detection result: {results['gan_detection']}")
    if has_face:
        results["face_manipulation"] = face_manipulation_service.detect_manipulation(firebase_filename)
        logging.info(f"Face manipulation detection result: {results['face_manipulation']}")
    else:
        results["face_manipulation"] = None
        logging.info("Face manipulation detection skipped (no face detected)")
    logging.info(f"Image processing completed for: {firebase_filename}")
    return results

async def process_video(firebase_filename: str):
    logging.info(f"Starting video processing for: {firebase_filename}")
    try:
        compressed_video_filename = await compress_and_process_video(firebase_filename)
        logging.info(f"Video compressed: {compressed_video_filename}")

        audio_filename = await extract_audio(compressed_video_filename)
        logging.info(f"Audio extracted: {audio_filename}")

        frames = await extract_frames(compressed_video_filename)
        logging.info(f"Frames extracted: {len(frames)} frames")

        results = {
            "audio_deepfake": None,
            "image_manipulation": [],
            "face_manipulation": [],
            "gan_detection": []
        }

        if audio_filename:
            results["audio_deepfake"] = audio_deepfake_service.detect_deepfake(audio_filename)
            logging.info(f"Audio deepfake detection result: {results['audio_deepfake']}")
            await remove_temp_file(audio_filename)
            logging.info(f"Temporary audio file removed: {audio_filename}")

        face_frames = []
        for i, frame in enumerate(frames):
            frame_filename = frame  # Assuming extract_frames now returns a list of filenames
            logging.info(f"Processing frame: {frame_filename}")
            frame_content = get_file_content(frame_filename)
            has_face = detect_face(frame_content)
            logging.info(f"Face detection result for {frame_filename}: {'Face detected' if has_face else 'No face detected'}")

            results["image_manipulation"].append(image_manipulation_service.detect_manipulation(frame_filename))
            results["gan_detection"].append(gan_detection_service.detect_gan(frame_filename))

            if has_face:
                face_frames.append(frame_filename)
                results["face_manipulation"].append(face_manipulation_service.detect_manipulation(frame_filename))
            else:
                results["face_manipulation"].append(None)
                logging.info(f"Face manipulation detection skipped for {frame_filename} (no face detected)")

            await remove_temp_file(frame_filename)
            logging.info(f"Temporary frame file removed: {frame_filename}")

        # Aggregate results
        for key in results:
            if key != "audio_deepfake" and results[key]:
                valid_results = [r for r in results[key] if r is not None]
                results[key] = {
                    "collective_detection": any(r.get("is_manipulated", False) if isinstance(r, dict) else r for r in valid_results),
                    "collective_confidence": sum(parse_confidence(r.get("confidence", 0)) if isinstance(r, dict) else 0 for r in valid_results) / len(valid_results) if valid_results else 0
                }
        logging.info(f"Aggregated results: {results}")

        await remove_temp_file(compressed_video_filename)
        logging.info(f"Temporary compressed video file removed: {compressed_video_filename}")
        logging.info(f"Video processing completed for: {firebase_filename}")
        return results
    except Exception as e:
        logging.error(f"Error processing video: {e}")
        logging.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail=f"An error occurred while processing the video: {str(e)}")
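A hedged usage sketch for the new endpoint (not part of the commit; the base URL and sample file URL are placeholder assumptions, and the request body follows the DetectForgeryRequest model above):

import requests

# Assumes the dev server was started with: uvicorn app.main:app --reload
BASE_URL = "http://127.0.0.1:8000"

# Placeholder URL; any reachable image or video URL works
payload = {"file_url": "https://example.com/sample.jpg"}

resp = requests.post(f"{BASE_URL}/detect_forgery", json=payload, timeout=120)
resp.raise_for_status()

# For an image: image_manipulation, gan_detection and face_manipulation keys
print(resp.json())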
app/api/routes.py
CHANGED
@@ -1,4 +1,4 @@
-from fastapi import APIRouter, HTTPException, Response
+from fastapi import APIRouter, HTTPException
 from pydantic import BaseModel
 from app.services import video_service, image_service, antispoof_service
 from app.services.antispoof_service import antispoof_service

@@ -14,14 +14,6 @@ class CompareRequest(BaseModel):
     url1: str
     url2: str

-@router.get("/health")
-@router.head("/health")
-async def health_check():
-    """
-    Health check endpoint that responds to both GET and HEAD requests.
-    """
-    return Response(content="OK", media_type="text/plain")
-
 @router.post("/fingerprint")
 async def create_fingerprint(request: ContentRequest):
     try:
app/main.py
CHANGED
@@ -4,6 +4,7 @@ from fastapi.responses import JSONResponse
 from app.api.routes import router
 from app.core.logging_config import configure_logging
 from app.core.firebase_config import initialize_firebase
+from app.api.forgery_routes import router as forgery_router
 import logging

 app = FastAPI()

@@ -14,6 +15,7 @@ async def startup_event():
     initialize_firebase()

 app.include_router(router)
+app.include_router(forgery_router)

 @app.exception_handler(Exception)
 async def global_exception_handler(request: Request, exc: Exception):
app/services/audio_deepfake_service.py
ADDED
@@ -0,0 +1,55 @@
import numpy as np
import librosa as lb
from tensorflow.keras.models import load_model
from app.utils.file_utils import get_file_content
import io
import logging

class AudioDeepfakeService:
    def __init__(self):
        logging.info("Initializing AudioDeepfakeService")
        self.model = load_model("models/DeepFakeVoiceDetector_V1.h5")
        logging.info("AudioDeepfakeService model loaded successfully")

    def create_mel_spectrogram_sample(self, audio_content, sr=22050, sample_time=1.5, n_mels=64):
        logging.info("Creating mel spectrogram sample")
        y, sr = lb.load(io.BytesIO(audio_content), sr=sr)
        logging.info(f"Audio loaded with sample rate: {sr}")

        sample_length = int(sr * sample_time)
        if len(y) < sample_length:
            logging.warning("Audio file is too short")
            raise ValueError("Audio file is too short")

        start = 0
        end = start + sample_length
        m = lb.feature.melspectrogram(y=y[start:end], sr=sr, n_mels=n_mels)
        m = np.abs(m)
        m /= 80
        logging.info("Mel spectrogram sample created successfully")
        return np.expand_dims(m, axis=-1)

    def detect_deepfake(self, firebase_filename):
        logging.info(f"Detecting deepfake for audio file: {firebase_filename}")
        try:
            audio_content = get_file_content(firebase_filename)
            logging.info("Audio content retrieved successfully")
            sample = self.create_mel_spectrogram_sample(audio_content)
            logging.info("Mel spectrogram sample created")
            prediction = self.model.predict(np.expand_dims(sample, axis=0))[0][0]
            logging.info(f"Raw prediction: {prediction}")

            result = "Fake" if prediction > 0.5 else "Real"
            confidence = prediction if prediction > 0.5 else 1 - prediction

            result_dict = {
                "prediction": result,
                "confidence": float(confidence),
                "raw_prediction": float(prediction)
            }
            logging.info(f"Deepfake detection result: {result_dict}")
            return result_dict

        except Exception as e:
            logging.error(f"Error processing audio: {str(e)}")
            raise ValueError(f"Error processing audio: {str(e)}")
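For reference, the same mel-spectrogram preparation can be reproduced standalone (a sketch, not part of the commit; "sample.wav" is a placeholder path and the parameters mirror create_mel_spectrogram_sample above):

import numpy as np
import librosa as lb

sr, sample_time, n_mels = 22050, 1.5, 64

y, sr = lb.load("sample.wav", sr=sr)
sample_length = int(sr * sample_time)  # 33075 samples for 1.5 s at 22050 Hz

m = lb.feature.melspectrogram(y=y[:sample_length], sr=sr, n_mels=n_mels)
m = np.abs(m) / 80  # the same ad-hoc scaling the service applies

# Shape fed to the Keras model: (1, n_mels, time_frames, 1)
batch = np.expand_dims(np.expand_dims(m, axis=-1), axis=0)
print(batch.shape)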
app/services/face_manipulation_service.py
ADDED
@@ -0,0 +1,66 @@
import torch
import json
from pathlib import Path
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor
from app.utils.file_utils import get_file_content
import io
import logging

class FaceManipulationService:
    def __init__(self):
        logging.info("Initializing FaceManipulationService")
        self.device = torch.device('cpu')
        logging.info(f"Using device: {self.device}")

        model_config = json.loads(Path("models/deepfake_model_config.json").read_text())
        self.image_params = json.loads(Path("models/deepfake_image_params.json").read_text())
        label_mappings = json.loads(Path("models/deepfake_label_mappings.json").read_text())
        logging.info("Configuration files loaded successfully")

        self.model = ViTForImageClassification.from_pretrained("models/deepfake_model", num_labels=model_config["num_labels"])
        self.model.load_state_dict(torch.load("models/deepfake_model.pth", map_location=self.device))
        self.model.to(self.device)
        self.model.eval()
        logging.info("Face manipulation detection model loaded and set to evaluation mode")

        self.id2label = {int(k): v for k, v in label_mappings["id2label"].items()}
        logging.info(f"Label mappings: {self.id2label}")

        self.processor = ViTImageProcessor(
            size=self.image_params["size"],
            image_mean=self.image_params["mean"],
            image_std=self.image_params["std"]
        )
        logging.info("Image processor initialized")

    def predict_image(self, firebase_filename):
        logging.info(f"Predicting image manipulation for: {firebase_filename}")
        image_content = get_file_content(firebase_filename)
        logging.info("Image content retrieved successfully")
        image = Image.open(io.BytesIO(image_content))
        logging.info(f"Image opened. Size: {image.size}, Mode: {image.mode}")
        inputs = self.processor(images=image, return_tensors="pt").to(self.device)
        logging.info("Image processed and inputs prepared")
        with torch.no_grad():
            outputs = self.model(**inputs)
        logging.info("Model inference completed")
        probs = outputs.logits.softmax(1)
        pred_class = probs.argmax().item()
        confidence = probs[0][pred_class].item()
        predicted_label = self.id2label[pred_class]
        logging.info(f"Prediction: Class {pred_class}, Label: {predicted_label}, Confidence: {confidence}")
        return predicted_label, confidence

    def detect_manipulation(self, firebase_filename):
        logging.info(f"Detecting face manipulation for: {firebase_filename}")
        label, confidence = self.predict_image(firebase_filename)
        is_deepfake = label == "Fake"

        result = {
            "is_deepfake": is_deepfake,
            "confidence": float(confidence),
            "predicted_label": label
        }
        logging.info(f"Face manipulation detection result: {result}")
        return result
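The prediction path above is the standard ViT classification recipe. To see the softmax step in isolation (a sketch with hypothetical logits, not part of the commit; with the id2label mapping above, class 0 maps to "Real"):

import torch

logits = torch.tensor([[2.0, 0.5]])       # hypothetical model output for one image
probs = logits.softmax(1)                 # tensor([[0.8176, 0.1824]])
pred_class = probs.argmax().item()        # 0, i.e. "Real"
confidence = probs[0][pred_class].item()  # ~0.82
print(pred_class, confidence)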
app/services/gan_detection_service.py
ADDED
@@ -0,0 +1,30 @@
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from app.utils.file_utils import get_file_content
import io

class GANDetectionService:
    def __init__(self):
        self.model = load_model('models/gan_model.h5')

    def load_and_preprocess_image(self, image_content, target_size=(256, 256)):
        img = image.load_img(io.BytesIO(image_content), target_size=target_size)
        img_array = image.img_to_array(img)
        img_array = img_array / 255.0
        img_array = np.expand_dims(img_array, axis=0)
        return img_array

    def detect_gan(self, firebase_filename):
        image_content = get_file_content(firebase_filename)
        img_array = self.load_and_preprocess_image(image_content)
        prediction = self.model.predict(img_array)

        real_confidence = float(prediction[0][0] * 100)
        fake_confidence = float((1 - prediction[0][0]) * 100)

        return {
            "is_gan": fake_confidence > real_confidence,
            "real_confidence": real_confidence,
            "fake_confidence": fake_confidence
        }
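To make the confidence split concrete: prediction[0][0] is treated as the probability that the image is real, so a score of 0.2 splits as follows (a worked example, not part of the commit):

score = 0.2  # hypothetical model output

real_confidence = score * 100               # 20.0
fake_confidence = (1 - score) * 100         # 80.0
is_gan = fake_confidence > real_confidence  # True: flagged as GAN-generated

print(is_gan, real_confidence, fake_confidence)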
app/services/image_manipulation_service.py
ADDED
@@ -0,0 +1,57 @@
import tensorflow as tf
import numpy as np
from PIL import Image, ImageChops, ImageEnhance
import json
from app.utils.file_utils import get_file_content
import io

class ImageManipulationService:
    def __init__(self):
        self.model = tf.keras.models.load_model('models/image_manipulation_detection_model.h5')

        with open('models/img_manipulation_class_names.json', 'r') as f:
            self.class_names = json.load(f)

        with open('models/img_man_preprocessing_params.json', 'r') as f:
            self.preprocessing_params = json.load(f)

    def convert_to_ela_image(self, image, quality):
        temp_buffer = io.BytesIO()
        image.save(temp_buffer, 'JPEG', quality=quality)
        temp_buffer.seek(0)
        temp_image = Image.open(temp_buffer)

        ela_image = ImageChops.difference(image, temp_image)

        extrema = ela_image.getextrema()
        max_diff = max([ex[1] for ex in extrema])
        if max_diff == 0:
            max_diff = 1
        scale = 255.0 / max_diff

        ela_image = ImageEnhance.Brightness(ela_image).enhance(scale)

        return ela_image

    def prepare_image(self, image_content):
        image = Image.open(io.BytesIO(image_content))
        ela_image = self.convert_to_ela_image(image, quality=90)
        ela_image = ela_image.resize((128, 128))
        return np.array(ela_image).flatten() / 255.0

    def detect_manipulation(self, firebase_filename):
        image_content = get_file_content(firebase_filename)
        prepared_image = self.prepare_image(image_content)
        prepared_image = prepared_image.reshape(-1, 128, 128, 3)

        prediction = self.model.predict(prepared_image)
        predicted_class = int(np.argmax(prediction, axis=1)[0])
        confidence = float(np.max(prediction) * 100)

        result = {
            "class": self.class_names[predicted_class],
            "confidence": f"{confidence:.2f}%",
            "is_manipulated": bool(predicted_class == 0)
        }

        return result
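The detector is built on Error Level Analysis (ELA): re-saving a JPEG at a known quality and differencing it against the original highlights regions whose compression history differs, which is typical of spliced content. A standalone sketch of the same transform (not part of the commit; "photo.jpg" is a placeholder path):

import io
from PIL import Image, ImageChops, ImageEnhance

original = Image.open("photo.jpg").convert("RGB")

# Re-save at quality 90 and diff against the original
buf = io.BytesIO()
original.save(buf, "JPEG", quality=90)
buf.seek(0)
ela = ImageChops.difference(original, Image.open(buf))

# Stretch brightness so the residual differences become visible
max_diff = max(ex[1] for ex in ela.getextrema()) or 1
ela = ImageEnhance.Brightness(ela).enhance(255.0 / max_diff)
ela.save("photo_ela.png")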
app/services/video_service.py
CHANGED
@@ -115,8 +115,8 @@ async def compare_videos(video_url1, video_url2):
     fp1 = await fingerprint_video(video_url1)
     fp2 = await fingerprint_video(video_url2)

-    video_similarity = 1 - (imagehash.hex_to_hash(fp1['
-    audio_similarity = 1 - (imagehash.hex_to_hash(fp1['
+    video_similarity = 1 - (imagehash.hex_to_hash(fp1['video_hash']) - imagehash.hex_to_hash(fp2['video_hash'])) / 64.0
+    audio_similarity = 1 - (imagehash.hex_to_hash(fp1['audio_hash']) - imagehash.hex_to_hash(fp2['audio_hash'])) / 64.0

     overall_similarity = (video_similarity + audio_similarity) / 2
     is_same_content = overall_similarity > 0.9  # You can adjust this threshold
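The restored lines normalize the Hamming distance between two 64-bit perceptual hashes. For example, hashes differing in 6 of 64 bits give a similarity of 1 - 6/64 = 0.90625, just above the 0.9 threshold (illustrative hash values, not part of the commit):

import imagehash

# Two hex-encoded 64-bit hashes differing only in the last byte (0xff vs 0xc0 = 6 bits)
h1 = imagehash.hex_to_hash("ffd7918181c9ffff")
h2 = imagehash.hex_to_hash("ffd7918181c9ffc0")

distance = h1 - h2                # Hamming distance: 6
similarity = 1 - distance / 64.0  # 0.90625
print(distance, similarity)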
app/utils/file_utils.py
CHANGED
@@ -52,4 +52,15 @@ def get_file_stream(filename: str) -> io.BytesIO:
         return io.BytesIO(content)
     except Exception as e:
         logging.error(f"Error getting file stream from Firebase: {str(e)}")
         raise
+
+
+async def upload_file_to_firebase(file_content: bytes, filename: str) -> str:
+    try:
+        blob = firebase_bucket.blob(filename)
+        blob.upload_from_string(file_content, content_type='application/octet-stream')
+        logging.info(f"File uploaded to Firebase: {filename}")
+        return filename
+    except Exception as e:
+        logging.error(f"Error uploading file to Firebase: {str(e)}")
+        raise
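A short usage sketch for the new helper (not part of the commit; assumes firebase_bucket was initialized by initialize_firebase at startup, and the object name is a placeholder):

import asyncio
from app.utils.file_utils import upload_file_to_firebase

async def main():
    # Upload arbitrary bytes under a chosen object name
    stored_name = await upload_file_to_firebase(b"hello", "debug/hello.bin")
    print(stored_name)  # echoes the object name on success

asyncio.run(main())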
app/utils/forgery_image_utils.py
ADDED
@@ -0,0 +1,47 @@
import cv2
import numpy as np
from typing import Union
from PIL import Image
from io import BytesIO
import imghdr
from fastapi import HTTPException
from app.utils.file_utils import get_file_content

SUPPORTED_IMAGE_FORMATS = ['.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff', '.webp']

def verify_image_format(firebase_filename: str):
    content = get_file_content(firebase_filename)
    file_ext = '.' + (imghdr.what(BytesIO(content)) or '')
    if file_ext not in SUPPORTED_IMAGE_FORMATS:
        raise HTTPException(status_code=400, detail=f"Unsupported image format. Supported formats are: {', '.join(SUPPORTED_IMAGE_FORMATS)}")

def preprocess_image(image: Union[str, np.ndarray, Image.Image], hash_size: int = 32) -> np.ndarray:
    if isinstance(image, str):
        content = get_file_content(image)
        img = Image.open(BytesIO(content))
        img = strip_metadata(img)
        image = np.array(img)
    elif isinstance(image, Image.Image):
        image = strip_metadata(image)
        image = np.array(image)

    if len(image.shape) == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    image = cv2.resize(image, (hash_size, hash_size), interpolation=cv2.INTER_AREA)
    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX)
    return image

def strip_metadata(img: Image.Image) -> Image.Image:
    data = list(img.getdata())
    img_without_exif = Image.new(img.mode, img.size)
    img_without_exif.putdata(data)
    return img_without_exif

def detect_face(image_content: bytes) -> bool:
    nparr = np.frombuffer(image_content, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    return len(faces) > 0
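A quick way to exercise detect_face on a local image (a sketch, not part of the commit; "portrait.jpg" is a placeholder path, and the Haar cascade ships with opencv-python):

from app.utils.forgery_image_utils import detect_face

with open("portrait.jpg", "rb") as f:
    image_bytes = f.read()

print(detect_face(image_bytes))  # True if at least one frontal face is found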
app/utils/forgery_video_utils.py
ADDED
@@ -0,0 +1,138 @@
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from PIL import Image
import io
import json
from app.utils.file_utils import get_file_content, upload_file_to_firebase, remove_temp_file
import subprocess
import tempfile
import os
import logging

async def extract_audio(firebase_filename):
    try:
        video_content = get_file_content(firebase_filename)
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_video:
            temp_video.write(video_content)
            temp_video_path = temp_video.name

        with VideoFileClip(temp_video_path) as video:
            if video.audio is not None:
                audio_filename = f"{firebase_filename}_audio.wav"
                with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_audio:
                    video.audio.write_audiofile(temp_audio.name, logger=None)
                    temp_audio_path = temp_audio.name

                with open(temp_audio_path, 'rb') as audio_file:
                    audio_content = audio_file.read()

                await upload_file_to_firebase(audio_content, audio_filename)
                os.remove(temp_audio_path)
                os.remove(temp_video_path)
                return audio_filename

        os.remove(temp_video_path)
    except Exception as e:
        logging.error(f"Error extracting audio: {str(e)}")
        return None

async def extract_frames(firebase_filename, max_frames=10):
    frames = []
    video_content = get_file_content(firebase_filename)

    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_video:
        temp_video.write(video_content)
        temp_video_path = temp_video.name

    try:
        with VideoFileClip(temp_video_path) as video:
            duration = video.duration
            frame_interval = duration / max_frames

            for i in range(max_frames):
                t = i * frame_interval
                frame = video.get_frame(t)
                # moviepy already returns RGB frames, so no colour conversion is needed before PIL
                frame_image = Image.fromarray(frame)

                frame_filename = f"{firebase_filename}_frame_{i}.jpg"
                frame_byte_arr = io.BytesIO()
                frame_image.save(frame_byte_arr, format='JPEG')
                frame_byte_arr = frame_byte_arr.getvalue()

                await upload_file_to_firebase(frame_byte_arr, frame_filename)
                frames.append(frame_filename)

    finally:
        os.remove(temp_video_path)

    return frames

async def compress_and_process_video(firebase_filename, target_size_mb=50, max_duration=60):
    video_content = get_file_content(firebase_filename)

    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_video:
        temp_video.write(video_content)
        input_path = temp_video.name

    output_filename = f"{firebase_filename}_compressed.mp4"
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_output:
        output_path = temp_output.name

    try:
        probe_cmd = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
                     '-show_entries', 'stream=width,height,duration,bit_rate',
                     '-of', 'json', input_path]

        result = subprocess.run(probe_cmd, capture_output=True, text=True)
        # ffprobe emits JSON, so parse it with json.loads rather than eval
        video_info = json.loads(result.stdout)['streams'][0]

        width = video_info.get('width', 1280)
        height = video_info.get('height', 720)
        duration = float(video_info.get('duration', '0'))
        original_bitrate = int(video_info.get('bit_rate', '0'))

        if duration <= 0:
            logging.warning(f"Invalid video duration ({duration}). Using 1 second as default.")
            duration = 1

        duration = min(duration, max_duration)

        target_size_bits = target_size_mb * 8 * 1024 * 1024
        target_bitrate = int(target_size_bits / duration)

        if width > height:
            new_width = min(width, 1280)
            new_height = int((new_width / width) * height)
        else:
            new_height = min(height, 720)
            new_width = int((new_height / height) * width)

        new_width = new_width - (new_width % 2)
        new_height = new_height - (new_height % 2)

        cmd = [
            'ffmpeg', '-y', '-i', input_path,
            '-c:v', 'libx264', '-preset', 'faster',
            '-crf', '23',
            '-b:v', f'{target_bitrate}',
            '-maxrate', f'{int(1.5*target_bitrate)}',
            '-bufsize', f'{2*target_bitrate}',
            '-vf', f'scale={new_width}:{new_height}',
            '-t', str(duration),
            '-c:a', 'aac', '-b:a', '128k',
            output_path
        ]

        subprocess.run(cmd, check=True, capture_output=True)

        with open(output_path, 'rb') as compressed_video:
            compressed_content = compressed_video.read()

        await upload_file_to_firebase(compressed_content, output_filename)

    finally:
        os.remove(input_path)
        os.remove(output_path)

    return output_filename
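The bitrate target in compress_and_process_video is plain arithmetic: the size budget in bits divided by the capped duration. With the defaults, a clip capped at 60 seconds gets roughly 7 Mbit/s (a worked example, not part of the commit):

target_size_mb = 50
max_duration = 60
duration = min(75.0, max_duration)  # e.g. a 75 s upload is capped at 60 s

target_size_bits = target_size_mb * 8 * 1024 * 1024  # 419,430,400 bits
target_bitrate = int(target_size_bits / duration)    # 6,990,506 bps, about 7 Mbit/s
print(target_bitrate)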
models/DeepFakeVoiceDetector_V1.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6b46662643d50c4f91d1bb2efe2e1df5dad9e7aef9ac77ea1f0b3950c7a4f68
size 7820664
models/deepfake_image_params.json
ADDED
@@ -0,0 +1 @@
{"size": 224, "mean": [0.5, 0.5, 0.5], "std": [0.5, 0.5, 0.5]}
models/deepfake_label_mappings.json
ADDED
@@ -0,0 +1 @@
{"id2label": {"0": "Real", "1": "Fake"}, "label2id": {"Real": 0, "Fake": 1}}
models/deepfake_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8bd4c50ebcf3f3c63c8ba47279a46154b40c8951024ce2d84bb5bf48d12c8787
size 343268125
models/deepfake_model/config.json
ADDED
@@ -0,0 +1,32 @@
{
  "_name_or_path": "dima806/deepfake_vs_real_image_detection",
  "architectures": [
    "ViTForImageClassification"
  ],
  "attention_probs_dropout_prob": 0.0,
  "encoder_stride": 16,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "id2label": {
    "0": "Real",
    "1": "Fake"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "Fake": 1,
    "Real": 0
  },
  "layer_norm_eps": 1e-12,
  "model_type": "vit",
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "patch_size": 16,
  "problem_type": "single_label_classification",
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.44.2"
}
models/deepfake_model/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b38e7d3de2be44b224cb4cffe881a0c4b75f7283c5d4a2017fbadd58bd41fdef
size 343223968
models/deepfake_model_config.json
ADDED
@@ -0,0 +1 @@
{"num_labels": 2, "model_name": "dima806/deepfake_vs_real_image_detection"}
models/gan_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0390eddbacc1eae4337957da885070ce66151b7870bdfb22bc6ed28854bb89e2
size 183006752
models/image_manipulation_detection_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24752f81e5da1362b4690b12d434e80d2b8e2953836617456fcbcc65bed836f6
size 354282808
models/img_man_preprocessing_params.json
ADDED
@@ -0,0 +1 @@
{"image_size": [128, 128], "ela_quality": 90}
models/img_manipulation_class_names.json
ADDED
@@ -0,0 +1 @@
["fake", "real"]
requirements.txt
CHANGED
@@ -1,17 +1,21 @@
 aiohttp==3.10.5
-fastapi==0.
-#
-
+fastapi==0.115.0
+#ffmpeg==1.4
+ffmpeg_python==0.2.0
 firebase_admin==6.5.0
-
+ImageHash==4.3.1
 librosa==0.10.2.post1
-
+moviepy==1.0.3
+numpy==2.1.1
+opencv_python==4.10.0.84
+opencv_python_headless==4.10.0.84
 Pillow==10.4.0
-pydantic==2.
+pydantic==2.9.2
 pydub==0.25.1
 python-dotenv==1.0.1
 scipy==1.14.1
-tensorflow==2.
+tensorflow==2.17.0
+tensorflow_intel==2.17.0
+torch==2.4.1
+transformers==4.44.2
 uvicorn==0.30.6
-opencv-python==4.8.0.74
-python-magic==0.4.27