Create requirements.txt
requirements.txt ADDED (+29 -0)
@@ -0,0 +1,29 @@
+from fastapi import FastAPI, UploadFile
+import ffmpeg
+from transformers import pipeline
+
+app = FastAPI()
+
+# Load Hugging Face model (example: text-to-video)
+model = pipeline("text-to-video-generation", model="damo-vilab/modelscope-text-to-video-synthesis")
+
+@app.post("/generate_video/")
+async def generate_video(prompt: str):
+    # Generate video from text
+    result = model(prompt)
+    output_file = "generated_video.mp4"
+    with open(output_file, "wb") as f:
+        f.write(result["video"])
+
+    return {"message": "Video generated successfully!", "file_path": output_file}
+
+@app.post("/upload_video/")
+async def upload_video(file: UploadFile):
+    input_file = f"uploaded_{file.filename}"
+    with open(input_file, "wb") as f:
+        f.write(file.file.read())
+
+    # Example: Trim video using FFmpeg
+    trimmed_file = "trimmed_video.mp4"
+    ffmpeg.input(input_file).output(trimmed_file, ss=0, t=10).run()
+    return {"message": "Video uploaded and trimmed!", "file_path": trimmed_file}
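Note: although the commit is titled "Create requirements.txt", the 29 added lines are FastAPI application code rather than a dependency list, and the packages it imports are not pinned anywhere in this diff. A minimal sketch of the pip requirements such an app would likely need is shown below; the package set is an assumption read off the imports (uvicorn and python-multipart are extra assumptions: FastAPI needs them at runtime for serving and for UploadFile form parsing, even though they are never imported directly):

    # assumed dependency list, not part of this commit
    fastapi
    uvicorn
    python-multipart   # needed by FastAPI to parse UploadFile form data
    ffmpeg-python      # provides the "import ffmpeg" module used above
    transformers
    torch

The ffmpeg binary itself would also have to be present on the Space's image (for example via a packages.txt entry or a Dockerfile), and the app would typically be started with a command along the lines of: uvicorn app:app --host 0.0.0.0 --port 7860 (assuming the code is saved as app.py and served on 7860, the port Spaces usually expect).

A separate caveat on the model-loading line: "text-to-video-generation" is not one of the built-in transformers pipeline tasks, so pipeline(...) as written is unlikely to load this checkpoint. The ModelScope text-to-video model is more commonly driven through the diffusers library; a rough sketch follows, assuming the diffusers-format release damo-vilab/text-to-video-ms-1.7b, a CUDA GPU, and noting that the layout of the .frames output varies across diffusers versions:

    import torch
    from diffusers import DiffusionPipeline
    from diffusers.utils import export_to_video

    # Assumption: the diffusers-format ModelScope text-to-video checkpoint is
    # the intended model. This sketch is not part of the original commit.
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
    )
    pipe.enable_model_cpu_offload()  # trades speed for lower GPU memory use

    frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames
    video_path = export_to_video(frames)  # writes an .mp4 and returns its path

With that approach, the /generate_video/ handler would return video_path (or stream the file) instead of writing result["video"] bytes by hand.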