import glob
import os
import subprocess

import gradio as gr

# Inference is delegated to the HunyuanVideo repository's sample_video.py
# script (run as a subprocess below), so the model is not loaded in this process.

def generate_video(prompt, video_size="720x1280", video_length=129, infer_steps=50):
    prompt = prompt.strip()

    # The dropdown passes the resolution as a "HEIGHTxWIDTH" string.
    height, width = (int(v) for v in video_size.split("x"))

    command = [
        "python3", "sample_video.py",
        "--video-size", str(height), str(width),
        "--video-length", str(int(video_length)),
        "--infer-steps", str(int(infer_steps)),
        "--prompt", prompt,
        "--flow-reverse",
        "--use-cpu-offload",
        "--save-path", "./results"
    ]

    # Fail loudly if the sampling script errors out instead of returning a missing file.
    subprocess.run(command, check=True)

    # sample_video.py writes its output into ./results; the exact filename is not
    # fixed here, so return the most recently written .mp4 (assumption).
    video_files = glob.glob(os.path.join("./results", "*.mp4"))
    if not video_files:
        raise FileNotFoundError("No video found in ./results after generation.")
    return max(video_files, key=os.path.getmtime)


iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Textbox(label="Enter the prompt", placeholder="A cat walks on the grass, realistic style."),
        gr.Dropdown(label="Video size", choices=["720x1280", "544x960"], value="720x1280"),
        gr.Slider(label="Video length (frames)", minimum=1, maximum=300, value=129, step=1),
        gr.Slider(label="Inference steps", minimum=1, maximum=100, value=50, step=1)
    ],
    outputs=gr.Video(label="Generated video"),
    title="Video generation with HunyuanVideo",
    description="Generate videos with the HunyuanVideo model by providing a text prompt."
)

iface.launch()