Update app.py
app.py CHANGED
@@ -6,11 +6,10 @@ from moviepy.editor import ImageSequenceClip, VideoFileClip, AudioFileClip
 import numpy as np
 import os
 from mutagen.mp3 import MP3
-import
+import soundfile as sf
 from dotenv import load_dotenv
-from transformers import
+from transformers import AutoProcessor, AutoModel
 import torch
-import soundfile as sf
 
 # Load environment variables
 load_dotenv()
@@ -57,33 +56,36 @@ def merge_audio_video(entities_num, resize_img_list, text_input):
 
     return mergedclip
 
-
+with gr.Blocks() as app:
+    # Load models in Blocks context
 ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large")
-entities = ner(text_input)
-entities = [tupl for tupl in entities if None not in tupl]
-entities_num = len(entities)
-img_list = []
-
 latentdiffusion = gr.Interface.load("spaces/multimodalart/latentdiffusion")
-
-for ent in entities:
-    img = latentdiffusion(ent[0], '50', '256', '256', '1', 10)[0]
-    img_list.append(img)
-
-resize_img_list = resize(img_list)
-mergedclip = merge_audio_video(entities_num, resize_img_list, text_input)
-mergedclip.write_videofile('mergedvideo.mp4')
-
-return 'mergedvideo.mp4'
 
-
-
-
-
-
-
-
-
-
-
-
+    def engine(text_input):
+        entities = ner(text_input)
+        entities = [tupl for tupl in entities if None not in tupl]
+        entities_num = len(entities)
+        img_list = []
+
+        for ent in entities:
+            img = latentdiffusion(ent[0], '50', '256', '256', '1', 10)[0]
+            img_list.append(img)
+
+        resize_img_list = resize(img_list)
+        mergedclip = merge_audio_video(entities_num, resize_img_list, text_input)
+        mergedclip.write_videofile('mergedvideo.mp4')
+
+        return 'mergedvideo.mp4'
+
+    interface = gr.Interface(
+        fn=engine,
+        inputs=gr.Textbox(lines=5, label="Input Text"),
+        outputs=gr.Video(label='Final Merged Video'),
+        description="<div>🎭🎞️🍿 AI Movie Maker - Comedy 🎬 🧠 🎨</div>",
+        examples=[
+            ["Two space marines take up arms to save the planet from an alien invasion. These two dashing strong men play a comedic role in the science fiction movie of the future where even Barnaby bunny is willing to join their wacky gang of space marines to save the planet with good looks and comedy."]
+        ],
+        title="AI Pipeline Multi Model 🎭🎞️🍿 Movie Maker 🎬 🧠 🎨",
+        article="<br><div></div>"
+    )
+    interface.launch(debug=True)
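
Note on the new imports: swapping the truncated import stubs for soundfile plus AutoProcessor/AutoModel suggests the narration audio is now synthesized locally with a transformers text-to-speech checkpoint. A minimal sketch of that pattern, assuming a Bark-style model; the checkpoint id and the generate_audio helper below are illustrative and are not part of this commit:

# Sketch only: assumes a Bark-style TTS checkpoint; the model id and helper
# name are illustrative and do not appear in this diff.
import soundfile as sf
import torch
from transformers import AutoProcessor, AutoModel

processor = AutoProcessor.from_pretrained("suno/bark-small")  # assumed checkpoint
model = AutoModel.from_pretrained("suno/bark-small")

def generate_audio(text: str, out_path: str = "audio.wav") -> str:
    """Synthesize speech for `text` and write it to a WAV file."""
    inputs = processor(text, return_tensors="pt")
    with torch.no_grad():
        speech = model.generate(**inputs, do_sample=True)
    # Bark exposes its output sample rate on the generation config.
    rate = model.generation_config.sample_rate
    sf.write(out_path, speech.cpu().numpy().squeeze(), rate)
    return out_path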
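
For reference, merge_audio_video (named in the hunk header but not modified here) pairs the resized entity images with the narration track using the moviepy classes imported at the top of app.py. A rough sketch of that kind of merge, assuming one image per entity and an existing audio.mp3 narration file; the function name and defaults are placeholders, not the Space's actual implementation:

# Sketch only: a typical moviepy image-plus-audio merge, not the exact body
# of merge_audio_video in this Space.
from moviepy.editor import ImageSequenceClip, AudioFileClip

def merge_audio_video_sketch(entities_num, resize_img_list, audio_path="audio.mp3"):
    audio = AudioFileClip(audio_path)
    # Spread the images evenly so the slideshow spans the narration.
    fps = entities_num / audio.duration if audio.duration else 1
    clip = ImageSequenceClip(resize_img_list, fps=fps)
    return clip.set_audio(audio)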