Spaces: Runtime error
Commit 19302dd · Parent: 1915b8d
Update app.py

app.py CHANGED
@@ -7,6 +7,10 @@ import torch
 from diffusers import AudioLDMPipeline
 from transformers import AutoProcessor, ClapModel, BlipProcessor, BlipForConditionalGeneration
 
+# Charger le modèle et le processeur Blip pour la description d'images
+image_caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+image_caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
 # make Space compatible with CPU duplicates
 if torch.cuda.is_available():
     device = "cuda"
@@ -26,10 +30,6 @@ processor = AutoProcessor.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-f
 
 generator = torch.Generator(device)
 
-# Charger le modèle et le processeur Blip pour la description d'images
-image_caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-image_caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
-
 # Streamlit app setup
 st.set_page_config(
     page_title="Text to Media",
@@ -55,7 +55,7 @@ if uploaded_files:
             f.write(uploaded_file.read())
        image_paths.append(image_path)
 
-        #
+        # Générer la légende pour chaque image
        try:
            image = Image.open(image_path).convert("RGB")
            inputs = image_caption_processor(image, return_tensors="pt")
@@ -65,7 +65,7 @@ if uploaded_files:
        except Exception as e:
            descriptions.append("Erreur lors de la génération de la légende")
 
-    #
+    # Afficher les images avec leurs descriptions
    for i, image_path in enumerate(image_paths):
        st.image(image_path, caption=f"Description : {descriptions[i]}", use_column_width=True)
 
@@ -78,7 +78,7 @@ if uploaded_files:
 
    # Configuration de la musique
    seed = st.number_input("Seed", value=45)
-    duration = st.slider("Duration (seconds)", 2.5, 10.0, 5.0, 2.5)
+    duration = st.slider("Duration (seconds) de la musique", 2.5, 10.0, 5.0, 2.5)
    guidance_scale = st.slider("Guidance scale", 0.0, 4.0, 2.5, 0.5)
    n_candidates = st.slider("Number waveforms to generate", 1, 3, 3, 1)
 
@@ -107,23 +107,27 @@ if uploaded_files:
    else:
        waveform = waveforms[0]
 
-
+    # Afficher le lecteur audio
    st.audio(waveform, format="audio/wav", sample_rate=16000)
-
+
    # Créer une vidéo à partir des images
-
-
-
-
+    st.header("Génération du Diaporama Vidéo")
+
+    # Configuration du diaporama
+    image_duration = st.slider("Duration (seconds) des images dans le diaporama", 1, 10, 4)
 
-
-
+    if st.button("Créer le Diaporama Vidéo"):
+        def create_video(images, output_video_path):
+            frame_rate = 1 / image_duration
 
-
+            image_clips = [ImageSequenceClip([image], fps=frame_rate, durations=[image_duration]) for image in images]
 
-
+            final_clip = concatenate_videoclips(image_clips, method="compose")
 
-
+            final_clip.write_videofile(output_video_path, codec='libx264', fps=frame_rate)
+
+        output_video_path = os.path.join(temp_dir, "slideshow.mp4")
+        create_video(image_paths, output_video_path)
 
        # Afficher la vidéo
        st.video(open(output_video_path, 'rb').read())
@@ -133,5 +137,3 @@ if uploaded_files:
    os.remove(image_path)
    os.remove(output_video_path)
    os.rmdir(temp_dir)
-
-
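The new `create_video` helper relies on `ImageSequenceClip` and `concatenate_videoclips`, whose imports are not visible in the diff context; they presumably come from moviepy. For reference, here is a minimal, self-contained sketch of the same slideshow step. It assumes moviepy 1.x, uses `ImageClip` with a per-image duration instead of one `ImageSequenceClip` per file, and the file names are placeholders:

```python
# Sketch only: assumes moviepy 1.x ("pip install moviepy"); paths are placeholders.
import os
from moviepy.editor import ImageClip, concatenate_videoclips

def build_slideshow(image_paths, output_video_path, image_duration=4):
    # One still clip per image, each held for `image_duration` seconds.
    clips = [ImageClip(path).set_duration(image_duration) for path in image_paths]
    # method="compose" pads frames so images of different sizes can be concatenated.
    video = concatenate_videoclips(clips, method="compose")
    # Write at a normal playback rate; each still is repeated for its duration.
    video.write_videofile(output_video_path, codec="libx264", fps=24)

if __name__ == "__main__":
    build_slideshow(["photo1.jpg", "photo2.jpg"], os.path.join("temp", "slideshow.mp4"))
```

The committed version writes the file at `fps = 1 / image_duration` (0.25 fps for the default 4-second slides); keeping per-clip durations and a standard output frame rate avoids sub-1 fps files, which some players handle poorly.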
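The captioning hunks show the `image_caption_processor` call, but the generate/decode lines fall outside the diff context. For reference, a standalone sketch of image captioning with the same Salesforce/blip-image-captioning-base checkpoint (the image path is a placeholder):

```python
# Sketch only: standard Hugging Face BLIP captioning; "example.jpg" is a placeholder.
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

image = Image.open("example.jpg").convert("RGB")
inputs = processor(image, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=30)
caption = processor.decode(output_ids[0], skip_special_tokens=True)
print(caption)
```

Wrapping this in the app's existing try/except keeps a failed caption from breaking the upload loop.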