DanLeBossDeESGI committed on
Commit
8389523
·
1 Parent(s): 701788b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -5,7 +5,7 @@ from moviepy.editor import ImageSequenceClip, concatenate_videoclips
5
  from PIL import Image
6
  import torch
7
  from diffusers import AudioLDMPipeline
8
- from transformers import AutoProcessor, ClapModel
9
 
10
  # make Space compatible with CPU duplicates
11
  if torch.cuda.is_available():
@@ -26,6 +26,10 @@ processor = AutoProcessor.from_pretrained("sanchit-gandhi/clap-htsat-unfused-m-f
26
 
27
  generator = torch.Generator(device)
28
 
 
 
 
 
29
  # Streamlit app setup
30
  st.set_page_config(
31
  page_title="Text to Media",
@@ -54,9 +58,9 @@ if uploaded_files:
54
  # Générez la légende pour chaque image
55
  try:
56
  image = Image.open(image_path).convert("RGB")
57
- inputs = processor(image, return_tensors="pt")
58
- out = model.generate(**inputs)
59
- caption = processor.decode(out[0], skip_special_tokens=True)
60
  descriptions.append(caption)
61
  except Exception as e:
62
  descriptions.append("Erreur lors de la génération de la légende")
 
5
  from PIL import Image
6
  import torch
7
  from diffusers import AudioLDMPipeline
8
+ from transformers import AutoProcessor, ClapModel, BlipProcessor, BlipForConditionalGeneration
9
 
10
  # make Space compatible with CPU duplicates
11
  if torch.cuda.is_available():
 
26
 
27
  generator = torch.Generator(device)
28
 
29
+ # Charger le modèle et le processeur Blip pour la description d'images
30
+ image_caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
31
+ image_caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
32
+
33
  # Streamlit app setup
34
  st.set_page_config(
35
  page_title="Text to Media",
 
58
  # Générez la légende pour chaque image
59
  try:
60
  image = Image.open(image_path).convert("RGB")
61
+ inputs = image_caption_processor(image, return_tensors="pt")
62
+ out = image_caption_model.generate(**inputs)
63
+ caption = image_caption_processor.decode(out[0], skip_special_tokens=True)
64
  descriptions.append(caption)
65
  except Exception as e:
66
  descriptions.append("Erreur lors de la génération de la légende")