Bils committed on
Commit
24da5c3
·
verified ·
1 Parent(s): e32e265

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +100 -71
app.py CHANGED
@@ -1,4 +1,3 @@
1
- # Import necessary libraries
2
  import os
3
  import tempfile
4
  import gradio as gr
@@ -6,73 +5,96 @@ from dotenv import load_dotenv
6
  import torch
7
  from scipy.io.wavfile import write
8
  from diffusers import DiffusionPipeline
9
- import google.generativeai as genai
10
  from pathlib import Path
11
 
12
-
13
- # Load environment variables from .env file
14
  load_dotenv()
15
 
16
- #Google Generative AI for Gemini
17
- genai.configure(api_key=os.getenv("API_KEY"))
18
-
19
- # Hugging Face token from environment variables
20
  hf_token = os.getenv("HF_TKN")
21
 
22
- def analyze_image_with_gemini(image_file):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  """
24
- Analyzes an uploaded image with Gemini and generates a descriptive caption.
 
25
  """
26
  try:
27
- # Save uploaded image to a temporary file
28
- temp_image_path = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg").name
29
- with open(temp_image_path, "wb") as temp_file:
30
  temp_file.write(image_file)
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
- # Prepare the image data and prompt for Gemini
33
- image_parts = [{"mime_type": "image/jpeg", "data": Path(temp_image_path).read_bytes()}]
34
- prompt_parts = ["Describe precisely the image in one sentence.\n", image_parts[0], "\n"]
35
- generation_config = {"temperature": 0.05, "top_p": 1, "top_k": 26, "max_output_tokens": 4096}
36
- safety_settings = [{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
37
- {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
38
- {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
39
- {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}]
40
- model = genai.GenerativeModel(model_name="gemini-1.0-pro-vision-latest",
41
- generation_config=generation_config,
42
- safety_settings=safety_settings)
43
- response = model.generate_content(prompt_parts)
44
- return response.text.strip(), False # False indicates no error
45
  except Exception as e:
46
- print(f"Error analyzing image with Gemini: {e}")
47
- return "Error analyzing image with Gemini", True # Indicates error with a message
48
 
49
  def get_audioldm_from_caption(caption):
50
  """
51
  Generates sound from a caption using the AudioLDM-2 model.
 
52
  """
53
- # Initialize the model
54
- pipe = DiffusionPipeline.from_pretrained("cvssp/audioldm2", use_auth_token=hf_token)
55
- pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
56
-
57
- # Generate audio from the caption
58
- audio_output = pipe(prompt=caption, num_inference_steps=50, guidance_scale=7.5)
59
- audio = audio_output.audios[0]
60
-
61
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
62
- write(temp_file.name, 16000, audio)
 
 
 
63
 
64
- return temp_file.name
 
 
65
 
66
- # css
67
- css="""
 
 
68
  #col-container{
69
  margin: 0 auto;
70
  max-width: 800px;
71
  }
72
-
73
  """
74
 
75
- # Gradio interface setup
76
  with gr.Blocks(css=css) as demo:
77
  # Main Title and App Description
78
  with gr.Column(elem_id="col-container"):
@@ -81,47 +103,57 @@ with gr.Blocks(css=css) as demo:
81
  🎶 Generate Sound Effects from Image
82
  </h1>
83
  <p style="text-align: center;">
84
- ⚡ Powered by <a href="https://bilsimaging.com" _blank >Bilsimaging</a>
85
  </p>
86
- """)
87
-
88
  gr.Markdown("""
89
- Welcome to this unique sound effect generator! This tool allows you to upload an image and generate a descriptive caption and a corresponding sound effect. Whether you're exploring the sound of nature, urban environments, or anything in between, this app brings your images to auditory life.
 
90
 
91
  **💡 How it works:**
92
  1. **Upload an image**: Choose an image that you'd like to analyze.
93
- 2. **Generate Description**: Click on 'Tap to Generate Description from the image' to get a textual description of your uploaded image.
94
- 3. **Generate Sound Effect**: Based on the image description, click on 'Generate Sound Effect' to create a sound effect that matches the image context.
 
95
 
96
  Enjoy the journey from visual to auditory sensation with just a few clicks!
97
-
98
- For Example Demos sound effects generated , check out our [YouTube channel](https://www.youtube.com/playlist?list=PLwEbW4bdYBSC8exiJ9PfzufGND_14f--C)
99
  """)
100
-
101
- # Interface Components
102
  image_upload = gr.File(label="Upload Image", type="binary")
103
- generate_description_button = gr.Button("Tap to Generate a Description from your image")
104
- caption_display = gr.Textbox(label="Image Description", interactive=False) # Keep as read-only
105
  generate_sound_button = gr.Button("Generate Sound Effect")
106
  audio_output = gr.Audio(label="Generated Sound Effect")
107
- # extra footer
108
- gr.Markdown("""## 👥 How You Can Contribute
109
- We welcome contributions and suggestions for improvements. Your feedback is invaluable to the continuous enhancement of this application.
110
-
111
- For support, questions, or to contribute, please contact us at [[email protected]](mailto:[email protected]).
112
-
113
- Support our work and get involved by donating through [Ko-fi](https://ko-fi.com/bilsimaging). - Bilel Aroua
114
- """)
115
- gr.Markdown("""## 📢 Stay Connected
116
- this app is a testament to the creative possibilities that emerge when technology meets art. Enjoy exploring the auditory landscape of your images!
117
- """)
 
 
 
 
 
 
 
 
 
118
  # Function to update the caption display based on the uploaded image
119
  def update_caption(image_file):
120
- description, _ = analyze_image_with_gemini(image_file)
121
  return description
122
 
123
  # Function to generate sound from the description
124
  def generate_sound(description):
 
 
125
  audio_path = get_audioldm_from_caption(description)
126
  return audio_path
127
 
@@ -137,7 +169,4 @@ with gr.Blocks(css=css) as demo:
137
  outputs=audio_output
138
  )
139
 
140
-
141
-
142
- # Launch the Gradio app
143
- demo.launch(debug=True, share=True)
 
 
1
  import os
2
  import tempfile
3
  import gradio as gr
 
5
  import torch
6
  from scipy.io.wavfile import write
7
  from diffusers import DiffusionPipeline
8
+ from transformers import pipeline
9
  from pathlib import Path
10
 
11
+ # Load environment variables from .env file if needed
 
12
  load_dotenv()
13
 
14
+ # If you have any Hugging Face tokens for private models (AudioLDM2 requires HF_TKN)
 
 
 
15
  hf_token = os.getenv("HF_TKN")
16
 
17
# ------------------------------------------------
# 1) INITIALIZE FREE IMAGE CAPTIONING PIPELINE
# ------------------------------------------------
# Pick the compute target once so both pipelines agree on placement.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Any other free image-captioning checkpoint can be swapped in for
# "nlpconnect/vit-gpt2-image-captioning".
captioning_pipeline = pipeline(
    "image-to-text",
    model="nlpconnect/vit-gpt2-image-captioning",
    # For a gated/private checkpoint, add: use_auth_token=hf_token,
)

# ------------------------------------------------
# 2) INITIALIZE AUDIO LDM-2 PIPELINE
# ------------------------------------------------
# AudioLDM2 is hosted on Hugging Face; the token matters only for private
# checkpoints and may be dropped when using the public model.
pipe = DiffusionPipeline.from_pretrained(
    "cvssp/audioldm2",
    use_auth_token=hf_token  # remove or comment out if not needed
)
pipe = pipe.to(device)
38
+
39
def analyze_image_with_free_model(image_file):
    """
    Generate a caption for an uploaded image using a free Hugging Face
    image-captioning model.

    Args:
        image_file: Raw image bytes as delivered by gr.File(type="binary").

    Returns:
        tuple[str, bool]: (caption_or_error_message, is_error_flag);
        the flag is True when captioning failed.
    """
    temp_image_path = None
    try:
        # The captioning pipeline expects a file path, so persist the
        # uploaded bytes to a temporary .jpg first.
        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as temp_file:
            temp_file.write(image_file)
            temp_image_path = temp_file.name

        # Run the image-captioning pipeline on the saved image.
        results = captioning_pipeline(temp_image_path)
        if not results or not isinstance(results, list):
            return "Error: Could not generate caption.", True

        # The pipeline typically returns a list of dicts with a
        # "generated_text" key.
        caption = results[0].get("generated_text", "").strip()
        if not caption:
            return "No caption was generated.", True
        return caption, False

    except Exception as e:
        print(f"Error analyzing image: {e}")
        return f"Error analyzing image: {e}", True
    finally:
        # Fix: the original never removed the temp image, leaking one file
        # per request; clean it up on both success and failure paths.
        if temp_image_path and os.path.exists(temp_image_path):
            os.remove(temp_image_path)
64
 
65
def get_audioldm_from_caption(caption):
    """
    Synthesize a sound effect for *caption* with the AudioLDM-2 pipeline.

    Args:
        caption: Text prompt describing the desired sound.

    Returns:
        Path of a temporary 16 kHz .wav file, or None on failure.
    """
    try:
        # Run the diffusion pipeline and keep the first generated clip.
        generated = pipe(
            prompt=caption,
            num_inference_steps=50,
            guidance_scale=7.5,
        )
        waveform = generated.audios[0]

        # delete=False keeps the file around for the caller to serve.
        wav_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        with wav_file:
            write(wav_file.name, 16000, waveform)
        return wav_file.name

    except Exception as e:
        print(f"Error generating audio from caption: {e}")
        return None
87
 
88
# ------------------------------------------------
# 3) GRADIO INTERFACE
# ------------------------------------------------
# Page styling: center the main column and cap its width at 800px.
css = """
#col-container{
    margin: 0 auto;
    max-width: 800px;
}
"""
97
 
 
98
  with gr.Blocks(css=css) as demo:
99
  # Main Title and App Description
100
  with gr.Column(elem_id="col-container"):
 
103
  🎶 Generate Sound Effects from Image
104
  </h1>
105
  <p style="text-align: center;">
106
+ ⚡ Powered by <a href="https://bilsimaging.com" target="_blank">Bilsimaging</a>
107
  </p>
108
+ """)
109
+
110
  gr.Markdown("""
111
+ Welcome to this unique sound effect generator! This tool allows you to upload an image and generate a
112
+ descriptive caption and a corresponding sound effect, all using free, open-source models on Hugging Face.
113
 
114
  **💡 How it works:**
115
  1. **Upload an image**: Choose an image that you'd like to analyze.
116
+ 2. **Generate Description**: Click on 'Generate Description' to get a textual description of your uploaded image.
117
+ 3. **Generate Sound Effect**: Based on the image description, click on 'Generate Sound Effect' to create a
118
+ sound effect that matches the image context.
119
 
120
  Enjoy the journey from visual to auditory sensation with just a few clicks!
 
 
121
  """)
122
+
 
123
  image_upload = gr.File(label="Upload Image", type="binary")
124
+ generate_description_button = gr.Button("Generate Description")
125
+ caption_display = gr.Textbox(label="Image Description", interactive=False) # Keep read-only
126
  generate_sound_button = gr.Button("Generate Sound Effect")
127
  audio_output = gr.Audio(label="Generated Sound Effect")
128
+
129
+ # Extra footer
130
+ gr.Markdown("""
131
+ ## 👥 How You Can Contribute
132
+ We welcome contributions and suggestions for improvements. Your feedback is invaluable
133
+ to the continuous enhancement of this application.
134
+
135
+ For support, questions, or to contribute, please contact us at
136
+ [contact@bilsimaging.com](mailto:[email protected]).
137
+
138
+ Support our work and get involved by donating through
139
+ [Ko-fi](https://ko-fi.com/bilsimaging). - Bilel Aroua
140
+ """)
141
+
142
+ gr.Markdown("""
143
+ ## 📢 Stay Connected
144
+ This app is a testament to the creative possibilities that emerge when technology meets art.
145
+ Enjoy exploring the auditory landscape of your images!
146
+ """)
147
+
148
  # Function to update the caption display based on the uploaded image
149
  def update_caption(image_file):
150
+ description, error_flag = analyze_image_with_free_model(image_file)
151
  return description
152
 
153
  # Function to generate sound from the description
154
  def generate_sound(description):
155
+ if not description or description.startswith("Error"):
156
+ return None # or some default sound
157
  audio_path = get_audioldm_from_caption(description)
158
  return audio_path
159
 
 
169
  outputs=audio_output
170
  )
171
 
172
+ demo.launch(debug=True, share=True)