# PyTorch, used here to detect whether a GPU is available
import torch

# Gradio, for the web interface
import gradio as gr

# Pillow, for opening images
from PIL import Image

# SciPy, for writing WAV audio files
import scipy.io.wavfile as wavfile

# Use a pipeline as a high-level helper
from transformers import pipeline


device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# Text-to-speech pipeline (VITS trained on the LJ Speech dataset)
narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

# Image-captioning pipeline (BLIP large), placed on the selected device
caption_image = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large", device=device)
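# Note: the image-to-text pipeline returns a list of dicts, e.g.
# [{'generated_text': 'a dog sitting on a beach'}] (illustrative output),
# which is why the caption is read with [0]['generated_text'] below.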


# Generate a narrated WAV file from the given text
def generate_audio(text):
    # Run the text-to-speech pipeline
    narrated_text = narrator(text)
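    # Note: the text-to-speech pipeline returns a dict holding an "audio"
    # waveform array plus its "sampling_rate"; the waveform carries a
    # leading batch dimension, hence the [0] when saving below.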

    # Save the audio to a WAV file
    wavfile.write("output.wav", rate=narrated_text["sampling_rate"],
                  data=narrated_text["audio"][0])

    # Return the path to the saved WAV file
    return "output.wav"

def caption_my_image(pil_image):
    # Caption the image, then narrate the caption
    caption = caption_image(images=pil_image)[0]['generated_text']
    audio = generate_audio(caption)
    return caption, audio  # the caption text and the audio file path
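
# Quick local test outside Gradio (hypothetical; assumes an image file
# named "example.jpg" exists next to this script):
# caption, audio_path = caption_my_image(Image.open("example.jpg"))
# print(caption, audio_path)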


# Close any Gradio apps left running from earlier launches
gr.close_all()

demo = gr.Interface(fn=caption_my_image,
                    inputs=[gr.Image(label="Select Image", type="pil")],
                    outputs=[
                        gr.Textbox(label="Image Caption"),
                        gr.Audio(label="Image Caption Audio")],
                    title="Image Captioning with Audio Output",
                    description="Caption an image with AI and narrate the caption as audio.")
demo.launch()
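
# Optional (a standard Gradio parameter, not used in the original script):
# demo.launch(share=True) would also create a temporary public link.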