Sriv890 committed
Commit aec26a9 · verified · 1 Parent(s): 30e36f1

Upload 2 files

Files changed (2)
  1. app.py +125 -0
  2. requirements.txt +11 -0
app.py ADDED
@@ -0,0 +1,125 @@
+ import os
+
+ import gradio as gr
+ import torch
+ from deep_translator import GoogleTranslator
+ from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
+ from groq import Groq
+ from huggingface_hub import hf_hub_download
+ from safetensors.torch import load_file
+
+ # Read the Groq API key from the environment (e.g. a Space secret) instead of hard-coding it
+ api_key = os.environ.get("GROQ_API_KEY")
+ client = Groq(api_key=api_key)
+
+ # SDXL-Lightning: distilled SDXL UNet for fast 4-step image generation
+ base = "stabilityai/stable-diffusion-xl-base-1.0"
+ repo = "ByteDance/SDXL-Lightning"
+ ckpt = "sdxl_lightning_4step_unet.safetensors"  # 4-step UNet checkpoint
+
+ # Use fp16 on GPU; fall back to fp32 on CPU, where half precision is not supported by most ops
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16 if device == "cuda" else torch.float32
+
+ # Load the custom UNet and set up the pipeline
+ unet = UNet2DConditionModel.from_config(base, subfolder="unet").to(device, dtype)
+ unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
+ pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=dtype, variant="fp16" if device == "cuda" else None).to(device)
+ pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
+
+
+ # Transcribe Tamil audio, translate it to English, and optionally generate an image
+ def process_audio(audio_path, generate_image):
+     if audio_path is None:
+         return "Please upload an audio file.", None, None
+
+     # Step 1: Transcribe the audio with Groq's hosted Whisper model
+     try:
+         with open(audio_path, "rb") as file:
+             transcription = client.audio.transcriptions.create(
+                 file=(os.path.basename(audio_path), file.read()),
+                 model="whisper-large-v3",
+                 language="ta",
+                 response_format="verbose_json",
+             )
+         tamil_text = transcription.text
+     except Exception as e:
+         return f"An error occurred during transcription: {str(e)}", None, None
+
+     # Step 2: Translate Tamil to English
+     try:
+         translator = GoogleTranslator(source="ta", target="en")
+         translation = translator.translate(tamil_text)
+     except Exception as e:
+         return tamil_text, f"An error occurred during translation: {str(e)}", None
+
+     # Step 3: Generate an image from the English translation (if selected)
+     if generate_image:
+         try:
+             img = pipe(translation, num_inference_steps=4, guidance_scale=0).images[0]
+             return tamil_text, translation, img
+         except Exception as e:
+             # gr.Image cannot render an error string, so log the error and return no image
+             print(f"Image generation failed: {e}")
+             return tamil_text, translation, None
+
+     return tamil_text, translation, None
+
+
+ # Generate an image directly from a text prompt
+ def generate_image_from_prompt(prompt):
+     try:
+         return pipe(prompt, num_inference_steps=4, guidance_scale=0).images[0]
+     except Exception as e:
+         raise gr.Error(f"An error occurred during image generation: {str(e)}")
+
+
+ # Gradio interface
+ with gr.Blocks(css="""
+ .gradio-container {background-color: #D8D2C2;}
+ .btn-red {background-color: red; color: white;}
+ .gr-button:hover {color: white !important;}
+ .gr-button {color: black !important;}
+ .gr-textbox {color: black !important;}
+ .gr-Tab {color: black !important;} /* Tab text color set to black */
+ """) as iface:
+
+     # Title
+     gr.Markdown("<h1 style='text-align: center; color:black;'>TransArt - Multimodal Application</h1>")
+
+     # First tab: Audio -> Text -> Image
+     with gr.Tab("Audio to Text"):
+         gr.Markdown("<h3 style='text-align: center; color:black;'>Upload an audio file, translate it and generate an image</h3>")
+
+         # Audio input and image-generation toggle
+         with gr.Row():
+             audio_input = gr.Audio(type="filepath", label="Upload Audio File")
+             generate_image_checkbox = gr.Checkbox(label="Generate Image", value=False)
+
+         # Outputs for transcription, translation, and the generated image
+         outputs = [
+             gr.Textbox(label="Tamil Transcription"),
+             gr.Textbox(label="English Translation"),
+             gr.Image(label="Generated Image"),
+         ]
+
+         # Button that runs transcription, translation, and optional image generation
+         btn = gr.Button("Process Audio", elem_classes="btn-red")
+         btn.click(fn=process_audio, inputs=[audio_input, generate_image_checkbox], outputs=outputs)
+
+     # Second tab: direct prompt-to-image generation
+     with gr.Tab("Prompt to Image"):
+         gr.Markdown("<h3 style='text-align: center; color:black;'>Enter a prompt and generate an image</h3>")
+
+         # Text input for the prompt
+         prompt_input = gr.Textbox(label="Enter Prompt", placeholder="Enter the scene description here...", lines=5)
+
+         # Image output
+         image_output = gr.Image(label="Generated Image")
+
+         # Button that generates an image from the prompt
+         btn_image = gr.Button("Generate Image", elem_classes="btn-red")
+         btn_image.click(fn=generate_image_from_prompt, inputs=prompt_input, outputs=image_output)
+
+ # Launch the interface
+ iface.launch(server_name="0.0.0.0")
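
For a quick local sanity check of the two callbacks, something like the sketch below can be run from an interactive Python session in the repo root. It assumes the GROQ_API_KEY environment variable holds a valid key and that a short Tamil clip named sample_ta.wav sits next to app.py; both names are only illustrative, and importing app will also load the SDXL weights and start the local Gradio server.

    import os
    os.environ.setdefault("GROQ_API_KEY", "gsk_...")  # placeholder only; use a real key or a Space secret

    from app import process_audio, generate_image_from_prompt  # loads models and launches the UI

    # Prompt-to-image path: returns a PIL.Image on success
    img = generate_image_from_prompt("a fishing boat on a beach at sunset")
    img.save("smoke_test.png")

    # Audio path: returns (tamil_transcription, english_translation, image_or_None)
    tamil, english, maybe_img = process_audio("sample_ta.wav", generate_image=False)
    print(tamil, english)
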
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ openai-whisper
+ deep-translator
+ groq
+ gradio
+ accelerate
+ transformers
+ diffusers
+ --extra-index-url https://download.pytorch.org/whl/cu118
+ torch
+ torchvision
+ torchaudio
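
After installing the requirements (the torch family is pulled from the CUDA 11.8 extra index above), a minimal import check along these lines can confirm the stack resolves; the file name check_env.py is only a suggested, hypothetical helper and is not part of this commit.

    # check_env.py — hypothetical helper, not part of this commit
    import torch, gradio, diffusers, transformers, groq, deep_translator

    print("torch", torch.__version__, "CUDA available:", torch.cuda.is_available())
    print("gradio", gradio.__version__, "diffusers", diffusers.__version__)
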