import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from diffusers import DiffusionPipeline  # auto-selects the right pipeline class per checkpoint
import torch
import os
import logging
from huggingface_hub import login
import accelerate

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Read the Hugging Face access token from the environment variable
read_token = os.getenv('AccToken')
if not read_token:
    raise ValueError("Hugging Face access token not found. Please set the AccToken environment variable.")
logger.info(f"Hugging Face access token found: {read_token[:5]}...")  # Log the first 5 characters for verification

# Log in to Hugging Face using the token
login(read_token)

# Define a dictionary of conversational models
conversational_models = {
    "DeepSeek R1": "deepseek-ai/DeepSeek-R1",
    "Perplexity (R1 Post-trained)": "perplexity-ai/r1-1776",
    "Llama-Instruct by Meta": "meta-llama/Llama-3.2-3B-Instruct",
    "Mistral": "mistralai/Mistral-7B-v0.1",
    "Gemma": "google/gemma-2-2b-it",
}
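# Note: deepseek-ai/DeepSeek-R1 is a very large mixture-of-experts checkpoint
# and the meta-llama repos are gated on the Hub; loading them here assumes the
# token above has access and that the host has far more memory than a typical Space.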

# Define a dictionary of Text-to-Image models
text_to_image_models = {
    "Stable Diffusion 3.5 Large": "stabilityai/stable-diffusion-3.5-large",
    "Stable Diffusion 1.4": "CompVis/stable-diffusion-v1-4",
    "Flux Dev": "black-forest-labs/FLUX.1-dev",
}
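# Stable Diffusion 3.5 and FLUX.1-dev are gated repositories on the Hub; the
# token above must belong to an account that has accepted their licenses.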

# Define a dictionary of Text-to-Speech models
text_to_speech_models = {
    "Spark TTS": "SparkAudio/Spark-TTS-0.5B",
}
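# Caveat: Spark-TTS ships custom inference code and may not load through the
# generic transformers "text-to-speech" pipeline; if it fails, a pipeline-
# compatible model such as microsoft/speecht5_tts is a safer default.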

# Initialize tokenizers and models for conversational AI
conversational_tokenizers = {}
conversational_models_loaded = {}

# Initialize pipelines for Text-to-Image
text_to_image_pipelines = {}

# Initialize pipelines for Text-to-Speech
text_to_speech_pipelines = {}

# Initialize pipelines for other tasks
visual_qa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
document_qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
image_classification_pipeline = pipeline("image-classification", model="facebook/deit-base-distilled-patch16-224")
object_detection_pipeline = pipeline("object-detection", model="facebook/detr-resnet-50")
video_classification_pipeline = pipeline("video-classification", model="facebook/timesformer-base-finetuned-k400")
summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cnn")
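# All six pipelines above load eagerly at startup. If memory is tight, the
# same lazy-load-and-cache pattern used for the conversational, image, and
# speech models below could be applied to these as well.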

# Load speaker embeddings for text-to-audio
def load_speaker_embeddings(model_name):
    if model_name == "microsoft/speecht5_tts":
        logger.info("Loading speaker embeddings for SpeechT5")
        from datasets import load_dataset
        dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
        speaker_embeddings = torch.tensor(dataset[7306]["xvector"]).unsqueeze(0)  # Example speaker
        return speaker_embeddings
    return None
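# SpeechT5 consumes these embeddings through the pipeline's forward_params, e.g.:
#   text_to_audio_pipeline("Hello", forward_params={"speaker_embeddings": speaker_embeddings})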

# Use a different model for text-to-audio if stabilityai/stable-audio-open-1.0 is not supported
speaker_embeddings = None  # only needed by the SpeechT5 fallback
try:
    text_to_audio_pipeline = pipeline("text-to-audio", model="stabilityai/stable-audio-open-1.0")
except Exception as e:  # loading can fail with more than ValueError (e.g. OSError)
    logger.error(f"Error loading stabilityai/stable-audio-open-1.0: {e}")
    logger.info("Falling back to a different text-to-audio model.")
    text_to_audio_pipeline = pipeline("text-to-audio", model="microsoft/speecht5_tts")
    speaker_embeddings = load_speaker_embeddings("microsoft/speecht5_tts")
audio_classification_pipeline = pipeline("audio-classification", model="facebook/wav2vec2-base")
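# Note: facebook/wav2vec2-base is a pretrained encoder without a fine-tuned
# classification head, so its predicted labels are not meaningful; a checkpoint
# fine-tuned for audio classification would return real class labels.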

def load_conversational_model(model_name):
    if model_name not in conversational_models_loaded:
        logger.info(f"Loading conversational model: {model_name}")
        tokenizer = AutoTokenizer.from_pretrained(
            conversational_models[model_name], 
            token=read_token,  # use_auth_token is deprecated in recent transformers
            trust_remote_code=True
        )
        try:
            model = AutoModelForCausalLM.from_pretrained(
                conversational_models[model_name], 
                token=read_token, 
                trust_remote_code=True,
                device_map="auto" if torch.cuda.is_available() else "cpu"
            )
        except RuntimeError as e:
            logger.error(f"RuntimeError: {e}")
            logger.info("Falling back to CPU for the model.")
            model = AutoModelForCausalLM.from_pretrained(
                conversational_models[model_name], 
                token=read_token, 
                trust_remote_code=True,
                device_map="cpu"
            )
        conversational_tokenizers[model_name] = tokenizer
        conversational_models_loaded[model_name] = model
    return conversational_tokenizers[model_name], conversational_models_loaded[model_name]

def chat(model_name, user_input, history=None):
    history = history or []  # avoid a shared mutable default argument
    tokenizer, model = load_conversational_model(model_name)
    
    # Encode the input (append the EOS token if the tokenizer defines one)
    # and move it to the model's device so generation works with device_map="auto"
    eos = tokenizer.eos_token or ""
    input_ids = tokenizer.encode(user_input + eos, return_tensors="pt").to(model.device)
    
    # Generate a response
    with torch.no_grad():
        output = model.generate(input_ids, max_new_tokens=150, pad_token_id=tokenizer.eos_token_id)
    
    # Decode only the newly generated tokens so the prompt is not echoed back
    # (slicing the decoded string by len(user_input) is unreliable)
    response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True).strip()
    
    # Append to chat history; the empty string clears the message box
    history.append((user_input, response))
    
    return history, ""
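# Note: this uses Gradio's tuple-based chat history. Recent Gradio releases
# are moving gr.Chatbot toward type="messages" (role/content dicts), which
# would require adapting chat() accordingly.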

def generate_image(model_name, prompt):
    if model_name not in text_to_image_pipelines:
        logger.info(f"Loading text-to-image model: {model_name}")
        # DiffusionPipeline picks the right pipeline class for each checkpoint
        # (SD 1.4, SD 3.5, and FLUX all use different pipeline classes).
        pipe = DiffusionPipeline.from_pretrained(
            text_to_image_models[model_name], 
            token=read_token, 
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        )
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        text_to_image_pipelines[model_name] = pipe
    pipe = text_to_image_pipelines[model_name]  # local name must not shadow pipeline()
    image = pipe(prompt).images[0]
    return image
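# Caching every loaded pipeline in text_to_image_pipelines keeps all previously
# selected models resident; popping unused entries (and calling
# torch.cuda.empty_cache()) is one way to bound GPU memory.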

def generate_speech(model_name, text):
    if model_name not in text_to_speech_pipelines:
        logger.info(f"Loading text-to-speech model: {model_name}")
        text_to_speech_pipelines[model_name] = pipeline(
            "text-to-speech", 
            model=text_to_speech_models[model_name], 
            token=read_token, 
            device=0 if torch.cuda.is_available() else -1
        )
    # Avoid naming this `pipeline`: assigning to that name anywhere in the
    # function makes the pipeline(...) call above raise UnboundLocalError.
    tts_pipe = text_to_speech_pipelines[model_name]
    result = tts_pipe(text)
    # The pipeline returns {"audio": ndarray, "sampling_rate": int};
    # gr.Audio expects a (sampling_rate, data) tuple.
    return result["sampling_rate"], result["audio"].squeeze()

def visual_qa(image, question):
    # The VQA pipeline returns a ranked list of answers; take the top one.
    results = visual_qa_pipeline(image=image, question=question)
    return results[0]["answer"]

def document_qa(document, question):
    result = document_qa_pipeline(question=question, context=document)
    return result["answer"]

def image_classification(image):
    result = image_classification_pipeline(image)
    return result

def object_detection(image):
    result = object_detection_pipeline(image)
    return result

def video_classification(video):
    result = video_classification_pipeline(video)
    return result

def summarize_text(text):
    result = summarization_pipeline(text)
    return result[0]["summary_text"]

def text_to_audio(text):
    # The SpeechT5 fallback needs speaker embeddings, passed via forward_params;
    # the Stable Audio pipeline (if it loaded) needs none.
    forward_params = {"speaker_embeddings": speaker_embeddings} if speaker_embeddings is not None else {}
    result = text_to_audio_pipeline(text, forward_params=forward_params)
    return result["sampling_rate"], result["audio"].squeeze()

def audio_classification(audio):
    result = audio_classification_pipeline(audio)
    return result

# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Versatile AI Chatbot and Text-to-X Tasks")
    
    with gr.Tab("Conversational AI"):
        conversational_model_choice = gr.Dropdown(list(conversational_models.keys()), label="Choose a Conversational Model")
        conversational_chatbot = gr.Chatbot(label="Chat")
        conversational_message = gr.Textbox(label="Message")
        conversational_submit = gr.Button("Submit")
        
        conversational_submit.click(chat, inputs=[conversational_model_choice, conversational_message, conversational_chatbot], outputs=[conversational_chatbot, conversational_message])
        conversational_message.submit(chat, inputs=[conversational_model_choice, conversational_message, conversational_chatbot], outputs=[conversational_chatbot, conversational_message])
    
    with gr.Tab("Text-to-Image"):
        text_to_image_model_choice = gr.Dropdown(list(text_to_image_models.keys()), label="Choose a Text-to-Image Model")
        text_to_image_prompt = gr.Textbox(label="Prompt")
        text_to_image_generate = gr.Button("Generate Image")
        text_to_image_output = gr.Image(label="Generated Image")
        
        text_to_image_generate.click(generate_image, inputs=[text_to_image_model_choice, text_to_image_prompt], outputs=text_to_image_output)
    
    with gr.Tab("Text-to-Speech"):
        text_to_speech_model_choice = gr.Dropdown(list(text_to_speech_models.keys()), label="Choose a Text-to-Speech Model")
        text_to_speech_text = gr.Textbox(label="Text")
        text_to_speech_generate = gr.Button("Generate Speech")
        text_to_speech_output = gr.Audio(label="Generated Speech")
        
        text_to_speech_generate.click(generate_speech, inputs=[text_to_speech_model_choice, text_to_speech_text], outputs=text_to_speech_output)
    
    with gr.Tab("Visual Question Answering"):
        visual_qa_image = gr.Image(label="Upload Image", type="pil")
        visual_qa_question = gr.Textbox(label="Question")
        visual_qa_generate = gr.Button("Answer")
        visual_qa_output = gr.Textbox(label="Answer")
        
        visual_qa_generate.click(visual_qa, inputs=[visual_qa_image, visual_qa_question], outputs=visual_qa_output)
    
    with gr.Tab("Document Question Answering"):
        document_qa_document = gr.Textbox(label="Document Text")
        document_qa_question = gr.Textbox(label="Question")
        document_qa_generate = gr.Button("Answer")
        document_qa_output = gr.Textbox(label="Answer")
        
        document_qa_generate.click(document_qa, inputs=[document_qa_document, document_qa_question], outputs=document_qa_output)
    
    with gr.Tab("Image Classification"):
        image_classification_image = gr.Image(label="Upload Image", type="pil")
        image_classification_generate = gr.Button("Classify")
        image_classification_output = gr.JSON(label="Classification Result")
        
        image_classification_generate.click(image_classification, inputs=image_classification_image, outputs=image_classification_output)
    
    with gr.Tab("Object Detection"):
        object_detection_image = gr.Image(label="Upload Image", type="pil")
        object_detection_generate = gr.Button("Detect")
        object_detection_output = gr.JSON(label="Detection Result")
        
        object_detection_generate.click(object_detection, inputs=object_detection_image, outputs=object_detection_output)
    
    with gr.Tab("Video Classification"):
        video_classification_video = gr.Video(label="Upload Video")
        video_classification_generate = gr.Button("Classify")
        video_classification_output = gr.JSON(label="Classification Result")
        
        video_classification_generate.click(video_classification, inputs=video_classification_video, outputs=video_classification_output)
    
    with gr.Tab("Summarization"):
        summarize_text_text = gr.Textbox(label="Text")
        summarize_text_generate = gr.Button("Summarize")
        summarize_text_output = gr.Textbox(label="Summary")
        
        summarize_text_generate.click(summarize_text, inputs=summarize_text_text, outputs=summarize_text_output)
    
    with gr.Tab("Text-to-Audio"):
        text_to_audio_text = gr.Textbox(label="Text")
        text_to_audio_generate = gr.Button("Generate Audio")
        text_to_audio_output = gr.Audio(label="Generated Audio")
        
        text_to_audio_generate.click(text_to_audio, inputs=text_to_audio_text, outputs=text_to_audio_output)

    with gr.Tab("Audio Classification"):
        audio_classification_audio = gr.Audio(label="Upload Audio", type="filepath")
        audio_classification_generate = gr.Button("Classify")
        audio_classification_output = gr.JSON(label="Classification Result")
        
        audio_classification_generate.click(audio_classification, inputs=audio_classification_audio, outputs=audio_classification_output)

# Launch the Gradio interface
demo.launch()
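# If the long-running generation calls should queue rather than run
# concurrently, launching with demo.queue().launch() is an option.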