from transformers import pipeline, Conversation
import gradio as gr
import scipy  # not used directly below; presumably intended for saving generated audio to a WAV file
from diffusers import DiffusionPipeline

# Model initialization: chat, text-to-audio, and text-to-image pipelines
chatbot = pipeline(model="facebook/blenderbot-400M-distill")              # BlenderBot chatbot
synthesiser = pipeline("text-to-audio", "facebook/musicgen-small")        # MusicGen text-to-audio
ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")  # latent diffusion text-to-image
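# Optional (not in the original): on a CUDA machine these can be placed on GPU,
# e.g. pipeline(..., device=0) for the transformers pipelines and ldm.to("cuda")
# for the diffusers pipeline; everything is left on the default device here.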

# Conversation history buffers (note: never appended to by vanilla_chatbot below)
message_list = []
response_list = []

# Chatbot: wrap the user message in a Conversation and return the model's reply
def vanilla_chatbot(message):
    conversation = Conversation(text=message, past_user_inputs=message_list,
                                generated_responses=response_list)
    bot = chatbot(conversation.messages[0]['content'])  # pass only the latest user message
    return bot[-1]['generated_text']
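# Note (assumption about the environment): the Conversation class and the
# conversational pipeline are deprecated and removed in recent transformers
# releases. A rough equivalent using the text2text-generation task, since
# BlenderBot is a seq2seq model (a sketch, not the original author's code):
#
# chatbot_alt = pipeline("text2text-generation", model="facebook/blenderbot-400M-distill")
# def vanilla_chatbot_alt(message):
#     out = chatbot_alt(message, max_new_tokens=100)
#     return out[0]['generated_text']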
# Music generation: MusicGen returns a dict with the waveform and its sampling rate
def generate_music(prompt):
    music = synthesiser(prompt, forward_params={"do_sample": True, "max_new_tokens": 100})
    rate = music["sampling_rate"]
    audio = music["audio"][0].reshape(-1)  # flatten to a 1-D waveform for gr.Audio
    return rate, audio
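# The scipy import above is presumably meant for exporting the audio to disk; a
# minimal sketch (the helper name and filename are illustrative, not from the original):
#
# def save_music(prompt, path="musicgen_out.wav"):
#     rate, audio = generate_music(prompt)
#     scipy.io.wavfile.write(path, rate=rate, data=audio)
#     return path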
# Image generation: latent diffusion text-to-image
def generate_image(prompt):
    images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6)
    return images.images[0]

# Route the prompt to the selected model; unused outputs are returned as None
def process_input(prompt, choice):
    if choice == "Chat":
        return vanilla_chatbot(prompt), None, None
    elif choice == "Music":
        rate, audio = generate_music(prompt)
        return None, (rate, audio), None
    else:
        return None, None, generate_image(prompt)
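# Illustrative dispatch examples (values are placeholders):
#
# text, _, _ = process_input("Hello there!", "Chat")          # chatbot reply as text
# _, (rate, wave), _ = process_input("lo-fi beat", "Music")   # (sampling_rate, waveform) tuple
# _, _, img = process_input("a cat using a laptop", "Image")  # PIL image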
      
# Alternative UI using gr.Blocks (kept for reference):
# demo = gr.Blocks()
# with demo:
#     with gr.Row():
#         text_input = gr.Textbox()
#         choice = gr.Radio(choices=["Chat","Music","Image"])

#     with gr.Row():
#         chatbot_output = gr.Textbox()
#         music_output =gr.Audio()
#         image_output =gr.Image()
        
#     submit_btn = gr.Button("Generate")
    
#     submit_btn.click(fn=process_input,inputs=[text_input,choice],outputs=[chatbot_output,music_output,image_output])

# demo.launch(debug=True)


demo = gr.Interface(
    fn=process_input,
    inputs=[gr.Textbox(), gr.Radio(["Chat", "Music", "Image"])],
    outputs=[gr.Textbox(), gr.Audio(), gr.Image()],
    # outputs=["text", "audio", "image"]
    title="Multimodal Assistant",
)

demo.launch(debug=True)

# demo_chatbot = gr.ChatInterface(vanilla_chatbot, title="Mashdemy Chatbot", 
# description="Enter text to start chatting.")
# demo_chatbot.launch(share = True)
    
# inf = gr.Interface(generate_music,title = "Mashdemy Demo Music Generator App", description = """Type in the kind 
# of music you prefer and click submit""", inputs =["text"], outputs=["audio"], 
# examples = ["lo-fi music with a soothing melody", "pop music"])
# inf.launch(share=True)

    
# interface = gr.Interface(fn = generate_image,inputs = "text",outputs = "image",
# title = "Mashdemy Demo Image Generator App", description = "Type in a text and click submit to generate an image:", 
# examples = ["a clown reading a book", "a cat using a laptop", "An elephant on grass"])
# #Launch application
# interface.launch(share = True)