pratham0011 committed on
Commit 3e4baba · verified · 1 Parent(s): 792e562

Update app.py

Files changed (1)
  1. app.py +96 -100
app.py CHANGED
@@ -1,100 +1,96 @@
- import asyncio
- import logging
- import gradio as gr
-
- from services.qwen import respond
-
-
- logger = logging.getLogger(__name__)
-
- # Track conversation state
- conversation_history = []
-
- def clear_conversation():
-     global conversation_history
-     conversation_history = []
-     return [],None
-
- def sync_respond(audio, text_input, do_search, history):
-     if not audio and not text_input:
-         return None, history
-
-     logger.info(f"Processing request with search enabled: {do_search}")
-     result = asyncio.run(respond(audio, text_input, do_search, history))
-     audio_path, response_text = result
-
-     if audio:
-         user_message = {"role": "user", "content": "Voice message"}
-     else:
-         user_message = {"role": "user", "content": text_input}
-
-     assistant_message = {"role": "assistant", "content": response_text}
-     history.extend([user_message, assistant_message])
-
-     return audio_path, history
-
- # Build Gradio interface
- with gr.Blocks(theme=gr.themes.Soft()) as interface:
-     gr.Markdown(
-         """
-         <div style="text-align: center; margin-bottom: 1rem;">
-             <h1 style="font-weight: bold;">ConversAI: AI Voice & Chat Assistant</h1>
-         </div>
-         """,
-         show_label=False
-     )
-
-     # Input components (left column)
-     with gr.Row():
-         with gr.Column(scale=2):
-             audio_input = gr.Audio(
-                 label="Your Voice Input",
-                 type="filepath",
-                 sources=["microphone"]
-             )
-             text_input = gr.Textbox(
-                 label="Or Type Your Message",
-                 placeholder="Type here..."
-             )
-             search_checkbox = gr.Checkbox(
-                 label="Enable web search",
-                 value=False
-             )
-             clear_btn = gr.Button("Clear Chat")
-
-         # Output components (right column)
-         with gr.Column(scale=3):
-             chatbot = gr.Chatbot(label="Conversation", type="messages")
-             audio_output = gr.Audio(
-                 label="AI Voice Response",
-                 type="filepath",
-                 autoplay=True
-             )
-
-     # Define input event handlers
-     input_events = [
-         audio_input.change(
-             fn=sync_respond,
-             inputs=[audio_input, text_input,search_checkbox, chatbot],
-             outputs=[audio_output, chatbot]
-         ),
-         text_input.submit(
-             fn=sync_respond,
-             inputs=[audio_input, text_input, search_checkbox, chatbot],
-             outputs=[audio_output, chatbot]
-         )
-     ]
-
-     # Clear chat button handler
-     clear_btn.click(
-         fn=clear_conversation,
-         outputs=[chatbot, audio_output]
-     )
-
- # Start server
- if __name__ == "__main__":
-     interface.launch(
-         server_name="0.0.0.0",
-         server_port=7860,
-         debug=True
-     )
 
+ import asyncio
+ import logging
+ import gradio as gr
+
+ from services.qwen import respond
+
+
+ logger = logging.getLogger(__name__)
+
+ # Track conversation state
+ conversation_history = []
+
+ def clear_conversation():
+     global conversation_history
+     conversation_history = []
+     return [],None
+
+ def sync_respond(audio, text_input, do_search, history):
+     if not audio and not text_input:
+         return None, history
+
+     logger.info(f"Processing request with search enabled: {do_search}")
+     result = asyncio.run(respond(audio, text_input, do_search, history))
+     audio_path, response_text = result
+
+     if audio:
+         user_message = {"role": "user", "content": "Voice message"}
+     else:
+         user_message = {"role": "user", "content": text_input}
+
+     assistant_message = {"role": "assistant", "content": response_text}
+     history.extend([user_message, assistant_message])
+
+     return audio_path, history
+
+ # Build Gradio interface
+ with gr.Blocks(theme=gr.themes.Soft()) as interface:
+     gr.Markdown(
+         """
+         <div style="text-align: center; margin-bottom: 1rem;">
+             <h1 style="font-weight: bold;">ConversAI: AI Voice & Chat Assistant</h1>
+         </div>
+         """,
+         show_label=False
+     )
+
+     # Input components (left column)
+     with gr.Row():
+         with gr.Column(scale=2):
+             audio_input = gr.Audio(
+                 label="Your Voice Input",
+                 type="filepath",
+                 sources=["microphone"]
+             )
+             text_input = gr.Textbox(
+                 label="Or Type Your Message",
+                 placeholder="Type here..."
+             )
+             search_checkbox = gr.Checkbox(
+                 label="Enable web search",
+                 value=False
+             )
+             clear_btn = gr.Button("Clear Chat")
+
+         # Output components (right column)
+         with gr.Column(scale=3):
+             chatbot = gr.Chatbot(label="Conversation", type="messages")
+             audio_output = gr.Audio(
+                 label="AI Voice Response",
+                 type="filepath",
+                 autoplay=True
+             )
+
+     # Define input event handlers
+     input_events = [
+         audio_input.change(
+             fn=sync_respond,
+             inputs=[audio_input, text_input,search_checkbox, chatbot],
+             outputs=[audio_output, chatbot]
+         ),
+         text_input.submit(
+             fn=sync_respond,
+             inputs=[audio_input, text_input, search_checkbox, chatbot],
+             outputs=[audio_output, chatbot]
+         )
+     ]
+
+     # Clear chat button handler
+     clear_btn.click(
+         fn=clear_conversation,
+         outputs=[chatbot, audio_output]
+     )
+
+ # Start server
+ if __name__ == "__main__":
+     interface.launch(debug=True)
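
For readers without the rest of the repository: sync_respond in app.py only assumes that services.qwen.respond is an async callable taking (audio, text_input, do_search, history) and returning an (audio_path, response_text) pair. Below is a minimal sketch of that assumed interface, with placeholder logic; the Space's actual implementation (ASR, the Qwen model, optional web search, TTS) is not shown in this commit, so everything beyond the signature and return shape is hypothetical.

    # services/qwen.py -- hypothetical stub matching the call site in app.py.
    # Only the signature and the (audio_path, response_text) return shape are
    # taken from app.py; the bodies below are placeholders, not the real logic.
    from typing import Optional

    async def respond(
        audio: Optional[str],       # filepath from gr.Audio, or None
        text_input: Optional[str],  # typed message, or None
        do_search: bool,            # "Enable web search" checkbox value
        history: list,              # gr.Chatbot "messages"-format history
    ) -> tuple[Optional[str], str]:
        """Return (audio_path, response_text) for app.py's sync_respond."""
        query = text_input or "transcribed voice message"  # placeholder for ASR output
        response_text = f"Echo: {query}"                   # placeholder for model reply
        audio_path = None                                  # placeholder for a TTS output file
        return audio_path, response_text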