Krishnavamshithumma committed (verified)
Commit c53823f · 1 Parent(s): 4b2a6d1

Update app.py

Files changed (1)
  1. app.py +48 -58
app.py CHANGED
@@ -1,80 +1,70 @@
 import gradio as gr
-import os
 from openai import OpenAI
 
-
 system_prompt = """
-You are a voice bot representing Krishnavamshi Thumma. When responding to questions, answer as if you are:
+You are a voice bot representing Krishnavamshi Thumma. When responding to questions, answer as if you are:
 
-- A Generative AI and Data Engineering enthusiast with 1.5+ years of experience in data pipelines, automation, and scalable solutions
-- Currently working as a Data Engineer at Wishkarma in Hyderabad, where you've optimized ETL pipelines processing 10K+ records daily and developed an image-based product similarity search engine using CLIP-ViT-L/14
-- Previously worked as a Data Engineer Intern at DeepThought Growth Management System, where you processed 700+ data records and mentored 400+ students
-- Skilled in Python, SQL, JavaScript (Node.js), OpenAI GPT-4o, LangChain, MongoDB Vector Search, FAISS, Apache Airflow, AWS Lambda, and FastAPI
-- Experienced in building GenAI products including conversational AI chatbots, RAG pipelines, and AI-powered tools
-- A Computer Science graduate from Neil Gogte Institute of Technology with a CGPA of 7.5/10
-- Passionate about solving real-world problems at the intersection of AI and software engineering
+- A Generative AI and Data Engineering enthusiast with 1.5+ years of experience in data pipelines, automation, and scalable solutions
+- Currently working as a Data Engineer at Wishkarma in Hyderabad, where you've optimized ETL pipelines processing 10K+ records daily and developed an image-based product similarity search engine using CLIP-ViT-L/14
+- Previously worked as a Data Engineer Intern at DeepThought Growth Management System, where you processed 700+ data records and mentored 400+ students
+- Skilled in Python, SQL, JavaScript (Node.js), OpenAI GPT-4o, LangChain, MongoDB Vector Search, FAISS, Apache Airflow, AWS Lambda, and FastAPI
+- Experienced in building GenAI products including conversational AI chatbots, RAG pipelines, and AI-powered tools
+- A Computer Science graduate from Neil Gogte Institute of Technology with a CGPA of 7.5/10
+- Passionate about solving real-world problems at the intersection of AI and software engineering
 
-Answer questions about your background, experience, projects, and skills based on this resume. Keep responses professional but engaging (2-3 sentences max for most questions).
-"""
+Answer questions about your background, experience, projects, and skills based on this resume. Keep responses professional but engaging (2-3 sentences max for most questions).
+"""
 
-# Chat function that takes a question + API key and keeps history
-def transcribe_and_chat(audio, history, api_key):
+def chat_with_openai(user_input, history, api_key):
     if not api_key:
         return history, "❌ Please enter your OpenAI API key."
-
+
     try:
         client = OpenAI(api_key=api_key)
-
-        # 1) Transcribe the audio file with Whisper
-        with open(audio, "rb") as f:
-            whisper_resp = client.audio.transcriptions.create(
-                model="whisper-1",
-                file=f
-            )
-        user_text = whisper_resp.text.strip()
 
-        # 2) Append user message to history
+        # Build messages from history
         messages = [{"role": "system", "content": system_prompt}]
-        for u, b in history:
-            messages += [
-                {"role": "user", "content": u},
-                {"role": "assistant", "content": b}
-            ]
-        messages.append({"role": "user", "content": user_text})
+        for entry in history:
+            messages.append({"role": "user", "content": entry[0]})
+            messages.append({"role": "assistant", "content": entry[1]})
+        messages.append({"role": "user", "content": user_input})
 
-        # 3) ChatCompletion
-        chat_resp = client.chat.completions.create(
-            model="gpt-4",
+        # Get response from OpenAI
+        response = client.chat.completions.create(
+            model="gpt-4o",
             messages=messages,
             temperature=0.7
         )
-        bot_reply = chat_resp.choices[0].message.content.strip()
-
-        # 4) Update history
-        history.append((user_text, bot_reply))
+
+        bot_reply = response.choices[0].message.content
+        history.append((user_input, bot_reply))
         return history, history
-
+
     except Exception as e:
-        return history, f"❌ Error: {e}"
-
-with gr.Blocks(title="Voice Bot: Krishnavamshi Thumma") as demo:
-    gr.Markdown("## 🎙️ Speak your question; get a text answer")
+        return history, f"❌ Error: {str(e)}"
 
-    api_key = gr.Textbox(label="🔐 OpenAI API Key", type="password")
-    chatbot = gr.Chatbot(label="🧠 Voice Bot", type="messages")
-    mic = gr.Audio(sources=["microphone"], type="filepath", label="🎤 Record your question")
-    clear = gr.Button("Clear chat")
-    state = gr.State([])
-
-    # When mic component gets new audio, run transcription + chat
-    mic.change(
-        fn=transcribe_and_chat,
-        inputs=[mic, state, api_key],
-        outputs=[chatbot, state]
+with gr.Blocks(title="Voice Bot: Krishnavamshi Thumma", js="./custom.js") as demo:
+    gr.Markdown("## 🎙️ Krishnavamshi Thumma - Voice Assistant")
+
+    api_key = gr.Textbox(label="🔐 OpenAI API Key", type="password", elem_id="apiKeyInput")
+    chatbot = gr.Chatbot(elem_id="chatBox")
+    state = gr.State([])
+
+    with gr.Row():
+        mic_btn = gr.Button("🎤 Speak", elem_id="micButton")
+        clear_btn = gr.Button("🗑️ Clear Chat")
+
+    # Hidden components for JS communication
+    voice_input = gr.Textbox(visible=False, elem_id="voiceInput")
+    js_trigger = gr.Textbox(visible=False, elem_id="jsTrigger")
+
+    # Event handlers
+    voice_input.change(
+        chat_with_openai,
+        [voice_input, state, api_key],
+        [chatbot, state]
     )
+    mic_btn.click(None, None, None, _js="startListening")
+    clear_btn.click(lambda: ([], []), None, [chatbot, state])
 
-    # Clear history
-    clear.click(lambda: ([], []), None, [chatbot, state])
-
-    # Note: we don’t need share=True on Spaces
-    demo.launch()
+    demo.launch()
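
The new version routes speech through a browser-side helper: mic_btn.click(None, None, None, _js="startListening") and the hidden voiceInput textbox both point at ./custom.js, which is not part of this commit. As a rough illustration only (the real custom.js may differ), a minimal startListening could use the browser's Web Speech API to place the transcript into the hidden Gradio textbox and fire an input event so that voice_input.change runs chat_with_openai. The DOM selector below is an assumption about how Gradio renders a Textbox with elem_id="voiceInput"; the function name startListening and the element ids come from the commit itself.

// Hypothetical sketch of custom.js (not included in this commit).
// Assumes the Web Speech API is available and that the hidden Gradio
// Textbox with elem_id="voiceInput" renders a <textarea> (or <input>)
// inside that container.
function startListening() {
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
  if (!SpeechRecognition) {
    alert("Speech recognition is not supported in this browser.");
    return;
  }

  const recognition = new SpeechRecognition();
  recognition.lang = "en-US";          // assumed language
  recognition.interimResults = false;  // only final results

  recognition.onresult = (event) => {
    const transcript = event.results[0][0].transcript;
    const box = document.querySelector("#voiceInput textarea, #voiceInput input");
    if (box) {
      box.value = transcript;
      // Dispatch an input event so Gradio registers the new value and
      // voice_input.change(...) invokes chat_with_openai on the server.
      box.dispatchEvent(new Event("input", { bubbles: true }));
    }
  };

  recognition.start();
}

In this design the hidden textbox is the bridge between the browser and Python: whatever the recognizer writes into it becomes user_input for chat_with_openai, while js_trigger is left available for any further JS-to-Python signalling.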