garyd1 committed
Commit 8496f4f · verified · 1 Parent(s): 02b7531

Update app.py

Files changed (1):
  1. app.py +29 -5
app.py CHANGED
@@ -4,6 +4,8 @@ from transformers import pipeline
 from sentence_transformers import SentenceTransformer
 from sklearn.metrics.pairwise import cosine_similarity
 import PyPDF2
+import pyttsx3
+import time
 
 # Load local models for inference
 stt_model = pipeline("automatic-speech-recognition", model="openai/whisper-base")
@@ -12,6 +14,14 @@ conversation_model = pipeline("text-generation", model="facebook/blenderbot-400M
 # Load a pre-trained model for vector embeddings
 embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
 
+# Text-to-Speech engine setup
+tts_engine = pyttsx3.init()
+
+def speak_text(text):
+    """Speak the given text using TTS engine."""
+    tts_engine.say(text)
+    tts_engine.runAndWait()
+
 # Parse PDF and create resume content
 def parse_resume(pdf):
     """Extract text from an uploaded PDF file."""
@@ -55,21 +65,32 @@ class MockInterview:
         self.resume_embeddings = process_resume(resume)
         self.job_desc_embedding = process_job_description(job_desc)
         self.interview_active = True
-        return "Resume and job description processed. Interview is ready to start."
+        return "Resume and job description processed. Starting the interview."
+
+    def start_interview(self):
+        if not self.interview_active:
+            return "Please upload your resume and job description first."
+        question = "Tell me about yourself."
+        speak_text(question)
+        return question
 
     def conduct_interview(self, audio_file):
         if not self.interview_active:
             return "Please upload your resume and job description first.", ""
 
-        transcription = stt_model(audio_file)["text"]  # Transcribe audio
+        # Transcribe audio
+        transcription = stt_model(audio_file)["text"]
         if not transcription.strip():
             return "No audio detected. Please try again.", ""
 
+        # Generate next question
         question = generate_question(transcription, self.resume_embeddings)
+        speak_text(question)
         return transcription, question
 
     def end_interview(self):
         self.interview_active = False
+        speak_text("Thank you for participating in the interview. Goodbye!")
         return "Interview ended. Thank you for participating."
 
 mock_interview = MockInterview()
@@ -77,6 +98,9 @@ mock_interview = MockInterview()
 def upload_inputs(resume, job_desc):
     return mock_interview.upload_inputs(resume, job_desc)
 
+def start_interview():
+    return mock_interview.start_interview()
+
 def conduct_interview(audio_file):
     return mock_interview.conduct_interview(audio_file)
 
@@ -86,7 +110,7 @@ def end_interview():
 interface = gr.Blocks()
 with interface:
     gr.Markdown("""# Mock Interview AI
-Upload your resume and job description, then engage in a realistic interview simulation.""")
+Upload your resume and job description, then engage in a realistic audio-based interview simulation.""")
 
     with gr.Row():
         resume_input = gr.File(label="Upload Resume (PDF)")
@@ -100,9 +124,9 @@ Upload your resume and job description, then engage in a realistic interview sim
     submit_button = gr.Button("Submit Response")
     end_button = gr.Button("End Interview")
 
-    upload_button.click(upload_inputs, inputs=[resume_input, job_desc_input], outputs=[transcription_output])
+    upload_button.click(upload_inputs, inputs=[resume_input, job_desc_input], outputs=[question_output])
     submit_button.click(conduct_interview, inputs=[audio_input], outputs=[transcription_output, question_output])
-    end_button.click(end_interview, outputs=[transcription_output])
+    end_button.click(end_interview, outputs=[question_output])
 
 if __name__ == "__main__":
     interface.launch()
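The commit adds a module-level start_interview() wrapper, but none of the hunks above wire it to a UI event. A self-contained sketch of how such a wiring could look; the start_button component, its label, and the stub handler are hypothetical illustrations, not part of this commit:

import gradio as gr

def start_interview():
    # Stand-in for mock_interview.start_interview() from this commit;
    # it returns the opening question shown (and spoken) to the candidate.
    return "Tell me about yourself."

demo = gr.Blocks()
with demo:
    question_output = gr.Textbox(label="Interviewer Question")  # assumed label
    start_button = gr.Button("Start Interview")                 # hypothetical button
    start_button.click(start_interview, outputs=[question_output])

if __name__ == "__main__":
    demo.launch()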
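The new pyttsx3-based speech output runs a local TTS backend via pyttsx3.init(), which can fail where no speech driver is installed. A minimal defensive variant of the commit's speak_text() helper, offered as an assumption rather than the author's code, that degrades to text-only output instead of raising:

import pyttsx3

# Sketch only: initialize the TTS engine if a backend exists; otherwise
# keep the app usable in text-only mode.
try:
    tts_engine = pyttsx3.init()
except Exception:
    tts_engine = None

def speak_text(text):
    """Speak the given text when a TTS backend is available; otherwise no-op."""
    if tts_engine is None:
        return
    tts_engine.say(text)
    tts_engine.runAndWait()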