Update app.py
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 import time
-from transformers import pipeline
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 from sentence_transformers import SentenceTransformer
 from sklearn.metrics.pairwise import cosine_similarity
 from TTS.api import TTS  # Coqui TTS library
@@ -10,6 +10,9 @@ import PyPDF2
 stt_model = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
 embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
 tts_model = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=False)
+gpt_model_name = "OpenAssistant/oasst-sft-6-llama-30b"
+gpt_tokenizer = AutoTokenizer.from_pretrained(gpt_model_name)
+gpt_model = AutoModelForCausalLM.from_pretrained(gpt_model_name)
 
 # Parse PDF and create resume content
 def parse_resume(pdf):
@@ -26,15 +29,20 @@ def process_inputs(resume, job_desc):
     job_desc_embedding = embedding_model.encode(job_desc)
     return resume_embeddings, job_desc_embedding
 
-# Generate a follow-up question
-def
-
-
-
-
-}
-
-
+# Generate a follow-up question using GPT
+def generate_question_gpt(response, resume_embeddings, job_description):
+    prompt = f"""
+    You are a hiring manager conducting a professional job interview.
+    Job Description: {job_description}
+    Candidate's Resume Insights: {resume_embeddings}
+    Candidate's Last Response: {response}
+
+    Based on the job description and candidate's resume, generate a professional follow-up question.
+    """
+    inputs = gpt_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
+    outputs = gpt_model.generate(**inputs, max_length=150, num_beams=3, early_stopping=True)
+    question = gpt_tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return question.strip()
 
 # Generate TTS audio for a question
 def generate_audio(question):
@@ -66,8 +74,8 @@ class MockInterview:
         if not transcription.strip():
             return "No response detected. Please try again.", None
 
-        # Generate the next question
-        self.current_question =
+        # Generate the next question using GPT
+        self.current_question = generate_question_gpt(transcription, self.resume_embeddings, self.job_desc_embedding)
         return transcription, generate_audio(self.current_question)
 
     def end_interview(self):
@@ -98,11 +106,11 @@ with interface:
     audio_input = gr.Audio(type="filepath", label="Your Response")
     question_audio_output = gr.Audio(label="Question Audio")
    transcription_output = gr.Textbox(label="Transcription")
-    interaction_button = gr.Button("Next Interaction")
-    end_button = gr.Button("End Interview")
 
-
-
+    resume_input.change(start_interview, inputs=[resume_input, job_desc_input], outputs=[transcription_output, question_audio_output])
+    audio_input.change(next_interaction, inputs=[audio_input], outputs=[transcription_output, question_audio_output])
+
+    end_button = gr.Button("End Interview")
     end_button.click(end_interview, outputs=[transcription_output, question_audio_output])
 
 if __name__ == "__main__":
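
A note on the generation call: in transformers, max_length in generate() counts the prompt tokens as well as the newly generated ones, so a prompt anywhere near the 512-token truncation limit leaves max_length=150 no room to produce a question at all. A minimal sketch of the safer variant, assuming the same gpt_tokenizer and gpt_model objects from the diff:

    # Bound only the newly generated tokens, not prompt + continuation.
    inputs = gpt_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = gpt_model.generate(**inputs, max_new_tokens=150, num_beams=3, early_stopping=True)

    # Decoder-only models echo the prompt in the output; slice it off before decoding.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    question = gpt_tokenizer.decode(new_tokens, skip_special_tokens=True)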
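Separately, "Candidate's Resume Insights: {resume_embeddings}" interpolates raw embedding vectors into the f-string, so the model sees a dump of floating-point numbers rather than resume text. One way to recover usable context, sketched under the assumption that parse_resume can also return the raw sentences (the resume_sentences list below is hypothetical), is to pick the resume sentences closest to the job description with the already-imported cosine_similarity:

    import numpy as np

    def top_resume_snippets(resume_sentences, resume_embeddings, job_desc_embedding, k=3):
        # Rank resume sentences by similarity to the job description and keep the top k.
        scores = cosine_similarity(resume_embeddings, job_desc_embedding.reshape(1, -1)).ravel()
        top_idx = np.argsort(scores)[::-1][:k]
        return [resume_sentences[i] for i in top_idx]

    # Inside generate_question_gpt, readable text would then replace the raw vectors:
    # insights = "; ".join(top_resume_snippets(resume_sentences, resume_embeddings, job_desc_embedding))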
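Finally, OpenAssistant/oasst-sft-6-llama-30b is a 30-billion-parameter checkpoint; a plain from_pretrained call loads it in float32, which needs well over 100 GB of RAM and will not start on a free CPU Space. For smoke-testing the pipeline, any small causal LM can stand in behind the same two calls; the checkpoint name below is only an example substitute, not part of this change:

    gpt_model_name = "gpt2"  # placeholder checkpoint for constrained hardware
    gpt_tokenizer = AutoTokenizer.from_pretrained(gpt_model_name)
    gpt_model = AutoModelForCausalLM.from_pretrained(
        gpt_model_name,
        low_cpu_mem_usage=True,  # avoid materializing a second full copy of the weights
    )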