vai0511 committed on
Commit
05d276c
·
verified ·
1 Parent(s): e177899

Added system prompt

Browse files
Files changed (1) hide show
  1. app.py +116 -47
app.py CHANGED
@@ -1,73 +1,142 @@
1
  import gradio as gr
2
  from transformers import T5Tokenizer, T5ForConditionalGeneration
 
 
 
 
 
 
 
 
 
 
 
3
 
4
- # Load your Hugging Face model
5
  model_name = "vai0511/flan-t5-ai-mock-interviewer"
6
 
7
  tokenizer = T5Tokenizer.from_pretrained(model_name)
8
  model = T5ForConditionalGeneration.from_pretrained(model_name)
9
 
10
 
11
- def init_interview(domain, company):
12
- prompt = f"Generate first interview question for {domain} at {company}:"
13
- inputs = tokenizer(prompt, return_tensors="pt")
14
- outputs = model.generate(**inputs, max_new_tokens=100)
15
- first_question = tokenizer.decode(outputs[0], skip_special_tokens=True)
16
-
17
- return [
18
- (None, f"Interview started for {domain} at {company or 'a company'}"),
19
- (None, first_question)
20
- ]
21
 
22
- def respond(history, domain, company):
23
- last_user_answer = history[-1][0] # Get user's last input
 
24
 
25
- prompt = f"""
26
- You are a technical interviewer for {company or 'a tech company'}
27
- assessing a candidate in {domain}.
28
-
29
- The candidate just answered: {last_user_answer}
 
 
 
30
 
31
- Generate a follow-up question that:
32
- 1. Probes deeper into technical knowledge
33
- 2. Relates to their previous answer
34
- 3. Focuses on {domain} concepts
35
 
36
- Next question:
37
- """
38
- inputs = tokenizer(prompt, return_tensors="pt")
39
- outputs = model.generate(**inputs, max_new_tokens=100)
40
- next_question = tokenizer.decode(outputs[0], skip_special_tokens=True)
41
 
42
- return history + [(None, next_question)]
43
 
44
- with gr.Blocks() as demo:
45
- gr.Markdown("## AI Interview Practice")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
- with gr.Row():
48
- domain = gr.Textbox(label="Domain")
49
- company = gr.Textbox(label="Company (Optional)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
- chatbot = gr.Chatbot()
52
- msg = gr.Textbox(label="Your Answer")
53
- start_btn = gr.Button("Start Interview")
54
- submit_btn = gr.Button("Submit Answer")
 
 
 
 
 
 
 
 
55
 
 
56
  start_btn.click(
57
- init_interview,
58
- inputs=[domain, company],
59
- outputs=[chatbot]
 
 
 
 
60
  )
61
-
 
62
  submit_btn.click(
63
- lambda message: [(message, None)],
64
  inputs=[msg],
65
- outputs=[chatbot],
66
- queue=False
67
  ).then(
68
  respond,
69
- inputs=[chatbot, domain, company],
70
- outputs=[chatbot]
71
- ).then(lambda: "", None, msg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import T5Tokenizer, T5ForConditionalGeneration
3
+ import torch
4
+ from transformers import AutoTokenizer
5
+
6
# Instruction prefix prepended to every generation request in
# generate_question(); it steers the fine-tuned FLAN-T5 model to behave
# as a mock technical interviewer (do NOT edit casually — the model was
# presumably fine-tuned/evaluated against prompts of this shape).
system_prompt = """
You are conducting a mock technical interview. Generate questions and follow-up questions based on the domain provided. Consider these aspects:
1. The question should be relevant to the domain (e.g., software engineering, machine learning).
2. For follow-up questions, analyze the candidate's last response and ask questions that probe deeper into their understanding, challenge their approach, or request clarification.
3. The follow-up question should aim to explore the candidate's depth of knowledge and ability to adapt.
4. If no clear follow-up can be derived, generate a fresh, related question.
Important: Ensure that each question is clear, concise, and allows the candidate to demonstrate their technical and communicative abilities effectively.
"""
14
 
 
15
# Fine-tuned FLAN-T5 checkpoint hosted on the Hugging Face Hub.
model_name = "vai0511/flan-t5-ai-mock-interviewer"

# Load tokenizer and seq2seq model once at module import; both globals are
# reused by generate_question() for every request.
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
19
 
20
 
21
def init_state():
    """Return a fresh per-session conversation-state dict.

    Each call builds brand-new objects so the mutable ``questions`` list
    is never shared between interview sessions.
    """
    fresh = dict.fromkeys(("domain", "company"), "")
    fresh["questions"] = []       # list of {"question": str, "response": str|None}
    fresh["current_step"] = 0     # 0 means "interview not started yet"
    fresh["max_questions"] = 10   # soft cap on interview length
    return fresh
 
30
 
31
def generate_question(prompt):
    """Generate a single interview question from the model.

    Args:
        prompt: Task-specific instruction text (domain, company, or the
            candidate's last answer). The module-level ``system_prompt``
            is prepended before tokenization.

    Returns:
        A question string ending in "?". If the raw generation contains a
        "?", everything after the first one is discarded; otherwise a "?"
        is appended.
    """
    full_prompt = system_prompt + "\n" + prompt

    # BUG FIX: the previous code hard-coded `.to("cuda")`, which crashes on
    # CPU-only hosts — and the model itself was never moved to CUDA, so the
    # inputs/model devices mismatched even when a GPU was present. Send the
    # inputs to whatever device the model actually lives on instead.
    device = next(model.parameters()).device
    inputs = tokenizer(full_prompt, return_tensors="pt").to(device)

    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        do_sample=True,  # sample so repeated prompts produce varied questions
        top_p=0.9,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )

    question = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # T5 is seq2seq, so the prompt is normally not echoed in the output;
    # strip it defensively in case the model copies it verbatim.
    question = question.replace(full_prompt, "").strip()

    # Normalize: keep only the first question and guarantee a trailing "?".
    if not question.endswith("?"):
        question = question.split("?")[0] + "?"

    return question
51
 
52
def respond(history, message, state):
    """Drive one turn of the mock interview.

    On the very first call (``current_step == 0``) the candidate's
    ``message`` is ignored: the interview is opened and the first question
    is asked. On later calls the answer is recorded against the pending
    question and a follow-up is generated (falling back to a fresh
    question when the model's output is not a question at all).

    Returns:
        (updated chat history, mutated ``state`` dict)
    """
    domain = state["domain"]

    if state["current_step"] != 0:
        # Record the candidate's answer against the pending question.
        state["questions"][-1]["response"] = message

        question = generate_question(
            f"Domain: {domain}. Candidate's last response: {message}. Generate follow-up:"
        )
        # No usable follow-up produced -> ask a brand-new question instead.
        if "?" not in question:
            question = generate_question(f"Domain: {domain}. Generate new question:")

        state["questions"].append({"question": question, "response": None})
        state["current_step"] += 1

        history += [(message, None), (None, question)]  # echo answer, then ask
        return history, state

    # First turn: assemble the opening prompt from the domain (and company,
    # when one was supplied).
    pieces = [f"Domain: {domain}. "]
    if state["company"]:
        pieces.append(f"Company: {state['company']}. ")
    pieces.append("Generate the first technical question:")
    question = generate_question("".join(pieces))

    state["questions"].append({"question": question, "response": None})
    state["current_step"] += 1

    intro = f"Starting {domain} interview for {state['company'] or 'general'}"
    # Prior history is intentionally discarded when a new interview starts.
    return [(None, intro), (None, question)], state
88
+
89
with gr.Blocks(title="AI Interviewer") as demo:
    # Per-session conversation state; gr.State calls the init_state factory
    # to give each browser session its own fresh dict.
    state = gr.State(init_state)

    with gr.Column():
        gr.Markdown("## Technical Interview Simulator")

        with gr.Row():
            domain = gr.Textbox(label="Domain (e.g., Machine Learning)", interactive=True)
            company = gr.Textbox(label="Company (Optional)", interactive=True)
            start_btn = gr.Button("Start Interview")

        chatbot = gr.Chatbot(label="Conversation")
        msg = gr.Textbox(label="Your Answer", interactive=True)
        submit_btn = gr.Button("Submit Answer")
        end_btn = gr.Button("End Interview")

    def _start_state(d, c):
        # BUG FIX: the old lambda returned `(s.update(...), s)` — i.e. a
        # 2-tuple (None, s) for the single `outputs=[state]` slot — and it
        # never reset the "questions" list, so restarting leaked the
        # previous interview. Build a clean state instead.
        s = init_state()
        s["domain"] = d
        s["company"] = c
        return s

    # Start interview: reset the state, then ask the opening question.
    start_btn.click(
        _start_state,
        inputs=[domain, company],
        outputs=[state],
    ).then(
        # BUG FIX: the old code constructed a hidden gr.Textbox inline
        # inside the `inputs` list; pass the empty first-turn message via a
        # closure instead.
        lambda h, s: respond(h, "", s),
        inputs=[chatbot, state],
        outputs=[chatbot, state],
    )

    # Handle candidate answers. (The old no-op `lambda x: x` pre-step that
    # wrote msg back to itself has been dropped — respond() already echoes
    # the answer into the chat history.)
    submit_btn.click(
        respond,
        inputs=[chatbot, msg, state],
        outputs=[chatbot, state],
    ).then(
        lambda: "",  # clear the answer box after each turn
        None,
        msg,
    )

    # End interview: show a closing message and reset the session state.
    end_btn.click(
        lambda: [(None, "Interview ended. Thank you!")],
        None,
        chatbot,
    ).then(
        init_state,
        None,
        state,
    )

demo.launch()