File size: 8,173 Bytes
4e6ea87
3667c7a
44800eb
e12b285
44800eb
3667c7a
9fc1785
e12b285
9fc1785
 
 
e12b285
9fc1785
 
 
 
e12b285
9fc1785
 
 
e12b285
 
 
 
9fc1785
 
 
3667c7a
e12b285
3667c7a
 
9fc1785
3667c7a
d6cd6c2
e12b285
d6cd6c2
 
 
8d3b67a
78654a1
d6cd6c2
 
8d3b67a
78654a1
d6cd6c2
e12b285
d6cd6c2
1e66988
d6cd6c2
 
 
 
 
 
e12b285
44800eb
 
e12b285
44800eb
 
d6cd6c2
 
 
 
 
 
 
e12b285
d6cd6c2
e12b285
 
 
 
 
 
 
d6cd6c2
 
 
 
e12b285
d6cd6c2
e12b285
 
 
 
 
 
 
d6cd6c2
 
 
 
3667c7a
e12b285
9fc1785
e12b285
4e6ea87
e12b285
9fc1785
 
e12b285
9fc1785
8d3b67a
 
 
e12b285
9fc1785
8d3b67a
44800eb
e12b285
 
 
9fc1785
8d3b67a
3667c7a
e12b285
 
 
82598a2
3667c7a
e12b285
3667c7a
78654a1
 
e12b285
 
 
82598a2
d6cd6c2
e0a8291
3667c7a
78654a1
 
e12b285
 
 
82598a2
78654a1
 
 
 
278d569
78654a1
 
3667c7a
e12b285
 
 
44800eb
9fc1785
e12b285
1f19f64
 
 
 
 
44800eb
e12b285
d6cd6c2
 
e12b285
 
d6cd6c2
e12b285
 
 
d6cd6c2
 
e12b285
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
import os
from openai import OpenAI
from utils.errors import APIError
from typing import List, Dict, Generator, Optional, Tuple


class PromptManager:
    """Builds system and user prompts for the interview bot.

    Optionally appends a brevity instruction to every prompt when the
    DEMO_WORD_LIMIT environment variable is set (used for demo deployments).
    """

    def __init__(self, prompts: Dict[str, str]):
        # Mapping of prompt key (e.g. "coding_interviewer_prompt") -> prompt text.
        self.prompts = prompts
        # Optional word cap read once from the environment; None when unset.
        self.limit = os.getenv("DEMO_WORD_LIMIT")

    def add_limit(self, prompt: str) -> str:
        """Return `prompt`, with a word-limit instruction appended when a limit is configured."""
        if self.limit:
            prompt += f" Keep your responses very short and simple, no more than {self.limit} words."
        return prompt

    def get_system_prompt(self, key: str) -> str:
        """Return the stored prompt for `key` (with any word limit applied).

        Raises:
            KeyError: if `key` is not present in the prompt mapping.
        """
        prompt = self.prompts[key]
        return self.add_limit(prompt)

    def get_problem_requirements_prompt(
        self, type: str, difficulty: Optional[str] = None, topic: Optional[str] = None, requirements: Optional[str] = None
    ) -> str:
        """Compose the user prompt asking the LLM to generate an interview problem.

        NOTE: `type` shadows the builtin but is kept for backward compatibility
        with callers passing it as a keyword argument.
        """
        # Fixed: the original string was missing the period after "Topic: {topic}",
        # producing a run-on sentence in the generated prompt.
        prompt = f"Create a {type} problem. Difficulty: {difficulty}. Topic: {topic}. Additional requirements: {requirements}. "
        return self.add_limit(prompt)


class LLMManager:
    """Wrapper around an OpenAI-compatible chat API for running mock interviews.

    On construction it probes the backend once (plain and streaming); the public
    `end_interview` / `get_problem` / `send_request` attributes are then bound to
    either the streaming or the non-streaming implementations accordingly.
    """

    def __init__(self, config, prompts: Dict[str, str]):
        self.config = config
        self.client = OpenAI(base_url=config.llm.url, api_key=config.llm.key)
        self.prompt_manager = PromptManager(prompts)

        # Probe the backend; only try streaming if the basic call works.
        self.status = self.test_llm()
        self.streaming = self.test_llm_stream() if self.status else False

        if self.streaming:
            self.end_interview = self.end_interview_stream
            self.get_problem = self.get_problem_stream
            self.send_request = self.send_request_stream
        else:
            self.end_interview = self.end_interview_full
            self.get_problem = self.get_problem_full
            self.send_request = self.send_request_full

    def get_text(self, messages: List[Dict[str, str]]) -> str:
        """Run a non-streaming chat completion and return the stripped reply text.

        Raises:
            APIError: on any transport/API failure, or when the response has no choices.
        """
        try:
            response = self.client.chat.completions.create(model=self.config.llm.name, messages=messages, temperature=1, max_tokens=2000)
        except Exception as e:
            raise APIError(f"LLM Get Text Error: Unexpected error: {e}")
        # Raised outside the try block so our own APIError is not re-wrapped
        # into a second "Unexpected error" APIError (original bug).
        if not response.choices:
            raise APIError("LLM Get Text Error", details="No choices in response")
        return response.choices[0].message.content.strip()

    def get_text_stream(self, messages: List[Dict[str, str]]) -> Generator[str, None, None]:
        """Run a streaming chat completion, yielding the accumulated text after each chunk.

        Raises:
            APIError: if the streaming request cannot be started.
        """
        try:
            response = self.client.chat.completions.create(
                model=self.config.llm.name, messages=messages, temperature=1, stream=True, max_tokens=2000
            )
        except Exception as e:
            # Fixed: the original raised a mislabeled "LLM End Interview Error" here.
            raise APIError(f"LLM Get Text Stream Error: Unexpected error: {e}")
        text = ""
        for chunk in response:
            # Some providers emit keep-alive chunks with an empty choices list;
            # guard against IndexError (original indexed choices[0] unconditionally).
            if chunk.choices and chunk.choices[0].delta.content:
                text += chunk.choices[0].delta.content
            yield text

    def test_llm(self) -> bool:
        """Return True if a basic non-streaming completion succeeds."""
        try:
            self.get_text(
                [
                    {"role": "system", "content": "You just help me test the connection."},
                    {"role": "user", "content": "Hi!"},
                    {"role": "user", "content": "Ping!"},
                ]
            )
            return True
        except Exception:
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit still propagate.
            return False

    def test_llm_stream(self) -> bool:
        """Return True if a streaming completion can be fully consumed."""
        try:
            for _ in self.get_text_stream(
                [
                    {"role": "system", "content": "You just help me test the connection."},
                    {"role": "user", "content": "Hi!"},
                    {"role": "user", "content": "Ping!"},
                ]
            ):
                pass
            return True
        except Exception:
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit still propagate.
            return False

    def init_bot(self, problem: str, interview_type: str = "coding") -> List[Dict[str, str]]:
        """Return the initial chat history: one system message embedding the problem."""
        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_interviewer_prompt")
        return [{"role": "system", "content": f"{system_prompt}\nThe candidate is solving the following problem:\n {problem}"}]

    def get_problem_prepare_messages(self, requirements: str, difficulty: str, topic: str, interview_type: str) -> List[Dict[str, str]]:
        """Build the system+user message pair for problem generation."""
        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_problem_generation_prompt")
        full_prompt = self.prompt_manager.get_problem_requirements_prompt(interview_type, difficulty, topic, requirements)
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": full_prompt},
        ]

    def get_problem_full(self, requirements: str, difficulty: str, topic: str, interview_type: str = "coding") -> str:
        """Generate an interview problem (non-streaming)."""
        messages = self.get_problem_prepare_messages(requirements, difficulty, topic, interview_type)
        return self.get_text(messages)

    def get_problem_stream(
        self, requirements: str, difficulty: str, topic: str, interview_type: str = "coding"
    ) -> Generator[str, None, None]:
        """Generate an interview problem, yielding the accumulated text as it streams."""
        messages = self.get_problem_prepare_messages(requirements, difficulty, topic, interview_type)
        yield from self.get_text_stream(messages)

    def update_chat_history(
        self, code: str, previous_code: str, chat_history: List[Dict[str, str]], chat_display: List[List[Optional[str]]]
    ) -> List[Dict[str, str]]:
        """Append the candidate's latest display message to the LLM chat history.

        If the editor content changed since the last turn, the current code is
        attached to the message so the interviewer sees it.
        """
        message = chat_display[-1][0]
        if code != previous_code:
            message += "\nMY NOTES AND CODE:\n" + code
        chat_history.append({"role": "user", "content": message})
        return chat_history

    def send_request_full(
        self, code: str, previous_code: str, chat_history: List[Dict[str, str]], chat_display: List[List[Optional[str]]]
    ) -> Tuple[List[Dict[str, str]], List[List[Optional[str]]], str]:
        """Send one turn (non-streaming) and return updated history, display, and code.

        The displayed reply is truncated at the "#NOTES#" marker; the full reply
        (including hidden notes) is kept in the chat history.
        """
        chat_history = self.update_chat_history(code, previous_code, chat_history, chat_display)
        reply = self.get_text(chat_history)
        chat_display.append([None, reply.split("#NOTES#")[0].strip()])
        chat_history.append({"role": "assistant", "content": reply})
        return chat_history, chat_display, code

    def send_request_stream(
        self, code: str, previous_code: str, chat_history: List[Dict[str, str]], chat_display: List[List[Optional[str]]]
    ) -> Generator[Tuple[List[Dict[str, str]], List[List[Optional[str]]], str], None, None]:
        """Send one turn (streaming), yielding (history, display, code) after each chunk.

        The display shows only text before the "#NOTES#" marker; the history keeps
        the full streamed reply.
        """
        chat_history = self.update_chat_history(code, previous_code, chat_history, chat_display)
        chat_display.append([None, ""])
        chat_history.append({"role": "assistant", "content": ""})
        reply = self.get_text_stream(chat_history)
        for message in reply:
            chat_display[-1][1] = message.split("#NOTES#")[0].strip()
            chat_history[-1]["content"] = message
            yield chat_history, chat_display, code

    def end_interview_prepare_messages(
        self, problem_description: str, chat_history: List[Dict[str, str]], interview_type: str
    ) -> List[Dict[str, str]]:
        """Build the grading-feedback messages from the problem and interview transcript.

        chat_history[0] (the system prompt) is excluded from the transcript.
        """
        transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]
        system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_grading_feedback_prompt")
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": f"The original problem to solve: {problem_description}"},
            {"role": "user", "content": "\n\n".join(transcript)},
            {"role": "user", "content": "Grade the interview based on the transcript provided and give feedback."},
        ]

    def end_interview_full(self, problem_description: str, chat_history: List[Dict[str, str]], interview_type: str = "coding") -> str:
        """Produce the final grading feedback (non-streaming)."""
        if len(chat_history) <= 2:
            return "No interview history available"
        messages = self.end_interview_prepare_messages(problem_description, chat_history, interview_type)
        return self.get_text(messages)

    def end_interview_stream(
        self, problem_description: str, chat_history: List[Dict[str, str]], interview_type: str = "coding"
    ) -> Generator[str, None, None]:
        """Produce the final grading feedback, yielding accumulated text as it streams."""
        if len(chat_history) <= 2:
            yield "No interview history available"
            # Fixed: the original fell through here and queried the LLM anyway,
            # contradicting the non-streaming variant's early return.
            return
        messages = self.end_interview_prepare_messages(problem_description, chat_history, interview_type)
        yield from self.get_text_stream(messages)