from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model

# ================================================================================

REFINE_QA_TEMPLATE = """Break down or rephrase the follow-up input into fewer than 3 heterogeneous one-hop queries to be used as input to a retrieval tool, if the follow-up input is a multi-hop, multi-step, complex, or comparative query relevant to the Chat History and Document Names. Otherwise, keep the follow-up input as it is.


The output format should strictly follow the example below, and each query can only contain 1 document name:
```
1. One-hop standalone query
...
3. One-hop standalone query
...
```


Document Names in the database:
```
{database}
```


Chat History:
```
{chat_history}
```


Begin:

Follow Up Input: {question}

One-hop standalone query(s):
"""


# ================================================================================

DOCS_SELECTION_TEMPLATE = """Below are some verified sources and a human input. If you think any of them are relevant to the human input, then list all possible context numbers.

```
{snippets}
```

The output format must be exactly as follows, nothing else. If no sources are relevant, output []:
[0, ..., n]

Human Input: {query}
"""


# ================================================================================

RETRIEVAL_QA_SYS = """You are a helpful assistant designed by IncarnaMind.
If you think the below information is relevant to the human input, please respond to the human based on the relevant retrieved sources; otherwise, respond in your own words only about the human input."""


RETRIEVAL_QA_TEMPLATE = """
File Names in the database:
```
{database}
```


Chat History:
```
{chat_history}
```


Verified Sources:
```
{context}
```


User: {question}
"""


RETRIEVAL_QA_CHAT_TEMPLATE = """
File Names in the database:
```
{database}
```


Chat History:
```
{chat_history}
```


Verified Sources:
```
{context}
```
"""


class PromptTemplates:
    """_summary_"""

    def __init__(self):
        self.refine_qa_prompt = REFINE_QA_TEMPLATE
        self.docs_selection_prompt = DOCS_SELECTION_TEMPLATE
        self.retrieval_qa_sys = RETRIEVAL_QA_SYS
        self.retrieval_qa_prompt = RETRIEVAL_QA_TEMPLATE
        self.retrieval_qa_chat_prompt = RETRIEVAL_QA_CHAT_TEMPLATE

    def get_refine_qa_template(self, llm: str):
        """get the refine qa prompt template"""
        if "llama" in llm.lower():
            temp = f"[INST] {self.refine_qa_prompt} [/INST]"
        else:
            temp = self.refine_qa_prompt

        return PromptTemplate(
            input_variables=["database", "chat_history", "question"],
            template=temp,
        )

    def get_docs_selection_template(self, llm: str):
        """get the docs selection prompt template"""
        if "llama" in llm.lower():
            temp = f"[INST] {self.docs_selection_prompt} [/INST]"
        else:
            temp = self.docs_selection_prompt

        return PromptTemplate(
            input_variables=["snippets", "query"],
            template=temp,
        )

    def get_retrieval_qa_template_selector(self, llm: str):
        """get the retrieval qa prompt template"""
        if "llama" in llm.lower():
            temp = f"[INST] <<SYS>>\n{self.retrieval_qa_sys}\n<</SYS>>\n\n{self.retrieval_qa_prompt} [/INST]"
            messages = [
                SystemMessagePromptTemplate.from_template(
                    f"[INST] <<SYS>>\n{self.retrieval_qa_sys}\n<</SYS>>\n\n{self.retrieval_qa_chat_prompt} [/INST]"
                ),
                HumanMessagePromptTemplate.from_template("{question}"),
            ]
        else:
            temp = f"{self.retrieval_qa_sys}\n{self.retrieval_qa_prompt}"
            messages = [
                SystemMessagePromptTemplate.from_template(
                    f"{self.retrieval_qa_sys}\n{self.retrieval_qa_chat_prompt}"
                ),
                HumanMessagePromptTemplate.from_template("{question}"),
            ]

        prompt_temp = PromptTemplate(
            template=temp,
            input_variables=["database", "chat_history", "context", "question"],
        )
        prompt_temp_chat = ChatPromptTemplate.from_messages(messages)

        return ConditionalPromptSelector(
            default_prompt=prompt_temp,
            conditionals=[(is_chat_model, prompt_temp_chat)],
        )