# my_memory_logic.py
import os
from langchain.memory import ConversationBufferMemory
from langchain.chains import LLMChain
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    MessagesPlaceholder,
    HumanMessagePromptTemplate,
)
# Import ChatGroq from the langchain_groq package
from langchain_groq import ChatGroq

# 1) Memory object for storing conversation messages
memory = ConversationBufferMemory(return_messages=True)

# 2) Restatement system prompt for question rewriting
restatement_system_prompt = (
    "Given the entire chat history below and the latest user question, "
    "your ONLY job is to rewrite or restate the latest question so it "
    "makes sense on its own. "
    "Do NOT repeat or quote large sections of the history. "
    "Do NOT provide the answer or any additional explanation. "
    "Respond ONLY with a short, standalone question."
)

# 3) Build the ChatPromptTemplate
restatement_prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template(restatement_system_prompt),
    MessagesPlaceholder(variable_name="chat_history"),
    HumanMessagePromptTemplate.from_template("{input}")
])

# 4) Initialize the ChatGroq LLM
#    Ensure you have your GROQ_API_KEY set in the environment
restatement_llm = ChatGroq(
    model="llama3-70b-8192",
    # model="mixtral-8x7b-32768",  # or any other supported model
    groq_api_key=os.environ["GROQ_API_KEY"]
)

# 5) Create the LLMChain for restatement
restatement_chain = LLMChain(
    llm=restatement_llm,
    prompt=restatement_prompt
)
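
# --- Minimal usage sketch (illustrative only) ---
# The snippet below assumes the chain is driven manually: the chat history is
# pulled from `memory` and fed into the prompt's "chat_history" slot, and the
# seeded exchange plus the follow-up question are hypothetical examples.
if __name__ == "__main__":
    # Seed the memory with a prior exchange so the restatement has context.
    memory.save_context(
        {"input": "Who wrote Pride and Prejudice?"},
        {"output": "Jane Austen wrote Pride and Prejudice."},
    )

    # Restate an ambiguous follow-up into a standalone question.
    standalone_question = restatement_chain.predict(
        chat_history=memory.load_memory_variables({})["history"],
        input="When was she born?",
    )
    print(standalone_question)  # e.g. "When was Jane Austen born?"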