# my_memory_logic.py
import os
from langchain.memory import ConversationBufferMemory
from langchain.chains import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
MessagesPlaceholder,
HumanMessagePromptTemplate,
)
# Import ChatGroq from the langchain_groq package
from langchain_groq import ChatGroq
# 1) Memory object for storing conversation messages
# return_messages=True makes the buffer yield message objects instead of
# one concatenated string — the form MessagesPlaceholder below expects.
# NOTE(review): this memory is never attached to restatement_chain in this
# file — confirm whether callers wire it in themselves.
memory = ConversationBufferMemory(return_messages=True)
# 2) Restatement system prompt for question rewriting
restatement_system_prompt = (
"Given a chat history and the latest user question "
"which might reference context in the chat history, "
"formulate a standalone question that can be understood "
"without the chat history. Do NOT answer the question, "
"just reformulate it if needed; otherwise return it as is."
)
# 3) Assemble the restatement prompt: system instruction first, then the
# prior conversation (filled from "chat_history"), then the newest user
# input (filled from "input").
_restatement_messages = [
    SystemMessagePromptTemplate.from_template(restatement_system_prompt),
    MessagesPlaceholder(variable_name="chat_history"),
    HumanMessagePromptTemplate.from_template("{input}"),
]
restatement_prompt = ChatPromptTemplate.from_messages(_restatement_messages)
# 4) Initialize the ChatGroq LLM used for question restatement.
# Fail fast with an actionable message when the key is absent, instead of
# the bare KeyError that os.environ["GROQ_API_KEY"] would raise at import.
_groq_api_key = os.environ.get("GROQ_API_KEY")
if not _groq_api_key:
    raise RuntimeError(
        "GROQ_API_KEY environment variable is not set; "
        "export it before importing this module."
    )
restatement_llm = ChatGroq(
    model="llama3-70b-8192",  # alternative: "mixtral-8x7b-32768"
    groq_api_key=_groq_api_key,
)
# 5) Wrap the prompt and LLM in an LLMChain for question restatement.
# NOTE(review): LLMChain is deprecated in recent LangChain releases in
# favor of `restatement_prompt | restatement_llm` (LCEL); kept here so
# callers using the LLMChain API (.run/.predict) continue to work.
restatement_chain = LLMChain(
    prompt=restatement_prompt,
    llm=restatement_llm,
)
|