Chatbot2 / tailor_chain.py
Phoenix21's picture
Update tailor_chain.py
363f267 verified
raw
history blame
777 Bytes
from langchain.chains import LLMChain
from prompts import tailor_prompt
def get_tailor_chain(llm=None) -> LLMChain:
    """
    Create the tailor chain that simplifies and personalizes the assistant's responses.

    Args:
        llm: The language model to drive the chain. Must be supplied by the
            caller; there is no module-level default model in this file.

    Returns:
        An ``LLMChain`` wired to ``tailor_prompt``.

    Raises:
        ValueError: If no LLM is provided.
    """
    # Original code referenced an undefined name ``your_llm`` and raised
    # NameError on every call; require the model explicitly instead so the
    # failure is a clear, catchable ValueError.
    if llm is None:
        raise ValueError("get_tailor_chain() requires an LLM instance; pass one via the 'llm' argument.")
    tailor_chain = LLMChain(
        llm=llm,
        prompt=tailor_prompt
    )
    return tailor_chain
def tailor_with_history(response: str, chat_history: list) -> str:
    """
    Tailor the assistant's response using the prior conversation as context.

    Args:
        response: The assistant's latest reply to be tailored.
        chat_history: List of message dicts; each entry must have a
            ``'content'`` key. Every entry is rendered with a ``User:``
            label. NOTE(review): if history can also contain assistant
            turns, this mislabels them — confirm the history schema.

    Returns:
        The tailored response produced by the tailor chain.
    """
    # Render each history message, then append the new reply as the
    # assistant turn so the chain sees the full exchange.
    history_lines = [f"User: {msg['content']}" for msg in chat_history]
    full_context = "\n".join(history_lines) + "\nAssistant: " + response
    # A fresh chain is built per call; the combined context is passed
    # under the "response" input key expected by tailor_prompt.
    chain = get_tailor_chain()
    tailored = chain.run({"response": full_context})
    return tailored