from langchain.chains import LLMChain

from prompts import tailor_prompt


def get_tailor_chain() -> LLMChain:
    """
    Creates the tailor chain used to simplify and personalize the assistant's responses.
    """
    tailor_chain = LLMChain(
        llm=your_llm,  # Replace `your_llm` with your actual LLM instance
        prompt=tailor_prompt,
    )
    return tailor_chain


def tailor_with_history(response: str, chat_history: list) -> str:
    """
    Tailors the assistant's response using the chat history as context.
    """
    # Flatten the prior user messages and the new response into one context string.
    context = "\n".join(f"User: {msg['content']}" for msg in chat_history) + "\nAssistant: " + response
    # Run the tailor chain on the combined context.
    tailored_response = get_tailor_chain().run({"response": context})
    return tailored_response
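

# Example usage: a minimal sketch only. It assumes `your_llm` above has been
# replaced with a real LLM instance and that `tailor_prompt` expects a single
# "response" input variable; the chat history shown here is hypothetical.
if __name__ == "__main__":
    history = [
        {"content": "Can you explain what a vector database is?"},
        {"content": "Please keep the explanation beginner-friendly."},
    ]
    raw_answer = "A vector database stores embeddings and retrieves them by similarity."
    print(tailor_with_history(raw_answer, history))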