from langchain.chains import LLMChain
# NOTE: ChatOpenAI is an assumed default model here (the original referenced an
# undefined `your_llm`); swap in whichever LLM the rest of the project configures.
from langchain_openai import ChatOpenAI

from prompts import tailor_prompt


def get_tailor_chain() -> LLMChain:
    """
    Creates the tailor chain that simplifies and personalizes the assistant's responses.
    """
    tailor_chain = LLMChain(
        llm=ChatOpenAI(temperature=0),  # assumption: replace with your configured LLM
        prompt=tailor_prompt,
    )
    return tailor_chain


def tailor_with_history(response: str, chat_history: list) -> str:
    """
    Tailors the assistant's response using the chat history as context.
    """
    # chat_history is expected to be a list of dicts with a "content" key;
    # the prior user turns are prepended so the tailor prompt sees the full exchange.
    context = (
        "\n".join(f"User: {msg['content']}" for msg in chat_history)
        + "\nAssistant: "
        + response
    )

    tailored_response = get_tailor_chain().run({"response": context})
    return tailored_response
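

# Minimal usage sketch (assumptions: an OPENAI_API_KEY in the environment and a
# `tailor_prompt` with a single "response" input variable); shown only to
# illustrate how tailor_with_history is meant to be called.
if __name__ == "__main__":
    history = [
        {"content": "Explain what a vector database is."},
        {"content": "Can you make that simpler?"},
    ]
    draft = "A vector database stores embeddings and retrieves them by similarity."
    print(tailor_with_history(draft, history))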