John Graham Reynolds committed
Commit 5029f5a · 1 Parent(s): 806b1ef
add date to date_str var in template formatting
chain.py CHANGED
@@ -100,7 +100,7 @@ class ChainBuilder:
         # Prompt Template for generation
         prompt = ChatPromptTemplate.from_messages(
             [
-                ("system", self.llm_config.get("llm_prompt_template")),
+                ("system", self.llm_config.get("llm_prompt_template").format(date_str=datetime.datetime.now().strftime("%B %d, %Y"))), # add current date to the date_str var in system prompt
                 # *** Note: This chain does not compress the history, so very long conversations can overflow the context window. TODO
                 # We need to at some point chop this history down to a fixed amount of recent messages
                 MessagesPlaceholder(variable_name="formatted_chat_history"),
@@ -184,7 +184,6 @@ class ChainBuilder:
             | RunnableLambda(self.format_context),
             "formatted_chat_history": itemgetter("formatted_chat_history"),
             "question": itemgetter("question"),
-            "date_str": RunnableLambda(get_date), # date to be passed to system prompt for context of when knowledge base was last updated
         }
         | self.get_prompt() # 'context', 'formatted_chat_history', and 'question' passed to prompt
         | self.get_model() # prompt passed to model
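
For context, here is a minimal sketch of what the new code path does, assuming a config template string that contains a {date_str} placeholder. The template text, the ("human", "{question}") line, and the stand-in variable below are illustrative, not the repo's actual values:

import datetime

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# Hypothetical stand-in for self.llm_config.get("llm_prompt_template")
llm_prompt_template = (
    "You are a helpful assistant. The knowledge base was last updated on {date_str}."
)

# After this commit, the date is baked into the system message once, when the
# prompt is built, instead of being supplied per request through
# RunnableLambda(get_date) in the chain's input map.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", llm_prompt_template.format(date_str=datetime.datetime.now().strftime("%B %d, %Y"))),
        MessagesPlaceholder(variable_name="formatted_chat_history"),
        ("human", "{question}"),
    ]
)

# "date_str" is no longer one of the prompt's input variables.
print(prompt.input_variables)  # e.g. ['formatted_chat_history', 'question']
print(prompt.format_messages(formatted_chat_history=[], question="What changed?"))

One trade-off worth noting: with .format() applied at build time, the date is captured when the chain is constructed rather than at each request, whereas the removed RunnableLambda(get_date) evaluated on every invocation.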