Update app.py
app.py
CHANGED
@@ -14,9 +14,9 @@ llm = HuggingFaceHub(repo_id="suriya7/MaxMini-Instruct-248M",
 })
 
 
-template = """Please Answer the
+template = """Please Answer the Question:{question}"""
 
-prompt = PromptTemplate(template=template,input_variables=['question'
+prompt = PromptTemplate(template=template,input_variables=['question'])
 
 llm_chain = LLMChain(
     llm=llm,
@@ -24,14 +24,14 @@ llm_chain = LLMChain(
     verbose=True,
 )
 
-previous_response = ""
+# previous_response = ""
 def conversational_chat(user_query):
-    global previous_response
-    previous_response = "".join([f"User: {i[0]}\nChatbot: {i[1]}" for i in st.session_state['history'] if i is not None])
-    print(f"this is my previous {previous_response}")
+    # global previous_response
+    # previous_response = "".join([f"User: {i[0]}\nChatbot: {i[1]}" for i in st.session_state['history'] if i is not None])
+    # print(f"this is my previous {previous_response}")
     result = llm_chain.predict(
         question=user_query,
-        chat_history = previous_response
+        # chat_history = previous_response
     )
     st.session_state['history'].append((user_query, result))
     return result
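
For context, a minimal sketch of app.py as it stands after this commit. Only the hunks above are confirmed by the diff; the imports, model_kwargs values, and the session-state initialization are assumptions filled in to make the example self-contained:

import streamlit as st
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Requires HUGGINGFACEHUB_API_TOKEN in the environment.
# The repo_id comes from the diff's context line; model_kwargs are an assumption.
llm = HuggingFaceHub(repo_id="suriya7/MaxMini-Instruct-248M",
                     model_kwargs={"temperature": 0.7, "max_new_tokens": 128})

# Post-commit prompt: the question alone, with chat_history removed.
template = """Please Answer the Question:{question}"""
prompt = PromptTemplate(template=template, input_variables=['question'])

llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
)

# Assumed initialization; the diff only shows history being appended to.
if 'history' not in st.session_state:
    st.session_state['history'] = []

def conversational_chat(user_query):
    # The previous_response bookkeeping is commented out in this commit,
    # so each prediction sees only the current question.
    result = llm_chain.predict(question=user_query)
    st.session_state['history'].append((user_query, result))
    return result

Net effect of the change: with input_variables=['question'], the commented-out chat_history keyword no longer feeds the template, presumably because the template no longer references it. Each predict call is therefore stateless; the conversation is still recorded in st.session_state['history'] but is not replayed to the model.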