Mr-Vicky-01 committed on
Update app.py
app.py
CHANGED
@@ -40,14 +40,12 @@ llm_chain = LLMChain(
     # verbose=True,
 )
 
-template_2 = """
+template_2 = """read the web_result and Answer the fallowing question carefully, your response must be short and informative
 web_result:
 {web_result}
-provided document:
-{provided_docs}
 previous_chat:
 {chat_history}
-
+Question: {human_input}
 Chatbot:"""
 
 
@@ -61,7 +59,21 @@ llm_chain_2 = LLMChain(
     verbose=True,
 )
 
+search_template = """Write a brief, user-friendly search query based on the details below. The response should be concise and ready for direct use on a search engine.
+Chat History:
+{chat_history}
+Question: {human_input}
+Search Query::"""
+
+search_prompt = PromptTemplate(
+    input_variables=["chat_history", "human_input"], template=search_template
+)
 
+search_llm = LLMChain(
+    llm=llm,
+    prompt=search_prompt,
+    # verbose=True,
+)
 
 previous_response = ""
 provided_docs = ""
@@ -74,8 +86,9 @@ def conversational_chat(query):
     result = llm_chain.predict(chat_history=previous_response, human_input=query, provided_docs=provided_docs)
 
     if 'search_query'in result.strip():
-
-        print(
+        search_query = search_llm.predict(chat_history=previous_response, human_input=query)
+        print(search_query)
+        web_result = get_web_result(search_query.strip())
         result = llm_chain_2.predict(web_result= web_result,chat_history=previous_response, human_input=query, provided_docs=provided_docs)
 
     st.session_state['history'].append((query, result))
@@ -85,6 +98,7 @@ def conversational_chat(query):
 
     return result
 
+
 st.title("CRETA 🤖")
 st.text("I am CRETA Your Friendly Assitant")
 
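The new branch fires when the first chain's reply contains the literal marker 'search_query': the added search_llm chain condenses the chat history and the question into a plain search query, get_web_result fetches web snippets for it, and llm_chain_2 answers from those snippets. A minimal, self-contained sketch of that query-rewriting step follows; FakeListLLM is a stand-in for the Space's real `llm` (defined earlier in app.py, outside this diff), so the snippet runs offline with a canned response.

```python
# Sketch of the query-rewriting chain this commit adds. FakeListLLM is a
# stand-in for the Space's real `llm`, which this diff does not show.
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

llm = FakeListLLM(responses=["current weather in chennai"])  # canned reply

search_template = """Write a brief, user-friendly search query based on the details below. The response should be concise and ready for direct use on a search engine.
Chat History:
{chat_history}
Question: {human_input}
Search Query::"""

search_prompt = PromptTemplate(
    input_variables=["chat_history", "human_input"], template=search_template
)
search_llm = LLMChain(llm=llm, prompt=search_prompt)

# predict() fills the template and returns the model's completion as a string.
search_query = search_llm.predict(chat_history="", human_input="What's the weather in Chennai?")
print(search_query)  # -> current weather in chennai
```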
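get_web_result() itself is not part of this diff; the real helper lives elsewhere in app.py. As a purely hypothetical stand-in, assuming the duckduckgo_search package, it could look like this:

```python
# Hypothetical stand-in for the get_web_result() helper the diff calls;
# the actual implementation in app.py is not shown and may differ.
from duckduckgo_search import DDGS  # pip install duckduckgo-search

def get_web_result(query: str, max_results: int = 3) -> str:
    """Return the top search snippets for `query` as one text block."""
    with DDGS() as ddgs:
        hits = ddgs.text(query, max_results=max_results)
    # Each hit is a dict with 'title', 'href', and 'body' keys.
    return "\n\n".join(f"{h['title']}: {h['body']}" for h in hits)
```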