Update app.py
app.py CHANGED
@@ -20,18 +20,24 @@ llm = ChatGoogleGenerativeAI(model="gemini-pro",
 
 
 
-template = """
-
-
-
-{provided_docs}
-previous_chat:
+template = """
+You are CRETA, a friendly and knowledgeable chatbot created by Suriya, an AI enthusiast. You can access and understand the content from provided documents and websites to help answer questions.
+
+Previous Conversation:
 {chat_history}
+
+Provided Document Content:
+{provided_docs}
+
+Extracted URL Text:
+{extracted_text}
+
 Human: {human_input}
-Chatbot:"""
+Chatbot: """
 
 prompt = PromptTemplate(
-    input_variables=["chat_history", "human_input", "provided_docs","extracted_text"],
+    input_variables=["chat_history", "human_input", "provided_docs", "extracted_text"],
+    template=template
 )
 
 llm_chain = LLMChain(
@@ -44,22 +50,17 @@ llm_chain = LLMChain(
 previous_response = ""
 provided_docs = ""
 def conversational_chat(query):
-    global previous_response, provided_docs
-    for i in st.session_state['history']:
-
-
-    docs = ""
-    for j in st.session_state["docs"]:
-        if j is not None:
-            docs += j
-    text = ""
-    for k in st.session_state["extracted_text"]:
-        if k is not None:
-            docs += k
+    global previous_response, provided_docs, extracted_text
+    previous_response = "".join([f"Human: {i[0]}\nChatbot: {i[1]}" for i in st.session_state['history'] if i is not None])
+    provided_docs = "".join([doc for doc in st.session_state["docs"] if doc is not None])
+    extracted_text = "".join([text for text in st.session_state["extracted_text"] if text is not None])
 
-
-
-
+    result = llm_chain.predict(
+        chat_history=previous_response,
+        human_input=query,
+        provided_docs=provided_docs,
+        extracted_text=extracted_text
+    )
     st.session_state['history'].append((query, result))
     return result
 
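Two fixes in the first hunk are easy to miss: the old PromptTemplate call declared input_variables but never passed the template itself, and the old template body never referenced {extracted_text}. A minimal sketch of the repaired wiring, assuming the classic langchain PromptTemplate API that app.py already imports (the short template string here is illustrative, not the full one above):

# Sketch of the PromptTemplate fix: declaring input_variables without a
# template= argument raises a pydantic validation error, which is what
# this commit repairs. The template string below is a stand-in.
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["chat_history", "human_input", "provided_docs", "extracted_text"],
    template="{chat_history}\n{provided_docs}\n{extracted_text}\nHuman: {human_input}\nChatbot: ",
)

# All four variables must be supplied whenever the template is rendered:
print(prompt.format(chat_history="", provided_docs="", extracted_text="", human_input="Hi"))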
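The rewritten conversational_chat flattens session state before calling the chain, and the joins can be exercised outside Streamlit. A small sketch with hypothetical sample data standing in for st.session_state:

# Hypothetical sample data standing in for st.session_state['history'] and
# st.session_state['docs']; the join logic mirrors the second hunk above.
history = [("Hi", "Hello! How can I help?"), ("Who made you?", "Suriya did.")]
docs = ["First document text. ", None, "Second document text."]

previous_response = "".join([f"Human: {i[0]}\nChatbot: {i[1]}" for i in history if i is not None])
provided_docs = "".join([doc for doc in docs if doc is not None])

print(previous_response)
# Human: Hi
# Chatbot: Hello! How can I help?Human: Who made you?
# Chatbot: Suriya did.

Note that joining on the empty string leaves no separator between consecutive turns, as the output above shows; joining on "\n" would keep each turn on its own line. Also, since (query, result) is appended to history only after predict() returns, the flattened chat_history covers every turn before the current one.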