Spaces:
Runtime error
Runtime error
on1onmangoes
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -16,6 +16,27 @@ client = Client("on1onmangoes/CNIHUB101324v10", hf_token=HF_TOKEN)
|
|
16 |
# Update the conversation history within the function.
|
17 |
# Return the updated history along with any other required outputs.
|
18 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
def stream_chat_with_rag(
|
20 |
message: str,
|
21 |
history: list,
|
@@ -32,24 +53,66 @@ def stream_chat_with_rag(
|
|
32 |
print(f"Message: {message}")
|
33 |
print(f"History: {history}")
|
34 |
|
35 |
-
|
36 |
# Build the conversation prompt including system prompt and history
|
37 |
-
conversation = system_prompt
|
|
|
|
|
38 |
for user_input, assistant_response in history:
|
39 |
conversation += f"User: {user_input}\nAssistant: {assistant_response}\n"
|
|
|
|
|
40 |
conversation += f"User: {message}\nAssistant:"
|
|
|
|
|
41 |
question = message
|
42 |
answer = client.predict(question=question, api_name="/answer_with_rag")
|
43 |
-
|
|
|
44 |
print("The Answer in stream_chat_with_rag:")
|
45 |
print(answer)
|
46 |
|
47 |
-
#
|
48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
|
54 |
|
55 |
# Function to handle PDF processing API call
|
|
|
16 |
# Update the conversation history within the function.
|
17 |
# Return the updated history along with any other required outputs.
|
18 |
|
19 |
+
def format_answer_with_documents(answer):
|
20 |
+
"""
|
21 |
+
This function formats the assistant's answer and separates the personal details from the relevant documents.
|
22 |
+
"""
|
23 |
+
# Extract the personal details and documents from the answer
|
24 |
+
personal_details = answer[0]
|
25 |
+
relevant_documents = answer[1:] # Assuming documents are included after the answer
|
26 |
+
|
27 |
+
# Format the personal details
|
28 |
+
formatted_answer = f"Based on the documents provided, the personal details are as follows:\n\n{personal_details}\n\n"
|
29 |
+
|
30 |
+
# If there are relevant documents, format them as a separate section
|
31 |
+
if relevant_documents:
|
32 |
+
formatted_answer += "Relevant Documents:\n"
|
33 |
+
for idx, document in enumerate(relevant_documents, start=1):
|
34 |
+
formatted_answer += f"{idx}. {document.metadata['heading']} (Page {int(document.metadata['page_number'])})\n"
|
35 |
+
formatted_answer += f" Source: {document.metadata['source']}\n"
|
36 |
+
formatted_answer += f" Snippet: {document.page_content[:200]}...\n\n" # Showing a snippet of content
|
37 |
+
|
38 |
+
return formatted_answer
|
39 |
+
|
40 |
def stream_chat_with_rag(
|
41 |
message: str,
|
42 |
history: list,
|
|
|
53 |
print(f"Message: {message}")
|
54 |
print(f"History: {history}")
|
55 |
|
|
|
56 |
# Build the conversation prompt including system prompt and history
|
57 |
+
conversation = f"{system_prompt}\n\nFor Client: {client_name}\n"
|
58 |
+
|
59 |
+
# Add previous conversation history
|
60 |
for user_input, assistant_response in history:
|
61 |
conversation += f"User: {user_input}\nAssistant: {assistant_response}\n"
|
62 |
+
|
63 |
+
# Add the current user message
|
64 |
conversation += f"User: {message}\nAssistant:"
|
65 |
+
|
66 |
+
# Call the API with the user's message
|
67 |
question = message
|
68 |
answer = client.predict(question=question, api_name="/answer_with_rag")
|
69 |
+
|
70 |
+
# Debugging: Print the response
|
71 |
print("The Answer in stream_chat_with_rag:")
|
72 |
print(answer)
|
73 |
|
74 |
+
# Format the assistant's answer to separate the relevant documents
|
75 |
+
formatted_answer = format_answer_with_documents(answer)
|
76 |
+
|
77 |
+
# Update the conversation history with the new message and answer
|
78 |
+
history.append((message, formatted_answer))
|
79 |
+
|
80 |
+
# Return the formatted answer
|
81 |
+
return formatted_answer
|
82 |
+
|
83 |
+
|
84 |
+
# this version works just issue with formatting
|
85 |
+
# def stream_chat_with_rag(
|
86 |
+
# message: str,
|
87 |
+
# history: list,
|
88 |
+
# client_name: str,
|
89 |
+
# system_prompt: str,
|
90 |
+
# num_retrieved_docs: int = 10,
|
91 |
+
# num_docs_final: int = 9,
|
92 |
+
# temperature: float = 0,
|
93 |
+
# max_new_tokens: int = 1024,
|
94 |
+
# top_p: float = 1.0,
|
95 |
+
# top_k: int = 20,
|
96 |
+
# penalty: float = 1.2,
|
97 |
+
# ):
|
98 |
+
# print(f"Message: {message}")
|
99 |
+
# print(f"History: {history}")
|
100 |
+
|
101 |
|
102 |
+
# # Build the conversation prompt including system prompt and history
|
103 |
+
# conversation = system_prompt + "\n\n" + "For Client:" + client_name
|
104 |
+
# for user_input, assistant_response in history:
|
105 |
+
# conversation += f"User: {user_input}\nAssistant: {assistant_response}\n"
|
106 |
+
# conversation += f"User: {message}\nAssistant:"
|
107 |
+
# question = message
|
108 |
+
# answer = client.predict(question=question, api_name="/answer_with_rag")
|
109 |
+
# # debug 092624
|
110 |
+
# print("The Answer in stream_chat_with_rag:")
|
111 |
+
# print(answer)
|
112 |
+
|
113 |
+
# # Update the conversation history
|
114 |
+
# history.append((message, answer))
|
115 |
+
# return answer
|
116 |
|
117 |
|
118 |
# Function to handle PDF processing API call
|