Update app.py
app.py CHANGED
@@ -40,7 +40,6 @@ index = faiss.IndexFlatL2(embedding_dim)
 data.add_faiss_index("embeddings", custom_index=index)
 # adds an index column for the embeddings
 
-print("check1")
 #question = "How can I reverse Diabetes?"
 
 SYS_PROMPT = """You are an assistant for answering questions.
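For context, this hunk assumes a Hugging Face datasets object with a FAISS index attached to its "embeddings" column. A minimal sketch of that setup, assuming made-up contents and an embedding dimension of 384; only IndexFlatL2 and add_faiss_index appear in the diff itself:

# A minimal sketch, assuming `data` is a datasets.Dataset whose
# "embeddings" column holds float32 vectors; contents and dim are made up.
import faiss
import numpy as np
from datasets import Dataset

embedding_dim = 384  # assumed; must match the embedding model's output size

data = Dataset.from_dict({
    "text": ["Type 2 diabetes overview.", "Exercise and blood sugar."],
    "embeddings": [np.random.rand(embedding_dim).astype("float32") for _ in range(2)],
})

# IndexFlatL2 does exact (brute-force) L2 search, as in the diff.
index = faiss.IndexFlatL2(embedding_dim)
data.add_faiss_index("embeddings", custom_index=index)

# Query the index directly to sanity-check retrieval.
query_vec = np.random.rand(embedding_dim).astype("float32")
scores, examples = data.get_nearest_examples("embeddings", query_vec, k=2)
print(scores, examples["text"])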
@@ -95,8 +94,6 @@ def search(query: str, k: int = 2 ):
 # returns scores (List[float]): the retrieval scores from either FAISS (IndexFlatL2 by default) and examples (dict) format
 # called by talk function that passes prompt
 
-#print(scores, retrieved_examples)
-
 def format_prompt(prompt,retrieved_documents,k):
     """using the retrieved documents we will prompt the model to generate our responses"""
     PROMPT = f"Question:{prompt}\nContext:"
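The bodies of search and format_prompt are elided by the hunk, so here is a hedged reconstruction of what the pair plausibly looks like given the comments above. The encoder model name, the ST variable, and the "text" column are assumptions not shown in the diff; data is the indexed Dataset from the previous sketch:

# Hedged reconstruction of the elided helper bodies.
from sentence_transformers import SentenceTransformer

ST = SentenceTransformer("all-MiniLM-L6-v2")  # assumed encoder, output dim 384

def search(query: str, k: int = 2):
    """Embed the query and return (scores, examples) from the FAISS index."""
    query_embedding = ST.encode(query)  # float32 vector matching the index dim
    # scores: List[float] from IndexFlatL2; retrieved_examples: dict of columns
    scores, retrieved_examples = data.get_nearest_examples(
        "embeddings", query_embedding, k=k
    )
    return scores, retrieved_examples

def format_prompt(prompt, retrieved_documents, k):
    """using the retrieved documents we will prompt the model to generate our responses"""
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k):
        PROMPT += f"\n{retrieved_documents['text'][idx]}"  # "text" column assumed
    return PROMPT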
@@ -129,22 +126,22 @@ def talk(prompt, history):
     k = 2 # number of retrieved documents
     scores , retrieved_documents = search(prompt, k) # get retrival scores and examples in dictionary format based on the prompt passed
     print(retrieved_documents.keys())
-    print("check4")
+    # print("check4")
     formatted_prompt = format_prompt(prompt,retrieved_documents,k) # create a new prompt using the retrieved documents
     print("check5")
-    print(retrieved_documents['0'])
-    print(formatted_prompt)
+    # print(retrieved_documents['0'])
+    # print(formatted_prompt)
     # formatted_prompt_with_history = add_history(formatted_prompt, history)
 
     # formatted_prompt_with_history = formatted_prompt_with_history[:600] # to avoid memory issue
     # print(formatted_prompt_with_history)
     messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
+    print(messages)
     # binding the system context and new prompt for LLM
     # the chat template structure should be based on text generation model format
     print("check6")
 
     # indicates the end of a sequence
-    import pprint
     stream = model.create_chat_completion(messages = messages, max_tokens=1000, stop=["</s>"], stream=False)
     print(f"{stream}")
     print("check 7")
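With stream=False, llama-cpp-python's create_chat_completion returns a single OpenAI-style completion dict rather than a token generator, which is why the code can print the result directly. A sketch of pulling the reply text out under that assumption; the GGUF path is a placeholder, and SYS_PROMPT / formatted_prompt stand in for the values built above:

# A sketch, not the app's exact wiring.
from llama_cpp import Llama

model = Llama(model_path="model.gguf", n_ctx=2048)  # placeholder path

SYS_PROMPT = "You are an assistant for answering questions."
formatted_prompt = "Question:How can I reverse Diabetes?\nContext:..."

messages = [
    {"role": "system", "content": SYS_PROMPT},
    {"role": "user", "content": formatted_prompt},
]

# stream=False returns one completion dict instead of a generator of chunks.
response = model.create_chat_completion(
    messages=messages, max_tokens=1000, stop=["</s>"], stream=False
)
print(response["choices"][0]["message"]["content"])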