Update app.py
app.py
CHANGED
@@ -113,8 +113,8 @@ def upload_files(files):
         print(f"General error processing files: {e}")
         return {"error": str(e)}
 
-def process_and_query(state, question):
-    global faiss_index
+def process_and_query(question):
+    global state, faiss_index
     if not question:
         return {"error": "No question provided"}
 
@@ -127,7 +127,50 @@ def process_and_query(state, question):
 
     # Generate response based on retrieved results
     context = " ".join(retrieved_results)
-
+
+    # Enhanced prompt template
+    prompt_template = """
+    Answer the question as detailed as possible from the provided context,
+    make sure to provide all the details, if the answer is not in
+    provided context just say, "answer is not available in the context",
+    don't provide the wrong answer
+
+    Context:\n{context}
+
+    Question: \n{question}
+
+    Answer:
+    --------------------------------------------------
+    Prompt Suggestions:
+    1. Summarize the primary theme of the context.
+    2. Elaborate on the crucial concepts highlighted in the context.
+    3. Pinpoint any supporting details or examples pertinent to the question.
+    4. Examine any recurring themes or patterns relevant to the question within the context.
+    5. Contrast differing viewpoints or elements mentioned in the context.
+    6. Explore the potential implications or outcomes of the information provided.
+    7. Assess the trustworthiness and validity of the information given.
+    8. Propose recommendations or advice based on the presented information.
+    9. Forecast likely future events or results stemming from the context.
+    10. Expand on the context or background information pertinent to the question.
+    11. Define any specialized terms or technical language used within the context.
+    12. Analyze any visual representations like charts or graphs in the context.
+    13. Highlight any restrictions or important considerations when responding to the question.
+    14. Examine any presuppositions or biases evident within the context.
+    15. Present alternate interpretations or viewpoints regarding the information provided.
+    16. Reflect on any moral or ethical issues raised by the context.
+    17. Investigate any cause-and-effect relationships identified in the context.
+    18. Uncover any questions or areas requiring further exploration.
+    19. Resolve any vague or conflicting information in the context.
+    20. Cite case studies or examples that demonstrate the concepts discussed in the context.
+    --------------------------------------------------
+    Context:\n{context}
+
+    Question:\n{question}
+
+    Answer:
+    """
+
+    combined_input = prompt_template.format(context=context, question=question)
     inputs = generator_tokenizer(combined_input, return_tensors="pt", max_length=512, truncation=True)
     with torch.no_grad():
         generator_outputs = generator.generate(**inputs)
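
A note on the new prompt path: the enhanced template is roughly forty lines long, while the tokenizer call that follows it keeps max_length=512 with truncation=True, so a long retrieved context can push the trailing Context/Question/Answer block past the limit and have it silently dropped. Below is a minimal sketch of how that budget could be checked, reusing prompt_template and generator_tokenizer from this commit; the helper itself is hypothetical and not part of the diff.

    # Hypothetical helper (not in the commit): report how much of the
    # formatted prompt survives the 512-token truncation used above.
    def token_budget_report(context, question, limit=512):
        combined = prompt_template.format(context=context, question=question)
        n_total = len(generator_tokenizer(combined)["input_ids"])
        # Tokens consumed by the template alone, before any real content:
        n_template = len(
            generator_tokenizer(prompt_template.format(context="", question=""))["input_ids"]
        )
        dropped = max(0, n_total - limit)
        print(f"template: {n_template} tokens, full prompt: {n_total} tokens, "
              f"truncated: {dropped}")
        return combined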
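The signature change from process_and_query(state, question) to process_and_query(question), with state promoted to a global, also affects whatever UI event is bound to this handler. That wiring sits outside this hunk; the following is only a sketch assuming a Gradio Blocks layout, with all component names hypothetical.

    import gradio as gr

    with gr.Blocks() as demo:
        question_box = gr.Textbox(label="Question")  # hypothetical component names
        answer_box = gr.JSON(label="Answer")
        ask_button = gr.Button("Ask")
        # Before this commit the handler took (state, question), so a gr.State
        # input would also have been passed; now only the question is wired in.
        ask_button.click(process_and_query, inputs=question_box, outputs=answer_box)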