Update app.py
app.py
CHANGED
@@ -83,7 +83,7 @@ def process_files(pdf_files, chunk_limit, chunk_separator):
     msgs = [
         {
             'role': 'system',
-            'content': '
+            'content': 'Answer questions using only the provided context. If the context lacks sufficient information, state this clearly. Don\'t assume or add external information. Express uncertainty when needed. Be concise yet thorough, citing relevant parts of the context. Maintain a professional tone.'
         },
         {
             'role': 'user',
@@ -155,7 +155,6 @@ with gr.Blocks(theme=Monochrome()) as demo:
     - When a user asks a question, the system searches for the most relevant chunks of text from the uploaded documents.
     - It then uses these relevant chunks as context for a large language model (LLM) to generate an answer.
     - The LLM (in this case, GPT-4) formulates a response based on the provided context and the user's question.
-    - Pixeltable is used to manage the document data, chunks, and embeddings while also to retrieve context.
     """)

     with gr.Row():
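
The bullets above describe a standard retrieval-augmented generation loop: embed the question, rank the stored chunk embeddings by similarity, and hand the top matches to GPT-4 as context. In the app, storage and retrieval are handled by Pixeltable; the sketch below only illustrates the ranking step with plain numpy, and embed, chunk_texts, and chunk_embeddings are hypothetical stand-ins rather than the app's actual objects.

# Illustrative sketch of the retrieval step described above (not the app's Pixeltable code).
# `embed`, `chunk_texts`, and `chunk_embeddings` are hypothetical stand-ins.
import numpy as np

def top_k_chunks(question, embed, chunk_texts, chunk_embeddings, k=5):
    """Return the k chunks whose embeddings are most similar to the question."""
    q = np.asarray(embed(question), dtype=np.float32)
    m = np.asarray(chunk_embeddings, dtype=np.float32)  # shape: (num_chunks, dim)
    # Cosine similarity between the question and every stored chunk embedding.
    sims = (m @ q) / (np.linalg.norm(m, axis=1) * np.linalg.norm(q) + 1e-9)
    best = np.argsort(sims)[::-1][:k]
    return [chunk_texts[i] for i in best]

The selected chunks are then joined into the user message's context block, and GPT-4 answers from that context alone, per the system prompt added in the first hunk.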