anasmkh committed on
Commit 6b3a267 · verified · 1 Parent(s): bf59e9e

Update app.py

Files changed (1):
  1. app.py +4 -116
app.py CHANGED
@@ -1,7 +1,6 @@
 import os
 import shutil
 import gradio as gr
-from qdrant_client.http import models
 import qdrant_client
 from getpass import getpass
 
@@ -63,120 +62,9 @@ def process_upload(files):
     global client, vector_store, storage_context, index, query_engine, memory, chat_engine
     client = qdrant_client.QdrantClient(location=":memory:")
 
-    # vector_store = QdrantVectorStore(
-    #     collection_name="paper",
-    #     client=client,
-    #     enable_hybrid=True,
-    #     batch_size=20,
-    # )
-    collection_name = "paper"
-    # client = qdrant_client.QdrantClient(
-    #     path="./qdrant_db",
-    #     prefer_grpc=True
-    # )
-    existing_collections = {col.name for col in client.get_collections().collections}
-    if collection_name not in existing_collections:
-        client.create_collection(
-            collection_name=collection_name,
-            vectors_config=models.VectorParams(
-                size=1536,
-                distance=models.Distance.COSINE
-            )
-        )
     vector_store = QdrantVectorStore(
+        collection_name="paper",
         client=client,
-        collection_name=collection_name,
-    )
-
-    storage_context = StorageContext.from_defaults(vector_store=vector_store)
-
-    index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
-
-    query_engine = index.as_query_engine(vector_store_query_mode="hybrid")
-
-    memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
-
-    chat_engine = index.as_chat_engine(
-        chat_mode="context",
-        memory=memory,
-        system_prompt=(
-            "You are an AI assistant who answers the user questions, "
-            "use the schema fields to generate appropriate and valid json queries"
-        ),
-    )
-
-    return "Documents uploaded and index built successfully!"
-
-# -------------------------------------------------------
-# Chat function that uses the built chat engine.
-# -------------------------------------------------------
-def chat_with_ai(user_input, chat_history):
-    global chat_engine
-    # Check if the chat engine is initialized.
-    if chat_engine is None:
-        return chat_history, "Please upload documents first."
-
-    response = chat_engine.chat(user_input)
-    references = response.source_nodes
-    ref, pages = [], []
-
-    # Extract file names from the source nodes (if available)
-    for node in references:
-        file_name = node.metadata.get('file_name')
-        if file_name and file_name not in ref:
-            ref.append(file_name)
-
-    complete_response = str(response) + "\n\n"
-    if ref or pages:
-        chat_history.append((user_input, complete_response))
-    else:
-        chat_history.append((user_input, str(response)))
-    return chat_history, ""
-
-# -------------------------------------------------------
-# Function to clear the chat history.
-# -------------------------------------------------------
-def clear_history():
-    return [], ""
-
-# -------------------------------------------------------
-# Build the Gradio interface.
-# -------------------------------------------------------
-def gradio_interface():
-    with gr.Blocks() as demo:
-        gr.Markdown("# Chat Interface for LlamaIndex with File Upload")
-
-        # Use Tabs to separate the file upload and chat interfaces.
-        with gr.Tab("Upload Documents"):
-            gr.Markdown("Upload PDF, Excel, CSV, DOC/DOCX, or TXT files below:")
-            # The file upload widget: we specify allowed file types.
-            file_upload = gr.File(
-                label="Upload Files",
-                file_count="multiple",
-                file_types=[".pdf", ".csv", ".txt", ".xlsx", ".xls", ".doc", ".docx"],
-                type="filepath"  # returns file paths
-            )
-            upload_status = gr.Textbox(label="Upload Status", interactive=False)
-            upload_button = gr.Button("Process Upload")
-
-            upload_button.click(process_upload, inputs=file_upload, outputs=upload_status)
-
-        with gr.Tab("Chat"):
-            chatbot = gr.Chatbot(label="LlamaIndex Chatbot")
-            user_input = gr.Textbox(
-                placeholder="Ask a question...", label="Enter your question"
-            )
-            submit_button = gr.Button("Send")
-            btn_clear = gr.Button("Clear History")
-
-            # A State to hold the chat history.
-            chat_history = gr.State([])
-
-            submit_button.click(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])
-            user_input.submit(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])
-            btn_clear.click(clear_history, outputs=[chatbot, user_input])
-
-    return demo
-
-# Launch the Gradio app.
-gradio_interface().launch(debug=True)
+        enable_hybrid=True,
+        batch_size=20,
+    )
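
For context, the manual client.create_collection(...) call (and the qdrant_client.http.models import it needed) is dropped because QdrantVectorStore with enable_hybrid=True provisions the collection itself, storing a sparse vector alongside the dense embedding. Below is a minimal sketch, not part of this commit, of how the hybrid-enabled store plugs into a LlamaIndex pipeline like the one this app builds; import paths assume llama-index >= 0.10 with the llama-index-vector-stores-qdrant and fastembed packages installed, and the "docs" input directory is hypothetical.

    import qdrant_client
    from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
    from llama_index.vector_stores.qdrant import QdrantVectorStore

    client = qdrant_client.QdrantClient(location=":memory:")

    # enable_hybrid=True makes the store index a sparse vector (computed by a
    # fastembed model by default) next to the dense embedding, so no manual
    # create_collection call is needed.
    vector_store = QdrantVectorStore(
        collection_name="paper",
        client=client,
        enable_hybrid=True,
        batch_size=20,
    )

    documents = SimpleDirectoryReader("docs").load_data()  # hypothetical input dir
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

    # Hybrid retrieval fuses dense and sparse scores at query time, matching the
    # vector_store_query_mode="hybrid" already used elsewhere in this app.
    query_engine = index.as_query_engine(vector_store_query_mode="hybrid")
    print(query_engine.query("What is the paper about?"))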