|
import os |
|
import shutil |
|
import time |
|
import gradio as gr |
|
import qdrant_client |
|
from getpass import getpass |
|
|
|
|
|
# NOTE(review): this variable is read but never passed anywhere below — the
# llama_index OpenAI/OpenAIEmbedding clients pick up OPENAI_API_KEY from the
# environment on their own. Presumably a leftover; confirm it can be removed.
openai_api_key = os.getenv('OPENAI_API_KEY')
|
|
|
|
|
|
|
|
|
from llama_index.llms.openai import OpenAI |
|
from llama_index.embeddings.openai import OpenAIEmbedding |
|
from llama_index.core import Settings |
|
|
|
# Global llama_index defaults: every index / chat engine built below uses
# this chat LLM and this embedding model unless overridden locally.
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4)

Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
|
|
|
|
|
|
|
|
|
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext |
|
from llama_index.vector_stores.qdrant import QdrantVectorStore |
|
from llama_index.core.memory import ChatMemoryBuffer |
|
|
|
|
|
# Shared mutable module state, populated lazily by process_upload() and read
# by chat_with_ai().
chat_engine = None      # llama_index chat engine ("context" mode)

index = None            # VectorStoreIndex over the uploaded documents

query_engine = None     # hybrid query engine derived from `index`

memory = None           # ChatMemoryBuffer backing `chat_engine`

client = None           # Qdrant client (embedded, on-disk at ./qdrant_db)

vector_store = None     # QdrantVectorStore wrapping `client`

storage_context = None  # StorageContext built around `vector_store`


# Qdrant collection that holds the document vectors.
collection_name = "paper"


# Uploads are copied here so they survive Gradio's temporary upload dirs.
upload_dir = "uploaded_files"

if not os.path.exists(upload_dir):

    os.makedirs(upload_dir)
|
|
|
|
|
|
|
|
|
|
|
def process_upload(files):
    """
    Save uploaded files into the persistent upload folder, then build (first
    call) or update (later calls) the vector index and chat engine.

    Parameters
    ----------
    files : list[str]
        Paths of the files Gradio wrote to its temporary upload area.

    Returns
    -------
    str
        Status message shown in the "Upload Status" textbox.
    """
    global client, vector_store, storage_context, index, query_engine, memory, chat_engine

    # Copy each upload into `upload_dir`, skipping names already present so a
    # re-upload of the same file is not indexed twice.
    new_file_paths = []
    for file_path in files:
        file_name = os.path.basename(file_path)
        dest = os.path.join(upload_dir, file_name)
        if not os.path.exists(dest):
            shutil.copy(file_path, dest)
            new_file_paths.append(dest)

    if not new_file_paths:
        return "No new documents to add."

    new_documents = SimpleDirectoryReader(input_files=new_file_paths).load_data()

    # Initialise the embedded (on-disk) Qdrant client only once: the local
    # `path=` storage is locked by the first client, so re-opening it on a
    # second upload would raise. (`prefer_grpc` only applies to remote
    # servers and is dropped for the embedded client.)
    if client is None:
        client = qdrant_client.QdrantClient(path="./qdrant_db")

        from qdrant_client.http import models
        existing_collections = {col.name for col in client.get_collections().collections}
        if collection_name not in existing_collections:
            client.create_collection(
                collection_name=collection_name,
                vectors_config={
                    # 1536 matches text-embedding-ada-002's output dimension.
                    "text-dense": models.VectorParams(
                        size=1536,
                        distance=models.Distance.COSINE,
                    )
                },
            )
            # Give the store a moment to finish creating the collection.
            time.sleep(1)

        vector_store = QdrantVectorStore(
            collection_name=collection_name,
            client=client,
            enable_hybrid=True,
            batch_size=20,
        )
        storage_context = StorageContext.from_defaults(vector_store=vector_store)

    if index is None:
        # First upload: index everything currently in the upload folder.
        index = VectorStoreIndex.from_documents(
            SimpleDirectoryReader(upload_dir).load_data(),
            storage_context=storage_context,
        )
    else:
        # Later uploads: VectorStoreIndex has no `insert_documents` method;
        # documents are inserted one at a time via `insert`.
        for doc in new_documents:
            index.insert(doc)

    # Rebuild the engines so they see the freshly indexed documents.
    query_engine = index.as_query_engine(vector_store_query_mode="hybrid")
    memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
    chat_engine = index.as_chat_engine(
        chat_mode="context",
        memory=memory,
        system_prompt=(
            "You are an AI assistant who answers the user questions, "
            "use the schema fields to generate appropriate and valid json queries"
        ),
    )

    return "Documents uploaded and index updated successfully!"
|
|
|
|
|
|
|
|
|
def chat_with_ai(user_input, chat_history):
    """
    Send one user message to the chat engine and append the exchange to the
    Gradio chat history.

    Parameters
    ----------
    user_input : str
        The question typed by the user.
    chat_history : list[tuple[str, str]]
        Gradio state: (user message, bot reply) pairs; mutated in place.

    Returns
    -------
    tuple[list, str]
        The updated history and "" (clears the input textbox). Before any
        upload the history is returned unchanged with a prompt message.
    """
    global chat_engine
    if chat_engine is None:
        return chat_history, "Please upload documents first."

    response = chat_engine.chat(user_input)

    # Collect the distinct source file names that grounded the answer.
    ref = []
    for node in response.source_nodes:
        file_name = node.metadata.get('file_name')
        if file_name and file_name not in ref:
            ref.append(file_name)

    complete_response = str(response) + "\n\n"
    # Bug fix: `ref` was previously built and then discarded; surface the
    # supporting documents beneath the answer as clearly intended.
    if ref:
        complete_response += "References: " + ", ".join(ref)

    chat_history.append((user_input, complete_response))
    return chat_history, ""
|
|
|
|
|
|
|
|
|
def clear_history():
    """Reset the chat tab: an empty chatbot history and a blank input box."""
    fresh_history = []
    cleared_input = ""
    return fresh_history, cleared_input
|
|
|
|
|
|
|
|
|
def gradio_interface():
    """
    Build and return the Gradio Blocks app: one tab for uploading documents
    (wired to process_upload) and one tab for chatting (wired to chat_with_ai
    and clear_history).

    Note: component creation order matters to Gradio's layout, so it mirrors
    the visual order top-to-bottom.
    """
    with gr.Blocks() as app:
        gr.Markdown("# Chat Interface for LlamaIndex with File Upload")

        with gr.Tab("Upload Documents"):
            gr.Markdown("Upload PDF, Excel, CSV, DOC/DOCX, or TXT files below:")
            file_picker = gr.File(
                label="Upload Files",
                file_count="multiple",
                file_types=[".pdf", ".csv", ".txt", ".xlsx", ".xls", ".doc", ".docx"],
                type="filepath",
            )
            status_box = gr.Textbox(label="Upload Status", interactive=False)
            process_btn = gr.Button("Process Upload")

            # Indexing runs when the button is pressed; the status string
            # lands in the read-only textbox above.
            process_btn.click(process_upload, inputs=file_picker, outputs=status_box)

        with gr.Tab("Chat"):
            chat_display = gr.Chatbot(label="LlamaIndex Chatbot")
            question_box = gr.Textbox(
                placeholder="Ask a question...", label="Enter your question"
            )
            send_btn = gr.Button("Send")
            clear_btn = gr.Button("Clear History")

            # Per-session history, shared by both submit paths below.
            history_state = gr.State([])

            # Button click and pressing Enter in the textbox are equivalent.
            send_btn.click(chat_with_ai, inputs=[question_box, history_state], outputs=[chat_display, question_box])
            question_box.submit(chat_with_ai, inputs=[question_box, history_state], outputs=[chat_display, question_box])
            clear_btn.click(clear_history, outputs=[chat_display, question_box])

    return app
|
|
|
|
|
# Launch only when executed as a script, so importing this module (e.g. for
# reuse or testing) does not start the web server as a side effect.
if __name__ == "__main__":
    gradio_interface().launch(debug=True)
|
|