import os
from getpass import getpass

# Read the OpenAI API key from the environment, prompting for it if unset.
openai_api_key = os.getenv("OPENAI_API_KEY") or getpass("OpenAI API key: ")
os.environ["OPENAI_API_KEY"] = openai_api_key


from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings

# Configure the global defaults LlamaIndex uses for generation and embedding.
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")


from llama_index.core import SimpleDirectoryReader

# Load every readable file from the new_file/ directory.
documents = SimpleDirectoryReader("new_file").load_data()
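
# Optional sanity check: report how many documents were loaded from new_file/.
print(f"Loaded {len(documents)} document(s)")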


from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client

# An in-memory Qdrant instance; nothing is persisted and no server is needed.
client = qdrant_client.QdrantClient(
    location=":memory:",
)

# enable_hybrid=True combines dense-vector search with sparse (keyword-style)
# retrieval inside the same collection.
vector_store = QdrantVectorStore(
    collection_name="paper",
    client=client,
    enable_hybrid=True,
    batch_size=20,
)
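
# Note: hybrid mode relies on a sparse embedding model; with this integration
# that typically means installing fastembed alongside the Qdrant extras, e.g.
#   pip install llama-index-vector-stores-qdrant fastembed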


storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Embed the documents and index them into the Qdrant collection.
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)

# Query in hybrid mode so both dense and sparse retrieval are used.
query_engine = index.as_query_engine(
    vector_store_query_mode="hybrid"
)
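
# Quick smoke test of the hybrid query engine. The question is only an
# illustration; substitute anything relevant to the indexed documents.
print(query_engine.query("What is the main topic of the indexed paper?"))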


from llama_index.core.memory import ChatMemoryBuffer

# Keep roughly the last 3,000 tokens of conversation as chat context.
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)

chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        """You are an AI assistant who answers the user's questions;
        use the schema fields to generate appropriate and valid JSON queries."""
    ),
)
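
# One-turn smoke test of the chat engine (illustrative prompt); unlike the
# query engine above, this call also updates the conversation memory.
print(chat_engine.chat("Summarize the indexed paper in two sentences."))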


import gradio as gr

def chat_with_ai(user_input, chat_history):
    response = chat_engine.chat(user_input)

    # Collect the unique source files cited by the retrieved nodes.
    ref = []
    for node in response.source_nodes:
        file_name = node.metadata.get("file_name")
        if file_name and file_name not in ref:
            ref.append(file_name)

    # Surface the references, when any were found, below the answer.
    complete_response = str(response)
    if ref:
        complete_response += "\n\nReferences: " + ", ".join(ref)

    chat_history.append((user_input, complete_response))
    return chat_history, ""


def clear_history():
    # Reset both the visible chat and the input box.
    return [], ""


def gradio_chatbot():
    with gr.Blocks() as demo:
        gr.Markdown("# Chat Interface for LlamaIndex")

        chatbot = gr.Chatbot(label="LlamaIndex Chatbot")
        user_input = gr.Textbox(
            placeholder="Ask a question...", label="Enter your question"
        )

        submit_button = gr.Button("Send")
        btn_clear = gr.Button("Delete Context")

        # Per-session history state, threaded through chat_with_ai.
        chat_history = gr.State([])

        # Send on button click or on Enter; both clear the input box afterwards.
        submit_button.click(
            chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input]
        )
        user_input.submit(
            chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input]
        )
        btn_clear.click(fn=clear_history, outputs=[chatbot, user_input])

    return demo


gradio_chatbot().launch(debug=True)