# NOTE: removed pasted git-blame / file-metadata residue (byte count, commit
# hashes, line-number gutter) — it was not Python code and broke parsing.
import os
from getpass import getpass

# Resolve the OpenAI API key: prefer the environment, otherwise prompt
# interactively. (The original read the key, then assigned it to itself —
# a no-op — and never used the imported `getpass`.)
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    openai_api_key = getpass("Enter your OpenAI API key: ")
    # llama-index's OpenAI clients read the key from the environment.
    os.environ["OPENAI_API_KEY"] = openai_api_key

from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings

# Global defaults for every llama-index component built below.
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")

from llama_index.core import SimpleDirectoryReader

# Load every document found under ./files for indexing.
documents = SimpleDirectoryReader("files").load_data()

from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client

# In-memory Qdrant instance: nothing is persisted between runs,
# which is fine for a demo session.
client = qdrant_client.QdrantClient(
    location=":memory:",
)
vector_store = QdrantVectorStore(
    collection_name="paper",
    client=client,
    enable_hybrid=True,  # dense + sparse retrieval combined
    batch_size=20,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)

# Hybrid-mode query engine over the same index. NOTE(review): not used by
# the Gradio UI below — kept for ad-hoc queries; confirm it is still wanted.
query_engine = index.as_query_engine(
    vector_store_query_mode="hybrid"
)

from llama_index.core.memory import ChatMemoryBuffer

# Conversation memory capped at ~3000 tokens of history.
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        "You are an AI assistant who answers the user questions"
    ),
)
import gradio as gr
def chat_with_ai(user_input, chat_history):
    """Send *user_input* to the chat engine and record the exchange.

    Returns the updated history (rendered by the Chatbot widget) and an
    empty string (which clears the input textbox).
    """
    answer = str(chat_engine.chat(user_input))
    chat_history.append((user_input, answer))
    return chat_history, ""
def gradio_chatbot():
    """Build and return the Gradio Blocks UI for the chatbot.

    Widgets are created top-to-bottom in render order; both clicking
    "Send" and pressing Enter in the textbox trigger the same handler.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Chat Interface for LlamaIndex")
        history_display = gr.Chatbot(label="LlamaIndex Chatbot")
        question_box = gr.Textbox(
            placeholder="Ask a question...", label="Enter your question"
        )
        send_button = gr.Button("Send")
        # Per-session conversation state, threaded through the handler.
        history_state = gr.State([])

        send_button.click(
            chat_with_ai,
            inputs=[question_box, history_state],
            outputs=[history_display, question_box],
        )
        question_box.submit(
            chat_with_ai,
            inputs=[question_box, history_state],
            outputs=[history_display, question_box],
        )
    return demo
if __name__ == "__main__":
    # Guard the entry point so importing this module (e.g. for reuse or
    # testing) does not launch the web server as a side effect.
    gradio_chatbot().launch(debug=True)