import os
from getpass import getpass

# Read the OpenAI key from the environment, prompting for it when unset, and
# export it so the OpenAI clients below can pick it up.
openai_api_key = os.getenv("OPENAI_API_KEY") or getpass("OpenAI API key: ")
os.environ["OPENAI_API_KEY"] = openai_api_key



from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings

# Global defaults used by every index and engine built below.
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")

from llama_index.core import SimpleDirectoryReader

# Load every readable file in the ./files folder as Document objects.
documents = SimpleDirectoryReader("files").load_data()
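
# Hedged variant: SimpleDirectoryReader also accepts filters such as
# required_exts when the folder mixes file types (the extension list below
# is illustrative, not part of the original script):
# documents = SimpleDirectoryReader("files", required_exts=[".pdf"]).load_data()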

from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client

# In-memory Qdrant instance; nothing is persisted between runs.
client = qdrant_client.QdrantClient(location=":memory:")

# enable_hybrid=True stores sparse (keyword-style) vectors alongside the dense
# embeddings so queries can mix semantic and lexical matching; the sparse
# model is supplied by the fastembed package by default.
vector_store = QdrantVectorStore(
    collection_name="paper",
    client=client,
    enable_hybrid=True,
    batch_size=20,
)
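
# Hedged alternative (assuming persistence is wanted): Qdrant can run against
# a local on-disk store instead of RAM, so the collection survives restarts.
# The path is illustrative:
# client = qdrant_client.QdrantClient(path="./qdrant_data")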

# Route the embedded chunks into the Qdrant collection while building the index.
storage_context = StorageContext.from_defaults(vector_store=vector_store)

index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)

# Standalone one-shot query interface over the same index, using hybrid
# retrieval; it is independent of the chat engine defined below.
query_engine = index.as_query_engine(
    vector_store_query_mode="hybrid"
)
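
# Optional sanity check of hybrid retrieval before wiring up the UI; the
# question is illustrative, and uncommenting it spends one LLM call.
# print(query_engine.query("What is the main topic of the indexed documents?"))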

from llama_index.core.memory import ChatMemoryBuffer

# Rolling conversation memory, truncated to roughly 3,000 tokens.
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)

# "context" mode retrieves relevant chunks for every turn and injects them
# into the system prompt before the LLM answers.
chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        "You are an AI assistant who answers the user's questions."
    ),
)
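
# Optional sanity check that the memory carries context across turns (the
# questions are illustrative; uncommenting spends two LLM calls):
# print(chat_engine.chat("Summarize the indexed documents in one sentence."))
# print(chat_engine.chat("Repeat what you just said in five words."))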

import gradio as gr

def chat_with_ai(user_input, chat_history):
    response = chat_engine.chat(user_input)

    # Collect the file names and page labels of the retrieved source chunks.
    ref, pages = [], []
    for node in response.source_nodes:
        file_name = node.metadata.get("file_name")
        if file_name and file_name not in ref:
            ref.append(file_name)
        page = node.metadata.get("page_label")
        if page:
            pages.append(page)

    # Show the references only when source chunks were actually retrieved.
    if ref or pages:
        complete_response = (
            str(response)
            + "\n\nreferences: " + str(ref)
            + "\n\npages: " + str(pages)
        )
        chat_history.append((user_input, complete_response))
    else:
        chat_history.append((user_input, str(response)))

    # Return the updated history for the Chatbot and clear the input box.
    return chat_history, ""

def clear_history():
    # Reset the LLM-side conversation memory along with the visible history
    # and the gr.State list that backs it.
    chat_engine.reset()
    return [], "", []

def gradio_chatbot():
    with gr.Blocks() as demo:
        gr.Markdown("# Chat Interface for LlamaIndex")

        chatbot = gr.Chatbot(label="LlamaIndex Chatbot")
        user_input = gr.Textbox(
            placeholder="Ask a question...", label="Enter your question"
        )

        submit_button = gr.Button("Send")
        btn_clear = gr.Button("Delete Context")

        # Per-session conversation history backing the Chatbot component.
        chat_history = gr.State([])

        submit_button.click(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])
        user_input.submit(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])

        # Clearing must also reset the gr.State, otherwise the old turns
        # reappear with the next message.
        btn_clear.click(fn=clear_history, outputs=[chatbot, user_input, chat_history])

    return demo

gradio_chatbot().launch(debug=True)