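# RAG chatbot demo: load local documents with LlamaIndex, index them in an
# in-memory Qdrant collection with hybrid (dense + sparse) retrieval, and
# chat over them through a Gradio interface that supports file uploads.
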
import os
from getpass import getpass

# Read the API key from the environment, prompting interactively if it is unset
openai_api_key = os.getenv("OPENAI_API_KEY") or getpass("Enter your OpenAI API key: ")
os.environ["OPENAI_API_KEY"] = openai_api_key

from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings

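# Register the LLM and embedding model globally; every index and engine
# below picks these up from Settings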
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4)
Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")

from llama_index.core import SimpleDirectoryReader

# Load any documents already present in ./new_file (the directory must exist
# and contain at least one readable file, or SimpleDirectoryReader raises)
documents = SimpleDirectoryReader("new_file").load_data()

from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client

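# In-memory Qdrant instance: nothing persists across restarts. Point the
# client at a running server (url=...) to keep the collection durable.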
client = qdrant_client.QdrantClient(
    location=":memory:",
)

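# enable_hybrid=True stores sparse vectors next to the dense ones; llama-index
# generates them with a fastembed sparse model by default, so the `fastembed`
# package must be installed for hybrid mode to work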
vector_store = QdrantVectorStore(
    collection_name="paper",
    client=client,
    enable_hybrid=True,
    batch_size=20,
)

storage_context = StorageContext.from_defaults(vector_store=vector_store)

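# Chunk and embed the documents, writing dense + sparse vectors to Qdrant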
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)

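# Hybrid mode fuses dense (semantic) and sparse (keyword) scores at query time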
query_engine = index.as_query_engine(
    vector_store_query_mode="hybrid"
)
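
# Example one-off query (hypothetical question; the answer depends on the
# contents of ./new_file):
#   print(query_engine.query("What problem does the paper address?"))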

from llama_index.core.memory import ChatMemoryBuffer

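# Keep roughly the last 3,000 tokens of conversation as context for follow-ups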
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)

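# "context" chat mode retrieves relevant chunks for every message and feeds
# them to the model alongside the system prompt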
chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        "You are an AI assistant who answers the user's questions "
        "using the retrieved document context."
    ),
)

import gradio as gr

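# Gradio callback: run one chat turn, attach source references, and append
# the (user, assistant) pair to the visible history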
def chat_with_ai(user_input, chat_history):
    response = chat_engine.chat(user_input)

    # Collect the unique source file names the retriever cited
    ref = []
    for node in response.source_nodes:
        file_name = node.metadata.get("file_name")
        if file_name and file_name not in ref:
            ref.append(file_name)

    complete_response = str(response)
    if ref:
        complete_response += "\n\nReferences: " + ", ".join(ref)
    chat_history.append((user_input, complete_response))

    return chat_history, ""

def clear_history():
    # Reset the chat engine's memory and clear the transcript, the input box,
    # and the stored history state
    chat_engine.reset()
    return [], "", []

def upload_file(file):
    if file is None:
        return "No file uploaded!"

    # gr.File may hand back a list when multiple files are allowed
    if isinstance(file, list):
        file = file[0]

    # Work out a file name; newer Gradio versions pass a temp-file path (str),
    # older ones a file-like object or a dict
    if isinstance(file, str):
        file_name = os.path.basename(file)
    elif hasattr(file, "name"):
        file_name = os.path.basename(file.name)
    elif isinstance(file, dict):
        file_name = file.get("name", "uploaded_file")
    else:
        file_name = "uploaded_file"

    os.makedirs("new_file", exist_ok=True)
    file_path = os.path.join("new_file", file_name)

    # Read the uploaded bytes, whichever shape Gradio delivered them in
    if isinstance(file, str):
        with open(file, "rb") as src:
            content = src.read()
    elif hasattr(file, "read"):
        content = file.read()
    elif isinstance(file, dict) and "data" in file:
        content = file["data"]
    else:
        return "Uploaded file format not recognized."

    with open(file_path, "wb") as f:
        f.write(content)

    # Index the new document so it becomes searchable without a restart
    new_docs = SimpleDirectoryReader(input_files=[file_path]).load_data()
    for doc in new_docs:
        index.insert(doc)

    return f"File {file_name} uploaded successfully!"


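# Assemble the Blocks UI: chat transcript, question box, clear button, and
# file-upload controls wired to the callbacks above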
def gradio_chatbot():
    with gr.Blocks() as demo:
        gr.Markdown("# Chat Interface for LlamaIndex")

        chatbot = gr.Chatbot(label="LlamaIndex Chatbot")
        user_input = gr.Textbox(
            placeholder="Ask a question...", label="Enter your question"
        )

        submit_button = gr.Button("Send")
        btn_clear = gr.Button("Delete Context")

        # File upload component plus a status box, so upload feedback does not
        # overwrite the question textbox
        file_upload = gr.File(label="Upload a file")
        upload_status = gr.Textbox(label="Upload status", interactive=False)

        upload_button = gr.Button("Upload File")

        chat_history = gr.State([])

        # Save the uploaded file into ./new_file and add it to the index
        upload_button.click(upload_file, inputs=file_upload, outputs=upload_status)

        # Define the chat interaction
        submit_button.click(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])

        user_input.submit(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])
        btn_clear.click(fn=clear_history, outputs=[chatbot, user_input, chat_history])

    return demo

if __name__ == "__main__":
    gradio_chatbot().launch(debug=True)
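# launch(debug=True) blocks and prints tracebacks to the console; pass
# share=True as well if a temporary public link is needed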