anasmkh committed
Commit eaa0108 · verified · 1 Parent(s): 0829e5c

Create app.py

Files changed (1): app.py (+129 -0)
app.py ADDED
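# app.py: a small RAG chatbot. It indexes local documents with LlamaIndex,
# stores the embeddings in an in-memory Qdrant collection, answers questions
# with a Groq-hosted Llama 3 model, and serves the chat through a Gradio UI.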
from llama_index.core import Settings, SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.llms.groq import Groq
from llama_index.vector_stores.qdrant import QdrantVectorStore
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import qdrant_client
import gradio as gr

import os
from dotenv import load_dotenv

load_dotenv()
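
# Model setup: a BGE embedding model via LangChain's HuggingFaceEmbeddings
# wrapper (moved to langchain_community.embeddings in newer LangChain
# releases), and Groq's hosted Llama 3 70B as the chat LLM. Assigning both to
# Settings makes them the defaults for every LlamaIndex component below.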
embed_model = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en-v1.5")

# Read the key from the environment (.env) instead of hardcoding it in source.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

llm = Groq(model="llama3-70b-8192", api_key=GROQ_API_KEY)

Settings.llm = llm
Settings.embed_model = embed_model
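
# Ingest everything readable from the local "files" directory.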
documents = SimpleDirectoryReader("files").load_data()
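
# Vector store: an in-memory Qdrant instance, so the index is rebuilt from
# scratch on every start. enable_hybrid=True mixes dense and sparse (keyword)
# retrieval; in llama-index's Qdrant integration this pulls in an extra
# sparse-embedding dependency (fastembed) at runtime.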
client = qdrant_client.QdrantClient(
    location=":memory:",
)

vector_store = QdrantVectorStore(
    collection_name="paper",
    client=client,
    enable_hybrid=True,
    batch_size=20,
)

storage_context = StorageContext.from_defaults(vector_store=vector_store)

index = VectorStoreIndex.from_documents(
    documents,
    embed_model=embed_model,
    storage_context=storage_context,
)
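
# Chat engine: "context" mode retrieves relevant chunks for each message and
# inserts them into the prompt; the memory buffer keeps roughly the last
# 3000 tokens of conversation.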
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)

chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        "You are an AI assistant who answers the user's questions."
    ),
)
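
# Small-talk detection: cheap substring checks so greetings and goodbyes get a
# canned reply instead of a retrieval round-trip. Substring matching is
# deliberately loose ("hi" also matches "this"), a known trade-off here.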
def is_greeting(user_input):
    greetings = ["hello", "hi", "hey", "good morning", "good afternoon", "good evening", "greetings"]
    user_input_lower = user_input.lower().strip()
    return any(greet in user_input_lower for greet in greetings)


def is_bye(user_input):
    farewells = ["bye", "thanks", "thank you", "thanks a lot", "bye bye", "have a good day"]
    user_input_lower = user_input.lower().strip()
    return any(farewell in user_input_lower for farewell in farewells)
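
# Main chat callback: route small talk to canned replies; otherwise query the
# chat engine and append the answer, annotated with the source file names and
# page labels, to the Gradio chat history. The empty string returned as the
# second value clears the input textbox.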
def chat_with_ai(user_input, chat_history):
    if is_greeting(str(user_input)):
        response = "Hi, how can I help you?"
        chat_history.append((user_input, response))
        return chat_history, ""
    if is_bye(str(user_input)):
        response = "You're welcome!"
        chat_history.append((user_input, response))
        return chat_history, ""

    response = chat_engine.chat(user_input)
    full_text = response.response

    # Deduplicate source files, recording the page where each file first matched.
    references = response.source_nodes
    ref, pages = [], []
    for node in references:
        if node.metadata["file_name"] not in ref:
            ref.append(node.metadata["file_name"])
            # page_label is only present for paginated formats such as PDF.
            pages.append(node.metadata.get("page_label", "n/a"))

    if ref:
        complete_response = f"{full_text}\n\nReferences: {ref}\n\nPages: {pages}"
        chat_history.append((user_input, complete_response))
    else:
        chat_history.append((user_input, str(response)))

    return chat_history, ""
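
# Gradio UI: a Blocks layout with a chatbot pane, a question box, and a Send
# button. chat_history lives in gr.State, so each browser session keeps its
# own history. Both the button and pressing Enter submit the question.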
def gradio_chatbot():
    with gr.Blocks() as demo:
        gr.Markdown("# Chat Interface for Llama 3 70B with Groq and LlamaIndex")

        chatbot = gr.Chatbot(label="LlamaIndex Chatbot")
        user_input = gr.Textbox(
            placeholder="Ask a question...", label="Enter your question"
        )

        submit_button = gr.Button("Send")

        chat_history = gr.State([])

        submit_button.click(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])
        user_input.submit(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])

    return demo


gradio_chatbot().launch(debug=True)
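
# Assumed dependencies, since this commit pins none: llama-index,
# llama-index-llms-groq, llama-index-vector-stores-qdrant,
# llama-index-embeddings-langchain (so LlamaIndex can wrap the LangChain
# embedding), langchain, sentence-transformers, fastembed, qdrant-client,
# python-dotenv, and gradio, plus GROQ_API_KEY set in the environment or .env.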