# NOTE(review): "Spaces:" / "Runtime error" lines were Hugging Face Spaces
# error-page residue pasted into the source, not Python code — kept only as
# this comment so the module is importable.
"""RAG chatbot setup: Groq LLM + HuggingFace embeddings + in-memory Qdrant index.

Loads PDF/CSV documents from the local ``files`` directory, builds a hybrid
vector index, and exposes a context chat engine used by the Gradio UI below.
"""
# Third-party imports (deduplicated from the original paste; unused router/
# summary/OpenAI imports are kept in case other tooling relies on them).
from llama_index.core import (
    Settings,
    SimpleDirectoryReader,
    StorageContext,
    SummaryIndex,
    VectorStoreIndex,
)
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.query_engine.router_query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector
from llama_index.core.tools import QueryEngineTool
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.groq import Groq
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.qdrant import QdrantVectorStore
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import qdrant_client

import os
from dotenv import load_dotenv

# Read the Groq API key from a .env file (key name: "groqkey").
load_dotenv()
GROQ_API_KEY = os.getenv("groqkey")

# Global LlamaIndex settings: Groq-hosted Llama 3 70B for generation,
# BGE-large for embeddings.
embed_model = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en-v1.5")
llm = Groq(model="llama3-70b-8192", api_key=GROQ_API_KEY)
Settings.llm = llm
Settings.embed_model = embed_model

# Ingest PDFs and CSVs from ./files.
documents = SimpleDirectoryReader("files", required_exts=[".pdf", ".csv"]).load_data()

# In-memory Qdrant collection with hybrid (dense + sparse) retrieval enabled.
client = qdrant_client.QdrantClient(location=":memory:")
vector_store = QdrantVectorStore(
    collection_name="paper",
    client=client,
    enable_hybrid=True,
    batch_size=20,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents,
    embed_model=embed_model,
    storage_context=storage_context,
)

# Context chat engine with a bounded conversation memory (~3000 tokens).
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        "You are an AI assistant who answers the user questions"
    ),
)
def is_greeting(user_input):
    """Return True if *user_input* contains a greeting word or phrase.

    Matching is case-insensitive and uses word boundaries, so "hi" matches
    "Hi there" but no longer false-positives on words like "which" or "this"
    (the original substring check did).
    """
    import re  # local import: keeps this block self-contained
    greetings = ["hello", "hi", "hey", "good morning", "good afternoon", "good evening", "greetings"]
    text = user_input.lower().strip()
    return any(re.search(r"\b" + re.escape(greet) + r"\b", text) for greet in greetings)
def is_bye(user_input):
    """Return True if *user_input* contains a farewell/thanks word or phrase.

    Case-insensitive, word-boundary matching — "thanks" matches "thanks a lot"
    but no longer false-positives inside words like "thanksgiving" (the
    original substring check did).
    """
    import re  # local import: keeps this block self-contained
    farewells = ["bye", "thanks", "thank you", "thanks a lot", "bye bye", "have a good day"]
    text = user_input.lower().strip()
    return any(re.search(r"\b" + re.escape(phrase) + r"\b", text) for phrase in farewells)
import gradio as gr


def chat_with_ai(user_input, chat_history):
    """Handle one chat turn for the Gradio UI.

    Greetings and farewells get canned replies; everything else is sent to
    the global ``chat_engine``. The bot reply is appended to *chat_history*
    together with the list of source file names (when any were retrieved).

    Returns the updated history and "" (to clear the input textbox).
    """
    text = str(user_input)
    if is_greeting(text):
        chat_history.append((user_input, 'hi,how can i help you?'))
        return chat_history, ""
    if is_bye(text):
        chat_history.append((user_input, "you're welcome"))
        return chat_history, ""

    response = chat_engine.chat(user_input)

    # Collect unique source file names, preserving retrieval order.
    # .get() avoids a KeyError for nodes without 'file_name' metadata.
    ref = []
    for node in response.source_nodes:
        file_name = node.metadata.get('file_name')
        if file_name is not None and file_name not in ref:
            ref.append(file_name)

    if ref:
        complete_response = str(response.response) + "\n\n" + "references: " + str(ref)
        chat_history.append((user_input, complete_response))
    else:
        chat_history.append((user_input, str(response)))
    return chat_history, ""
def gradio_chatbot():
    """Build and return the Gradio Blocks UI wired to ``chat_with_ai``.

    Both the Send button and pressing Enter in the textbox submit the
    question; the textbox is cleared after each turn.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Chat Interface for llama3.1_70B with Groq and llama_index")
        chatbot = gr.Chatbot(label="llamaindex Chatbot")
        user_input = gr.Textbox(
            placeholder="Ask a question...", label="Enter your question"
        )
        submit_button = gr.Button("Send")
        chat_history = gr.State([])  # per-session (user, bot) message pairs
        submit_button.click(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])
        user_input.submit(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])
    return demo


# Guarded entry point so importing this module does not start a server.
if __name__ == "__main__":
    gradio_chatbot().launch(debug=True)