import streamlit as st
import pandas as pd
import os
import tempfile
from dotenv import load_dotenv
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.readers.file import PagedCSVReader
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.faiss import FaissVectorStore
from llama_index.core.ingestion import IngestionPipeline
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.vectorstores import FAISS as LangChainFAISS
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
import faiss
# Load environment variables (OPENAI_API_KEY is read from .env or the environment)
load_dotenv()
if not os.getenv("OPENAI_API_KEY"):
    st.error("OPENAI_API_KEY is not set. Add it to a .env file or the environment.")
    st.stop()
# Global OpenAI and FAISS settings
EMBED_DIMENSION = 512
llama_llm = OpenAI(model="gpt-3.5-turbo")
llama_embedding_model = OpenAIEmbedding(model="text-embedding-3-small", dimensions=EMBED_DIMENSION)
# Register the models as LlamaIndex defaults; without this, indexing would fall
# back to the default embedding model, whose dimension does not match the FAISS index
Settings.llm = llama_llm
Settings.embed_model = llama_embedding_model
langchain_llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
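# Note: EMBED_DIMENSION must agree on three sides: the two faiss.IndexFlatL2
# indexes below and the `dimensions` argument of each OpenAI embedding model
# (text-embedding-3-small supports truncating its output to a chosen size).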
# Streamlit app
st.title("Streamlit App with LangChain and LlamaIndex")
# File uploader
uploaded_file = st.file_uploader("Upload a CSV file", type=["csv"])
if uploaded_file:
    # Persist the upload to a temporary file so the path-based loaders below
    # (CSVLoader, SimpleDirectoryReader) can read it from disk
    temp_path = os.path.join(tempfile.gettempdir(), uploaded_file.name)
    with open(temp_path, "wb") as f:
        f.write(uploaded_file.getbuffer())

    data = pd.read_csv(temp_path)
    st.write("Preview of uploaded data:")
    st.dataframe(data)

    # Tabs
    tab1, tab2 = st.tabs(["Chat with CSV using LangChain", "Chat with CSV using LlamaIndex"])
    # LangChain Tab
    with tab1:
        st.subheader("LangChain Query")
        loader = CSVLoader(file_path=temp_path)
        docs = loader.load_and_split()

        # Preview the first document
        st.write("Preview of a document chunk (LangChain):")
        st.text(docs[0].page_content)
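        # CSVLoader emits one Document per CSV row; load_and_split then applies
        # LangChain's default text splitter, so very wide rows may end up as
        # more than one chunk.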
        # LangChain FAISS VectorStore; the embedding model is pinned to the same
        # dimension as the IndexFlatL2 index so vector sizes match, and the
        # docstore/id mapping required by the constructor is supplied explicitly
        langchain_index = faiss.IndexFlatL2(EMBED_DIMENSION)
        langchain_vector_store = LangChainFAISS(
            embedding_function=OpenAIEmbeddings(model="text-embedding-3-small", dimensions=EMBED_DIMENSION),
            index=langchain_index,
            docstore=InMemoryDocstore(),
            index_to_docstore_id={},
        )
        langchain_vector_store.add_documents(docs)
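        # add_documents embeds every chunk with the embedding function above and
        # stores the vectors in the flat (exact, L2-distance) FAISS index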
        # LangChain Retrieval Chain
        retriever = langchain_vector_store.as_retriever()
        system_prompt = (
            "You are an assistant for question-answering tasks. "
            "Use the following pieces of retrieved context to answer "
            "the question. If you don't know the answer, say that you "
            "don't know. Use three sentences maximum and keep the "
            "answer concise.\n\n{context}"
        )
        prompt = ChatPromptTemplate.from_messages(
            [("system", system_prompt), ("human", "{input}")]
        )
        question_answer_chain = create_stuff_documents_chain(langchain_llm, prompt)
        langchain_rag_chain = create_retrieval_chain(retriever, question_answer_chain)
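        # create_retrieval_chain wires the retriever to the "stuff" chain: the
        # retrieved chunks are concatenated into {context} and sent to the chat
        # model in a single call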
        # Query input for LangChain
        query = st.text_input("Ask a question about your data (LangChain):")
        if query:
            answer = langchain_rag_chain.invoke({"input": query})
            st.write(f"Answer: {answer['answer']}")
    # LlamaIndex Tab
    with tab2:
        st.subheader("LlamaIndex Query")
        # Use PagedCSVReader for CSV loading
        csv_reader = PagedCSVReader()
        reader = SimpleDirectoryReader(
            input_files=[temp_path],
            file_extractor={".csv": csv_reader},
        )
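        # PagedCSVReader yields one Document per CSV row, rendered as
        # "column: value" lines, which keeps each row self-describing for the LLM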
        docs = reader.load_data()

        # Preview the first document
        st.write("Preview of a document chunk (LlamaIndex):")
        st.text(docs[0].text)
        # Initialize FAISS Vector Store
        llama_faiss_index = faiss.IndexFlatL2(EMBED_DIMENSION)
        llama_vector_store = FaissVectorStore(faiss_index=llama_faiss_index)

        # Create the ingestion pipeline and process the data
        pipeline = IngestionPipeline(vector_store=llama_vector_store, documents=docs)
        nodes = pipeline.run()
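        # With no explicit transformations, pipeline.run() applies the pipeline's
        # defaults (in recent llama-index versions, sentence splitting plus the
        # embedding model registered on Settings), writes the resulting vectors
        # into the attached FAISS store, and returns the processed nodes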
        # Create a query engine
        llama_index = VectorStoreIndex(nodes)
        query_engine = llama_index.as_query_engine(similarity_top_k=2)
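        # similarity_top_k=2 retrieves the two nearest rows for each query; the
        # answer is then synthesized by the LLM registered on Settings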
        # Query input for LlamaIndex
        query = st.text_input("Ask a question about your data (LlamaIndex):")
        if query:
            response = query_engine.query(query)
            st.write(f"Answer: {response.response}")