File size: 3,818 Bytes
944593e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a00408f
944593e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import os
import tempfile

import faiss
import pandas as pd
import streamlit as st
from dotenv import load_dotenv
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.vectorstores import FAISS as LangChainFAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.ingestion import IngestionPipeline
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PagedCSVReader
from llama_index.vector_stores.faiss import FaissVectorStore

# Load environment variables from a local .env file (if present) so the
# OpenAI clients below can authenticate.
load_dotenv()  # bug fix: load_dotenv was imported but never called, so .env was ignored
_api_key = os.getenv("OPENAI_API_KEY")
if not _api_key:
    # Fail fast with an actionable message instead of the opaque TypeError
    # that `os.environ[...] = None` would raise.
    raise RuntimeError(
        "OPENAI_API_KEY is not set; add it to your environment or a .env file"
    )
os.environ["OPENAI_API_KEY"] = _api_key

# Set global OpenAI parameters.
# EMBED_DIMENSION must match both FAISS indexes and the embedding model output.
EMBED_DIMENSION = 512
llama_llm = OpenAI(model="gpt-3.5-turbo")  # LlamaIndex-side LLM
llama_embedding_model = OpenAIEmbedding(model="text-embedding-3-small", dimensions=EMBED_DIMENSION)
langchain_llm = ChatOpenAI(model="gpt-4o")  # LangChain-side LLM

# ---------------------------------------------------------------------------
# Streamlit UI: upload a CSV, then ask questions about it through either a
# LangChain retrieval chain (tab 1) or a LlamaIndex query engine (tab 2).
# ---------------------------------------------------------------------------
st.title("Streamlit App with LangChain and LlamaIndex")

# File uploader
uploaded_file = st.file_uploader("Upload a CSV file", type=["csv"])
if uploaded_file:
    data = pd.read_csv(uploaded_file)
    st.write("Preview of uploaded data:")
    st.dataframe(data)

    # Bug fix: CSVLoader and SimpleDirectoryReader expect a filesystem path,
    # not Streamlit's in-memory UploadedFile — and pd.read_csv above has
    # already consumed that buffer.  Rewind and persist the upload to a temp
    # file, then hand its path to both loaders.  (The script reruns on every
    # interaction, so the temp file is recreated each run; OS temp cleanup
    # reclaims old ones.)
    uploaded_file.seek(0)
    with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as tmp:
        tmp.write(uploaded_file.getbuffer())
        csv_path = tmp.name

    # Tabs
    tab1, tab2 = st.tabs(["Chat w CSV using LangChain", "Chat w CSV using LlamaIndex"])

    # LangChain Tab
    with tab1:
        st.subheader("LangChain Query")
        loader = CSVLoader(file_path=csv_path)
        docs = loader.load_and_split()

        # LangChain FAISS VectorStore.
        # Bug fixes: the FAISS wrapper requires a docstore and an
        # index->docstore-id map, and the embedding model must emit vectors
        # matching the index dimension (default OpenAIEmbeddings produces
        # 1536-dim vectors, which a 512-dim IndexFlatL2 rejects).
        langchain_index = faiss.IndexFlatL2(EMBED_DIMENSION)
        langchain_vector_store = LangChainFAISS(
            embedding_function=OpenAIEmbeddings(
                model="text-embedding-3-small", dimensions=EMBED_DIMENSION
            ),
            index=langchain_index,
            docstore=InMemoryDocstore(),
            index_to_docstore_id={},
        )
        langchain_vector_store.add_documents(docs)

        # LangChain Retrieval Chain
        retriever = langchain_vector_store.as_retriever()
        system_prompt = (
            "You are an assistant for question-answering tasks. "
            "Use the following pieces of retrieved context to answer "
            "the question. If you don't know the answer, say that you "
            "don't know. Use three sentences maximum and keep the "
            "answer concise.\n\n{context}"
        )
        prompt = ChatPromptTemplate.from_messages(
            [("system", system_prompt), ("human", "{input}")]
        )
        question_answer_chain = create_stuff_documents_chain(langchain_llm, prompt)
        langchain_rag_chain = create_retrieval_chain(retriever, question_answer_chain)

        # Query input for LangChain
        query = st.text_input("Ask a question about your data (LangChain):", key="langchain_query")
        if query:
            answer = langchain_rag_chain.invoke({"input": query})
            st.write(f"Answer: {answer['answer']}")

    # LlamaIndex Tab
    with tab2:
        st.subheader("LlamaIndex Query")
        csv_reader = SimpleDirectoryReader(
            input_files=[csv_path],  # bug fix: pass the path, not the UploadedFile
            file_extractor={".csv": PagedCSVReader()},
        )
        docs = csv_reader.load_data()

        # LlamaIndex FAISS VectorStore.
        # Pass the embedding model explicitly as a pipeline transformation so
        # nodes carry embeddings when they reach the FAISS store, instead of
        # relying on unset global defaults (llama_embedding_model was
        # previously defined but never used).
        llama_faiss_index = faiss.IndexFlatL2(EMBED_DIMENSION)
        llama_vector_store = FaissVectorStore(faiss_index=llama_faiss_index)
        pipeline = IngestionPipeline(
            transformations=[llama_embedding_model],
            vector_store=llama_vector_store,
            documents=docs,
        )
        nodes = pipeline.run()

        # LlamaIndex Query Engine — wire in the module-level LLM and embedding
        # model explicitly (both were previously unused).
        llama_index = VectorStoreIndex(nodes, embed_model=llama_embedding_model)
        query_engine = llama_index.as_query_engine(llm=llama_llm, similarity_top_k=2)

        # Query input for LlamaIndex
        query = st.text_input("Ask a question about your data (LlamaIndex):", key="llamaindex_query")
        if query:
            response = query_engine.query(query)
            st.write(f"Answer: {response.response}")