Update app.py
app.py
CHANGED
@@ -14,6 +14,7 @@ from langchain.chains.combine_documents import create_stuff_documents_chain
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import OpenAIEmbeddings, ChatOpenAI
 import faiss
+import tempfile
 
 # Load environment variables
 os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
@@ -35,6 +36,11 @@ if uploaded_file:
         st.write("Preview of uploaded data:")
         st.dataframe(data)
 
+        # Save the uploaded file to a temporary location
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv", mode="w") as temp_file:
+            temp_file_path = temp_file.name
+            data.to_csv(temp_file_path, index=False)
+
         # Tabs for LangChain and LlamaIndex
         tab1, tab2 = st.tabs(["LangChain", "LlamaIndex"])
 
@@ -42,8 +48,8 @@ if uploaded_file:
         with tab1:
             st.subheader("LangChain Query")
            try:
-                # Use CSVLoader with the
-                loader = CSVLoader(
+                # Use CSVLoader with the temporary file path
+                loader = CSVLoader(file_path=temp_file_path)
                 docs = loader.load_and_split()
 
                 # Preview the first document chunk
@@ -86,9 +92,9 @@ if uploaded_file:
         with tab2:
             st.subheader("LlamaIndex Query")
            try:
-                # Use PagedCSVReader directly on the uploaded
+                # Use PagedCSVReader directly on the uploaded file
                 csv_reader = PagedCSVReader()
-                docs = csv_reader.
+                docs = csv_reader.load_from_file(temp_file_path)
 
                 # Preview the first document chunk
                 if docs:
@@ -114,6 +120,10 @@ if uploaded_file:
                st.write(f"Answer: {response.response}")
            except Exception as e:
                st.error(f"Error processing with LlamaIndex: {e}")
+            finally:
+                # Clean up the temporary file
+                if 'temp_file_path' in locals() and os.path.exists(temp_file_path):
+                    os.remove(temp_file_path)
 
     except Exception as e:
         st.error(f"Error reading uploaded file: {e}")
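For reference, here is a minimal standalone sketch of the temp-file pattern this commit introduces on the LangChain side: the in-memory DataFrame is written to a NamedTemporaryFile so a path-based loader such as CSVLoader can read it, and the file is removed afterwards. The import path (langchain_community.document_loaders) and the helper name load_csv_docs are illustrative assumptions; variable names follow the diff, and the prompt, embedding, and retrieval steps from the rest of app.py are omitted.

import os
import tempfile

import pandas as pd
from langchain_community.document_loaders import CSVLoader  # assumed import path

def load_csv_docs(data: pd.DataFrame):
    """Write an in-memory DataFrame to a temp CSV and load it with CSVLoader."""
    temp_file_path = None
    try:
        # delete=False keeps the file on disk after the `with` block so CSVLoader can open it
        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv", mode="w") as temp_file:
            temp_file_path = temp_file.name
            data.to_csv(temp_file_path, index=False)

        loader = CSVLoader(file_path=temp_file_path)
        return loader.load_and_split()
    finally:
        # Clean up the temporary file even if loading fails
        if temp_file_path and os.path.exists(temp_file_path):
            os.remove(temp_file_path)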
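A comparable sketch for the LlamaIndex tab, assuming a recent llama-index where PagedCSVReader is provided by llama_index.readers.file and exposes load_data(file=Path(...)); the diff's csv_reader.load_from_file call and the helper name query_csv here are version-dependent assumptions rather than confirmed APIs.

from pathlib import Path

from llama_index.core import VectorStoreIndex  # assumed import path
from llama_index.readers.file import PagedCSVReader  # assumed import path

def query_csv(temp_file_path: str, question: str) -> str:
    """Index the temp CSV with LlamaIndex and answer a question about it."""
    # PagedCSVReader turns each CSV row into its own Document
    docs = PagedCSVReader().load_data(file=Path(temp_file_path))
    index = VectorStoreIndex.from_documents(docs)
    query_engine = index.as_query_engine()
    response = query_engine.query(question)
    return response.response

Keeping one Document per row means a question about a single record in the uploaded CSV can be answered without that row being split across chunks.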