Update app.py
app.py CHANGED
@@ -1,7 +1,6 @@
 import streamlit as st
 from llama_index.core import VectorStoreIndex, Document
 from llama_index.llms.openai import OpenAI
-from llama_index.core import Settings
 import os
 import pdfplumber
 from docx import Document as DocxDocument
@@ -18,8 +17,7 @@ if 'openai_api_key' not in st.session_state:
     st.session_state.openai_api_key = ""
 
 # Input for OpenAI API Key
-st.session_state.openai_api_key = st.sidebar.text_input("Enter your OpenAI API Key:",
-                                                         type="password",
+st.session_state.openai_api_key = st.sidebar.text_input("Enter your OpenAI API Key:", type="password",
                                                          value=st.session_state.openai_api_key)
 
 # Initialize session state for messages
@@ -46,8 +44,12 @@ def read_docx(file):
 
 @st.cache_resource(show_spinner=False)
 def load_data(uploaded_files):
+    # Create the LLM instance outside the cache
+    llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5,
+                 system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features.")
+
+    docs = []
     with st.spinner("Loading and indexing the documents – hang tight! This should take 1-2 minutes."):
-        docs = []
         for uploaded_file in uploaded_files:
             if uploaded_file.type == "application/pdf":
                 text = read_pdf(uploaded_file)
@@ -56,10 +58,7 @@ def load_data(uploaded_files):
                 text = read_docx(uploaded_file)
             docs.append(Document(text=text))
 
-
-        system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features.")
-
-        index = VectorStoreIndex.from_documents(docs, settings=Settings.llm)
+        index = VectorStoreIndex.from_documents(docs, settings=llm)
     return index
 
 # Function to save the conversation
@@ -162,4 +161,4 @@ if st.session_state.show_conversations:
     else:
         st.sidebar.write("No previous conversations found.")
 else:
-    st.sidebar.write("Previous conversations are hidden. Click 'Toggle Previous Conversations' to show.")
+    st.sidebar.write("Previous conversations are hidden. Click 'Toggle Previous Conversations' to show.")
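For reference, a minimal sketch of one conventional way to wire the same LLM configuration in llama_index 0.10+, registering the model through the global Settings object instead of passing it to VectorStoreIndex.from_documents. This is an assumption about typical llama_index usage, not something this commit does; read_pdf and read_docx are the helper functions defined earlier in app.py.

# Sketch only (assumption): Settings-based LLM wiring, not part of this commit.
import streamlit as st
from llama_index.core import VectorStoreIndex, Document, Settings
from llama_index.llms.openai import OpenAI

@st.cache_resource(show_spinner=False)
def load_data(uploaded_files):
    # Register the LLM globally so both indexing and later querying use it.
    Settings.llm = OpenAI(
        model="gpt-3.5-turbo",
        temperature=0.5,
        system_prompt="You are an expert on the Streamlit Python library ...",  # abbreviated; full prompt as in the commit
    )
    docs = []
    with st.spinner("Loading and indexing the documents – hang tight! This should take 1-2 minutes."):
        for uploaded_file in uploaded_files:
            if uploaded_file.type == "application/pdf":
                text = read_pdf(uploaded_file)   # helper defined earlier in app.py
            else:
                text = read_docx(uploaded_file)  # helper defined earlier in app.py
            docs.append(Document(text=text))
        # No settings= keyword needed here once Settings.llm is set.
        return VectorStoreIndex.from_documents(docs)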