jamesliu23 committed
Commit 1b6979d · 1 Parent(s): de6961e
Files changed (1):
  1. app.py +53 -6

app.py CHANGED
@@ -1,9 +1,56 @@
+import os
 import streamlit as st
-from transformers import pipeline
+from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext
+from llama_index.llm_predictor.chatgpt import ChatGPTLLMPredictor
 
-pipe = pipeline('sentiment-analysis')
-text = st.text_area('enter some text')
 
-if text:
-    out = pipe(text)
-    st.json(out)
+index_name = "./index.json"
+documents_folder = "./documents"
+
+
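+# Build the vector index on first use: load it from disk if index.json exists,
+# otherwise index the documents folder and save the result.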
+@st.cache_resource
+def initialize_index(index_name, documents_folder):
+    llm_predictor = ChatGPTLLMPredictor()
+    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
+    if os.path.exists(index_name):
+        index = GPTSimpleVectorIndex.load_from_disk(index_name, service_context=service_context)
+    else:
+        documents = SimpleDirectoryReader(documents_folder).load_data()
+        index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
+        index.save_to_disk(index_name)
+
+    return index
+
+
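+# Cache query responses across reruns; the leading underscore in _index tells
+# Streamlit not to hash the index argument.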
+@st.cache_data(max_entries=200, persist=True)
+def query_index(_index, query_text):
+    response = _index.query(query_text)
+    return str(response)
+
+
+st.title("🦙 Llama Index Demo 🦙")
+st.header("Welcome to the Llama Index Streamlit Demo")
+st.write("Enter a query about Paul Graham's essays. You can check out the original essay [here](https://raw.githubusercontent.com/jerryjliu/llama_index/main/examples/paul_graham_essay/data/paul_graham_essay.txt). Your query will be answered using the essay as context, using embeddings from text-ada-002 and LLM completions from ChatGPT. You can read more about Llama Index and how this works in [our docs!](https://gpt-index.readthedocs.io/en/latest/index.html)")
+
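+# The index is only initialized once the user has provided an OpenAI API key.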
+index = None
+api_key = st.text_input("Enter your OpenAI API key here:", type="password")
+if api_key:
+    os.environ['OPENAI_API_KEY'] = api_key
+    index = initialize_index(index_name, documents_folder)
+
+
+if index is None:
+    st.warning("Please enter your api key first.")
+
+text = st.text_input("Query text:", value="What did the author do growing up?")
+
+if st.button("Run Query") and text is not None:
+    response = query_index(index, text)
+    st.markdown(response)
+
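+    # Report token usage from the most recent LLM and embedding calls.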
+    llm_col, embed_col = st.columns(2)
+    with llm_col:
+        st.markdown(f"LLM Tokens Used: {index.service_context.llm_predictor._last_token_usage}")
+
+    with embed_col:
+        st.markdown(f"Embedding Tokens Used: {index.service_context.embed_model._last_token_usage}")