File size: 1,625 Bytes
ab5fc7c
dc76948
ab5fc7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dc76948
 
 
 
 
ab5fc7c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import streamlit as st
from log10.load import log10, log10_session
import openai

@st.cache_resource
def init_log10():
    """Instrument the openai client with log10 logging.

    Cached with st.cache_resource so the instrumentation is applied exactly
    once per Streamlit server process, not on every script rerun.
    """
    log10(openai)


# Example from: https://python.langchain.com/en/latest/use_cases/question_answering.html
# Download the state_of_the_union.txt here: https://raw.githubusercontent.com/hwchase17/langchain/master/docs/modules/state_of_the_union.txt
# This example requires: pip install chromadb

# Load Your Documents
from langchain.document_loaders import TextLoader

# Create Your Index
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chat_models import ChatOpenAI

@st.cache_resource
def init_vector_db(document_path: str = './state_of_the_union.txt'):
    """Build (and cache) a Chroma vector index over a plain-text document.

    Cached with st.cache_resource so the document is embedded only once per
    Streamlit server process rather than on every rerun.

    Args:
        document_path: Path to the text file to index. Defaults to the
            State of the Union transcript expected next to this script
            (see the download link in the comments above).

    Returns:
        The index object produced by VectorstoreIndexCreator.from_loaders(),
        which supports query_with_sources().
    """
    loader = TextLoader(document_path)
    # Split into 1000-character chunks with no overlap, embed each chunk with
    # OpenAI embeddings, and store the vectors in a Chroma vector store.
    index = VectorstoreIndexCreator(
        vectorstore_cls=Chroma,
        embedding=OpenAIEmbeddings(),
        text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0),
    ).from_loaders([loader])
    return index

# --- App entry point ---------------------------------------------------------
init_log10()
index = init_vector_db()

st.title('State of the Union')
query = st.text_input("Question:", "What did the president say about Ketanji Brown Jackson?")

# To combine all queries into a single log10 session, comment out the two
# lines below (the `with` block) and uncomment this line instead:
#answer = index.query_with_sources(query, llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))

# Each Streamlit rerun opens its own log10 session, so every question is
# logged as a separate session.
with log10_session():
    answer = index.query_with_sources(query, llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))

st.write(answer)