bstraehle committed on
Commit
7696266
·
1 Parent(s): 0421e9d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -31
app.py CHANGED
@@ -45,37 +45,40 @@ def invoke(openai_api_key, use_rag, prompt):
45
  openai_api_key = openai_api_key,
46
  temperature = 0)
47
  if (use_rag):
48
- # Document loading
49
- #docs = []
50
- # Load PDF
51
- #loader = PyPDFLoader(PDF_URL)
52
- #docs.extend(loader.load())
53
- # Load Web
54
- #loader = WebBaseLoader(WEB_URL_1)
55
- #docs.extend(loader.load())
56
- # Load YouTube
57
- #loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
58
- # YOUTUBE_URL_2,
59
- # YOUTUBE_URL_3], YOUTUBE_DIR),
60
- # OpenAIWhisperParser())
61
- #docs.extend(loader.load())
62
- # Document splitting
63
- #text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150,
64
- # chunk_size = 1500)
65
- #splits = text_splitter.split_documents(docs)
66
- # Document storage
67
- #vector_db = Chroma.from_documents(documents = splits,
68
- # embedding = OpenAIEmbeddings(disallowed_special = ()),
69
- # persist_directory = CHROMA_DIR)
70
- # Document retrieval
71
- vector_db = Chroma(embedding_function = OpenAIEmbeddings(),
72
- persist_directory = CHROMA_DIR)
73
- rag_chain = RetrievalQA.from_chain_type(llm,
74
- chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
75
- retriever = vector_db.as_retriever(search_kwargs = {"k": 3}),
76
- return_source_documents = True)
77
- result = rag_chain({"query": prompt})
78
- result = result["result"]
 
 
 
79
  else:
80
  chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
81
  result = chain.run({"question": prompt})
 
45
  openai_api_key = openai_api_key,
46
  temperature = 0)
47
  if (use_rag):
48
+ try:
49
+ # Document loading
50
+ #docs = []
51
+ # Load PDF
52
+ #loader = PyPDFLoader(PDF_URL)
53
+ #docs.extend(loader.load())
54
+ # Load Web
55
+ #loader = WebBaseLoader(WEB_URL_1)
56
+ #docs.extend(loader.load())
57
+ # Load YouTube
58
+ #loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
59
+ # YOUTUBE_URL_2,
60
+ # YOUTUBE_URL_3], YOUTUBE_DIR),
61
+ # OpenAIWhisperParser())
62
+ #docs.extend(loader.load())
63
+ # Document splitting
64
+ #text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150,
65
+ # chunk_size = 1500)
66
+ #splits = text_splitter.split_documents(docs)
67
+ # Document storage
68
+ #vector_db = Chroma.from_documents(documents = splits,
69
+ # embedding = OpenAIEmbeddings(disallowed_special = ()),
70
+ # persist_directory = CHROMA_DIR)
71
+ # Document retrieval
72
+ vector_db = Chroma(embedding_function = OpenAIEmbeddings(),
73
+ persist_directory = CHROMA_DIR)
74
+ rag_chain = RetrievalQA.from_chain_type(llm,
75
+ chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
76
+ retriever = vector_db.as_retriever(search_kwargs = {"k": 3}),
77
+ return_source_documents = True)
78
+ result = rag_chain({"query": prompt})
79
+ result = result["result"]
80
+ except Exception as e:
81
+ return e
82
  else:
83
  chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
84
  result = chain.run({"question": prompt})