bstraehle committed
Commit 616f50d · 1 Parent(s): 0a1cd5f

Update app.py

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -20,7 +20,7 @@ template = """Use the following pieces of context to answer the question at the
 an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer.
 {context} Question: {question} Helpful Answer: """
 
-QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)
+CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)
 
 CHROMA_DIR = "docs/chroma"
 YOUTUBE_DIR = "docs/youtube"
@@ -40,13 +40,11 @@ def invoke(openai_api_key, use_rag, prompt):
         text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
         splits = text_splitter.split_documents(docs)
         vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
-        rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
+        rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": CHAIN_PROMPT})
         result = rag_chain({"query": prompt})
     else:
-        #qa_chain = RetrievalQA.from_chain_type(llm, retriever = None, return_source_documents = True, cchain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
-        #result = qa_chain({"query": prompt})
-        chain = LLMChain(llm = llm)
-        result = chain({"query": prompt})
+        chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
+        result = chain.run(prompt)
     #print(result)
     return result["result"]
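
For context, a minimal sketch of what the RAG branch of invoke does after this commit, assuming the legacy LangChain 0.0.x imports app.py appears to use; the helper name answer_with_rag and the freestanding llm and docs parameters are illustrative stand-ins for values app.py builds elsewhere, not part of the commit, and the template is abridged.

from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

CHROMA_DIR = "docs/chroma"

# Abridged version of the template in app.py.
template = """Use the following pieces of context to answer the question at the end. Keep the answer as concise as possible.
{context} Question: {question} Helpful Answer: """
CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

def answer_with_rag(llm, docs, prompt):
    # Split the source documents into overlapping chunks for embedding.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
    splits = text_splitter.split_documents(docs)
    # Embed the chunks and persist them in a local Chroma collection.
    vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
    # Retrieve the 3 most similar chunks per query and stuff them into
    # CHAIN_PROMPT's {context} slot before calling the LLM.
    rag_chain = RetrievalQA.from_chain_type(llm,
                                            retriever = vector_db.as_retriever(search_kwargs = {"k": 3}),
                                            return_source_documents = True,
                                            chain_type_kwargs = {"prompt": CHAIN_PROMPT})
    result = rag_chain({"query": prompt})
    # RetrievalQA returns a dict with the answer under the "result" key.
    return result["result"]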
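
One follow-up this diff leaves open, if I read the legacy LangChain API right: chain.run with a single positional argument expects a chain with exactly one input variable, while CHAIN_PROMPT declares two ({context} and {question}); and even with a single-variable prompt, run returns a bare string that the shared return result["result"] could not index, since LLMChain exposes its completion under the "text" output key (RetrievalQA uses "result"). A hedged sketch of one way the non-RAG branch could be reconciled; answer_without_rag is a hypothetical helper, not part of the commit.

from langchain.chains import LLMChain

def answer_without_rag(llm, prompt):
    chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
    # CHAIN_PROMPT declares both {context} and {question}, so supply both;
    # an empty context keeps the template's wording intact without retrieval.
    result = chain({"context": "", "question": prompt})
    # LLMChain stores its completion under "text", not "result".
    return result["text"]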