bstraehle committed on
Commit
7bd0b80
·
1 Parent(s): 377a584

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -28,7 +28,7 @@ YOUTUBE_DIR = "docs/youtube"
28
  MODEL_NAME = "gpt-4"
29
 
30
  def invoke(openai_api_key, youtube_url, process_video, prompt):
31
- openai.api_key = openai_api_key
32
  if (process_video):
33
  if (os.path.isdir(CHROMA_DIR)):
34
  shutil.rmtree(CHROMA_DIR)
@@ -41,11 +41,10 @@ def invoke(openai_api_key, youtube_url, process_video, prompt):
41
  vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
42
  else:
43
  vector_db = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings())
44
- llm = ChatOpenAI(model_name = MODEL_NAME, temperature = 0)
45
  qa_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
46
  result = qa_chain({"query": prompt})
47
  #print(result)
48
- print(openai.api_key)
49
  return result["result"]
50
 
51
  description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data
 
28
  MODEL_NAME = "gpt-4"
29
 
30
  def invoke(openai_api_key, youtube_url, process_video, prompt):
31
+ #openai.api_key = openai_api_key
32
  if (process_video):
33
  if (os.path.isdir(CHROMA_DIR)):
34
  shutil.rmtree(CHROMA_DIR)
 
41
  vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
42
  else:
43
  vector_db = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings())
44
+ llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature = 0)
45
  qa_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
46
  result = qa_chain({"query": prompt})
47
  #print(result)
 
48
  return result["result"]
49
 
50
  description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data