bstraehle committed on
Commit
96ff3f4
·
1 Parent(s): 42acd07

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -27,6 +27,8 @@ CHROMA_DIR = "docs/chroma/"
27
 
28
  MODEL_NAME = "gpt-4"
29
 
 
 
30
  def invoke(openai_api_key, youtube_url, process_video, prompt):
31
  openai.api_key = openai_api_key
32
  if (process_video):
@@ -37,10 +39,10 @@ def invoke(openai_api_key, youtube_url, process_video, prompt):
37
  splits = text_splitter.split_documents(docs)
38
  vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
39
  llm = ChatOpenAI(model_name = MODEL_NAME, temperature = 0)
40
- qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
41
  else:
42
  print(222)
43
- result = qa_chain({"query": prompt})
44
  shutil.rmtree(YOUTUBE_DIR)
45
  #shutil.rmtree(CHROMA_DIR)
46
  return result["result"]
@@ -54,7 +56,7 @@ description = """The app demonstrates how to use a <strong>Large Language Model<
54
 
55
  gr.close_all()
56
  demo = gr.Interface(fn=invoke,
57
- inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Radio([True, False], label="Process Video", value = "Yes"), gr.Textbox(label = "Prompt", value = "GPT-4 human level performance", lines = 1)],
58
  outputs = [gr.Textbox(label = "Completion", lines = 1)],
59
  title = "Generative AI - LLM & RAG",
60
  description = description)
 
27
 
28
  MODEL_NAME = "gpt-4"
29
 
30
+ QA_CHAIN = None;
31
+
32
  def invoke(openai_api_key, youtube_url, process_video, prompt):
33
  openai.api_key = openai_api_key
34
  if (process_video):
 
39
  splits = text_splitter.split_documents(docs)
40
  vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
41
  llm = ChatOpenAI(model_name = MODEL_NAME, temperature = 0)
42
+ QA_CHAIN = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
43
  else:
44
  print(222)
45
+ result = QA_CHAIN({"query": prompt})
46
  shutil.rmtree(YOUTUBE_DIR)
47
  #shutil.rmtree(CHROMA_DIR)
48
  return result["result"]
 
56
 
57
  gr.close_all()
58
  demo = gr.Interface(fn=invoke,
59
+ inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Radio([True, False], label="Process Video", value = True), gr.Textbox(label = "Prompt", value = "GPT-4 human level performance", lines = 1)],
60
  outputs = [gr.Textbox(label = "Completion", lines = 1)],
61
  title = "Generative AI - LLM & RAG",
62
  description = description)