bstraehle committed
Commit 9960268 · Parent: 09c68d4

Update app.py

Files changed (1):
  1. app.py +8 -6
app.py CHANGED
@@ -25,19 +25,21 @@ QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], temp
 YOUTUBE_DIR = "docs/youtube/"
 CHROMA_DIR = "docs/chroma/"
 
-def invoke(openai_api_key, youtube_url, prompt, process_video):
+MODEL_NAME = "gpt-4"
+
+def invoke(openai_api_key, youtube_url, process_video, prompt):
     openai.api_key = openai_api_key
-    print(process_video)
-    if (os.path.isdir(CHROMA_DIR) == False):
+    if (process_video):
         print(111)
         loader = GenericLoader(YoutubeAudioLoader([youtube_url], YOUTUBE_DIR), OpenAIWhisperParser())
         docs = loader.load()
         text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
         splits = text_splitter.split_documents(docs)
         vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
-        llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
+        llm = ChatOpenAI(model_name = MODEL_NAME, temperature = 0)
         qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
-    print(222)
+    else:
+        print(222)
     result = qa_chain({"query": prompt})
     shutil.rmtree(YOUTUBE_DIR)
     #shutil.rmtree(CHROMA_DIR)
@@ -52,7 +54,7 @@ description = """The app demonstrates how to use a <strong>Large Language Model<
 
 gr.close_all()
 demo = gr.Interface(fn=invoke,
-                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Textbox(label = "Prompt", value = "GPT-4 human level performance", lines = 1), gr.Radio(["Yes", "No"], label="Process Video")],
+                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Radio(["Yes", "No"], label="Process Video", value = "Yes"), gr.Textbox(label = "Prompt", value = "GPT-4 human level performance", lines = 1)],
                     outputs = [gr.Textbox(label = "Completion", lines = 1)],
                     title = "Generative AI - LLM & RAG",
                     description = description)
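
A note on the new `if (process_video)` guard: gr.Radio hands the callback the selected choice itself, and with choices ["Yes", "No"] that is a non-empty string either way, so both selections are truthy and the else branch is unreachable. Below is a minimal sketch of an explicit comparison against the choice strings defined in the interface; it is not part of the commit.

# gr.Radio(["Yes", "No"], ...) passes the selected *string* to the callback,
# and any non-empty string is truthy, so `if (process_video)` takes the
# first branch for both choices. Comparing against the choice string makes
# the branch match the radio label.
process_video = (process_video == "Yes")   # True only when "Yes" is selected
if process_video:
    pass  # transcribe, split, embed, and persist as in the diff above
else:
    pass  # skip re-processing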
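
Relatedly, when the branch that assigns qa_chain is skipped, `result = qa_chain({"query": prompt})` raises a NameError. One way the else path could go is to reopen the index that an earlier run persisted to CHROMA_DIR. The sketch below is an assumption-laden rewrite, not the committed code: it uses LangChain's `Chroma(persist_directory=..., embedding_function=...)` constructor to reload the collection, mirrors the imports implied by the calls in the diff, and returns `result["result"]` on the guess that the Completion textbox expects the answer string.

# Minimal sketch: define qa_chain on both paths so the query always has a
# chain to run against. Imports mirror the LangChain calls visible in the
# diff above.
import os, shutil
import openai
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

YOUTUBE_DIR = "docs/youtube/"
CHROMA_DIR = "docs/chroma/"
MODEL_NAME = "gpt-4"
# QA_CHAIN_PROMPT is the PromptTemplate defined earlier in app.py; its
# template string is truncated in the diff, so it is not reproduced here.

def invoke(openai_api_key, youtube_url, process_video, prompt):
    openai.api_key = openai_api_key
    if process_video == "Yes":
        # Download, transcribe, split, embed, and persist the index.
        loader = GenericLoader(YoutubeAudioLoader([youtube_url], YOUTUBE_DIR), OpenAIWhisperParser())
        docs = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
        splits = text_splitter.split_documents(docs)
        vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
    else:
        # Reopen the collection persisted by an earlier "Yes" run instead
        # of leaving qa_chain undefined.
        vectordb = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings())
    llm = ChatOpenAI(model_name = MODEL_NAME, temperature = 0)
    qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
    result = qa_chain({"query": prompt})
    if os.path.isdir(YOUTUBE_DIR):
        shutil.rmtree(YOUTUBE_DIR)
    return result["result"]

Keeping Chroma.from_documents for the build path and the plain Chroma(...) constructor for the reload path means a "No" run only pays for retrieval, not for a fresh download and Whisper transcription.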