bstraehle committed on
Commit
2cf5d84
·
1 Parent(s): 423b214

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -29,6 +29,7 @@ MODEL_NAME = "gpt-4"
29
 
30
  def invoke(openai_api_key, youtube_url, process_video, prompt):
31
  openai.api_key = openai_api_key
 
32
  if (process_video):
33
  loader = GenericLoader(YoutubeAudioLoader([youtube_url], YOUTUBE_DIR), OpenAIWhisperParser())
34
  docs = loader.load()
@@ -48,7 +49,8 @@ description = """<strong>Overview:</strong> The app demonstrates how to use a <s
48
  <strong>Instructions:</strong> Enter an OpenAI API key, YouTube URL, and prompt to perform semantic search, sentiment analysis, summarization,
49
  translation, etc. "Process Video" specifies whether or not to perform speech-to-text processing. To ask multiple questions related to the same video,
50
  typically set it to "True" the first time and then to "False". Note that persistence is not guaranteed in the Hugging Face free tier
51
- (the plan is to migrate to AWS S3).\n\n
 
52
  <strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API
53
  via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech-to-text)
54
  and <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM) foundation models as well as AI-native
@@ -56,7 +58,7 @@ description = """<strong>Overview:</strong> The app demonstrates how to use a <s
56
 
57
  gr.close_all()
58
  demo = gr.Interface(fn=invoke,
59
- inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Radio([True, False], label="Process Video", value = True), gr.Textbox(label = "Prompt", value = "What is GPT-4", lines = 1)],
60
  outputs = [gr.Textbox(label = "Completion", lines = 1)],
61
  title = "Generative AI - LLM & RAG",
62
  description = description)
 
29
 
30
  def invoke(openai_api_key, youtube_url, process_video, prompt):
31
  openai.api_key = openai_api_key
32
+ print(process_video)
33
  if (process_video):
34
  loader = GenericLoader(YoutubeAudioLoader([youtube_url], YOUTUBE_DIR), OpenAIWhisperParser())
35
  docs = loader.load()
 
49
  <strong>Instructions:</strong> Enter an OpenAI API key, YouTube URL, and prompt to perform semantic search, sentiment analysis, summarization,
50
  translation, etc. "Process Video" specifies whether or not to perform speech-to-text processing. To ask multiple questions related to the same video,
51
  typically set it to "True" the first time and then to "False". Note that persistence is not guaranteed in the Hugging Face free tier
52
+ (the plan is to migrate to AWS S3). The example is a 3:12 min. video about GPT-4 and takes about 20 sec. to process. Try different prompts, for example
53
+ "what is gpt-4, answer in german" or "write a poem about gpt-4".\n\n
54
  <strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API
55
  via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech-to-text)
56
  and <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM) foundation models as well as AI-native
 
58
 
59
  gr.close_all()
60
  demo = gr.Interface(fn=invoke,
61
+ inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Radio([True, False], label="Process Video", value = True), gr.Textbox(label = "Prompt", value = "what is gpt-4", lines = 1)],
62
  outputs = [gr.Textbox(label = "Completion", lines = 1)],
63
  title = "Generative AI - LLM & RAG",
64
  description = description)