bstraehle committed on
Commit b88396f · 1 Parent(s): 2d2ef28

Update app.py

Files changed (1)
  1. app.py +13 -7
app.py CHANGED
@@ -47,13 +47,19 @@ def invoke(openai_api_key, youtube_url, process_video, prompt):
     #print(result)
     return result["result"]
 
-description = """<strong>Overview:</strong> The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation
-</strong> (RAG) on external data (YouTube videos in this case, but it could be PDFs, URLs, databases, or other structured/unstructured private/public
-<a href='https://raw.githubusercontent.com/bstraehle/ai-ml-dl/c38b224c196fc984aab6b6cc6bdc666f8f4fbcff/langchain/document-loaders.png'>data sources</a>).\n\n
-<strong>Instructions:</strong> Enter an OpenAI API key, YouTube URL, and prompt to perform semantic search, sentiment analysis, summarization,
-translation, etc. To ask multiple questions related to the same video, set "Process Video" to "True" on the first submission and then to "False".
-The example is a 3:12 min. video about GPT-4 and takes less than 30 sec. to process. Experiment with different prompts, for example "what is gpt-4,
-answer in german" or "write a haiku about gpt-4".\n\n
+description = """<strong>Overview:</strong> The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation
+</strong> (RAG) on external data (YouTube videos in this case, but it could be PDFs, URLs, databases, or other structured/unstructured and private/public
+<a href='https://raw.githubusercontent.com/bstraehle/ai-ml-dl/c38b224c196fc984aab6b6cc6bdc666f8f4fbcff/langchain/document-loaders.png'>data sources</a>).
+\n\n<strong>Instructions:</strong> Enter an OpenAI API key, YouTube URL, and prompt to perform semantic search, sentiment analysis, summarization,
+translation, etc.
+<ol>
+<li>Submit prompt "what is gpt-4". The LLM without RAG does not know the answer.</li>
+<li>Select "Process Video" equals "True" and submit prompt "what is gpt-4". The LLM with RAG knows the answer.</li>
+<li>Experiment with different prompts, for example "what is gpt-4, answer in german" or "write a haiku about gpt-4".</li>
+</ol>
+In a production system processing external data is done in a batch process, while prompting is done in a user interaction.\n\nA sample system could load
+all <a href='https://www.youtube.com/playlist?list=PL2yQDdvlhXf_hIzmfHCdbcXj2hS52oP9r'>AWS re:Invent 2022</a> YouTube videos and enable LLM use cases
+related to them.\n\n
 <strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API
 via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech-to-text)
 and <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM) foundation models as well as AI-native
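
For context on the pipeline the updated description refers to (Whisper speech-to-text, a retrieval index, and GPT-4 via LangChain), below is a minimal sketch of how such a chain could be wired up. It is not the code from app.py in this commit; the function name build_qa_chain, the save directory, the chunk sizes, and the choice of a Chroma vector store are illustrative assumptions.

# Hypothetical sketch of a LangChain RAG pipeline over a YouTube video.
# Not the actual contents of app.py; paths, chunk sizes, and model settings are assumed.
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

def build_qa_chain(openai_api_key, youtube_url):
    # "Process Video" step: download the audio and transcribe it with Whisper.
    loader = GenericLoader(
        YoutubeAudioLoader([youtube_url], "data/youtube"),  # assumed save directory
        OpenAIWhisperParser(api_key=openai_api_key),
    )
    docs = loader.load()
    # Split the transcript and index the chunks in a vector store.
    splits = RecursiveCharacterTextSplitter(
        chunk_size=1500, chunk_overlap=150
    ).split_documents(docs)
    vector_db = Chroma.from_documents(
        documents=splits,
        embedding=OpenAIEmbeddings(openai_api_key=openai_api_key),
    )
    # Answer prompts with GPT-4, grounded in the retrieved transcript chunks.
    llm = ChatOpenAI(model_name="gpt-4", temperature=0, openai_api_key=openai_api_key)
    return RetrievalQA.from_chain_type(llm, retriever=vector_db.as_retriever())

# Example use (placeholder key and URL):
# qa_chain = build_qa_chain("sk-...", "https://www.youtube.com/watch?v=...")
# print(qa_chain({"query": "what is gpt-4"})["result"])

The returned chain exposes its answer under the "result" key, which matches the return result["result"] context line in the hunk above; once the video has been processed and indexed, repeated prompts only hit the retriever and the LLM, which is why the description separates the batch-style processing step from interactive prompting.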