import gradio as gr

# Load a hosted model from the Hugging Face Hub as a callable interface.
# (A sketch of the equivalent raw Inference API request is included at the
# bottom of this file.)
# api = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
api = gr.Interface.load("models/bigscience/bloom")


def complete_with_gpt(text):
    # Use the last 100 characters of the text as context and append the
    # model's continuation. (A 50-character window is kept commented out
    # below as an alternative.)
    # return text[:-50] + api(text[-50:])
    return text[:-100] + api(text[-100:])


with gr.Blocks() as demo:
    with gr.Row():
        textbox = gr.Textbox(placeholder="Type here, then click Generate...", lines=14)
        with gr.Column():
            btn = gr.Button("Generate")
            btn.click(complete_with_gpt, textbox, textbox)
    with gr.Row():
        gr.Markdown("""
# Outline of Exciting AI Developments! 🤖💻🔬

Here is an outline of some of the most exciting recent developments in AI:

## Language Models 🗣️

🏆 BLOOM, BigScience's 176-billion-parameter model, is the largest open-access multilingual language model released to date! 🌸

### Comparison of Large Language Models

| Model Name           | Model Size (parameters) |
| -------------------- | ----------------------- |
| BigScience-tr11-176B | 176 billion             |
| GPT-3                | 175 billion             |
| NVIDIA's Megatron-LM | 8.3 billion             |
| Transformer-XL       | 250 million             |
| XLNet                | 210 million             |

## ChatGPT Datasets 📚

- WebText
- Common Crawl
- BooksCorpus (Toronto Books Corpus)
- English Wikipedia
- OpenWebText

## BigScience Model 🚀

- 📜 Papers:
  1. BLOOM: A 176B-Parameter Open-Access Multilingual Language Model [Paper](https://arxiv.org/abs/2211.05100)
  2. Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism [Paper](https://arxiv.org/abs/1909.08053)
  3. 8-bit Optimizers via Block-wise Quantization [Paper](https://arxiv.org/abs/2110.02861)
  4. Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation [Paper](https://arxiv.org/abs/2108.12409)
  5. [Other papers related to BigScience](https://huggingface.co/models?other=doi:10.57967/hf/0003)
  6. [217 other models optimized for use with BLOOM](https://huggingface.co/models?other=bloom)

- 📚 Datasets:
  1. [Universal Dependencies](https://paperswithcode.com/dataset/universal-dependencies)
  2. [WMT 2014](https://paperswithcode.com/dataset/wmt-2014)
  3. [The Pile](https://paperswithcode.com/dataset/the-pile)
  4. [HumanEval](https://paperswithcode.com/dataset/humaneval)
  5. [FLORES-101](https://paperswithcode.com/dataset/flores-101)
  6. [CrowS-Pairs](https://paperswithcode.com/dataset/crows-pairs)
  7. [WikiLingua](https://paperswithcode.com/dataset/wikilingua)
  8. [MTEB](https://paperswithcode.com/dataset/mteb)
  9. [xP3](https://paperswithcode.com/dataset/xp3)
  10. [DiaBLa](https://paperswithcode.com/dataset/diabla)

## Deep RL ML Strategy 🧠

1. Prepare the language model with supervised fine-tuning on human-written demonstrations
2. Train a reward model: sample multiple generations per prompt from a prompts dataset and have humans rank them
3. Fine-tune with reinforcement learning against the learned reward, with a divergence penalty keeping the policy's output distribution close to the reference model
4. Optimize with Proximal Policy Optimization (PPO)

A runnable toy sketch of steps 1-4 is included at the bottom of this file.

## Variations - Preference Model Pretraining 🤔

1. Use ranking/sentiment datasets (e.g., thumbs up/down signals) as preference distributions
2. Online variants that collect live human feedback
3. OpenAI: InstructGPT, where human labelers write demonstration text for LM training
4. DeepMind: Sparrow and GopherCite, trained with an advantage actor-critic method
5. Reward models trained on human preference feedback
""")

demo.launch()
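

# ---------------------------------------------------------------------------
# Appendix A: what api(...) does under the hood. The gr.Interface.load call
# at the top proxies the hosted Hugging Face Inference API; below is a
# minimal sketch of the equivalent raw request. It assumes the `requests`
# package and an HF_TOKEN environment variable holding a Hugging Face access
# token, and it is illustrative only -- the demo above never calls it.
# ---------------------------------------------------------------------------
import os

import requests


def complete_via_inference_api(prompt: str) -> str:
    # POST the prompt to the hosted BLOOM endpoint. For text-generation
    # models the response is a list like [{"generated_text": "..."}], where
    # the generated text includes the prompt by default.
    resp = requests.post(
        "https://api-inference.huggingface.co/models/bigscience/bloom",
        headers={"Authorization": f"Bearer {os.environ['HF_TOKEN']}"},
        json={"inputs": prompt},
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json()[0]["generated_text"]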
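

# ---------------------------------------------------------------------------
# Appendix B: a minimal, dependency-free sketch of the four-step RLHF recipe
# from the "Deep RL ML Strategy" outline above. The three canned completions,
# the hard-coded preference judgments, and all hyperparameters are invented
# for illustration; real systems fine-tune a full language model and use
# PPO's clipped objective rather than the plain REINFORCE update shown here.
# ---------------------------------------------------------------------------
import math
import random


def rlhf_toy_demo():
    # The "policy" is a softmax over three canned completions for a single
    # prompt -- a deliberately tiny stand-in for a language model.
    completions = ["helpful answer", "rambling answer", "rude answer"]

    def softmax(logits):
        m = max(logits)
        exps = [math.exp(x - m) for x in logits]
        z = sum(exps)
        return [e / z for e in exps]

    # Step 1 (stand-in): supervised fine-tuning yields reference logits that
    # mildly prefer the first completion.
    ref_logits = [0.5, 0.0, 0.0]

    # Step 2: fit a scalar reward per completion from pairwise human
    # preferences via a Bradley-Terry model, i.e. gradient ascent on
    # log sigmoid(r_winner - r_loser).
    preferences = [(0, 1), (0, 2), (1, 2)]  # (winner_index, loser_index)
    rewards = [0.0, 0.0, 0.0]
    for _ in range(200):
        for w, l in preferences:
            p = 1.0 / (1.0 + math.exp(rewards[w] - rewards[l]))
            rewards[w] += 0.05 * p
            rewards[l] -= 0.05 * p

    # Steps 3-4 (simplified): policy-gradient fine-tuning against the learned
    # reward, with a KL-style penalty pulling the policy back toward the
    # reference distribution.
    logits = list(ref_logits)
    kl_coef = 0.2
    for _ in range(500):
        probs, ref_probs = softmax(logits), softmax(ref_logits)
        a = random.choices(range(3), weights=probs)[0]
        shaped = rewards[a] - kl_coef * math.log(probs[a] / ref_probs[a])
        for i in range(3):
            # d log pi(a) / d logit_i for a softmax policy.
            grad = (1.0 if i == a else 0.0) - probs[i]
            logits[i] += 0.1 * shaped * grad

    # The tuned policy should now concentrate on the human-preferred answer.
    return {c: round(p, 3) for c, p in zip(completions, softmax(logits))}


# Example usage (run after stopping the Gradio server):
# print(rlhf_toy_demo())  # probability mass shifts onto "helpful answer"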