File size: 672 Bytes
b70859f
 
 
d17e7da
 
104f494
b70859f
d17e7da
 
b70859f
 
65c4a45
d17e7da
65c4a45
b70859f
d17e7da
104f494
d17e7da
 
f2475e8
57a7522
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
#!/usr/bin/env python
# Run with `python app.py` (or `gradio app.py` for hot reload).

import gradio as gr

from chat import iface_chat
from prompt import iface_prompt

# Top-level layout: a markdown header followed by the two playground modes
# in tabs. `iface` stays module-level so the hosting platform can find it.
with gr.Blocks() as iface:
    gr.Markdown(
        """# Petals playground
        **Let's play with prompts and inference settings for various big LLM models running on Petals!**

        This space uses websocket API of [chat.petals.dev](http://chat.petals.dev). Health status of Petals network [lives here](http://health.petals.dev)."""
    )

    tab_interfaces = [iface_prompt, iface_chat]
    tab_labels = ["Prompt mode", "Chat mode"]
    gr.TabbedInterface(tab_interfaces, tab_labels)

# Streaming (generator) outputs require the queue to be enabled.
iface.queue(concurrency_count=5, max_size=50)
iface.launch(show_error=True)