Nekochu committed
Commit 36bc0f1 · verified · parent: 83f36f4

Update tabbed.py

Files changed (1)
  1. tabbed.py +4 -4
tabbed.py CHANGED
@@ -122,15 +122,15 @@ with gr.Blocks() as demo:
     ### This is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) quantized model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]})
 
     <details>
-    <summary><a href="https://huggingface.co/spaces/Nekochu/Llama-2-13B-novel17-french-ggml?duplicate=true">Duplicate the Space</a> to skip the queue and run in a private space or to use your own GGML models, simply update the <a href="https://huggingface.co/spaces/Nekochu/Llama-2-13B-novel17-french-ggml/blob/main/config.yml">config.yml</a></summary>
+    <summary><a href="https://huggingface.co/spaces/Nekochu/Llama-2-13B-novel17-french-GGUF?duplicate=true">Duplicate the Space</a> to skip the queue and run in a private space or to use your own GGUF models, simply update the <a href="https://huggingface.co/spaces/Nekochu/Llama-2-13B-novel17-french-GGUF/blob/main/config.yml">config.yml</a></summary>
     <ul>
-    <li>This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM. <a href="https://github.com/OpenAccess-AI-Collective/ggml-webui">[Contribute]</a></li>
+    <li>This Space uses GGUF with GPU support, so it can quickly run larger models on smaller GPUs & VRAM. <a href="https://github.com/OpenAccess-AI-Collective/ggml-webui">[Contribute]</a></li>
     <li>This is running on a smaller, shared GPU, so it may take a few seconds to respond.</li>
     </ul>
     </details>
     """)
     with gr.Tab("Chatbot"):
-        gr.Markdown("# GGML Spaces Chatbot Demo")
+        gr.Markdown("# GGUF Spaces Chatbot Demo")
         chatbot = gr.Chatbot()
         with gr.Row():
             message = gr.Textbox(
@@ -175,7 +175,7 @@
         # )
         stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, roleplay_click_event], queue=False)
     with gr.Tab("Instruct"):
-        gr.Markdown("# GGML Spaces Instruct Demo")
+        gr.Markdown("# GGUF Spaces Instruct Demo")
         instruct_interface.render()
 
 demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860)
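
Not shown in the diff is how `config` gets populated or how the stop button works. Below is a minimal, self-contained sketch of the wiring the diff implies, assuming config.yml is read with PyYAML; only the key paths `config["hub"]["repo_id"]`, `config["hub"]["filename"]`, and `config["queue"]`, plus the `stop.click(..., cancels=...)` idiom, are attested by the diff — all names, values, and the echo handler are hypothetical:

```python
# Sketch only: how tabbed.py plausibly consumes config.yml and cancels
# in-flight generation. Key paths config["hub"]["repo_id"],
# config["hub"]["filename"], and config["queue"] come from the diff;
# everything else here is illustrative.
import yaml
import gradio as gr

with open("config.yml") as f:
    config = yaml.safe_load(f)

# Assumed config.yml shape (hypothetical values):
#
# hub:
#   repo_id: some-user/some-model-GGUF   # hypothetical repo
#   filename: model.Q4_K_M.gguf          # hypothetical quant file
# queue:
#   max_size: 16                         # hypothetical queue setting

with gr.Blocks() as demo:
    gr.Markdown(
        f'### This is the [{config["hub"]["repo_id"]}]'
        f'(https://huggingface.co/{config["hub"]["repo_id"]}) quantized model'
    )
    with gr.Tab("Chatbot"):
        chatbot = gr.Chatbot()
        with gr.Row():
            message = gr.Textbox(label="Message")
        submit = gr.Button("Submit")
        stop = gr.Button("Stop")

        def respond(msg, history):
            # Placeholder standing in for the real GGUF inference call.
            history = (history or []) + [(msg, f"(echo) {msg}")]
            return "", history

        submit_click_event = submit.click(
            fn=respond, inputs=[message, chatbot], outputs=[message, chatbot]
        )
        # Same cancellation idiom as the diff's second hunk: a no-op click
        # whose `cancels` list aborts the listed in-flight events, with
        # queue=False so the stop request never waits behind generations.
        stop.click(fn=None, inputs=None, outputs=None,
                   cancels=[submit_click_event], queue=False)

demo.queue(**config["queue"]).launch(server_name="0.0.0.0", server_port=7860)
```

Note that `stop.click` runs no function of its own; it exists purely to cancel the generation events named in `cancels`, which is why the commit leaves that line untouched while renaming the GGML labels to GGUF.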