Nekochu committed on
Commit 2bff321 · verified · 1 Parent(s): 6251f1e

Update tabbed.py

Files changed (1)
  1. tabbed.py +2 -7
tabbed.py CHANGED
@@ -1,7 +1,7 @@
  import gradio as gr
  import yaml
  import spaces
- from huggingface_hub import hf_hub_download, SpaceVariable
+ from huggingface_hub import hf_hub_download
  from huggingface_hub.utils import LocalEntryNotFoundError
  from llama_cpp import Llama

@@ -25,11 +25,6 @@ while True:

  llm = Llama(model_path=fp, **config["llama_cpp"])

- _space_name = os.getenv('HF_SPACE')
- space_name = SpaceVariable("MODEL_REPO_ID").value
- base_url = f"https://huggingface.co/spaces/{space_name}/blob/main/config.yml"
- duplicate_url = f"https://huggingface.co/spaces/{space_name}?duplicate=true"
-
  def user(message, history):
      history = history or []
      # Append the user's message to the conversation history

@@ -124,7 +119,7 @@ with gr.Blocks() as demo:
      with gr.Row():
          with gr.Column():
              gr.Markdown(f"""
- ### This {_space_name} & {space_name} is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) quantized model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]})
+ ### This is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) quantized model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]})

  <details>
  <summary><a href="https://huggingface.co/spaces/Nekochu/Llama-2-13B-novel17-french-GGUF?duplicate=true">Duplicate the Space</a> to skip the queue and run in a private space or to use your own GGUF models, simply update the <a href="https://huggingface.co/spaces/Nekochu/Llama-2-13B-novel17-french-GGUF/blob/main/config.yml">config.yml</a></summary>
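For context (not part of this commit): a minimal sketch of how the URLs computed by the removed lines can be rebuilt without `SpaceVariable`. Hugging Face Spaces expose runtime metadata and user-defined Space variables to the running app as environment variables, so plain `os.getenv` suffices; `SPACE_ID` holds the Space's `owner/space-name`, and the fallback value below is hypothetical, only for running the snippet outside a Space.

```python
import os

# Spaces inject runtime metadata as environment variables; SPACE_ID is the
# "owner/space-name" of the running Space. The fallback is hypothetical and
# only used when this snippet runs outside a Space.
space_name = os.getenv("SPACE_ID", "Nekochu/Llama-2-13B-novel17-french-GGUF")

# Rebuild the two URLs the removed code computed.
base_url = f"https://huggingface.co/spaces/{space_name}/blob/main/config.yml"
duplicate_url = f"https://huggingface.co/spaces/{space_name}?duplicate=true"

print(base_url)
print(duplicate_url)
```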