Spaces:
Sleeping
Sleeping
update login
Browse files
app.py
CHANGED
@@ -3,8 +3,10 @@ import torch
|
|
3 |
from huggingface_hub import login
|
4 |
import spaces
|
5 |
import gradio as gr
|
|
|
6 |
|
7 |
-
|
|
|
8 |
|
9 |
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
|
10 |
model = AutoModelForCausalLM.from_pretrained(
|
@@ -69,15 +71,15 @@ def respuesta(
|
|
69 |
yield response
|
70 |
|
71 |
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
|
82 |
-
|
83 |
-
|
|
|
3 |
from huggingface_hub import login
|
4 |
import spaces
|
5 |
import gradio as gr
|
6 |
+
import os
|
7 |
|
8 |
+
token = os.environ.get("HF_TOKEN_READ")
|
9 |
+
login(token)
|
10 |
|
11 |
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
|
12 |
model = AutoModelForCausalLM.from_pretrained(
|
|
|
71 |
yield response
|
72 |
|
73 |
|
74 |
+
demo = gr.ChatInterface(
|
75 |
+
respuesta,
|
76 |
+
additional_inputs=[
|
77 |
+
gr.Textbox(value="Eres un chatbot amigable", label="System message"),
|
78 |
+
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
79 |
+
gr.Slider(minimum=0.1, maximum=4, value=0.7, step=0.1, label="Temperature"),
|
80 |
+
gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
|
81 |
+
]
|
82 |
+
)
|
83 |
|
84 |
+
if __name__ == "__main__":
|
85 |
+
demo.launch()
|