Switched to the new model
README.md CHANGED
@@ -9,10 +9,11 @@ app_file: app.py
 pinned: false
 license: cc-by-4.0
 models:
-- ussipan/SipanGPT-0.
+- ussipan/SipanGPT-0.3-Llama-3.2-1B-GGUF
 datasets:
 - ussipan/sipangpt
+short_description: SipánGPT based on Llama-3.2-1B
 ---
 
-SipánGPT 0.
+SipánGPT 0.3 Llama 3.2
 Entrenado con 5400 conversaciones.
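
For context, the Space front matter that results from this hunk would read roughly as follows. This is reconstructed from the diff only; fields before line 9 (title, sdk, app_file, etc.) are outside the hunk and omitted here.

pinned: false
license: cc-by-4.0
models:
- ussipan/SipanGPT-0.3-Llama-3.2-1B-GGUF
datasets:
- ussipan/sipangpt
short_description: SipánGPT based on Llama-3.2-1B
---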
app.py CHANGED
@@ -22,7 +22,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_id = "ussipan/SipanGPT-0.
+model_id = "ussipan/SipanGPT-0.3-Llama-3.2-1B-GGUF"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
@@ -78,12 +78,12 @@ def generate(
 # Implementing Gradio 5 features and building a ChatInterface UI yourself
 PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center;">
     <img src="https://corladlalibertad.org.pe/wp-content/uploads/2024/01/USS.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; margin-bottom: 10px;">
-    <h1 style="font-size: 28px; margin: 0;">SipánGPT 0.
+    <h1 style="font-size: 28px; margin: 0;">SipánGPT 0.3 Llama 3.2</h1>
     <p style="font-size: 8px; margin: 5px 0 0; opacity: 0.65;">
         <a href="https://huggingface.co/spaces/ysharma/Llama3-2_with_Gradio-5" target="_blank" style="color: inherit; text-decoration: none;">Forked from @ysharma</a>
     </p>
     <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Este modelo es experimental, puede generar alucinaciones o respuestas incorrectas.</p>
-    <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de
+    <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de 50k conversaciones.</p>
     <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">
         <a href="https://huggingface.co/datasets/ussipan/sipangpt" target="_blank" style="color: inherit; text-decoration: none;">Ver el dataset aquí</a>
     </p>
@@ -212,7 +212,7 @@ theme = SipanGPTTheme()
 with gr.Blocks(theme=theme, fill_height=True) as demo:
     with gr.Column(elem_id="container", scale=1):
         chatbot = gr.Chatbot(
-            label="SipánGPT 0.
+            label="SipánGPT 0.3 Llama 3.2",
             show_label=False,
             type="messages",
             scale=1,
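
For reference, a minimal standalone sketch of the loading pattern this commit updates, assuming the ussipan/SipanGPT-0.3-Llama-3.2-1B-GGUF repo can be loaded directly with transformers. The torch_dtype, the example prompt, and the generation settings are illustrative assumptions, not values taken from the Space's app.py (the diff truncates the remaining from_pretrained kwargs).

# Minimal sketch, not the Space's full app.py: load the updated model id
# and run a single generation.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ussipan/SipanGPT-0.3-Llama-3.2-1B-GGUF"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Note: if the repo only ships .gguf weight files, from_pretrained would
# additionally need a gguf_file="..." argument pointing at one of them.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumption; not shown in the diff
).to(device)

# Illustrative prompt, formatted with the tokenizer's chat template.
messages = [{"role": "user", "content": "Hola, ¿qué es SipánGPT?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(device)

with torch.no_grad():
    output = model.generate(
        input_ids, max_new_tokens=128, do_sample=True, temperature=0.7
    )

# Decode only the newly generated tokens.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))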