Reverted to version 0.1
README.md CHANGED

@@ -9,9 +9,9 @@ app_file: app.py
 pinned: false
 license: cc-by-4.0
 models:
-- ussipan/SipanGPT-0.
+- ussipan/SipanGPT-0.1-Llama-3.2-1B-GGUF
 datasets:
 - ussipan/sipangpt
 ---
 
-SipánGPT 0.
+SipánGPT 0.1 Llama 3.2
app.py CHANGED

@@ -15,7 +15,7 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 # Download model from Huggingface Hub
 # Change this to meta-llama or the correct org name from Huggingface Hub
-model_id = "ussipan/SipanGPT-0.
+model_id = "ussipan/SipanGPT-0.1-Llama-3.2-1B-GGUF"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
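For a quick sanity check of the updated model_id outside the Space, a standalone load-and-generate sketch along these lines can be used. The prompt, dtype, device handling, and generation settings below are illustrative assumptions, not values taken from app.py. One caveat: the repository name ends in -GGUF, and if it only ships GGUF files, AutoModelForCausalLM.from_pretrained would need a gguf_file argument (or a GGUF-aware runtime) rather than the plain call shown in the diff.

# Standalone smoke test for the updated checkpoint (illustrative sketch only).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ussipan/SipanGPT-0.1-Llama-3.2-1B-GGUF"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype; the remaining kwargs in app.py are not shown in this hunk
    device_map="auto",
)

# Build a single-turn chat prompt with the model's own chat template.
messages = [{"role": "user", "content": "Hola, ¿qué es SipánGPT?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=128, do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))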
@@ -71,17 +71,17 @@ def generate(
 # Implementing Gradio 5 features and building a ChatInterface UI yourself
 PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center;">
 <img src="https://corladlalibertad.org.pe/wp-content/uploads/2024/01/USS.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; margin-bottom: 10px;">
-<h1 style="font-size: 28px; margin: 0;">SipánGPT 0.
+<h1 style="font-size: 28px; margin: 0;">SipánGPT 0.1 Llama 3.2</h1>
 <p style="font-size: 8px; margin: 5px 0 0; opacity: 0.65;">
 <a href="https://huggingface.co/spaces/ysharma/Llama3-2_with_Gradio-5" target="_blank" style="color: inherit; text-decoration: none;">Forked from @ysharma</a>
 </p>
 <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Este modelo es experimental, puede generar alucinaciones o respuestas incorrectas.</p>
-<p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de 5.4k conversaciones.</p>
 <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">
 <a href="https://huggingface.co/datasets/ussipan/sipangpt" target="_blank" style="color: inherit; text-decoration: none;">Ver el dataset aquí</a>
 </p>
 </div>"""
 
+# <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de 5.4k conversaciones.</p>
 
 def handle_retry(history, retry_data: gr.RetryData):
     new_history = history[:retry_data.index]
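The handle_retry function in the context lines above is the callback for the chatbot's retry event; the line that registers it sits outside this hunk. A minimal sketch of how such a listener is typically wired in Gradio 5 follows (the exact inputs and outputs used by app.py are an assumption):

# Assumed wiring for the retry handler shown above (not part of this diff).
chatbot.retry(handle_retry, inputs=[chatbot], outputs=[chatbot])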
@@ -106,7 +106,7 @@ def chat_examples_fill(data: gr.SelectData):
 with gr.Blocks(theme=gr.themes.Soft(), fill_height=True) as demo:
     with gr.Column(elem_id="container", scale=1):
         chatbot = gr.Chatbot(
-            label="SipánGPT 0.
+            label="SipánGPT 0.1 Llama 3.2",
             show_label=False,
             type="messages",
             scale=1,
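The PLACEHOLDER HTML edited in the second hunk is normally rendered by the same gr.Chatbot component whose constructor is partially shown in the last hunk. A hedged reconstruction of how the visible arguments and the placeholder fit together is sketched below; the placeholder keyword comes from Gradio's Chatbot API, and its presence in app.py is an assumption since that line falls outside the hunk.

# Assumed shape of the full gr.Chatbot call after this commit (sketch, not the verbatim source).
chatbot = gr.Chatbot(
    label="SipánGPT 0.1 Llama 3.2",
    show_label=False,
    type="messages",
    scale=1,
    placeholder=PLACEHOLDER,  # HTML shown while the conversation is still empty
)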