jhangmez commited on
Commit
691de0c
1 Parent(s): 8a71f7a

Se cambió a SipanGPT 0.2 plus

Browse files
Files changed (2) hide show
  1. README.md +3 -2
  2. app.py +4 -5
README.md CHANGED
@@ -9,9 +9,10 @@ app_file: app.py
9
  pinned: false
10
  license: cc-by-4.0
11
  models:
12
- - ussipan/SipanGPT-0.1-Llama-3.2-1B-GGUF
13
  datasets:
14
  - ussipan/sipangpt
15
  ---
16
 
17
- SipánGPT 0.1 Llama 3.2
 
 
9
  pinned: false
10
  license: cc-by-4.0
11
  models:
12
+ - ussipan/SipanGPT-0.2-Llama-3.2-1B-GGUF
13
  datasets:
14
  - ussipan/sipangpt
15
  ---
16
 
17
+ SipánGPT 0.2 Llama 3.2
18
+ Entrenado con 5400 conversaciones.
app.py CHANGED
@@ -15,7 +15,7 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
15
 
16
  # Download model from Huggingface Hub
17
  # Change this to meta-llama or the correct org name from Huggingface Hub
18
- model_id = "ussipan/SipanGPT-0.1-Llama-3.2-1B-GGUF"
19
  tokenizer = AutoTokenizer.from_pretrained(model_id)
20
  model = AutoModelForCausalLM.from_pretrained(
21
  model_id,
@@ -71,7 +71,7 @@ def generate(
71
  # Implementing Gradio 5 features and building a ChatInterface UI yourself
72
  PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center;">
73
  <img src="https://corladlalibertad.org.pe/wp-content/uploads/2024/01/USS.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; margin-bottom: 10px;">
74
- <h1 style="font-size: 28px; margin: 0;">SipánGPT 0.1 Llama 3.2</h1>
75
  <p style="font-size: 8px; margin: 5px 0 0; opacity: 0.65;">
76
  <a href="https://huggingface.co/spaces/ysharma/Llama3-2_with_Gradio-5" target="_blank" style="color: inherit; text-decoration: none;">Forked from @ysharma</a>
77
  </p>
@@ -79,10 +79,9 @@ PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; f
79
  <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">
80
  <a href="https://huggingface.co/datasets/ussipan/sipangpt" target="_blank" style="color: inherit; text-decoration: none;">Ver el dataset aquí</a>
81
  </p>
 
82
  </div>"""
83
 
84
- # <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de 5.4k conversaciones.</p>
85
-
86
  def handle_retry(history, retry_data: gr.RetryData):
87
  new_history = history[:retry_data.index]
88
  previous_prompt = history[retry_data.index]['content']
@@ -106,7 +105,7 @@ def chat_examples_fill(data: gr.SelectData):
106
  with gr.Blocks(theme=gr.themes.Soft(), fill_height=True) as demo:
107
  with gr.Column(elem_id="container", scale=1):
108
  chatbot = gr.Chatbot(
109
- label="SipánGPT 0.1 Llama 3.2",
110
  show_label=False,
111
  type="messages",
112
  scale=1,
 
15
 
16
  # Download model from Huggingface Hub
17
  # Change this to meta-llama or the correct org name from Huggingface Hub
18
+ model_id = "ussipan/SipanGPT-0.2-Llama-3.2-1B-GGUF"
19
  tokenizer = AutoTokenizer.from_pretrained(model_id)
20
  model = AutoModelForCausalLM.from_pretrained(
21
  model_id,
 
71
  # Implementing Gradio 5 features and building a ChatInterface UI yourself
72
  PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center;">
73
  <img src="https://corladlalibertad.org.pe/wp-content/uploads/2024/01/USS.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; margin-bottom: 10px;">
74
+ <h1 style="font-size: 28px; margin: 0;">SipánGPT 0.2 Llama 3.2</h1>
75
  <p style="font-size: 8px; margin: 5px 0 0; opacity: 0.65;">
76
  <a href="https://huggingface.co/spaces/ysharma/Llama3-2_with_Gradio-5" target="_blank" style="color: inherit; text-decoration: none;">Forked from @ysharma</a>
77
  </p>
 
79
  <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">
80
  <a href="https://huggingface.co/datasets/ussipan/sipangpt" target="_blank" style="color: inherit; text-decoration: none;">Ver el dataset aquí</a>
81
  </p>
82
+ <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de 5.4k conversaciones.</p>
83
  </div>"""
84
 
 
 
85
  def handle_retry(history, retry_data: gr.RetryData):
86
  new_history = history[:retry_data.index]
87
  previous_prompt = history[retry_data.index]['content']
 
105
  with gr.Blocks(theme=gr.themes.Soft(), fill_height=True) as demo:
106
  with gr.Column(elem_id="container", scale=1):
107
  chatbot = gr.Chatbot(
108
+ label="SipánGPT 0.2 Llama 3.2",
109
  show_label=False,
110
  type="messages",
111
  scale=1,