# sipangpt-test / app.py
import os
from threading import Thread
from typing import Iterator
import gradio as gr
# import spaces  # uncomment when running with Hugging Face ZeroGPU (@spaces.GPU)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
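# Generation budgets: the two caps below bound output length; the input cap is env-overridable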
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
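# Detected device is informational only; device_map="auto" below handles actual placement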
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Download the model from the Hugging Face Hub
# (swap model_id for another repo, e.g. a meta-llama checkpoint, if needed)
model_id = "ussipan/SipanGPT-0.1-Llama-3.2-1B-GGUF"
tokenizer = AutoTokenizer.from_pretrained(model_id)
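# bfloat16 weights halve memory versus float32; device_map="auto" lets Accelerate place layers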
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
model.eval()
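# eval() disables dropout and other training-time behavior for inference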
# Main Gradio inference function
def generate(
    message: str,
    chat_history: list[dict],
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[tuple[str, list[dict]]]:
    # Drop Gradio's "metadata" key so the history matches the chat-template message schema
    conversation = [{k: v for k, v in d.items() if k != "metadata"} for d in chat_history]
    conversation.append({"role": "user", "content": message})
    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        # Keep only the most recent tokens so the prompt fits the context budget
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed the conversation input because it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        repetition_penalty=repetition_penalty,
    )
    # Run generation in a background thread; the streamer yields decoded text as it is produced
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
conversation.append({"role": "assistant", "content": ""})
outputs = []
for text in streamer:
outputs.append(text)
bot_response = "".join(outputs)
conversation[-1]['content'] = bot_response
yield "", conversation
# Build the chat UI manually with Gradio 5 features instead of using gr.ChatInterface
PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<img src="https://corladlalibertad.org.pe/wp-content/uploads/2024/01/USS.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; margin-bottom: 10px;">
<h1 style="font-size: 28px; margin: 0;">SipánGPT 0.1 Llama 3.2</h1>
<p style="font-size: 8px; margin: 5px 0 0; opacity: 0.65;">
<a href="https://huggingface.co/spaces/ysharma/Llama3-2_with_Gradio-5" target="_blank" style="color: inherit; text-decoration: none;">Forked from @ysharma</a>
</p>
<p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Este modelo es experimental, puede generar alucinaciones o respuestas incorrectas.</p>
<p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">
<a href="https://huggingface.co/datasets/ussipan/sipangpt" target="_blank" style="color: inherit; text-decoration: none;">Ver el dataset aquí</a>
</p>
</div>"""
# <p style="font-size: 12px; margin: 5px 0 0; opacity: 0.9;">Entrenado con un dataset de 5.4k conversaciones.</p>
def handle_retry(history, retry_data: gr.RetryData):
    # Re-run generation from the retried user message, discarding everything after it
    new_history = history[:retry_data.index]
    previous_prompt = history[retry_data.index]["content"]
    yield from generate(previous_prompt, chat_history=new_history, max_new_tokens=1024, temperature=0.6, top_p=0.9, top_k=50, repetition_penalty=1.2)
def handle_like(data: gr.LikeData):
    if data.liked:
        print("You upvoted this response: ", data.value)
    else:
        print("You downvoted this response: ", data.value)
def handle_undo(history, undo_data: gr.UndoData):
    # Remove the undone exchange and return the prompt to the textbox for editing
    chatbot = history[:undo_data.index]
    prompt = history[undo_data.index]["content"]
    return chatbot, prompt
def chat_examples_fill(data: gr.SelectData):
    # Start a fresh conversation from the selected suggestion chip
    yield from generate(data.value["text"], chat_history=[], max_new_tokens=1024, temperature=0.6, top_p=0.9, top_k=50, repetition_penalty=1.2)
with gr.Blocks(theme=gr.themes.Soft(), fill_height=True) as demo:
    with gr.Column(elem_id="container", scale=1):
        chatbot = gr.Chatbot(
            label="SipánGPT 0.1 Llama 3.2",
            show_label=False,
            type="messages",
            scale=1,
            suggestions=[
                {"text": "Tell me about the university's student regulations"},
                {"text": "What scholarships does the university offer"},
            ],
            placeholder=PLACEHOLDER,
        )
        msg = gr.Textbox(submit_btn=True, show_label=False)
        with gr.Accordion("Additional inputs", open=False):
            max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
            temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
            top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
            top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
            repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
        # Wire up events: submit generates a reply; retry/like/undo/suggestion map to the handlers above
        msg.submit(generate, [msg, chatbot, max_new_tokens, temperature, top_p, top_k, repetition_penalty], [msg, chatbot])
        chatbot.retry(handle_retry, chatbot, [msg, chatbot])
        chatbot.like(handle_like, None, None)
        chatbot.undo(handle_undo, chatbot, [chatbot, msg])
        chatbot.suggestion_select(chat_examples_fill, None, [msg, chatbot])
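# launch() starts a local server (default http://127.0.0.1:7860); a hosted Space exposes it automatically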
demo.launch()