import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig  # BitsAndBytesConfig added for the quantization config
import torch
from huggingface_hub import login
import os
##################################################################
def setup_llama3_auth():
    """Configure authentication for Llama 3."""
    if 'HUGGING_FACE_TOKEN_3' in st.secrets:
        token = st.secrets['HUGGING_FACE_TOKEN_3']
        login(token)
        return True
    else:
        st.error("Llama 3 token not found in Streamlit secrets")
        st.stop()
        return False
class Llama3Demo:
    def __init__(self):
        setup_llama3_auth()
        self.model_name = "meta-llama/Llama-3.2-3B-Instruct"
        self._model = None
        self._tokenizer = None
        # 8-bit quantization config (bnb_4bit_compute_dtype only applies to
        # 4-bit loading, so it is omitted here)
        self.quantization_config = BitsAndBytesConfig(
            load_in_8bit=True
        )
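    # Alternative sketch (not used here; parameter values are assumptions):
    # 4-bit NF4 quantization would roughly halve memory again compared to
    # 8-bit, at some cost in output quality:
    #
    #   self.quantization_config = BitsAndBytesConfig(
    #       load_in_4bit=True,
    #       bnb_4bit_quant_type="nf4",
    #       bnb_4bit_compute_dtype=torch.float16,
    #   )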
    @property
    def model(self):
        # Lazily load the model the first time it is accessed
        if self._model is None:
            try:
                self._model = AutoModelForCausalLM.from_pretrained(
                    self.model_name,
                    torch_dtype=torch.float16,
                    device_map="auto",
                    quantization_config=self.quantization_config,  # current way to configure quantization
                    token=st.secrets['HUGGING_FACE_TOKEN_3']  # 'token' replaces the deprecated 'use_auth_token'
                )
            except Exception as e:
                st.error(f"Error loading the model: {str(e)}")
                raise e
        return self._model
    @property
    def tokenizer(self):
        # Lazily load the tokenizer the first time it is accessed
        if self._tokenizer is None:
            try:
                self._tokenizer = AutoTokenizer.from_pretrained(
                    self.model_name,
                    token=st.secrets['HUGGING_FACE_TOKEN_3']  # 'token' replaces the deprecated 'use_auth_token'
                )
            except Exception as e:
                st.error(f"Error loading the tokenizer: {str(e)}")
                raise e
        return self._tokenizer

##################################################################
    def generate_response(self, prompt: str, max_new_tokens: int = 512) -> str:
        # Simplified hand-rolled chat template; these markers are plain text to
        # the Llama 3 tokenizer, which is why the response is recovered below
        # by splitting on "<|assistant|>"
        formatted_prompt = f"""<|system|>You are a helpful AI assistant.</s>
<|user|>{prompt}</s>
<|assistant|>"""

        inputs = self.tokenizer(formatted_prompt, return_tensors="pt").to(self.model.device)

        # Make sure we have a valid pad_token_id
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token_id = self.tokenizer.eos_token_id

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                pad_token_id=self.tokenizer.pad_token_id  # set pad_token_id explicitly
            )
        torch.cuda.empty_cache()

        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response.split("<|assistant|>")[-1].strip()
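
    # Minimal alternative sketch (not part of the original app; the method name
    # is hypothetical): instead of hand-building the prompt, the tokenizer's
    # built-in chat template can render the model's native prompt format, since
    # meta-llama/Llama-3.2-3B-Instruct ships one.
    def generate_response_with_chat_template(self, prompt: str, max_new_tokens: int = 512) -> str:
        messages = [
            {"role": "system", "content": "You are a helpful AI assistant."},
            {"role": "user", "content": prompt},
        ]
        # apply_chat_template renders the messages and returns input ids directly
        inputs = self.tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt",
        ).to(self.model.device)
        with torch.no_grad():
            outputs = self.model.generate(
                inputs,
                max_new_tokens=max_new_tokens,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                pad_token_id=self.tokenizer.pad_token_id or self.tokenizer.eos_token_id,
            )
        # Decode only the newly generated tokens, skipping the prompt
        return self.tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True).strip()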
##################################################################
def main():
    st.set_page_config(page_title="Llama 3.2 Chat", page_icon="🦙")
    st.title("🦙 Llama 3.2 Chat")

    # Check configuration
    with st.expander("🔧 Status", expanded=True):
        try:
            token_status = setup_llama3_auth()
            st.write("Llama 3 token:", "✅" if token_status else "❌")
            if torch.cuda.is_available():
                st.write("GPU:", torch.cuda.get_device_name(0))
                st.write("GPU memory:", f"{torch.cuda.get_device_properties(0).total_memory/1e9:.1f} GB")
            else:
                st.warning("GPU not available")
        except Exception as e:
            st.error(f"Configuration error: {str(e)}")

    # Initialize the model
    if 'llama' not in st.session_state:
        with st.spinner("Initializing Llama 3.2... this may take a few minutes..."):
            try:
                st.session_state.llama = Llama3Demo()
            except Exception as e:
                st.error(f"Error initializing the model: {str(e)}")
                st.stop()
    # Chat history management
    if 'messages' not in st.session_state:
        st.session_state.messages = []

    # Display history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat interface
    if prompt := st.chat_input("Type your message here"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            try:
                response = st.session_state.llama.generate_response(prompt)
                st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
            except Exception as e:
                st.error(f"Error generating response: {str(e)}")
    # Sidebar with information and controls
    with st.sidebar:
        st.markdown("""
        ### About
        This demo uses Llama 3.2-3B-Instruct, Meta's new model.

        ### Features
        - 3B-parameter model
        - Optimized for dialogue
        - 8-bit quantization
        """)

        if st.button("Clear Chat"):
            st.session_state.messages = []
            st.rerun()  # st.rerun() replaces the deprecated st.experimental_rerun()

if __name__ == "__main__":
    main()
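
# ---------------------------------------------------------------------------
# Usage note (a sketch of the expected setup, not part of the original file;
# the filename app.py is an assumption): run locally with
#
#   streamlit run app.py
#
# and provide the gated-model token in .streamlit/secrets.toml (or as a Space
# secret when deployed on Hugging Face Spaces):
#
#   HUGGING_FACE_TOKEN_3 = "hf_..."
#
# The token must belong to an account that has accepted the license of
# meta-llama/Llama-3.2-3B-Instruct on the Hugging Face Hub.
# ---------------------------------------------------------------------------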