import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# Model architecture definition
class SmolLM2Config(PretrainedConfig):
    model_type = "smollm2"

    def __init__(
        self,
        vocab_size=49152,
        hidden_size=576,
        intermediate_size=1536,
        num_hidden_layers=30,
        num_attention_heads=9,
        num_key_value_heads=3,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.041666666666666664,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=0,
        eos_token_id=0,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
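
# Note: the config mirrors a Llama-style SmolLM2 layout: hidden_size=576 with 9 attention
# heads gives 64-dimensional heads, and num_key_value_heads=3 implies grouped-query
# attention (3 query heads per KV head). The simplified model class below does not
# actually consume these attention fields; they are kept for checkpoint compatibility.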
class SmolLM2ForCausalLM(PreTrainedModel):
    """Simplified causal LM: token embeddings feeding a (tied) LM head."""

    config_class = SmolLM2Config
    _no_split_modules = ["LlamaDecoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        if config.tie_word_embeddings:
            self.lm_head.weight = self.embed_tokens.weight

    def forward(self, input_ids, attention_mask=None, labels=None, **kwargs):
        hidden_states = self.embed_tokens(input_ids)
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
        # Return a ModelOutput so that generate() can read .logits
        return CausalLMOutputWithPast(loss=loss, logits=logits)

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        return {"input_ids": input_ids}
# Register the model architecture
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("smollm2", SmolLM2Config)
AutoModelForCausalLM.register(SmolLM2Config, SmolLM2ForCausalLM)
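
# Registering the custom config/model pair lets AutoConfig and AutoModelForCausalLM
# resolve the "smollm2" model_type in the Hub checkpoint below, instead of raising an
# unknown-architecture error when from_pretrained is called.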
# Load model and tokenizer
model_id = "jatingocodeo/SmolLM2"

def load_model():
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # Ensure the tokenizer has the necessary special tokens
        special_tokens = {
            'pad_token': '[PAD]',
            'eos_token': '</s>',
            'bos_token': '<s>'
        }
        tokenizer.add_special_tokens(special_tokens)

        # Load model without device_map; use float16 only when a GPU is available,
        # since half-precision inference is poorly supported on CPU
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.float16 if device.type == "cuda" else torch.float32,
            pad_token_id=tokenizer.pad_token_id
        )

        # Move model to the target device manually
        model = model.to(device)

        # Resize token embeddings in case add_special_tokens grew the vocabulary
        model.resize_token_embeddings(len(tokenizer))

        return model, tokenizer
    except Exception as e:
        print(f"Error loading model: {str(e)}")
        raise
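
# Quick sanity check (run locally; assumes network access to the Hub repo above):
#   _model, _tokenizer = load_model()
#   print(_model.config.model_type, len(_tokenizer))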
def generate_text(prompt, max_length=100, temperature=0.7, top_k=50):
    try:
        # Load model and tokenizer once, caching them on the function for subsequent calls
        if not hasattr(generate_text, "model"):
            generate_text.model, generate_text.tokenizer = load_model()

        # Ensure the prompt is not empty
        if not prompt.strip():
            return "Please enter a prompt."

        # Gradio sliders may deliver floats; generation arguments expect integers
        max_length = int(max_length)
        top_k = int(top_k)

        # Add BOS token if needed
        if not prompt.startswith(generate_text.tokenizer.bos_token):
            prompt = generate_text.tokenizer.bos_token + prompt

        # Encode the prompt
        input_ids = generate_text.tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=2048)
        input_ids = input_ids.to(generate_text.model.device)

        # Generate text
        with torch.no_grad():
            output_ids = generate_text.model.generate(
                input_ids,
                max_length=min(max_length + len(input_ids[0]), 2048),  # respect the model's context window
                temperature=temperature,
                top_k=top_k,
                do_sample=True,
                pad_token_id=generate_text.tokenizer.pad_token_id,
                eos_token_id=generate_text.tokenizer.eos_token_id,
                num_return_sequences=1
            )

        # Decode and return the generated text
        generated_text = generate_text.tokenizer.decode(output_ids[0], skip_special_tokens=True)
        return generated_text.strip()
    except Exception as e:
        print(f"Error during generation: {str(e)}")
        return f"An error occurred: {str(e)}"
# Create Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", lines=2),
        gr.Slider(minimum=10, maximum=200, value=100, step=1, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Top K"),
    ],
    outputs=gr.Textbox(label="Generated Text", lines=5),
    title="SmolLM2 Text Generator",
    description="""Generate text using the fine-tuned SmolLM2 model.
    - Max Length: Controls the length of generated text
    - Temperature: Controls randomness (higher = more creative)
    - Top K: Controls diversity of word choices""",
    examples=[
        ["Once upon a time", 100, 0.7, 50],
        ["The quick brown fox", 150, 0.8, 40],
        ["In a galaxy far far away", 200, 0.9, 30],
    ],
    allow_flagging="never"
)
if __name__ == "__main__":
    iface.launch()