import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "bigcode/starcoderbase-1b"
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

# Force CPU mode
device = "cpu"

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)

# Ensure the tokenizer has a pad token set
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # Set pad_token to eos_token

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=HF_TOKEN,
    torch_dtype=torch.float32,  # Use float32 for CPU
    trust_remote_code=True,
).to(device)  # Move model explicitly to CPU


def generate_code(prompt: str, max_tokens: int = 256) -> str:
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        padding=True,
        truncation=True,  # Allow truncation
        max_length=1024,  # Set a maximum length explicitly
    ).to(device)
    output = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        pad_token_id=tokenizer.pad_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
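

# Usage sketch: a minimal, illustrative invocation of generate_code. The
# sample prompt below is an assumption for demonstration, not part of the
# original script; with no sampling arguments passed to model.generate,
# decoding is greedy, so the output is deterministic for a given prompt.
if __name__ == "__main__":
    sample_prompt = "def fibonacci(n: int) -> int:"
    print(generate_code(sample_prompt, max_tokens=64))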