from transformers import AutoProcessor, AutoModelForCausalLM
import torch
from PIL import Image

# Load model and processor
model = AutoModelForCausalLM.from_pretrained("Chesscorner/git-chess-v3")
processor = AutoProcessor.from_pretrained("Chesscorner/git-chess-v3")

# Set up device and move model to it
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Enable mixed precision if on GPU
use_fp16 = device.type == "cuda"
if use_fp16:
    model.half()

# Set generation parameters
gen_kwargs = {"max_length": 100, "num_beams": 2}  # Adjust num_beams if needed

# Prediction function
def predict_step(image):
    # Preprocess the image
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # Match the model's precision when running in fp16
    if use_fp16:
        pixel_values = pixel_values.half()

    # Generate predictions with no_grad for efficiency
    with torch.no_grad():
        output_ids = model.generate(pixel_values=pixel_values, **gen_kwargs)

    # Decode predictions
    preds = processor.batch_decode(output_ids, skip_special_tokens=True)
    return preds[0].strip()
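
For reference, a minimal usage sketch: it opens a local chessboard image and prints the model's prediction. The path "board.png" is a hypothetical placeholder, not part of the original example.

# Example usage ("board.png" is a placeholder path for a chessboard image)
image = Image.open("board.png").convert("RGB")
print(predict_step(image))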