import os
import random

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from transformers import TextStreamer
from unsloth import FastVisionModel

# Use the GPU when available; the 4-bit quantized model effectively requires CUDA,
# and the inputs below must live on the same device as the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def set_seed(seed_value=42):
    """Fix all random seeds so generation is reproducible."""
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed_value)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(42)

# Load the fine-tuned Llama 3.2 11B Vision model in 4-bit precision via Unsloth.
model, tokenizer = FastVisionModel.from_pretrained(
    "0llheaven/llama-3.2-11B-Vision-Instruct-Finetune",
    load_in_4bit=True,
    use_gradient_checkpointing="unsloth",
)
# FastVisionModel.for_inference(model)

instruction = "You are an expert radiographer. Describe accurately what you see in this image."

def predict_radiology_description(image, temperature, use_top_p, top_p_value, use_min_p, min_p_value):
    try:
        set_seed(42)

        # Build the chat prompt: an image placeholder followed by the text instruction.
        messages = [
            {"role": "user", "content": [
                {"type": "image"},
                {"type": "text", "text": instruction},
            ]}
        ]
        input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
        inputs = tokenizer(
            image,
            input_text,
            add_special_tokens=False,
            return_tensors="pt",
        ).to(device)

        # Stream tokens to the console while also collecting the full output.
        text_streamer = TextStreamer(tokenizer, skip_prompt=True)
        generate_kwargs = {
            "max_new_tokens": 512,
            "use_cache": True,
            "temperature": temperature,
        }

        # Only pass the optional sampling parameters when the user enables them.
        if use_top_p:
            generate_kwargs["top_p"] = top_p_value
        if use_min_p:
            generate_kwargs["min_p"] = min_p_value

        output_ids = model.generate(
            **inputs,
            streamer=text_streamer,
            **generate_kwargs,
        )

        generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        return generated_text.replace("assistant", "\n\nassistant").strip()

    except Exception as e:
        return f"Error: {str(e)}"

with gr.Blocks() as interface:
    gr.Markdown("