"""Gradio demo: classify the emotion shown in an uploaded image.

Loads a fine-tuned image-classification model from the Hugging Face Hub,
wraps it in an inference pipeline, and serves it behind a simple Gradio UI
with two example images drawn from a public emotion dataset.
"""

import gradio as gr
from transformers import AutoModelForImageClassification, AutoProcessor, pipeline
from datasets import load_dataset
from PIL import Image
import torch

# Load the model and processor from Hugging Face once at startup.
model_name = "Deepri24/my_awesome_emotion_identifier_model"
processor = AutoProcessor.from_pretrained(model_name)
model = AutoModelForImageClassification.from_pretrained(model_name)

# Reuse the already-loaded model/processor instead of passing the hub id,
# which would download and instantiate the model a second time.
classifier = pipeline(
    "image-classification",
    model=model,
    image_processor=processor,
)


def predict(image):
    """Return the predicted emotion label for a single PIL image.

    Args:
        image: A PIL.Image.Image supplied by the Gradio Image component.

    Returns:
        str: The label of the highest-scoring class. The pipeline returns
        results sorted by descending score, so index 0 is the top prediction.
    """
    results = classifier(image)
    return results[0]["label"]


# Load only the first 10 samples of the dataset's train split; we just need
# a couple of example images for the UI, not the full dataset.
ds = load_dataset("FastJobs/Visual_Emotional_Analysis", split="train[:10]")


def get_samples():
    """Return two sample PIL images from the dataset for use as Gradio examples."""
    return [ds["image"][i] for i in (0, 1)]


# Build the Gradio interface: PIL image in, predicted label text out.
interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Emotion Identifier",
    description="Upload an image to identify the emotion.",
    examples=get_samples(),
)

# Launch the web UI (blocking call; module is intended to run as a script).
interface.launch()