# Import required libraries
import gradio as gr
from transformers import AutoModelForImageClassification, AutoFeatureExtractor
from PIL import Image
import torch

# Load the pre-trained Vision Transformer model and feature extractor
model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224")
extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")

# Define the function to classify the crop using satellite images
def classify_image(image):
    # Preprocess the image using the feature extractor
    inputs = extractor(images=image, return_tensors="pt")

    # Make the prediction using the model
    with torch.no_grad():
        outputs = model(**inputs)

    # Extract the predicted class
    logits = outputs.logits
    predicted_class_idx = logits.argmax(-1).item()

    # Return the predicted class label
    return f"Predicted Class: {model.config.id2label[predicted_class_idx]}"

# Define the chatbot function for interaction
def respond(message, chat_history):
    # Respond to the user's question
    response = f"Received your message: '{message}' but currently, I'm focusing on classifying crops!"
    return response

# Create a Gradio interface for crop classification
iface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Real-Time Crop Classification",
    description="Upload a satellite/mobile image to classify crops.",
)

# Create a Gradio Chatbot for interaction
chatbot = gr.ChatInterface(
    fn=respond,
    title="Agricultural Assistant Chatbot",
    description="Ask questions about crop classification and more."
)

# Combine the crop classifier and chatbot in one interface
with gr.Blocks() as demo:
    gr.Markdown("## Farmovation Chatbot")
    with gr.Tab("Crop Classification"):
        iface.render()
    with gr.Tab("Agriculture Chatbot"):
        chatbot.render()

# Launch the Gradio app in Google Colab
demo.launch()
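
# Note (optional, environment-dependent): in Google Colab the app usually renders
# inline with demo.launch(); if it does not, Gradio's share=True flag can be passed,
# e.g. demo.launch(share=True), to get a temporary public URL instead. This is a
# suggested fallback, not a required part of the script.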