from groq import Groq
import os
from PIL import Image
import torch
from torchvision import transforms
import gradio as gr

# Set the GROQ_API_KEY environment variable, or replace the placeholder below with
# your actual Groq API key. Never hard-code a real key in source control.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "your_api_key_here")

# Initialize the Groq client with the API key
client = Groq(api_key=GROQ_API_KEY)


# Placeholder for flood prediction model (open-source PyTorch model)
class FloodPredictionModel:
    def __init__(self):
        # Example: load a pre-trained ImageNet ResNet-18. Replace this with a real
        # flood prediction model (see the optional BinaryFloodModel sketch at the
        # end of this file).
        self.model = torch.hub.load("pytorch/vision:v0.10.0", "resnet18", pretrained=True)
        self.model.eval()

    def predict(self, image):
        # Preprocess the image (convert to RGB so RGBA or grayscale uploads do not
        # break the 3-channel normalization)
        preprocess = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        input_tensor = preprocess(image.convert("RGB")).unsqueeze(0)

        # Make prediction
        with torch.no_grad():
            outputs = self.model(input_tensor)
            _, predicted = outputs.max(1)
        return predicted.item()  # Placeholder: replace with actual prediction logic


# Instantiate the flood prediction model
flood_model = FloodPredictionModel()


# Function to process user input and predict
def flood_prediction(image, user_query):
    # Debugging: check whether the image was received correctly
    print("Received Image:", image)

    # Analyze the image with the flood prediction model
    if image is None:
        prediction_text = "Please upload an image before predicting."
    else:
        try:
            prediction = flood_model.predict(image)
            # Placeholder interpretation: ImageNet class 1 has no real flood meaning
            prediction_text = (
                "Flood risk detected in the area!"
                if prediction == 1
                else "No immediate flood risk detected."
            )
        except Exception as e:
            prediction_text = f"Error in flood prediction: {str(e)}"

    # Debugging: check prediction result
    print("Flood Prediction Result:", prediction_text)

    # Use Groq's API for query-based interaction (the query is optional)
    if not user_query or not user_query.strip():
        ai_response = "No query provided."
    else:
        try:
            chat_completion = client.chat.completions.create(
                messages=[{"role": "user", "content": user_query}],
                model="llama-3.3-70b-versatile",
            )
            ai_response = chat_completion.choices[0].message.content
        except Exception as e:
            ai_response = f"Error with Groq API: {str(e)}"

    # Debugging: check AI response
    print("AI Response:", ai_response)

    return prediction_text, ai_response


# Define the Gradio interface
with gr.Blocks() as flood_app:
    gr.Markdown("## 🌊 Flood Prediction App")
    gr.Markdown(
        """
        Welcome to the Flood Prediction App! This tool analyzes an uploaded image to
        predict potential flood risk in the area. You can also ask a language model
        for further insights.

        ### Instructions:
        1. Upload an image of the area you want to analyze.
        2. Optionally, enter a query (e.g., "What are the risks of flooding in coastal areas?").
        3. Click **Predict Flood Risk** to get the results.
        """
    )

    with gr.Row():
        image_input = gr.Image(label="Upload Image", type="pil")
        user_query = gr.Textbox(label="Your Query (Optional)", placeholder="Ask about flood risks...")

    predict_button = gr.Button("Predict Flood Risk")

    with gr.Row():
        prediction_output = gr.Textbox(label="Flood Prediction")
        ai_response_output = gr.Textbox(label="AI Response")

    predict_button.click(
        flood_prediction,
        inputs=[image_input, user_query],
        outputs=[prediction_output, ai_response_output],
    )

    gr.Markdown(
        """
        ### Output Description:
        - **Flood Prediction**: Indicates whether there is a flood risk based on the uploaded image.
        - **AI Response**: Provides detailed insights or answers based on your query.
        """
    )

# Launch the app (for local testing or Google Colab)
flood_app.launch()
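
# --------------------------------------------------------------------------
# Optional sketch (not part of the running app): swapping the ImageNet
# placeholder for a real binary flood classifier. This is only a suggestion,
# assuming you fine-tune ResNet-18 yourself; "flood_resnet18.pt" is a
# hypothetical weights file you would need to train and save beforehand.
# Uncomment and adapt only if you have such weights.
#
# import torch.nn as nn
#
# class BinaryFloodModel(FloodPredictionModel):
#     def __init__(self, weights_path="flood_resnet18.pt"):
#         super().__init__()
#         # Swap the 1000-class ImageNet head for a 2-class (no-flood / flood) head
#         self.model.fc = nn.Linear(self.model.fc.in_features, 2)
#         # Load your fine-tuned weights (hypothetical file)
#         self.model.load_state_dict(torch.load(weights_path, map_location="cpu"))
#         self.model.eval()
#
# With a model like this, `prediction == 1` in flood_prediction() would genuinely
# mean "flood", matching the interpretation logic already used above.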