Cloud110702 committed on
Commit
a7e4302
·
verified ·
1 Parent(s): 640f22b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ from fastapi import FastAPI, File, UploadFile
3
+ from fastapi.middleware.cors import CORSMiddleware
4
+ import numpy as np
5
+ import tensorflow as tf
6
+ from tensorflow.lite.python.interpreter import Interpreter
7
+ import os
8
+ import google.generativeai as genai
9
+ import uvicorn
10
+ from typing import Optional
11
+ from pydantic import BaseModel
12
+
13
# Create the FastAPI application instance.
app = FastAPI()

# Add CORS middleware so browser front-ends hosted on other origins can
# call this API.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True
# is wide open — tighten to the real front-end origin(s) for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
23
+
24
# Load the TensorFlow Lite model from the working directory and allocate
# its tensors once at startup; the interpreter is shared by all requests.
# NOTE(review): a tflite Interpreter is not thread-safe — confirm the
# deployment serves requests to it one at a time.
interpreter = Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()

# Get input and output tensor metadata (index, shape, dtype) used by the
# /predict endpoint to feed and read the model.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Class labels, in the same order as the model's output logits.
data_cat = ['disposable cups', 'paper', 'plastic bottle']
# Model input resolution; uploads are resized to this before inference.
img_height, img_width = 224, 224
35
+
36
# Configure the Gemini API client.
# SECURITY FIX: the original code embedded a real API key as the
# os.getenv() fallback. A key committed to a repository is compromised
# and must be rotated; never ship a secret as a default value. The key
# is now read from the environment only, and startup fails fast with a
# clear message if it is missing.
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
if not GEMINI_API_KEY:
    raise RuntimeError(
        "GEMINI_API_KEY environment variable is not set; "
        "export it before starting the server."
    )
genai.configure(api_key=GEMINI_API_KEY)

# Gemini model used by /predict to generate repurposing suggestions.
gemini_model = genai.GenerativeModel('gemini-pro')
42
+
43
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify an uploaded waste image and suggest how to repurpose it.

    Runs the shared TFLite classifier on the image, then asks Gemini for
    three eco-friendly repurposing recommendations for the predicted
    waste category.

    Returns a JSON object with keys "class", "confidence" (percent), and
    "insights"; on any failure, a JSON object with a single "error" key.
    NOTE(review): errors are returned with HTTP 200 — consider raising
    HTTPException so clients can rely on status codes.
    """
    try:
        contents = await file.read()

        # Preprocess the image: decode to an RGB tensor and resize to
        # the model's expected input resolution.
        img = tf.image.decode_image(contents, channels=3)
        img = tf.image.resize(img, [img_height, img_width])
        # Add a batch dimension and cast to float32 for the interpreter.
        # NOTE(review): no pixel normalization is applied — assumes the
        # model was trained on raw 0-255 values; confirm against training.
        img_bat = np.expand_dims(img, 0).astype(np.float32)

        # Set input tensor on the shared interpreter.
        # NOTE(review): set_tensor/invoke/get_tensor on a shared
        # Interpreter is not safe under concurrent requests — confirm
        # single-worker deployment or add a lock.
        interpreter.set_tensor(input_details[0]['index'], img_bat)

        # Run inference
        interpreter.invoke()

        # Read the output and pick the highest-scoring category.
        output_data = interpreter.get_tensor(output_details[0]['index'])
        predicted_class = data_cat[np.argmax(output_data)]
        # Assumes the output row is a probability distribution (softmax
        # in the exported model); otherwise this is not a true
        # percentage — confirm with the model's final layer.
        confidence = float(np.max(output_data) * 100)

        # Generate sustainability insights with Gemini API
        prompt = f"""
        You are a sustainability-focused AI. Analyze the {predicted_class} (solid dry waste)
        and generate the top three innovative, eco-friendly recommendations for repurposing it.
        Each recommendation should:
        - Provide a title
        - Be practical and easy to implement
        - Be environmentally beneficial
        - Include a one or two-sentence explanation
        Format each recommendation with a clear title followed by the explanation on a new line.
        """

        try:
            response = gemini_model.generate_content(prompt)
            insights = response.text.strip()
        except Exception as e:
            # Degrade gracefully: the classification result is still
            # returned even if the Gemini call fails.
            insights = f"Error generating insights: {str(e)}"

        return {
            "class": predicted_class,
            "confidence": confidence,
            "insights": insights
        }

    except Exception as e:
        # Broad catch: any failure (undecodable image, interpreter
        # error) is reported as JSON instead of a raw 500 traceback.
        return {"error": str(e)}
90
+
91
# Run a development server when executed directly. Port 7860 is the
# HuggingFace Spaces convention; 0.0.0.0 binds all interfaces so the
# container's port mapping works.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)