update appp
app.py
CHANGED
@@ -6,55 +6,69 @@ from PIL import Image
  model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

- # Define the BMI classes
- …
- # Define the …
-     "underweight": …
-     "normal weight": …
-     "overweight": …
-     "obesity": …
  }

- def predict_bmi(image):
-     # Prepare …
-     …
-     bmi_prediction = …
-     height_in_inches = 75  # Example height; replace with actual input or extraction
      predicted_weight = calculate_weight(bmi_prediction, height_in_inches)

      # Create the JSON output
      result = {
-         "weightCategory": f"{predicted_bmi_class} - {…
          "bmiPrediction": f"{bmi_prediction:.2f}",
          "height": str(height_in_inches),
          "predictedWeight": f"{predicted_weight:.2f} lbs"
@@ -62,27 +76,33 @@ def predict_bmi(image):

      return result

- def …
-     """Return the …
      category_ranges = bmi_ranges.get(weight_category.lower())
      for range_label, (low, high, mid) in category_ranges.items():
-         …

  def calculate_weight(bmi, height_in_inches):
      """Calculate the weight from BMI and height (in inches)."""
-     height_in_meters = height_in_inches * 0.0254
-     weight_kg = bmi * (height_in_meters ** 2)
-     weight_lbs = weight_kg * 2.20462
      return weight_lbs

- # Create Gradio interface with updated components
  interface = gr.Interface(
      fn=predict_bmi,
-     inputs=…
      outputs="json",
      title="BMI Prediction",
-     description="Upload an image to predict BMI category …
  )

- interface.launch(share=True)
@@ -6,55 +6,69 @@ from PIL import Image
  model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

+ # Define the broad BMI classes for Model 1
+ bmi_classes_model1 = [
+     "underweight (x < 18.5 BMI)",
+     "normal weight (18.5 < x < 25 BMI)",
+     "overweight (25 BMI < x < 30)",
+     "obesity (x > 30 BMI)"
+ ]

+ # Define the finer BMI ranges for Model 2
+ bmi_ranges_model2 = {
+     "underweight (x < 18.5 bmi)": [
+         "BMI less than 16.0",
+         "BMI between 16.0 and 16.99",
+         "BMI between 17.0 and 18.49"
+     ],
+     "normal weight (18.5 < x < 25 bmi)": [
+         "BMI between 18.5 and 20.4",
+         "BMI between 20.5 and 22.4",
+         "BMI between 22.5 and 24.9"
+     ],
+     "overweight (25 bmi < x < 30)": [
+         "BMI between 25.0 and 26.9",
+         "BMI between 27.0 and 28.9",
+         "BMI between 29.0 and 29.9"
+     ],
+     "obesity (x > 30 bmi)": [
+         "BMI between 30.0 and 34.9",
+         "BMI between 35.0 and 39.9",
+         "BMI 40.0 and above"
+     ]
  }

+ def predict_bmi(image, height_in_inches):
+     # Prepare inputs for Model 1
+     inputs_model1 = processor(text=bmi_classes_model1, images=image, return_tensors="pt", padding=True)
+     outputs_model1 = model(**inputs_model1)
+     probs_model1 = outputs_model1.logits_per_image.softmax(dim=1)
+
+     # Get the broad category prediction from Model 1
+     max_prob_index_model1 = probs_model1.argmax().item()
+     predicted_bmi_class = bmi_classes_model1[max_prob_index_model1]
+
+     # Select class names for Model 2 based on Model 1's prediction
+     model2_classes = bmi_ranges_model2[predicted_bmi_class.lower()]
+
+     # Prepare inputs for Model 2
+     inputs_model2 = processor(text=model2_classes, images=image, return_tensors="pt", padding=True)
+     outputs_model2 = model(**inputs_model2)
+     probs_model2 = outputs_model2.logits_per_image.softmax(dim=1)

+     # Get the finer range prediction from Model 2
+     max_prob_index_model2 = probs_model2.argmax().item()
+     finer_bmi_range = model2_classes[max_prob_index_model2]

+     # Determine the BMI prediction based on the range
+     bmi_prediction = get_adjusted_bmi(predicted_bmi_class, finer_bmi_range)

+     # Calculate weight using user-provided height
      predicted_weight = calculate_weight(bmi_prediction, height_in_inches)

      # Create the JSON output
      result = {
+         "weightCategory": f"{predicted_bmi_class} - {finer_bmi_range}",
          "bmiPrediction": f"{bmi_prediction:.2f}",
          "height": str(height_in_inches),
          "predictedWeight": f"{predicted_weight:.2f} lbs"
@@ -62,27 +76,33 @@ def predict_bmi(image):

      return result

+ def get_adjusted_bmi(weight_category, finer_range):
+     """Return the appropriate BMI value for the given finer range within the weight category."""
      category_ranges = bmi_ranges.get(weight_category.lower())
      for range_label, (low, high, mid) in category_ranges.items():
+         if "BMI <" in range_label or "BMI ≥" in range_label:
+             return high if "BMI <" in range_label else low
+         elif range_label == finer_range:
+             return mid
+     return None

  def calculate_weight(bmi, height_in_inches):
      """Calculate the weight from BMI and height (in inches)."""
+     height_in_meters = height_in_inches * 0.0254
+     weight_kg = bmi * (height_in_meters ** 2)
+     weight_lbs = weight_kg * 2.20462
      return weight_lbs

+ # Create Gradio interface with updated input components
  interface = gr.Interface(
      fn=predict_bmi,
+     inputs=[
+         gr.Image(type="pil"),
+         gr.Number(label="Height in inches", default=75)  # Allow user to enter height
+     ],
      outputs="json",
      title="BMI Prediction",
+     description="Upload an image and enter your height to predict BMI category and receive a detailed prediction."
  )

+ interface.launch()
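One caveat in the new version: get_adjusted_bmi still looks up bmi_ranges (the old dict of (low, high, mid) tuples) rather than bmi_ranges_model2, whose values are now plain lists of label strings, so as committed it would fail on the undefined name and the tuple unpacking. A minimal sketch of a replacement that works with the new list-based labels, assuming they keep the "BMI between X and Y" / "less than" / "and above" wording defined above (illustrative rewrite, not the committed code):

import re

def get_adjusted_bmi(weight_category, finer_range):
    """Return a representative BMI for a finer range label such as
    'BMI between 18.5 and 20.4', 'BMI less than 16.0', or 'BMI 40.0 and above'."""
    # Pull the numeric bounds out of the label text.
    numbers = [float(n) for n in re.findall(r"\d+(?:\.\d+)?", finer_range)]
    if not numbers:
        return None
    if "less than" in finer_range:
        return numbers[0]          # open-ended low range: use the upper bound
    if "and above" in finer_range:
        return numbers[0]          # open-ended high range: use the lower bound
    low, high = numbers[0], numbers[1]
    return (low + high) / 2.0      # bounded range: use the midpoint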
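For reference, calculate_weight applies weight = BMI × height_m², then converts kg to lb. With the default height of 75 inches and an illustrative predicted BMI of 22.0:

height_m = 75 * 0.0254            # 1.905 m
weight_kg = 22.0 * height_m ** 2  # ≈ 79.84 kg
weight_lb = weight_kg * 2.20462   # ≈ 176.0 lb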
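For quick local testing outside the Gradio UI, the prediction function can be called directly (with the get_adjusted_bmi fix sketched above); "person.jpg" is a placeholder path, not a file that ships with the Space. Note also that recent Gradio releases expect gr.Number(value=...) rather than default=, so that argument may need adjusting depending on the pinned Gradio version.

from PIL import Image

# Placeholder image path, for illustration only.
img = Image.open("person.jpg").convert("RGB")
print(predict_bmi(img, height_in_inches=75))
# e.g. {"weightCategory": "...", "bmiPrediction": "...", "height": "75", "predictedWeight": "... lbs"}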