import gradio as gr
from PIL import Image
import base64
import json
import requests
from io import BytesIO
import os
import time
from openai import OpenAI
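# Gradio app that estimates a BMI category, BMI range, and weight range from a photo.
# It sends the image to the hosted google/siglip-so400m-patch14-384 zero-shot classifier
# via the Hugging Face Inference API, and falls back to an OpenAI chat model to narrow
# the BMI range for the underweight and obesity categories.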
# Constants
API_URL = "https://api-inference.huggingface.co/models/google/siglip-so400m-patch14-384"
# Fetch the Hugging Face API token from environment variables
HF_API_TOKEN = os.getenv("HF_API_TOKEN")
if HF_API_TOKEN is None:
raise ValueError(
"Hugging Face API token is not set. Please add it as an environment variable."
)
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
# BMI model classes
model1_classes = [
"underweight (x < 18.5 BMI)",
"normal weight (18.5 < x < 25 BMI)",
"overweight (25 BMI < x < 30)",
"obesity (x > 30 BMI)",
]
model2_classes = ["MALE", "FEMALE"]
# Define the default BMI classes and ranges
bmi_classes_ranges = {
"underweight (x < 18.5 bmi)": [16.0, 18.5],
"normal weight (18.5 < x < 25 bmi)": [18.5, 25],
"overweight (25 bmi < x < 30)": [25, 30],
"obesity (x > 30 bmi)": [30, 40],
}
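# Note: the keys above are the lowercased model1_classes labels; predict_bmi lowercases
# the predicted label before looking up its range here.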
# Function to convert image to base64
def encode_image_to_base64(image):
    buffered = BytesIO()
    # Convert to RGB first so images with an alpha channel (e.g. PNG uploads) can be saved as JPEG
    image.convert("RGB").save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
return img_str
# Function to call the Hugging Face API
def query(data, max_retries=5, wait_time=10):
payload = {"parameters": data["parameters"], "inputs": data["image_base64"]}
for attempt in range(max_retries):
response = requests.post(API_URL, headers=headers, json=payload)
print("Raw response from API:", response, response.text, headers)
# Check if the model is still loading
if response.status_code == 503:
print(f"Model is still loading. Retrying in {wait_time} seconds...")
time.sleep(wait_time) # Wait before retrying
continue
try:
return response.json() # Ensure we are parsing JSON response
except ValueError as e:
print(f"Error parsing response as JSON: {e}")
return None
raise ValueError("Failed to get a valid response after multiple retries.")
# Single-request variant of query() without the 503 retry loop (not currently used by predict_bmi)
def query2(data):
payload = {"parameters": data["parameters"], "inputs": data["image_base64"]}
response = requests.post(API_URL, headers=headers, json=payload)
# Print the raw response to inspect it
print("Raw response from API:", response,response.text,headers)
try:
return response.json() # Ensure we are parsing JSON response
except ValueError as e:
print(f"Error parsing response as JSON: {e}")
return None
def get_bmi_classes_range(predicted_bmi_class, bmi_range):
# Determine the specific BMI range based on the output
lower_bound, upper_bound = bmi_classes_ranges[predicted_bmi_class.lower()]
if "BMI <=" in bmi_range:
value = float(bmi_range.split("<=")[1].strip())
return [lower_bound, value]
elif "BMI >" in bmi_range:
value = float(bmi_range.split(">")[1].strip())
return [value, upper_bound]
    else:
        # Fall back to the full class range if the label format is unexpected
        return [lower_bound, upper_bound]
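# Example: for predicted_bmi_class "normal weight (18.5 < x < 25 BMI)" and bmi_range
# "BMI <= 21.8", the lookup gives [18.5, 25] and the function returns [18.5, 21.8];
# for "BMI > 21.8" it would return [21.8, 25].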
def chatgpt_openai(height="", gender="", bmi_class_model1="", age=""):
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
content = f"""I have a person {gender} who is {age} and classified as {bmi_class_model1}.
Can you suggest a reasonable range of BMI values that typically represent people in this category?
Do not include any explanations, only provide an RFC8259 compliant JSON response following this format without deviation.
Output JSON in this format:
{{
"bmi_range": {{
"min": 10.0,
"max": 18.4
}}
}}"""
try:
client = OpenAI()
response = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a health expert specializing in analyzing patient information, including gender, height, and BMI classification, to provide data-driven health insights based on USA statistics."},
{"role": "user", "content": content},
],
)
except Exception as e:
print(f"Error calling OpenAI API: {e}")
raise ValueError("Failed to connect to OpenAI API")
# Print the entire response to understand its structure
print("Raw response from OpenAI:", response)
# Check if the response has 'choices' and it's a list with at least one element
if not hasattr(response, "choices") or len(response.choices) == 0:
raise ValueError("Invalid response structure from OpenAI API")
# Extract the content from the first choice
message_content = response.choices[0].message.content
print("Response content to be parsed:", message_content)
# Parse the content as JSON
try:
json_object = json.loads(message_content.strip())
except json.JSONDecodeError as e:
print(f"JSON Decode Error: {e}")
raise ValueError("Failed to parse JSON response from OpenAI API")
return json_object
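# Illustrative return value of chatgpt_openai, matching the JSON format requested in its prompt
# (numbers are examples only): {"bmi_range": {"min": 10.0, "max": 18.4}}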
# Alternative prompt using gpt-4o; not currently called by predict_bmi
def chatgpt_openai2(height="", gender="", bmi_class_model1="", age=""):
    # The OpenAI client reads OPENAI_API_KEY from the environment; fail early if it is missing
    if os.getenv("OPENAI_API_KEY") is None:
        raise ValueError("OpenAI API key is not set. Please add it as an environment variable.")
content = f"""I have a person {gender} who is {height} and classified as {bmi_class_model1}.
Can you suggest a reasonable range of BMI values that typically represent people who are {bmi_class_model1}.
Output JSON in this format:
{{
"bmi_range": {{
"min": 24.3,
"max": 28.9
}}
}}"""
client = OpenAI()
completion = client.chat.completions.create(
model="gpt-4o",
messages=[
{
"role": "system",
"content": "You are a health expert specializing in analyzing patient information, including gender, height, and BMI classification, to provide data-driven health insights based on USA statistics.",
},
{"role": "user", "content": content},
],
)
    print(content, completion)
json_object = json.loads(completion.choices[0].message.content)
return json_object
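# Hypothetical example call (illustrative values only):
# chatgpt_openai2(66, "FEMALE", "obesity (x > 30 BMI)", 35)
# would be expected to return something like {"bmi_range": {"min": 30.0, "max": 35.0}}.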
def predict_bmi(image, height_in_inches, age):
image_base64 = encode_image_to_base64(image)
request_payload = {
"image_base64": image_base64,
"parameters": {"candidate_labels": model1_classes},
}
print("res ",request_payload)
# API call to get the model1 output
output = query(request_payload)
# Check if the output is valid and not None
if not output or not isinstance(output, list):
raise ValueError("Invalid response from API: Expected a list of predictions")
# Process the model1 output (ensure output is a list of dictionaries)
try:
predicted_class = max(output, key=lambda x: x["score"])["label"]
except (KeyError, TypeError) as e:
print(f"Error processing output: {e}")
raise ValueError("Unexpected API response format")
request_payload2 = {
"image_base64": image_base64,
"parameters": {"candidate_labels": model2_classes},
}
# API call to get the model2 output
output2 = query(request_payload2)
# Check if the output is valid and not None
    if not output2 or not isinstance(output2, list):
        raise ValueError(
            "Invalid response from API for model2: expected a list of predictions"
        )
# Process the model2 output2 (ensure output is a list of dictionaries)
try:
predicted_class2 = max(output2, key=lambda x: x["score"])["label"]
except (KeyError, TypeError) as e:
print(f"Error processing output2: {e}")
raise ValueError("Unexpected API response format")
bmi_range = bmi_classes_ranges[predicted_class.lower()]
if "normal weight" in predicted_class or "overweight" in predicted_class:
# Calculate the average BMI
average_bmi = (bmi_range[0] + bmi_range[1]) / 2
model3_classes = [f"BMI <= {average_bmi:.1f}", f"BMI > {average_bmi:.1f}"]
request_payload3 = {
"image_base64": image_base64,
"parameters": {"candidate_labels": model3_classes},
}
# API call to get the model3 output
output3 = query(request_payload3)
# Check if the output is valid and not None
        if not output3 or not isinstance(output3, list):
            raise ValueError(
                "Invalid response from API for model3: expected a list of predictions"
            )
# Process the model3 output (ensure output is a list of dictionaries)
try:
predicted_class3 = max(output3, key=lambda x: x["score"])["label"]
except (KeyError, TypeError) as e:
print(f"Error processing output3: {e}")
raise ValueError("Unexpected API response format")
get_bmi_range = get_bmi_classes_range(predicted_class, predicted_class3)
else:
model3_gpt = chatgpt_openai(
height_in_inches, predicted_class2, predicted_class, age
)
        print("GPT response:", model3_gpt)
data = model3_gpt["bmi_range"]
get_bmi_range = [data["min"], data["max"]]
weight_min, weight_max = get_weight_range(
get_bmi_range[0], get_bmi_range[1], height_in_inches
)
print(f"Predicted weight range: {weight_min:.2f} lbs - {weight_max:.2f} lbs")
result = {
"weightCategory": f"{predicted_class}",
"height": str(height_in_inches),
"bmi_range": get_bmi_range,
"predictedWeightRange": f"{weight_min:.2f} lbs - {weight_max:.2f} lbs",
}
return result
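# Illustrative shape of the dict returned by predict_bmi (example values only):
# {
#     "weightCategory": "normal weight (18.5 < x < 25 BMI)",
#     "height": "66",
#     "bmi_range": [18.5, 21.8],
#     "predictedWeightRange": "114.63 lbs - 135.08 lbs",
# }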
def get_weight_range(bmi_min, bmi_max, height):
# BMI formula: BMI = weight (lb) / [height (in)]^2 * 703
# Rearranged to find weight: weight = BMI * height^2 / 703
weight_min = (bmi_min * height**2) / 703
weight_max = (bmi_max * height**2) / 703
return weight_min, weight_max
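# Example: for a height of 66 inches and a BMI range of [18.5, 25],
# weight_min = 18.5 * 66**2 / 703 ≈ 114.6 lbs and weight_max = 25 * 66**2 / 703 ≈ 154.9 lbs.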
# Create Gradio interface with updated input components
interface = gr.Interface(
fn=predict_bmi,
inputs=[
gr.Image(type="pil"),
gr.Number(label="Height in inches"),
gr.Number(label="Age"),
],
outputs="json",
title="BMI Prediction",
description="Upload an image and enter your height to predict BMI category and receive a detailed prediction.",
)
interface.launch()