from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
from PIL import Image
import os
import requests
import secrets
from pathlib import Path
import tempfile
import gradio as gr
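# This Space wires two hosted models behind a Gradio UI:
#   1. Salesforce/blip-image-captioning-base describes the uploaded (or sketched) math image.
#   2. Qwen/Qwen2.5-Math-7B-Instruct answers the user's question using that description.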
# BLIP image-captioning endpoint. The hosted captioning model expects raw image
# bytes rather than a text prompt, so it is called directly through the Inference
# API (see process_image below) instead of the LangChain HuggingFaceEndpoint wrapper.
BLIP_API_URL = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base"
HF_API_TOKEN = os.getenv("HUGGING_FACE_API")  # Ensure you set this in your environment
# Qwen2.5-Math is a text-generation model, so the LangChain wrapper applies here
math_llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Math-7B-Instruct",
    huggingfacehub_api_token=HF_API_TOKEN,
    temperature=0.7,
    max_new_tokens=1024,
)
# Save the incoming image to a temp file and caption it with BLIP
def process_image(image, shouldConvert=False):
    # Ensure the temporary directory exists
    uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
        Path(tempfile.gettempdir()) / "gradio"
    )
    os.makedirs(uploaded_file_dir, exist_ok=True)
    # Save the uploaded image under a random name
    name = f"tmp{secrets.token_hex(20)}.jpg"
    filename = os.path.join(uploaded_file_dir, name)
    if shouldConvert:
        # Flatten any transparency onto a white background before saving as JPEG
        new_img = Image.new("RGB", size=(image.width, image.height), color=(255, 255, 255))
        new_img.paste(image, (0, 0), mask=image)
        image = new_img
    image.save(filename)
    # Send the raw image bytes to the BLIP captioning endpoint; the image-to-text
    # task returns a list of {"generated_text": ...} candidates
    with open(filename, "rb") as img_file:
        response = requests.post(
            BLIP_API_URL,
            headers={"Authorization": f"Bearer {HF_API_TOKEN}"},
            data=img_file.read(),
        )
    result = response.json()
    if isinstance(result, list) and result:
        # Return the caption text describing the image
        return result[0].get("generated_text", "")
    # Surface any API error message so it shows up in the chat output
    return str(result)
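# Quick local check (illustrative only; assumes a file named "equation.png" exists):
#   caption = process_image(Image.open("equation.png"))
#   print(caption)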
# Build the math prompt from the caption and question, then query Qwen2.5-Math
def get_math_response(image_description, user_question):
    template = """
    You are a helpful AI assistant specialized in solving math reasoning problems.
    Analyze the following question carefully and provide a step-by-step explanation along with the answer.
    Image description: {image_description}
    Question: {user_question}
    """
    prompt_template = PromptTemplate(
        input_variables=["user_question", "image_description"],  # Placeholders in the template
        template=template,
    )
    formatted_prompt = prompt_template.format(user_question=user_question, image_description=image_description)
    # Pass the formatted prompt to the model
    response = math_llm.invoke(formatted_prompt)
    # Yield (rather than return) so Gradio treats this as a streaming generator
    yield response
# Route the active tab's image (upload or sketch) through captioning, then answer the question
def math_chat_bot(image, sketchpad, question, state):
    current_tab_index = state["tab_index"]
    image_description = None
    # Upload tab
    if current_tab_index == 0:
        if image is not None:
            image_description = process_image(image)
    # Sketch tab
    elif current_tab_index == 1:
        if sketchpad and sketchpad["composite"]:
            image_description = process_image(sketchpad["composite"], True)
    # Fall back gracefully when the user asks a text-only question
    if image_description is None:
        image_description = "No image was provided."
    yield from get_math_response(image_description, question)
# Keep KaTeX display math inline inside the Markdown answer pane
css = """
#qwen-md .katex-display { display: inline; }
#qwen-md .katex-display>.katex { display: inline; }
#qwen-md .katex-display>.katex>.katex-html { display: inline; }
"""
def tabs_select(e: gr.SelectData, _state):
    _state["tab_index"] = e.index
    return _state
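# Note: Gradio injects the gr.SelectData event payload automatically based on the
# type annotation above, so only `state` needs to be wired up as an input below.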
with gr.Blocks(css=css) as demo:
    state = gr.State({"tab_index": 0})
    with gr.Row():
        with gr.Column():
            with gr.Tabs() as input_tabs:
                with gr.Tab("Upload"):
                    input_image = gr.Image(type="pil", label="Upload")
                with gr.Tab("Sketch"):
                    input_sketchpad = gr.Sketchpad(type="pil", label="Sketch", layers=False)
            input_tabs.select(fn=tabs_select, inputs=[state], outputs=[state])
            input_text = gr.Textbox(label="Input your question")
            with gr.Row():
                clear_btn = gr.ClearButton([input_image, input_sketchpad, input_text])
                submit_btn = gr.Button("Submit", variant="primary")
        with gr.Column():
            output_md = gr.Markdown(label="Answer", elem_id="qwen-md")
    submit_btn.click(
        fn=math_chat_bot,
        inputs=[input_image, input_sketchpad, input_text, state],
        outputs=output_md,
    )

demo.launch()
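# On a Hugging Face Space this script is typically saved as app.py and started
# automatically; to try it locally, set HUGGING_FACE_API and run `python app.py`.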