import gradio as gr
from transformers import AutoModel, AutoTokenizer

# Load the model and tokenizer (trust_remote_code is needed for MiniCPM-V's custom model code)
model = AutoModel.from_pretrained("openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True)
model.eval()
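# Optional (assumption, not part of the original Space): MiniCPM-Llama3-V-2_5 is
# typically run on a GPU in half precision; move the model there when one is available.
import torch

if torch.cuda.is_available():
    model = model.to(device="cuda", dtype=torch.float16)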
# Define the Gradio components
image = gr.Image(type="pil", label="Image")
question = gr.Textbox(value="Using the standard 9x9 sudoku format, solve the sudoku puzzle in the image correctly.", label="Question")
answer = gr.Textbox(label="Answer", show_label=True, show_copy_button=True)

title = "Sudoku Solver by FG"
description = "Sudoku Solver using MiniCPM-Llama3-V-2_5"

# Define the function that sends the image and question to the model
def solve_sudoku(image, question):
    msgs = [{"role": "user", "content": question}]
    res = model.chat(
        image=image,
        msgs=msgs,
        tokenizer=tokenizer,
        system_prompt="You are an expert in solving sudoku puzzles. Please solve the sudoku puzzle in the image correctly.",
    )
    # model.chat returns a string here; join() also covers the case of streamed chunks.
    return "".join(res)
# Create the Gradio interface
demo = gr.Interface(
    fn=solve_sudoku,
    inputs=[image, question],
    outputs=answer,
    title=title,
    description=description,
    theme="compact",
)

# Launch the interface (share=True is not needed when the app runs on Hugging Face Spaces)
demo.launch(share=True)