# Sudoku Solver Space — Gradio app using MiniCPM-Llama3-V-2_5.
# (Removed non-Python scrape artifacts from the Hugging Face Spaces page
# header — status lines, file size, commit hashes, line-number gutter —
# which made this file unparseable.)
import gradio as gr
from transformers import AutoModel, AutoTokenizer
# Load the model and tokenizer from the SAME checkpoint.
# BUG FIX: the model was previously loaded from "openbmb/MiniCPM-V-2" while
# the tokenizer came from "openbmb/MiniCPM-Llama3-V-2_5". A tokenizer whose
# vocabulary does not match the model produces garbage generations. Both are
# now loaded from MiniCPM-Llama3-V-2_5, which also matches the title and
# description used by the UI below.
MODEL_ID = "openbmb/MiniCPM-Llama3-V-2_5"
model = AutoModel.from_pretrained(MODEL_ID, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model.eval()  # inference only — disables dropout / training-mode layers

# Gradio UI components.
image = gr.Image(type="pil", label="Image")
question = gr.Textbox(
    value="Using the standard 9x9 sudoku format, solve the sudoku puzzle in the image correctly.",
    label="Question",
)
answer = gr.Textbox(label="Answer", show_label=True, show_copy_button=True)
title = "Sudoku Solver by FG"
description = "Sudoku Solver using MiniCPM-Llama3-V-2_5"
def solve_sudoku(image, question):
    """Ask the vision-language model to solve the sudoku shown in *image*.

    Args:
        image: PIL image of the puzzle, as supplied by the Gradio Image input.
        question: The user's prompt text from the Question textbox.

    Returns:
        The model's answer as a single string.
    """
    conversation = [{"role": "user", "content": question}]
    reply = model.chat(
        image=image,
        msgs=conversation,
        tokenizer=tokenizer,
        system_prompt="You are an expert in solving sudoku puzzles. Please solve the sudoku puzzle in the image correctly.",
    )
    # model.chat may yield the answer in pieces; flatten to one string.
    return "".join(reply)
# Wire the components into a Gradio interface and serve it.
demo = gr.Interface(
    solve_sudoku,
    inputs=[image, question],
    outputs=answer,
    title=title,
    description=description,
    theme="compact",
)

# share=True publishes a temporary public URL in addition to the local server.
demo.launch(share=True)