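# Gradio app: extract a multiple-choice question (QCM) from an uploaded image
# with GPT-4o vision and display its answers, hint, correct answers and explanation.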
import os
import gradio as gr
import base64
import prompts
import json
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()

client = OpenAI()

PROMPT = prompts.SINGLE_QCM_PROMPT
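
# Expected QCM shape, inferred from the keys used in get_answers() and submit()
# below (an assumption about the JSON that SINGLE_QCM_PROMPT asks the model for):
# {
#     "Question": "...",
#     "Answers": [{"value": "...", "correct": true}, ...],
#     "hint": "...",
#     "explanation": "..."
# }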


# Function to encode the image
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
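

# Load a previously generated QCM from a JSON file (useful for offline testing;
# see the commented-out call in submit() below).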
def load_qcm(file_path):
    try:
        with open(file_path, "r", encoding="utf-8") as file:
            return json.load(file)
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")
        return {}
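

# Build Markdown bullet lists of all proposed answers and of the correct ones.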
def get_answers(qcm):  # qcm is the QCM dict parsed from the model's JSON output
    answers = [answer["value"] for answer in qcm["Answers"]]
    correct_answers = [
        answer["value"] for answer in qcm["Answers"] if answer["correct"]
    ]
    md_answers = "\n".join([f"* {answer}" for answer in answers])
    md_correct_answers = "\n".join([f"* {answer}" for answer in correct_answers])
    return {"md_answers": md_answers, "md_correct_answers": md_correct_answers}
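

# Send the prompt and the base64-encoded image to GPT-4o and parse its JSON reply.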
def process(image_path):
    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            # response_format={"type": "json_object"},  # if needed
            messages=[
                # {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": PROMPT},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{encode_image(image_path)}"
                            },
                        },
                    ],
                }
            ],
            temperature=0.2,
            # max_tokens=256,
            # top_p=1,
            # frequency_penalty=0,
            # presence_penalty=0
        )
        # print(response.usage.total_tokens)
        json_output = response.choices[0].message.content
        return json.loads(json_output)
    except Exception as e:
        print(f"An error occurred: {e}")
        return {"error": str(e)}
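

# UI layout: image input and submit button in a row, raw JSON output in an
# accordion, and the parsed QCM in a tab that is revealed once processing is done.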
with gr.Blocks() as demo:
    with gr.Row():
        image = gr.Image(label="Image", type="filepath")
        with gr.Column():
            submit_btn = gr.Button("Soumettre")
            progress = gr.Textbox(label="Traitement")
    with gr.Accordion(open=False):
        gr_json_output = gr.JSON(label="json output")
    with gr.Tab(label="QCM", visible=False) as gr_qcm_column:
        gr_question = gr.Textbox(label="Question")
        with gr.Accordion(label="Réponses possibles"):
            gr_answers = gr.Markdown()
        gr_hint = gr.Textbox(label="Aide à la réponse")
        with gr.Accordion(label="Bonnes réponses"):
            gr_correct_answers = gr.Markdown()
        gr_explanation = gr.Textbox(label="Explication")
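
    # Run the pipeline on the uploaded image and fan the results out to the
    # output components (returned as a dict keyed by component).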
    def submit(image_path):
        qcm = process(image_path)
        # qcm = load_qcm("questions.json")
        ga = get_answers(qcm)
        return {
            progress: "Terminé !",
            gr_qcm_column: gr.Tab(visible=True),
            gr_json_output: qcm,
            gr_question: qcm["Question"],
            gr_answers: ga["md_answers"],
            gr_hint: qcm["hint"],
            gr_correct_answers: ga["md_correct_answers"],
            gr_explanation: qcm["explanation"],
        }
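
    # Wire the button: Gradio matches outputs via the component-keyed dict
    # that submit() returns, so the order of this list is not significant.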
    submit_btn.click(
        fn=submit,
        inputs=image,
        outputs=[
            progress,
            gr_qcm_column,
            gr_json_output,
            gr_question,
            gr_hint,
            gr_answers,
            gr_correct_answers,
            gr_explanation,
        ],
        api_name="submit",
    )
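

# Basic auth for the app: user "test" with the password read from the
# TEST_PASSWORD environment variable (assumed to be set, e.g. as a Space secret).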
if __name__ == "__main__":
    authorized_users = [("test", os.environ["TEST_PASSWORD"])]
    demo.launch(auth=authorized_users)