import os
import gradio as gr
import base64
import prompts
import json
from openai import OpenAI
from dotenv import load_dotenv
load_dotenv()
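# OpenAI() picks up OPENAI_API_KEY from the environment, which load_dotenv()
# populates from a local .env file if present.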
client = OpenAI()
PROMPT = prompts.SINGLE_QCM_PROMPT
# Function to encode the image
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

def load_qcm(file_path):
    try:
        with open(file_path, "r", encoding="utf-8") as file:
            return json.load(file)
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")
        return {}
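
# Expected shape of the QCM dict (inferred from how get_answers() and submit()
# read it; the actual fields are whatever prompts.SINGLE_QCM_PROMPT asks the
# model to return):
#   {
#       "Question": "...",
#       "Answers": [{"value": "...", "correct": true}, ...],
#       "hint": "...",
#       "explanation": "..."
#   }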
def get_answers(qcm):  # qcm is a dict parsed from the model's JSON output
    answers = [answer["value"] for answer in qcm["Answers"]]
    correct_answers = [
        answer["value"] for answer in qcm["Answers"] if answer["correct"]
    ]
    md_answers = "\n".join([f"* {answer}" for answer in answers])
    md_correct_answers = "\n".join([f"* {answer}" for answer in correct_answers])
    return {"md_answers": md_answers, "md_correct_answers": md_correct_answers}

def process(image_path):
    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            # response_format={"type": "json_object"},  # if needed
            messages=[
                # {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": PROMPT},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{encode_image(image_path)}"
                            },
                        },
                    ],
                }
            ],
            temperature=0.2,
            # max_tokens=256,
            # top_p=1,
            # frequency_penalty=0,
            # presence_penalty=0
        )
        # print(response.usage.total_tokens)
        json_output = response.choices[0].message.content
        return json.loads(json_output)
    except Exception as e:
        print(f"An error occurred: {e}")
        return {"error": str(e)}

with gr.Blocks() as demo:
    with gr.Row():
        image = gr.Image(label="Image", type="filepath")
        with gr.Column():
            submit_btn = gr.Button("Soumettre")
            progress = gr.Textbox(label="Traitement")
    with gr.Accordion(
        open=False,
    ):
        gr_json_output = gr.JSON(label="json output")
    with gr.Tab(label="QCM", visible=False) as gr_qcm_column:
        gr_question = gr.Textbox(label="Question")
        with gr.Accordion(label="Réponses possibles"):
            gr_answers = gr.Markdown()
        gr_hint = gr.Textbox(label="Aide à la réponse")
        with gr.Accordion(label="Bonnes réponses"):
            gr_correct_answers = gr.Markdown()
        gr_explanation = gr.Textbox(label="Explication")

    def submit(image_path):
        qcm = process(image_path)
        # qcm = load_qcm("questions.json")
        ga = get_answers(qcm)
        return {
            progress: "Terminé !",
            gr_qcm_column: gr.Tab(visible=True),
            gr_json_output: qcm,
            gr_question: qcm["Question"],
            gr_answers: ga["md_answers"],
            gr_hint: qcm["hint"],
            gr_correct_answers: ga["md_correct_answers"],
            gr_explanation: qcm["explanation"],
        }

    submit_btn.click(
        fn=submit,
        inputs=image,
        outputs=[
            progress,
            gr_qcm_column,
            gr_json_output,
            gr_question,
            gr_hint,
            gr_answers,
            gr_correct_answers,
            gr_explanation,
        ],
        api_name="submit",
    )
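    # submit() returns a dict keyed by output component, so the order of the
    # `outputs` list does not matter; it just has to include every component
    # that submit() updates.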
if __name__ == "__main__":
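    # Gradio basic auth: log in with username "test" and the password stored
    # in the TEST_PASSWORD environment variable.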
    authorized_users = [("test", os.environ["TEST_PASSWORD"])]
    demo.launch(auth=authorized_users)