File size: 1,583 Bytes
2d11d60
f3efe05
c76398e
f3efe05
c76398e
f3efe05
c76398e
f3efe05
 
c76398e
f3efe05
 
c76398e
 
 
 
f3efe05
 
 
 
 
 
 
 
 
 
 
 
c76398e
f3efe05
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import gradio as gr
import base64
import prompts
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables (notably OPENAI_API_KEY) from a local .env
# file before constructing the client, which reads them from the process env.
load_dotenv()
client = OpenAI()

# Project-defined prompt for a single QCM (multiple-choice question) —
# see the `prompts` module for its exact wording.
PROMPT = prompts.SINGLE_QCM_PROMPT


def encode_image(image_path):
    """Read the file at *image_path* and return its base64 encoding as a UTF-8 string."""
    with open(image_path, "rb") as fh:
        raw_bytes = fh.read()
    return base64.b64encode(raw_bytes).decode("utf-8")


def process(image_path):
    """Send the image at *image_path* to GPT-4o together with the QCM prompt.

    Parameters
    ----------
    image_path : str
        Filesystem path of the uploaded image (Gradio supplies this because
        the input component uses ``type="filepath"``).

    Returns
    -------
    str | dict
        The model's raw text response on success, or ``{"error": <message>}``
        when the API call fails. Gradio's JSON output component renders both.
    """
    try:
        # Embed the image inline as a base64 data URL (vision-style input).
        # NOTE(review): the MIME type is hard-coded to image/jpeg even though
        # Gradio may hand us a PNG — the API is generally tolerant, but
        # confirm if strictness is needed.
        data_url = f"data:image/jpeg;base64,{encode_image(image_path)}"
        response = client.chat.completions.create(
            model="gpt-4o",
            # response_format={"type": "json_object"},  # enable if strict JSON output is required
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": PROMPT},
                        {"type": "image_url", "image_url": {"url": data_url}},
                    ],
                }
            ],
            # Low temperature: we want reproducible extraction, not creativity.
            temperature=0.2,
        )
        return response.choices[0].message.content

    except Exception as e:
        # Broad catch is deliberate: any failure (network, auth, unreadable
        # file) is surfaced to the UI as a JSON error object instead of
        # crashing the request handler.
        print(f"an error occurred : {e}")
        return {"error": str(e)}


# Minimal Gradio app: the user uploads an image, `process` sends it to the
# model, and the response is rendered by the JSON output component.
# type="filepath" makes Gradio pass a path on disk (what `process` expects)
# rather than an in-memory array.
iface = gr.Interface(
    fn=process,
    inputs=gr.Image(type="filepath"),
    outputs=gr.JSON(),
)

iface.launch()