File size: 2,988 Bytes
c3883a9
 
 
 
 
612150f
c3883a9
 
 
 
 
612150f
 
c3883a9
 
 
612150f
 
 
 
c3883a9
 
 
 
 
 
612150f
3ea476a
612150f
8d32f6e
11e026a
612150f
 
 
 
8d32f6e
 
 
612150f
32cfa0c
612150f
 
3ea476a
612150f
e6b37d1
612150f
 
 
 
 
 
 
 
 
 
c3883a9
 
 
 
 
 
 
 
612150f
c3883a9
612150f
3ea476a
612150f
6f4e2e1
3ea476a
c3883a9
 
 
 
 
 
612150f
d6b7293
612150f
d6b7293
612150f
c3883a9
612150f
 
c3883a9
 
612150f
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
from flask import Flask, request, render_template, jsonify
import PIL.Image
import google.generativeai as genai
import os
from tempfile import NamedTemporaryFile
from gradio_client import Client, handle_file  # Importez gradio_client

app = Flask(__name__)

# Gemini generation settings: maximum-temperature sampling, long outputs allowed.
generation_config = {
    "temperature": 1,
    "max_output_tokens": 8192,
}

# Every safety category is set to BLOCK_NONE, i.e. content filtering is
# fully disabled for this model's responses.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

# API key comes from the TOKEN environment variable (None if unset; genai
# will then fail at request time rather than here).
GOOGLE_API_KEY = os.environ.get("TOKEN")

genai.configure(api_key=GOOGLE_API_KEY)

# Query the Gemini model with an image and a prompt.
def query_gemini(image_path, prompt_gemini):
    """Send the image at *image_path* plus *prompt_gemini* to Gemini.

    Returns the model's text answer, or the French placeholder
    "Non disponible pour l'instant" on any API failure, so the caller
    always receives a displayable string.
    """
    model = genai.GenerativeModel(
        model_name="gemini-exp-1206",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    try:
        # Context manager closes the underlying image file handle
        # (the original PIL.Image.open call leaked it).
        with PIL.Image.open(image_path) as img:
            response = model.generate_content(
                [prompt_gemini, img], request_options={"timeout": 600}
            )
        return response.text
    except Exception:
        # Best-effort contract: surface a friendly placeholder instead of
        # propagating API/parse errors to the route handler.
        return "Non disponible pour l'instant"

# Query the Qwen2 model (via its public Gradio Space).
def query_qwen2(image_path, question):
    """Ask the Qwen2.5-Math demo Space about the image at *image_path*.

    Returns the Space's answer; on any failure the exception text is
    returned instead of raising, so the caller always gets a string.
    """
    try:
        space = Client("Qwen/Qwen2.5-Math-Demo")
        answer = space.predict(
            image=handle_file(image_path),
            sketchpad=None,
            question=question,
            api_name="/math_chat_bot",
        )
    except Exception as err:
        return str(err)
    return answer

@app.route('/')
def index():
    """Serve the front-end page (templates/math.html)."""
    return render_template('math.html')

@app.route('/upload', methods=['POST'])
def upload_image():
    """Handle an uploaded math-problem image and return the model's answer.

    Expects multipart form fields: ``image`` (the file), optional
    ``model_choice`` (``"mariam's"`` routes to Gemini, anything else to
    Qwen2) and optional ``custom_instruction`` appended to the prompt.
    Responds with ``{'result', 'model'}`` on success, ``{'error'}`` with
    status 400 on bad input or 500 on processing failure.
    """
    if 'image' not in request.files:
        return jsonify({'error': 'Aucune image fournie'}), 400

    file = request.files['image']
    model_choice = request.form.get('model_choice', 'gemini')  # which backend to use
    custom_instruction = request.form.get('custom_instruction')

    prompt_gemini = f"Résous ce problème mathématiques. Je veux qu'en réponse tu me donnes un rendu complet en utilisant du Latex. {custom_instruction}"
    question = f"Donne la réponse en français et en utilisant LaTeX.{custom_instruction}"
    if file.filename == '':
        return jsonify({'error': 'Aucun fichier sélectionné'}), 400

    # Create the temp file with delete=False, then close the handle before
    # saving into it (the original wrote while the handle was still open,
    # which is unreliable on Windows).
    with NamedTemporaryFile(delete=False) as temp_file:
        temp_path = temp_file.name
    try:
        file.save(temp_path)
        if model_choice == "mariam's":
            result = query_gemini(temp_path, prompt_gemini)
        else:
            result = query_qwen2(temp_path, question)
        return jsonify({'result': result, 'model': model_choice})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
    finally:
        # Fix: the original only unlinked on the success path, leaking the
        # temp file whenever an exception was raised.
        try:
            os.unlink(temp_path)
        except OSError:
            pass