# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 12:03:11 2025

@author: MIPO10053340
"""
# Load environment variables (API keys) from a local .env file
from dotenv import load_dotenv
load_dotenv()

import os
import numpy as np
import pandas as pd
from scipy.stats import entropy

# API Clients
from mistralai.client import MistralClient  # mistralai SDK < 1.0 interface
import openai
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


# ⚙️ API configuration (keys are read from the .env file)
MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY_static')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY_static')
ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY_static')
LLAMA_API_KEY = os.getenv('LLAMA_API_KEY_static')
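
# For reference, the .env file is expected to define the variables read above.
# A minimal sketch (names taken from the os.getenv calls; values are placeholders):
#
#   MISTRAL_API_KEY_static=...
#   OPENAI_API_KEY_static=...
#   ANTHROPIC_API_KEY_static=...
#   LLAMA_API_KEY_static=...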

# 📌 Select which models to query
USE_MODELS = {
    "mistral": False,
    "gpt-4": True,
    "llama": False,  # enable if you want to use it
    "qwen": False,
    "deepseek": False
}

# 📊 Compute the Shannon entropy of a response's word-frequency distribution
def calculate_entropy(text):
    tokens = text.split()
    if not tokens:  # guard against empty responses (avoids division by zero)
        return 0.0
    probas = np.array([tokens.count(word) / len(tokens) for word in set(tokens)])
    return entropy(probas)
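
# Worked example (hypothetical input): for "protein protein intake", the counts
# are {protein: 2, intake: 1} -> probabilities [2/3, 1/3], and scipy's entropy
# (natural log) gives -(2/3)*ln(2/3) - (1/3)*ln(1/3) ≈ 0.6365 nats.
# Flatter word distributions (less repetition) yield higher values.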

# 🚀 Query each enabled model and collect its response
def get_model_responses(question):
    responses = {}

    # 🔹 MISTRAL (mistralai SDK < 1.0; newer releases expose a different client)
    if USE_MODELS["mistral"]:
        mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
        messages = [{"role": "user", "content": question}]
        response = mistral_client.chat(model="mistral-medium", messages=messages)
        text_response = response.choices[0].message.content
        responses["mistral"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 GPT-4 (OpenAI, openai >= 1.0.0 client API)
    if USE_MODELS["gpt-4"]:
        client = openai.OpenAI(api_key=OPENAI_API_KEY)

        response = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": question}]
        )

        text_response = response.choices[0].message.content
        responses["gpt-4"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 LLAMA (Hugging Face; gated repo, requires approved access and a HF token)
    if USE_MODELS["llama"]:
        model_id = "meta-llama/Llama-2-7b-chat-hf"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(model_id)
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        # max_new_tokens bounds only the generation; max_length would also count the prompt
        text_response = pipe(question, max_new_tokens=300)[0]["generated_text"]
        responses["llama"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 QWEN (Hugging Face; Qwen-7B-Chat ships custom modeling code, hence trust_remote_code)
    if USE_MODELS["qwen"]:
        model_id = "Qwen/Qwen-7B-Chat"
        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        text_response = pipe(question, max_new_tokens=300)[0]["generated_text"]
        responses["qwen"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 DEEPSEEK (Hugging Face; the 7B chat model is published as deepseek-llm-7b-chat)
    if USE_MODELS["deepseek"]:
        model_id = "deepseek-ai/deepseek-llm-7b-chat"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(model_id)
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        text_response = pipe(question, max_new_tokens=300)[0]["generated_text"]
        responses["deepseek"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    return responses

# 📌 Test question
question = "What are the protein requirements of broiler chickens during the growth phase?"

# 🔥 Run the test
results = get_model_responses(question)

# 📊 Display the results
df = pd.DataFrame.from_dict(results, orient="index")
print(df)

# 💾 Save the responses to a .txt file
with open("model_responses.txt", "w", encoding="utf-8") as f:
    for model, data in results.items():
        f.write(f"🔹 Model: {model.upper()}\n")
        f.write(f"Response:\n{data['response']}\n")
        f.write(f"📊 Entropy: {data['entropy']:.4f}\n")
        f.write("=" * 50 + "\n\n")

print("\n✅ Responses saved to 'model_responses.txt'")