# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 12:03:11 2025

@author: MIPO10053340
"""
# Load environment variables (API keys) from a local .env file
from dotenv import load_dotenv
load_dotenv()

import os

import numpy as np
import pandas as pd
from scipy.stats import entropy

# API clients
from mistralai.client import MistralClient  # legacy mistralai<1.0 client
import openai
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# ⚙️ API configuration (set your own keys in the .env file)
MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY_static')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY_static')
ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY_static')  # loaded but not used below
LLAMA_API_KEY = os.getenv('LLAMA_API_KEY_static')          # loaded but not used below
# 📌 Which models to query
USE_MODELS = {
    "mistral": False,
    "gpt-4": True,
    "llama": False,    # enable to run locally via Hugging Face
    "qwen": False,
    "deepseek": False,
}
# 📊 Shannon entropy of a response's token-frequency distribution
def calculate_entropy(text):
    tokens = text.split()
    if not tokens:  # guard against an empty response (would divide by zero)
        return 0.0
    probas = np.array([tokens.count(word) / len(tokens) for word in set(tokens)])
    return entropy(probas)
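# Worked example (scipy's entropy uses the natural log by default):
# calculate_entropy("a b a") -> tokens = ["a", "b", "a"], probas = [2/3, 1/3]
# H = -(2/3)·ln(2/3) - (1/3)·ln(1/3) ≈ 0.6365 nats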
# 🚀 Query each enabled model and collect its response plus entropy
def get_model_responses(question):
    responses = {}

    # 🔹 MISTRAL (legacy mistralai<1.0 client)
    if USE_MODELS["mistral"]:
        mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
        # note: some 0.x versions expect ChatMessage objects rather than dicts
        messages = [{"role": "user", "content": question}]
        response = mistral_client.chat(model="mistral-medium", messages=messages)
        text_response = response.choices[0].message.content
        responses["mistral"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
    # 🔹 GPT-4 (OpenAI, openai>=1.0.0 client)
    if USE_MODELS["gpt-4"]:
        client = openai.OpenAI(api_key=OPENAI_API_KEY)
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": question}],
        )
        text_response = response.choices[0].message.content
        responses["gpt-4"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
    # 🔹 Local Hugging Face models (Llama 2, Qwen, DeepSeek) share one code path
    hf_model_ids = {
        "llama": "meta-llama/Llama-2-7b-chat-hf",        # gated repo: requires approved HF access
        "qwen": "Qwen/Qwen-7B-Chat",                     # needs trust_remote_code=True
        "deepseek": "deepseek-ai/deepseek-llm-7b-chat",  # repo is "deepseek-llm-7b-chat", not "deepseek-7b-chat"
    }
    for name, model_id in hf_model_ids.items():
        if not USE_MODELS[name]:
            continue
        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        # generated_text includes the prompt; max_length caps prompt + completion tokens
        text_response = pipe(question, max_length=300)[0]["generated_text"]
        responses[name] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    return responses
# 📌 Test question
question = "What are the protein requirements of broiler chickens during the growth phase?"

# 🔥 Run the test
results = get_model_responses(question)

# 📊 Display the results
df = pd.DataFrame.from_dict(results, orient="index")
print(df)
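# Optionally persist the summary table too (filename is an arbitrary choice):
df.to_csv("model_responses.csv", encoding="utf-8")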
# 💾 Save full responses to .txt
with open("model_responses.txt", "w", encoding="utf-8") as f:
    for model, data in results.items():
        f.write(f"🔹 Model: {model.upper()}\n")
        f.write(f"Response:\n{data['response']}\n")
        f.write(f"📊 Entropy: {data['entropy']:.4f}\n")
        f.write("=" * 50 + "\n\n")

print("\n✅ Responses saved to 'model_responses.txt'")