# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 12:03:11 2025
@author: MIPO10053340
"""
# JWT
from dotenv import load_dotenv
load_dotenv()

import os
import numpy as np
import pandas as pd
from scipy.stats import entropy

# API clients
from mistralai import Mistral
from openai import OpenAI
# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# ⚙️ API configuration (replace with your own API keys)
MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY_static')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY_static')
# ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY_static')
# LLAMA_API_KEY = os.getenv('LLAMA_API_KEY_static')
# HUGGINGFACE_TOKEN = os.getenv('HUGGINGFACE_TOKEN_static')
# 📌 Choose which models to query
USE_MODELS = {
    "mistral": True,
    "gpt-4": False,
    "llama": False,   # enable if you want to use it
    "qwen": False,
    "deepseek": False,
}
# 📊 Compute the Shannon entropy of a response's word distribution
def calculate_entropy(text):
    tokens = text.split()
    if not tokens:  # guard: an empty response would otherwise divide by zero
        return 0.0
    probas = np.array([tokens.count(word) / len(tokens) for word in set(tokens)])
    return entropy(probas)
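# Illustrative check (not in the original script): for the text "a a b" the
# word distribution is [2/3, 1/3], so scipy's entropy (natural log) gives
#   -(2/3 * ln(2/3) + 1/3 * ln(1/3)) ≈ 0.6365 nats.
# Note this measures the lexical diversity of a single response, a rough
# proxy rather than a measure of model uncertainty.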
# 🚀 Query the enabled models
def get_model_responses(question):
    responses = {}

    # 🔹 MISTRAL
    if USE_MODELS["mistral"]:
        # Initialize the Mistral client
        client = Mistral(api_key=MISTRAL_API_KEY)
        # Create a chat completion
        response = client.chat.complete(
            model="mistral-medium",
            messages=[
                {"role": "user", "content": question}
            ]
        )
        # Extract the response text
        text_response = response.choices[0].message.content
        responses["mistral-medium"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
    # 🔹 GPT-4 (OpenAI)
    if USE_MODELS["gpt-4"]:
        # Initialize the OpenAI client
        client = OpenAI(api_key=OPENAI_API_KEY)
        # Create a chat completion
        response = client.chat.completions.create(
            model="gpt-4-turbo",
            messages=[
                {"role": "user", "content": question}
            ]
        )
        # Extract the response text
        text_response = response.choices[0].message.content
        responses["gpt-4-turbo"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
    # # 🔹 LLAMA (Hugging Face)
    # if USE_MODELS["llama"]:
    #     model_id = "meta-llama/Llama-2-7b-chat-hf"
    #     tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=HUGGINGFACE_TOKEN)
    #     model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=HUGGINGFACE_TOKEN)
    #     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    #     text_response = pipe(question, max_length=300)[0]["generated_text"]
    #     responses["llama"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # # 🔹 QWEN (Hugging Face)
    # if USE_MODELS["qwen"]:
    #     model_id = "Qwen/Qwen-7B-Chat"
    #     tokenizer = AutoTokenizer.from_pretrained(model_id, token=HUGGINGFACE_TOKEN, trust_remote_code=True)
    #     model = AutoModelForCausalLM.from_pretrained(model_id, token=HUGGINGFACE_TOKEN, trust_remote_code=True)
    #     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    #     text_response = pipe(question, max_length=300)[0]["generated_text"]
    #     responses["qwen"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # # 🔹 DEEPSEEK (Hugging Face)
    # if USE_MODELS["deepseek"]:
    #     model_id = "deepseek-ai/deepseek-7b-chat"
    #     tokenizer = AutoTokenizer.from_pretrained(model_id)
    #     model = AutoModelForCausalLM.from_pretrained(model_id)
    #     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    #     text_response = pipe(question, max_length=300)[0]["generated_text"]
    #     responses["deepseek"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
    return responses
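# Hedged sketch (an assumption, not part of the original script): each API
# call can be wrapped so that one failing provider does not abort the whole
# comparison. For example, around the Mistral call above:
#
#     try:
#         response = client.chat.complete(
#             model="mistral-medium",
#             messages=[{"role": "user", "content": question}],
#         )
#     except Exception as exc:
#         responses["mistral-medium"] = {"response": f"ERROR: {exc}",
#                                        "entropy": float("nan")}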
# 📌 Test question
question = "What are the protein requirements of broiler chickens during the growth phase?"
# 🔥 Run the test
results = get_model_responses(question)

# 📊 Display the results
df = pd.DataFrame.from_dict(results, orient="index")
print(df)
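# Optional (an assumption, not in the original script): persist the same
# summary table in machine-readable form alongside the .txt dump below.
# df.to_csv("model_responses.csv", index_label="model")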
# 💾 Save to .txt
with open("model_responses.txt", "w", encoding="utf-8") as f:
    for model, data in results.items():
        f.write(f"🔹 Model: {model.upper()}\n")
        f.write(f"Response:\n{data['response']}\n")
        f.write(f"📊 Entropy: {data['entropy']:.4f}\n")
        f.write("=" * 50 + "\n\n")

print("\n✅ Responses saved to 'model_responses.txt'")