Upload 3 files
v0: Script calling the Mistral, GPT-4, Llama, Qwen, DeepSeek, and Claude APIs. Choose which APIs to call (one, several, or all), pass a prompt, and export the responses as .txt (concatenated if several APIs are called).
- .env_sample +7 -0
- Tests_API_GenAI.py +116 -0
- requirements.txt +42 -5
.env_sample
ADDED
@@ -0,0 +1,7 @@
+REQUESTS_CA_BUNDLE=C:\ProgramData\Netskope\STAgent\data\nscacert.pem
+SSL_CERT_FILE=C:\ProgramData\Netskope\STAgent\data\nscacert.pem
+
+MISTRAL_API_KEY_static=
+LLAMA_API_KEY_static=
+ANTHROPIC_API_KEY_static=
+OPENAI_API_KEY_static=
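Note: python-dotenv's load_dotenv() reads a file named .env by default, so copy .env_sample to .env and fill in the keys before running the script. The two certificate variables point requests (REQUESTS_CA_BUNDLE) and Python's SSL stack (SSL_CERT_FILE) at the Netskope corporate CA bundle so HTTPS calls work behind TLS interception. A minimal sanity check that the keys were picked up (a hypothetical helper, not part of this commit):

from dotenv import load_dotenv
import os

load_dotenv()  # reads .env from the current directory by default
for key in ("MISTRAL_API_KEY_static", "OPENAI_API_KEY_static",
            "ANTHROPIC_API_KEY_static", "LLAMA_API_KEY_static"):
    print(key, "->", "set" if os.getenv(key) else "MISSING")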
Tests_API_GenAI.py
ADDED
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Feb 24 12:03:11 2025
+
+@author: MIPO10053340
+"""
+# Load environment variables (.env) before anything reads them
+from dotenv import load_dotenv
+load_dotenv()
+
+import os
+import numpy as np
+import pandas as pd
+from scipy.stats import entropy
+
+# API clients
+from mistralai import Mistral  # mistralai>=1.0 client (requirements pin 1.5.0)
+import openai
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+# ⚙️ API configuration (set your keys in .env)
+MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY_static')
+OPENAI_API_KEY = os.getenv('OPENAI_API_KEY_static')
+ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY_static')
+LLAMA_API_KEY = os.getenv('LLAMA_API_KEY_static')
+
+# 📌 Models to use
+USE_MODELS = {
+    "mistral": False,
+    "gpt-4": True,
+    "llama": False,  # enable if you want to use it
+    "qwen": False,
+    "deepseek": False
+}
+
+# 📊 Shannon entropy of a response's unigram distribution
+def calculate_entropy(text):
+    tokens = text.split()
+    if not tokens:
+        return 0.0  # avoid division by zero on an empty response
+    probas = np.array([tokens.count(word) / len(tokens) for word in set(tokens)])
+    return entropy(probas)
+
+# 🚀 Query the selected models
+def get_model_responses(question):
+    responses = {}
+
+    # 🔹 MISTRAL
+    if USE_MODELS["mistral"]:
+        mistral_client = Mistral(api_key=MISTRAL_API_KEY)
+        messages = [{"role": "user", "content": question}]
+        response = mistral_client.chat.complete(model="mistral-medium", messages=messages)
+        text_response = response.choices[0].message.content
+        responses["mistral"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
+
+    # 🔹 GPT-4 (OpenAI)
+    if USE_MODELS["gpt-4"]:
+        # openai>=1.0.0 client interface
+        client = openai.OpenAI(api_key=OPENAI_API_KEY)
+        response = client.chat.completions.create(
+            model="gpt-4",
+            messages=[{"role": "user", "content": question}]
+        )
+        text_response = response.choices[0].message.content
+        responses["gpt-4"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
+
+    # 🔹 LLAMA (Hugging Face, runs locally; gated repo, needs an HF access token)
+    if USE_MODELS["llama"]:
+        model_id = "meta-llama/Llama-2-7b-chat-hf"
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        model = AutoModelForCausalLM.from_pretrained(model_id)
+        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+        text_response = pipe(question, max_length=300)[0]["generated_text"]
+        responses["llama"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
+
+    # 🔹 QWEN (Hugging Face; first-generation Qwen repos need trust_remote_code=True)
+    if USE_MODELS["qwen"]:
+        model_id = "Qwen/Qwen-7B-Chat"
+        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+        model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
+        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+        text_response = pipe(question, max_length=300)[0]["generated_text"]
+        responses["qwen"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
+
+    # 🔹 DEEPSEEK (Hugging Face; the published chat model is deepseek-llm-7b-chat)
+    if USE_MODELS["deepseek"]:
+        model_id = "deepseek-ai/deepseek-llm-7b-chat"
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        model = AutoModelForCausalLM.from_pretrained(model_id)
+        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+        text_response = pipe(question, max_length=300)[0]["generated_text"]
+        responses["deepseek"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
+
+    return responses
+
+# 📌 Test question
+question = "What are the protein requirements of broiler chickens during the growth phase?"
+
+# 🔥 Run the test
+results = get_model_responses(question)
+
+# 📊 Display the results
+df = pd.DataFrame.from_dict(results, orient="index")
+print(df)
+
+# 💾 Save to .txt
+with open("model_responses.txt", "w", encoding="utf-8") as f:
+    for model, data in results.items():
+        f.write(f"🔹 Model: {model.upper()}\n")
+        f.write(f"Response:\n{data['response']}\n")
+        f.write(f"📊 Entropy: {data['entropy']:.4f}\n")
+        f.write("=" * 50 + "\n\n")
+
+print("\n✅ Responses saved to 'model_responses.txt'")
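For reference, calculate_entropy computes the Shannon entropy of the response's unigram distribution (scipy's entropy uses natural log by default): for the three-word text "a b a" the probabilities are (2/3, 1/3) and the entropy is -(2/3)ln(2/3) - (1/3)ln(1/3) ≈ 0.637 nats.

The commit message lists Claude among the callable APIs and the script loads ANTHROPIC_API_KEY, but get_model_responses has no Claude branch and the anthropic package is not pinned in requirements.txt. A minimal sketch of what such a branch could look like, assuming anthropic is installed and a "claude" entry is added to USE_MODELS; the model id is only an example:

import anthropic  # not pinned in requirements.txt; hypothetical addition

if USE_MODELS.get("claude"):
    claude_client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
    message = claude_client.messages.create(
        model="claude-3-haiku-20240307",  # example model id
        max_tokens=1024,
        messages=[{"role": "user", "content": question}],
    )
    text_response = message.content[0].text
    responses["claude"] = {"response": text_response,
                           "entropy": calculate_entropy(text_response)}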
requirements.txt
CHANGED
@@ -1,5 +1,42 @@
-
-
-
-
-
+annotated-types==0.7.0
+anyio==4.8.0
+certifi==2025.1.31
+charset-normalizer==3.4.1
+colorama==0.4.6
+distro==1.9.0
+eval_type_backport==0.2.2
+exceptiongroup==1.2.2
+filelock==3.17.0
+fsspec==2025.2.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+huggingface-hub==0.29.1
+idna==3.10
+jiter==0.8.2
+jsonpath-python==1.0.6
+mistralai==1.5.0
+mypy-extensions==1.0.0
+numpy==1.26.4
+openai==1.64.0
+packaging==24.2
+pandas==2.2.3
+pydantic==2.10.6
+pydantic_core==2.27.2
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+pytz==2025.1
+PyYAML==6.0.2
+regex==2024.11.6
+requests==2.32.3
+safetensors==0.5.2
+scipy==1.12.0
+six==1.17.0
+sniffio==1.3.1
+tokenizers==0.21.0
+tqdm==4.67.1
+transformers==4.49.0
+typing-inspect==0.9.0
+typing_extensions==4.12.2
+tzdata==2025.1
+urllib3==2.3.0
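Note: the Hugging Face branches (llama, qwen, deepseek) load models with AutoModelForCausalLM and the text-generation pipeline, which require a deep-learning backend such as PyTorch; transformers is pinned here but no backend is, so one (e.g. torch) must be installed before enabling those models. anthropic is likewise absent, which matters if the Claude branch sketched above is added.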