Florian.Moret committed on
Commit 8ae11fa · 1 Parent(s): a8270a2

delete test_api_genai

Files changed (1)
  1. Tests_API_GenAI.py +0 -130
Tests_API_GenAI.py DELETED
@@ -1,130 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 12:03:11 2025

@author: MIPO10053340
"""
# JWT
from dotenv import load_dotenv
load_dotenv()

import os
import numpy as np
import pandas as pd
from scipy.stats import entropy

# API clients
from mistralai import Mistral
from openai import OpenAI

# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# ⚙️ API configuration (replace with your own API keys)
MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY_static')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY_static')
# ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY_static')
# LLAMA_API_KEY = os.getenv('LLAMA_API_KEY_static')
# HUGGINGFACE_TOKEN = os.getenv('HUGGINGFACE_TOKEN_static')
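# The .env file loaded above is expected to define these *_static names,
# e.g. a line such as MISTRAL_API_KEY_static=<your-key>.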

# 📌 Choose which models to use
USE_MODELS = {
    "mistral": True,
    "gpt-4": False,
    "llama": False,  # Enable if you want to use it
    "qwen": False,
    "deepseek": False
}
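# Only the models flagged True above are queried by get_model_responses() below.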

# 📊 Compute the Shannon entropy of a response's word-frequency distribution
def calculate_entropy(text):
    tokens = text.split()
    if not tokens:  # guard against an empty response (avoids division by zero)
        return 0.0
    probas = np.array([tokens.count(word) / len(tokens) for word in set(tokens)])
    return entropy(probas)
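# Worked example: calculate_entropy("a a b") builds frequencies {a: 2/3, b: 1/3},
# giving -(2/3*ln(2/3) + 1/3*ln(1/3)) ≈ 0.6365 nats (scipy's entropy uses natural log by default).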

# 🚀 Query the enabled models
def get_model_responses(question):
    responses = {}

    # 🔹 MISTRAL
    if USE_MODELS["mistral"]:
        # Initialize the Mistral client
        client = Mistral(api_key=MISTRAL_API_KEY)

        # Create a chat completion
        response = client.chat.complete(
            model="mistral-medium",
            messages=[
                {"role": "user", "content": question}
            ]
        )
        # Extract the response text
        text_response = response.choices[0].message.content
        responses["mistral-medium"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 GPT-4 (OpenAI)
    if USE_MODELS["gpt-4"]:
        # Initialize the OpenAI client
        client = OpenAI(api_key=OPENAI_API_KEY)

        # Create a chat completion
        response = client.chat.completions.create(
            model="gpt-4-turbo",
            messages=[
                {"role": "user", "content": question}
            ]
        )
        # Extract the response text
        text_response = response.choices[0].message.content
        responses["gpt-4-turbo"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 LLAMA (Hugging Face)
    # if USE_MODELS["llama"]:
    #     model_id = "meta-llama/Llama-2-7b-chat-hf"
    #     tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=HUGGINGFACE_TOKEN)
    #     model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=HUGGINGFACE_TOKEN)
    #     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    #     text_response = pipe(question, max_length=300)[0]["generated_text"]
    #     responses["llama"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 QWEN (Hugging Face)
    # if USE_MODELS["qwen"]:
    #     model_id = "Qwen/Qwen-7B-Chat"
    #     tokenizer = AutoTokenizer.from_pretrained(model_id, token=HUGGINGFACE_TOKEN, trust_remote_code=True)
    #     model = AutoModelForCausalLM.from_pretrained(model_id, token=HUGGINGFACE_TOKEN, trust_remote_code=True)
    #     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    #     text_response = pipe(question, max_length=300)[0]["generated_text"]
    #     responses["qwen"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    # 🔹 DEEPSEEK (Hugging Face)
    # if USE_MODELS["deepseek"]:
    #     model_id = "deepseek-ai/deepseek-7b-chat"
    #     tokenizer = AutoTokenizer.from_pretrained(model_id)
    #     model = AutoModelForCausalLM.from_pretrained(model_id)
    #     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    #     text_response = pipe(question, max_length=300)[0]["generated_text"]
    #     responses["deepseek"] = {"response": text_response, "entropy": calculate_entropy(text_response)}

    return responses
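# The returned dict maps each model name to {"response": str, "entropy": float},
# e.g. {"mistral-medium": {"response": "...", "entropy": 4.2}} (values illustrative).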

# 📌 Test question
question = "Quels sont les besoins en protéines des poulets de chair en phase de croissance ?"
# (French: "What are the protein requirements of broiler chickens during the growth phase?")

# 🔥 Run the test
results = get_model_responses(question)

# 📊 Display the results (one row per model; columns: response, entropy)
df = pd.DataFrame.from_dict(results, orient="index")
print(df)

# 💾 Save to .txt
with open("model_responses.txt", "w", encoding="utf-8") as f:
    for model, data in results.items():
        f.write(f"🔹 Model: {model.upper()}\n")
        f.write(f"Response:\n{data['response']}\n")
        f.write(f"📊 Entropy: {data['entropy']:.4f}\n")
        f.write("=" * 50 + "\n\n")

print("\n✅ Responses saved to 'model_responses.txt'")