LostPikachu committed · verified
Commit 0d47e33 · 1 parent: 1268959

Upload 2 files


Updated the Mistral AI code so that a dict can be passed to the API. Added the libraries for Llama and Deepseek, but Llama, Deepseek and Qwen are still not working.

Files changed (2)
  1. Tests_API_GenAI.py +11 -6
  2. requirements.txt +44 -45
Tests_API_GenAI.py CHANGED
@@ -15,9 +15,14 @@ from scipy.stats import entropy
 
 # API Clients
 from mistralai.client import MistralClient
+from mistralai.models.chat_completion import ChatMessage
+
 import openai
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import transformers_stream_generator
+import einops
 
+HF_TOKEN = "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"
 
 # ⚙️ API configuration (replace with your API keys)
 MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY_static')
@@ -27,10 +32,10 @@ LLAMA_API_KEY = os.getenv('LLAMA_API_KEY_static')
 
 # 📌 Choice of models to use
 USE_MODELS = {
-    "mistral": False,
-    "gpt-4": True,
+    "mistral": True,
+    "gpt-4": False,
     "llama": False,  # Enable it if you want to use it
-    "qwen": False,
+    "qwen": False,
     "deepseek": False
 }
 
@@ -47,7 +52,7 @@ def get_model_responses(question):
     # 🔹 MISTRAL
     if USE_MODELS["mistral"]:
         mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
-        messages = [{"role": "user", "content": question}]
+        messages = [ChatMessage(role="user", content=question)]
         response = mistral_client.chat(model="mistral-medium", messages=messages)
         text_response = response.choices[0].message.content
         responses["mistral"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
@@ -77,8 +82,8 @@ def get_model_responses(question):
     # 🔹 QWEN (Hugging Face)
     if USE_MODELS["qwen"]:
         model_id = "Qwen/Qwen-7B-Chat"
-        tokenizer = AutoTokenizer.from_pretrained(model_id)
-        model = AutoModelForCausalLM.from_pretrained(model_id)
+        tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN, trust_remote_code=True)
+        model = AutoModelForCausalLM.from_pretrained(model_id, token=HF_TOKEN, trust_remote_code=True)
         pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
         text_response = pipe(question, max_length=300)[0]["generated_text"]
         responses["qwen"] = {"response": text_response, "entropy": calculate_entropy(text_response)}
 
requirements.txt CHANGED
@@ -1,45 +1,44 @@
-annotated-types==0.7.0
-anyio==4.8.0
-certifi==2025.1.31
-charset-normalizer==3.4.1
-colorama==0.4.6
-distro==1.9.0
-eval_type_backport==0.2.2
-exceptiongroup==1.2.2
-filelock==3.17.0
-fsspec==2025.2.0
-h11==0.14.0
-httpcore==1.0.7
-httpx==0.28.1
-huggingface-hub==0.29.1
-idna==3.10
-jiter==0.8.2
-jsonpath-python==1.0.6
-mistralai==1.5.0
-mypy-extensions==1.0.0
-numpy==1.26.4
-openai==1.64.0
-packaging==24.2
-pandas==2.2.3
-pydantic==2.10.6
-pydantic_core==2.27.2
-python-dateutil==2.9.0.post0
-python-dotenv==1.0.1
-pytz==2025.1
-PyYAML==6.0.2
-regex==2024.11.6
-requests==2.32.3
-safetensors==0.5.2
-scipy==1.12.0
-six==1.17.0
-sniffio==1.3.1
-tokenizers==0.21.0
-tqdm==4.67.1
-transformers==4.49.0
-typing-inspect==0.9.0
-typing_extensions==4.12.2
-tzdata==2025.1
-urllib3==2.3.0
-streamlit==1.10.0
-tensorflow==2.18.0
-tf-keras==2.18.0
+annotated-types==0.7.0
+anyio==4.8.0
+certifi==2025.1.31
+charset-normalizer==3.4.1
+colorama==0.4.6
+distro==1.9.0
+einops==0.8.1
+eval_type_backport==0.2.2
+exceptiongroup==1.2.2
+filelock==3.17.0
+fsspec==2025.2.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+huggingface-hub==0.29.1
+idna==3.10
+jiter==0.8.2
+jsonpath-python==1.0.6
+mistralai==1.5.0
+mypy-extensions==1.0.0
+numpy==1.26.4
+openai==1.64.0
+packaging==24.2
+pandas==2.2.3
+pydantic==2.10.6
+pydantic_core==2.27.2
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+pytz==2025.1
+PyYAML==6.0.2
+regex==2024.11.6
+requests==2.32.3
+safetensors==0.5.2
+scipy==1.12.0
+six==1.17.0
+sniffio==1.3.1
+tokenizers==0.21.0
+tqdm==4.67.1
+transformers==4.49.0
+transformers-stream-generator==0.0.5
+typing-inspect==0.9.0
+typing_extensions==4.12.2
+tzdata==2025.1
+urllib3==2.3.0
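Finally, every response in Tests_API_GenAI.py is scored with a calculate_entropy helper that this diff calls but never defines. Given the `from scipy.stats import entropy` context line in the first hunk, one plausible, purely hypothetical implementation is a character-frequency Shannon entropy:

```python
# Hypothetical sketch only: calculate_entropy is not part of this diff.
from collections import Counter
from scipy.stats import entropy

def calculate_entropy(text: str) -> float:
    """Shannon entropy (bits) of the character distribution of `text`."""
    counts = list(Counter(text).values())
    # scipy.stats.entropy normalizes the counts into probabilities itself
    return float(entropy(counts, base=2))
```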