Spaces:
Paused
Paused
Ahmet Kaan Sever
committed on
Commit
·
52b6367
1
Parent(s):
f6890a5
Sending inputs to device for non-multi-GPU hardware
Browse files
src/deepeval/base_task.py
CHANGED
@@ -18,7 +18,18 @@ class BaseTask(ABC):
|
|
18 |
def __init__(self, dataset_repo, model_name):
|
19 |
self.dataset_repo = dataset_repo
|
20 |
self.dataset = self.load_dataset_from_hf()
|
21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
self.model, self.tokenizer = self.get_cached_model(model_name, self.device)
|
23 |
openai.api_key = OPENAI_KEY
|
24 |
|
@@ -130,8 +141,13 @@ class BaseTask(ABC):
|
|
130 |
formatted_chat = self.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
|
131 |
#print(formatted_chat)
|
132 |
inputs = self.tokenizer(formatted_chat, return_tensors="pt", padding=True, truncation=True)
|
133 |
-
|
134 |
-
|
|
|
|
|
|
|
|
|
|
|
135 |
|
136 |
# Generate the sequence of letters starting from 'A'
|
137 |
letters = [chr(ord('A') + i) for i in range(len(choices))] # Create option letters A, B, C, D, E, ...
|
@@ -188,8 +204,13 @@ class BaseTask(ABC):
|
|
188 |
)
|
189 |
|
190 |
inputs = self.tokenizer(formatted_chat, return_tensors="pt", padding=True, truncation=True)
|
191 |
-
|
192 |
-
|
|
|
|
|
|
|
|
|
|
|
193 |
|
194 |
output = self.model.generate(
|
195 |
input_ids,
|
|
|
def __init__(self, dataset_repo, model_name):
    """Set up a task: load its HF dataset, select a device config, and cache the model.

    Args:
        dataset_repo: Hugging Face dataset repository identifier for this task.
        model_name: Name of the model to load (via the cached-model helper).
    """
    self.dataset_repo = dataset_repo
    self.dataset = self.load_dataset_from_hf()

    # Pick a device string from the number of visible CUDA devices:
    # 0 -> CPU, 1 -> single "cuda" device, >1 -> "auto" (multi-GPU dispatch).
    device_count = torch.cuda.device_count()
    if device_count == 0:
        self.device = "cpu"
        print("No GPU found. Using CPU.")
    elif device_count == 1:
        self.device = "cuda"
        print(f"Using {device_count} GPU with cuda config.")
    else:
        self.device = "auto"
        print(f"Using {device_count} GPUs with auto config.")

    self.model, self.tokenizer = self.get_cached_model(model_name, self.device)
    openai.api_key = OPENAI_KEY
|
|
141 |
formatted_chat = self.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
|
142 |
#print(formatted_chat)
|
143 |
inputs = self.tokenizer(formatted_chat, return_tensors="pt", padding=True, truncation=True)
|
144 |
+
|
145 |
+
if self.device == "auto":
|
146 |
+
input_ids = inputs.input_ids
|
147 |
+
attention_mask = inputs.attention_mask
|
148 |
+
else:
|
149 |
+
input_ids = inputs.input_ids.to(self.model.device)
|
150 |
+
attention_mask = inputs.attention_mask.to(self.model.device)
|
151 |
|
152 |
# Generate the sequence of letters starting from 'A'
|
153 |
letters = [chr(ord('A') + i) for i in range(len(choices))] # Create option letters A, B, C, D, E, ...
|
|
|
204 |
)
|
205 |
|
206 |
inputs = self.tokenizer(formatted_chat, return_tensors="pt", padding=True, truncation=True)
|
207 |
+
|
208 |
+
if self.device == "auto":
|
209 |
+
input_ids = inputs.input_ids
|
210 |
+
attention_mask = inputs.attention_mask
|
211 |
+
else:
|
212 |
+
input_ids = inputs.input_ids.to(self.model.device)
|
213 |
+
attention_mask = inputs.attention_mask.to(self.model.device)
|
214 |
|
215 |
output = self.model.generate(
|
216 |
input_ids,
|
src/deepeval/instruction_following_task.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
import datetime
|
2 |
from src.deepeval.base_task import BaseTask
|
3 |
from deepeval.metrics import PromptAlignmentMetric
|
4 |
from deepeval.test_case import LLMTestCase
|
|
|
1 |
+
from datetime import datetime
|
2 |
from src.deepeval.base_task import BaseTask
|
3 |
from deepeval.metrics import PromptAlignmentMetric
|
4 |
from deepeval.test_case import LLMTestCase
|
src/deepeval/toxicity_task.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
import datetime
|
2 |
from src.deepeval.base_task import BaseTask
|
3 |
from deepeval.metrics import ToxicityMetric
|
4 |
from deepeval.test_case import LLMTestCase
|
|
|
1 |
+
from datetime import datetime
|
2 |
from src.deepeval.base_task import BaseTask
|
3 |
from deepeval.metrics import ToxicityMetric
|
4 |
from deepeval.test_case import LLMTestCase
|