Ahmet Kaan Sever committed
Commit b5edba5 · 1 Parent(s): 340ef03

Removed manually sending tensors to devices


HF handles device placement itself; doing it manually is unnecessary and conflicts when there are multiple GPUs.
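For context: this relies on the model being loaded with a device map (e.g. device_map="auto"), in which case Accelerate attaches dispatch hooks that route each layer's inputs to the right GPU automatically. A minimal sketch under that assumption; the model id is a placeholder, not necessarily what this repo uses:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "gpt2"  # placeholder; the repo configures its own model elsewhere
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.pad_token = tokenizer.eos_token  # mirror the repo's EOS-as-PAD setup

    # device_map="auto" lets Accelerate shard the model across available GPUs
    # and attach hooks that move inputs to whichever device holds each layer.
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

    inputs = tokenizer("Hello", return_tensors="pt")  # tensors stay on CPU
    # No manual .to(model.device): with a sharded model, model.device may name
    # only one of several shards, so an explicit move can fight the hooks.
    output = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=20,
        pad_token_id=tokenizer.eos_token_id,
    )
    print(tokenizer.decode(output[0], skip_special_tokens=True))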

Files changed (1)
  1. src/deepeval/base_task.py +6 -6
src/deepeval/base_task.py CHANGED
@@ -53,8 +53,8 @@ class BaseTask(ABC):
         self.tokenizer.pad_token = self.tokenizer.eos_token # Use EOS token as PAD token
 
         inputs = self.tokenizer(msg, return_tensors="pt", padding=True, truncation=True)
-        input_ids = inputs.input_ids.to(self.model.device)
-        attention_mask = inputs.attention_mask.to(self.model.device)
+        input_ids = inputs.input_ids
+        attention_mask = inputs.attention_mask
 
         if self.model.config.pad_token_id is None:
             self.model.config.pad_token_id = self.tokenizer.eos_token_id
@@ -100,8 +100,8 @@ class BaseTask(ABC):
         formatted_chat = self.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
         #print(formatted_chat)
         inputs = self.tokenizer(formatted_chat, return_tensors="pt", padding=True, truncation=True)
-        input_ids = inputs.input_ids.to(self.model.device)
-        attention_mask = inputs.attention_mask.to(self.model.device)
+        input_ids = inputs.input_ids
+        attention_mask = inputs.attention_mask
 
         # Generate the sequence of letters starting from 'A'
         letters = [chr(ord('A') + i) for i in range(len(choices))] # Create option letters A, B, C, D, E, ...
@@ -158,8 +158,8 @@ class BaseTask(ABC):
         )
 
         inputs = self.tokenizer(formatted_chat, return_tensors="pt", padding=True, truncation=True)
-        input_ids = inputs.input_ids.to(self.model.device)
-        attention_mask = inputs.attention_mask.to(self.model.device)
+        input_ids = inputs.input_ids
+        attention_mask = inputs.attention_mask
 
         output = self.model.generate(
             input_ids,
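Taken together, the post-change flow reduces to something like this hypothetical helper; generation arguments here are illustrative, not taken from the diff:

    def generate_answer(model, tokenizer, formatted_chat: str) -> str:
        # Tokenize on CPU; Accelerate's dispatch hooks (see the note above)
        # move the tensors to the GPU holding the embeddings during generate().
        inputs = tokenizer(formatted_chat, return_tensors="pt", padding=True, truncation=True)
        output = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=128,  # illustrative value
            pad_token_id=tokenizer.eos_token_id,
        )
        return tokenizer.decode(output[0], skip_special_tokens=True)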