Spaces:
Sleeping
Sleeping
Ahmet Kaan Sever
committed on
Commit
·
1657c25
1
Parent(s):
26c03b8
limited max new tokens to 2
Browse files
src/deepeval/base_task.py
CHANGED
@@ -77,7 +77,7 @@ class BaseTask(ABC):
|
|
77 |
|
78 |
return answer
|
79 |
|
80 |
-
def generate_response_mcqa_multi_token(self, msg, max_new_tokens=
|
81 |
"""
|
82 |
Handles multiple-choice questions where answers might have multiple tokens.
|
83 |
"""
|
|
|
77 |
|
78 |
return answer
|
79 |
|
80 |
+
def generate_response_mcqa_multi_token(self, msg, max_new_tokens=2, choices: list = []):
|
81 |
"""
|
82 |
Handles multiple-choice questions where answers might have multiple tokens.
|
83 |
"""
|
src/deepeval/commonsense_reasoning_task.py
CHANGED
@@ -57,7 +57,7 @@ class CommonsenseReasoningTask(BaseTask):
|
|
57 |
message = prompt
|
58 |
|
59 |
# Get/format answer of the model
|
60 |
-
model_answer = self.generate_response_mcqa_multi_token(message, choices=choices, max_new_tokens=
|
61 |
responses.append(model_answer)
|
62 |
model_answer_cleaned = model_answer.strip().replace('\n', '').replace(' ', '').upper()
|
63 |
|
|
|
57 |
message = prompt
|
58 |
|
59 |
# Get/format answer of the model
|
60 |
+
model_answer = self.generate_response_mcqa_multi_token(message, choices=choices, max_new_tokens=2)
|
61 |
responses.append(model_answer)
|
62 |
model_answer_cleaned = model_answer.strip().replace('\n', '').replace(' ', '').upper()
|
63 |
|
src/deepeval/deepeval_task_manager.py
CHANGED
@@ -133,6 +133,6 @@ class DeepEvalTaskManager:
|
|
133 |
return res
|
134 |
|
135 |
if __name__ == "__main__":
|
136 |
-
des = DeepEvalTaskManager("google/gemma-2-2b-it", ["
|
137 |
res = des.run_tasks()
|
138 |
print(res)
|
|
|
133 |
return res
|
134 |
|
135 |
if __name__ == "__main__":
|
136 |
+
des = DeepEvalTaskManager("google/gemma-2-2b-it", ["TOXICITY", "BIAS"])
|
137 |
res = des.run_tasks()
|
138 |
print(res)
|
src/deepeval/nli.py
CHANGED
@@ -48,7 +48,7 @@ class NLITask(BaseTask):
|
|
48 |
message = prompt
|
49 |
|
50 |
# Get/format answer of the model
|
51 |
-
model_answer = self.generate_response_mcqa_multi_token(message, choices=choices, max_new_tokens=
|
52 |
responses.append(model_answer)
|
53 |
model_answer_cleaned = model_answer.strip().replace('\n', '').replace(' ', '').upper()
|
54 |
|
|
|
48 |
message = prompt
|
49 |
|
50 |
# Get/format answer of the model
|
51 |
+
model_answer = self.generate_response_mcqa_multi_token(message, choices=choices, max_new_tokens=2)
|
52 |
responses.append(model_answer)
|
53 |
model_answer_cleaned = model_answer.strip().replace('\n', '').replace(' ', '').upper()
|
54 |
|