from typing import Tuple

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from .common import Grader

# Load the scoring model and tokenizer once at import time so every
# grade() call reuses them.
model_name = "KevSun/Engessay_grading_ML"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


class Engessay_grading_ML(Grader):
    def info(self) -> str:
        return "[KevSun/Engessay_grading_ML](https://huggingface.co/KevSun/Engessay_grading_ML)"

    def grade(self, question: str, answer: str) -> Tuple[float, str]:
        # Score the question and answer as a single essay-like text.
        text = f"{question} {answer}"
        inputs = tokenizer(text, return_tensors="pt", truncation=True)
        with torch.no_grad():  # inference only; also lets .numpy() work below
            outputs = model(**inputs)
        predictions = outputs.logits.squeeze()
        predicted_scores = predictions.numpy()

        # Map the six raw trait predictions onto the scoring scale and
        # round each to the nearest half point.
        scaled_scores = 2.25 * predicted_scores - 1.25
        rounded_scores = [round(score * 2) / 2 for score in scaled_scores]

        labels = [
            "cohesion",
            "syntax",
            "vocabulary",
            "phraseology",
            "grammar",
            "conventions",
        ]

        # Overall score is the mean of the trait scores, also rounded to 0.5.
        overall_score = round(sum(rounded_scores) / len(rounded_scores) * 2) / 2
        comment = ""
        for label, score in zip(labels, rounded_scores):
            comment += f"{label}: {score}\n"
        return overall_score, comment
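

# A minimal usage sketch, not part of the original module. It assumes this
# file sits in a package next to common.py (so the relative Grader import
# resolves) and is run via `python -m <package>.<module>`. The question and
# answer strings are illustrative placeholders.
if __name__ == "__main__":
    grader = Engessay_grading_ML()
    score, comment = grader.grade(
        "Describe a challenge you overcame.",
        "Last year I trained for a marathon despite an injury...",
    )
    print(f"overall: {score}")
    print(comment)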