# EssayScoring/model/IELTS_essay_scoring.py
from typing import Tuple

import numpy as np
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "JacobLinCool/IELTS_essay_scoring_safetensors"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


@torch.no_grad()
def grade_IELTS_essay_scoring(question: str, answer: str) -> Tuple[float, str]:
    # Concatenate the essay prompt and the candidate's answer into one sequence.
    text = f"{question} {answer}"
    inputs = tokenizer(
        text, return_tensors="pt", padding=True, truncation=True, max_length=512
    )

    # The classification head emits one raw score per criterion.
    outputs = model(**inputs)
    predictions = outputs.logits.squeeze()
    predicted_scores = predictions.numpy()

    # Scale the raw scores onto the 0-9 IELTS band range and round to half bands.
    normalized_scores = (predicted_scores / predicted_scores.max()) * 9
    rounded_scores = np.round(normalized_scores * 2) / 2

    labels = [
        "Task Achievement",
        "Coherence and Cohesion",
        "Vocabulary",
        "Grammar",
        "Overall",
    ]

    # The last entry is the overall band; the preceding entries are per-criterion scores.
    overall_score = float(rounded_scores[-1])
    comment = ""
    for label, score in zip(labels, rounded_scores):
        comment += f"{label}: {score}\n"

    return overall_score, comment
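

# A minimal usage sketch, not part of the original module: the question and
# answer strings below are hypothetical placeholders used only to show how
# grade_IELTS_essay_scoring is called and what it returns.
if __name__ == "__main__":
    sample_question = "Some people think that universities should provide graduates with the knowledge and skills needed in the workplace. Discuss."
    sample_answer = "In my opinion, universities should balance academic knowledge with practical skills..."

    overall, band_breakdown = grade_IELTS_essay_scoring(sample_question, sample_answer)
    print(f"Overall band: {overall}")
    print(band_breakdown)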