from typing import Tuple
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned IELTS essay-scoring model and its tokenizer once at import time.
model_name = "JacobLinCool/IELTS_essay_scoring_safetensors"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.eval()  # inference only


@torch.no_grad()
def grade_IELTS_essay_scoring(question: str, answer: str) -> Tuple[float, str]:
    # The model scores the essay prompt and the essay text as a single sequence.
    text = f"{question} {answer}"

    inputs = tokenizer(
        text, return_tensors="pt", padding=True, truncation=True, max_length=512
    )

    # Regression head: one raw score per scoring criterion.
    outputs = model(**inputs)
    predictions = outputs.logits.squeeze()

    # Rescale the raw scores onto the 0-9 IELTS band scale (relative to the
    # highest criterion score) and round to the nearest half band.
    predicted_scores = predictions.numpy()
    normalized_scores = (predicted_scores / predicted_scores.max()) * 9
    rounded_scores = np.round(normalized_scores * 2) / 2

    # The five output dimensions correspond to these criteria, in order.
    labels = [
        "Task Achievement",
        "Coherence and Cohesion",
        "Vocabulary",
        "Grammar",
        "Overall",
    ]
    # The final dimension is the overall band score.
    overall_score = float(rounded_scores[-1])

    # Build a per-criterion breakdown as a human-readable string.
    comment = ""
    for label, score in zip(labels, rounded_scores):
        comment += f"{label}: {score}\n"

    return overall_score, comment
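

# Example usage: a minimal sketch of calling grade_IELTS_essay_scoring. The
# sample question and answer below are placeholder text for illustration only.
if __name__ == "__main__":
    sample_question = (
        "Some people think that universities should provide graduates with the "
        "knowledge and skills needed in the workplace. Discuss."
    )
    sample_answer = (
        "In contemporary society, the purpose of university education is widely "
        "debated. Some argue that it should focus on employability, while others "
        "believe it should foster independent thinking."
    )
    overall, breakdown = grade_IELTS_essay_scoring(sample_question, sample_answer)
    print(f"Overall band: {overall}")
    print(breakdown)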