# CODEXGAME/backend/ai_evaluator/tinyllama_inference.py

import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

_MODEL_CACHE = None

def load_model():
    # Change the model identifier if needed – it must be a TinyLlama variant
    # available on Hugging Face. Note: "TheBloke/tiny-llama-7b" is not a
    # standard TinyLlama checkpoint (TinyLlama is a 1.1B model).
    # The tokenizer and model are loaded once and cached, since reloading
    # the weights on every evaluation call is slow.
    global _MODEL_CACHE
    if _MODEL_CACHE is None:
        model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        _MODEL_CACHE = (tokenizer, model)
    return _MODEL_CACHE

def evaluate_code(question, code):
    # Construct a prompt for the AI evaluator.
    prompt = f"""You are an expert code evaluator.

Rate the user's solution to the following problem from 0-5 (0 = completely incorrect, 5 = excellent).

Also provide a concise "feedback" message.

Problem: "{question}"

Solution: "{code}"

Return ONLY valid JSON: {{"stars": number, "feedback": string}}

Do not include any extra text outside the JSON.
"""
    tokenizer, model = load_model()
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=150)
    # outputs[0] contains the prompt tokens as well – decode only the newly
    # generated tokens, otherwise the prompt text makes json.loads fail.
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    response_text = tokenizer.decode(generated, skip_special_tokens=True)
    try:
        # The model may still wrap the JSON in extra text; parse the
        # outermost {...} span.
        start = response_text.index("{")
        end = response_text.rindex("}") + 1
        result = json.loads(response_text[start:end])
    except ValueError:
        # .index/.rindex raise ValueError; json.JSONDecodeError subclasses it.
        result = {"stars": 0, "feedback": "Evaluation failed. Unable to parse AI response."}
    return result

# For direct testing from the command line
if __name__ == "__main__":
    import sys
    if len(sys.argv) < 3:
        print(json.dumps({"error": "Please provide a question and code as arguments"}))
        sys.exit(1)
    question = sys.argv[1]
    code = sys.argv[2]
    result = evaluate_code(question, code)
    print(json.dumps(result))
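
# A minimal sketch of invoking this script from the shell. The question and
# solution below are illustrative placeholders, not fixtures from the project:
#
#   python tinyllama_inference.py \
#       "Write a function that reverses a string" \
#       "def reverse(s): return s[::-1]"
#
# On success this prints a JSON object shaped like
# {"stars": <0-5>, "feedback": "<short message>"}; if the model's output
# cannot be parsed, the {"stars": 0, ...} fallback object is printed instead.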