import os
import json
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
def load_model():
    model_name = "TheBloke/tiny-llama-7b"
    token = os.environ.get("HF_TOKEN")
    if not token:
        raise ValueError("HF_TOKEN not found in environment variables.")
    # Load the tokenizer and model using the provided token.
    # (`token` replaces the deprecated `use_auth_token` argument.)
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
    return tokenizer, model
# Load the model once at startup
tokenizer, model = load_model()
def evaluate_tinyllama(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=150)
    # generate() returns the prompt tokens followed by the completion,
    # so decode only the newly generated tokens before parsing.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    response_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
    try:
        # Try to parse the model's output as JSON.
        result = json.loads(response_text.strip())
    except json.JSONDecodeError:
        result = {"stars": 0, "feedback": "Evaluation failed. Unable to parse AI response."}
    return result
def evaluate_code(language, question, code):
    if not code.strip():
        return "Error: No code provided. Please enter your solution code."
    # Build a detailed prompt for the AI evaluator.
    prompt = f"""
You are an expert code evaluator.
Rate the following solution on a scale of 0-5 (0 = completely incorrect, 5 = excellent) and provide a concise feedback message.
Language: {language}
Problem: "{question}"
Solution: "{code}"
Return ONLY valid JSON: {{"stars": number, "feedback": string}}.
Do not include any extra text.
"""
    result = evaluate_tinyllama(prompt)
    return f"Stars: {result.get('stars', 0)}\nFeedback: {result.get('feedback', '')}"
iface = gr.Interface(
    fn=evaluate_code,
    inputs=[
        gr.Dropdown(choices=["C", "Python", "Java"], label="Language"),
        gr.Textbox(lines=2, placeholder="Enter the problem question here...", label="Question"),
        gr.Code(language="python", label="Your Code"),
    ],
    outputs=gr.Textbox(label="Evaluation Result"),
    title="Code Evaluator",
    description="Enter a coding question and your solution to get AI-powered feedback. Supports C, Python, and Java.",
)
if __name__ == "__main__":
    iface.launch()