# LCG-Demo / app.py — author: phucpx, commit e495a4f ("Add application file"), 4.53 kB
import gradio as gr
import requests
# NOTE(review): hardcoded bearer credential checked into source — move to an
# environment variable / secret store before deploying publicly.
API_KEY = "Prep@123"
# One LCG (Learning Content Generation) service endpoint per deployed model.
LCG_SERVICE_URL_v1 = "http://bore.testsprep.online:8082/v1/theory_lcg"
LCG_SERVICE_URL_v2 = "http://bore.testsprep.online:8081/v1/theory_lcg"
LCG_SERVICE_URL_v3 = "http://bore.testsprep.online:8083/v1/theory_lcg"
LCG_SERVICE_URL_v4 = "http://bore.testsprep.online:8084/v1/theory_lcg"
# Maps the model name shown in the UI dropdown to the service that hosts it.
MODEL2SERVICE = {
    'llama-3.1-sft-awq': LCG_SERVICE_URL_v1,
    'hermes-3-llama3.1-sft-lora': LCG_SERVICE_URL_v2,
    'llama-3-sft-awq': LCG_SERVICE_URL_v4,
    'qwen2-1.5b-full-sft': LCG_SERVICE_URL_v3
}
# Question-difficulty mix per user level: each level weights its own tier at
# 0.6 and the other two at 0.2 each (weights sum to 1.0).
weights_mapping = {
    'beginner': {
        'easy': 0.6,
        'medium': 0.2,
        'hard': 0.2
    },
    'intermediate': {
        'easy': 0.2,
        'medium': 0.6,
        'hard': 0.2
    },
    'advanced': {
        'easy': 0.2,
        'medium': 0.2,
        'hard': 0.6
    }
}
# Choices surfaced in the Gradio dropdowns below.
LIST_USER_LEVEL = ["beginner", "intermediate", "advanced"]
LIST_MODELS = list(MODEL2SERVICE.keys())
# Default text for the "Learning Outcomes" textbox; one outcome per line
# (get_response_message splits this field on newlines).
template_los = """0. Identify the challenges in Matching sentence endings: More endings than questions, Some endings may be grammatically correct but not connected to the main ideas in the text, Information for possible endings is placed randomly in the passage
1. Understand and apply the steps for answering Matching sentence endings questions effectively: Read and underline keywords in incomplete sentences and endings, Scan using keywords in incomplete sentences to locate the information area, and Match incomplete sentences with endings and compare to the information area"""
def get_response_message(config):
    """POST a question-generation request to the LCG service for the chosen model.

    Args:
        config: dict with keys model_name, user_level, num_questions,
            question_type, language, explanation_language, context,
            learning_outcomes (newline-separated string), and mode.

    Returns:
        The ``"data"`` payload of the service response on success, otherwise a
        dict with a user-facing ``"message"`` describing the failure.
    """
    headers = {
        'accept': 'application/json',
        'Authorization': f'Bearer {API_KEY}',
        'Content-Type': 'application/json'
    }
    # Bug fix: the difficulty weights were hard-coded to 0 even though
    # weights_mapping is defined above for exactly this purpose; derive them
    # from the user's level (falling back to zeros for an unknown level).
    weights = weights_mapping.get(config["user_level"],
                                  {"easy": 0, "medium": 0, "hard": 0})
    data = {
        "model": config["model_name"],
        "input_data": {
            "user_level": config["user_level"],
            "num_questions": config["num_questions"],
            "question_type": config["question_type"],
            "language": config["language"],
            "explanation_language": config["explanation_language"],
            "context": config["context"],
            "learning_outcomes": [lo.strip() for lo in config['learning_outcomes'].split('\n')],
            "mode": config["mode"],
            "weights": {
                "easy": weights["easy"],
                "hard": weights["hard"],
                "medium": weights["medium"]
            }
        },
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.9,
        "n": 1,
        "max_tokens": 4096,
        "stop": "string",
        "stream": False
    }
    # The original bare `except:` returned "model not supported" for *every*
    # failure, including network errors. Distinguish the two cases, and bound
    # the request so a stuck service cannot hang the UI forever.
    try:
        url = MODEL2SERVICE[config["model_name"]]
    except KeyError:
        return {"message": f"Hiện tại chúng tôi chưa hỗ trợ mô hình {config['model_name']}."}
    try:
        response = requests.post(url, headers=headers, json=data, timeout=300)
        response.raise_for_status()
        return response.json()["data"]
    except (requests.RequestException, ValueError, KeyError) as exc:
        # RequestException: connection/timeout/HTTP error; ValueError: body is
        # not JSON; KeyError: JSON lacks the expected "data" field.
        return {"message": f"Lỗi khi gọi dịch vụ sinh câu hỏi: {exc}"}
def generate_questions(model_name, user_level, num_questions, question_type, language, explanation_language, context,
                       learning_outcomes, mode):
    """Validate the Gradio inputs and forward them to the LCG service.

    Args mirror the UI widgets one-to-one (see the gr.Interface definition).

    Returns:
        The generated-question payload from get_response_message, or a dict
        with an ``"error"`` key when an input is invalid.
    """
    # Reviewing mode generates questions *about* supplied content, so the
    # context must not be blank. `or ""` guards against a None textbox value.
    if mode == "Reviewing" and not (context or "").strip():
        return {"error": "Với chế độ Reviewing, Context không được để trống."}
    # gr.Number may hand back None or an arbitrary float; the original
    # int(num_questions) raised an uncaught TypeError/ValueError in that case.
    try:
        num_questions = int(num_questions)
    except (TypeError, ValueError):
        return {"error": "Number of Questions phải là một số nguyên dương."}
    if num_questions <= 0:
        return {"error": "Number of Questions phải là một số nguyên dương."}
    config = {
        "model_name": model_name,
        "user_level": user_level,
        "num_questions": num_questions,
        "question_type": question_type,
        "language": language,
        "explanation_language": explanation_language,
        "context": context,
        "learning_outcomes": learning_outcomes,
        "mode": mode
    }
    return get_response_message(config)
# Gradio UI: one widget per generate_questions parameter, in the same order.
iface = gr.Interface(
    fn=generate_questions,
    inputs=[
        gr.Dropdown(LIST_MODELS, label="Model Usage", value=LIST_MODELS[0]),
        gr.Dropdown(LIST_USER_LEVEL, label="User Level", value=LIST_USER_LEVEL[0]),
        gr.Number(value=5, label="Number of Questions"),
        gr.Dropdown(["Short answer", "Single choice", "Multiple choice"], label="Question Type"),
        gr.Dropdown(["English", "Vietnamese"], label="Language"),
        gr.Dropdown(["English", "Vietnamese"], label="Explanation Language"),
        # Required when Mode is "Reviewing" (enforced in generate_questions).
        gr.Textbox(lines=5, placeholder="Enter context here...",
                   label="Context (Lesson content or Reading comprehension passage)"),
        # Pre-filled with template_los; one learning outcome per line.
        gr.Textbox(lines=5, value=template_los, label="Learning Outcomes"),
        gr.Dropdown(["Reviewing", "Practicing"], label="Mode")
    ],
    # Raw service payload (or the error/message dict) rendered as JSON.
    outputs=gr.JSON(label="Generated Questions"),
    title="Learning Content Generation",
    description="Generate questions based on user input and learning outcomes."
)
iface.launch()