nmarafo committed
Commit dbf1e5c (verified)
1 Parent(s): 6a4a949

Update app.py

Files changed (1):
  1. app.py (+17, -14)
app.py CHANGED
@@ -6,31 +6,34 @@ import torch
 # Model and tokenizer configuration
 model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ"
 adapter = "nmarafo/Mistral-7B-Instruct-v0.2-TrueFalse-Feedback-GPTQ"
+
+# Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, return_token_type_ids=False)
 tokenizer.pad_token = tokenizer.eos_token
-model = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name=adapter).cuda()
+model = AutoPeftModelForCausalLM.from_pretrained(adapter, low_cpu_mem_usage=True, return_dict=True, torch_dtype=torch.float16, device_map="cuda")
 
-def generate_response(question, best_answer, student_answer):
+def generate_prompt(question, best_answer, student_answer):
     system_message = "Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect."
     prompt = f"{system_message}\n\nQuestion: {question}\nExpected Answer: {best_answer}\nStudent Answer: {student_answer}"
-    prompt_template = f'<s>[INST] {prompt} [/INST]'
-    input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
+    return prompt
+
+def generate_response(prompt):
+    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.cuda()
     output = model.generate(input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
     response = tokenizer.decode(output[0], skip_special_tokens=True)
     return response
 
-# Create the user interface in Streamlit
+# Create the user interface with Streamlit
 st.title("Evaluador de Respuestas con GPTQ")
 
-# Create the form
-with st.form("evaluation_form"):
-    question = st.text_input("Pregunta", "")
-    best_answer = st.text_input("Mejor Respuesta", "")
-    student_answer = st.text_input("Respuesta del Estudiante", "")
-    # Submit button for the form
-    submitted = st.form_submit_button("Evaluar")
+with st.form(key='eval_form'):
+    question = st.text_input("Pregunta")
+    best_answer = st.text_input("Mejor Respuesta")
+    student_answer = st.text_input("Respuesta del Estudiante")
+    submit_button = st.form_submit_button(label='Evaluar')
 
-if submitted:
-    response = generate_response(question, best_answer, student_answer)
+if submit_button:
+    prompt = generate_prompt(question, best_answer, student_answer)
+    response = generate_response(prompt)
     st.write("Respuesta del Modelo:", response)
 
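
One detail worth noting when reading the diff: the removed prompt_template line wrapped the prompt in Mistral's "<s>[INST] ... [/INST]" markers, while the new generate_response tokenizes the raw prompt as-is. A minimal sketch of how a caller could restore that instruction formatting through the tokenizer's built-in chat template; the example inputs are made up for illustration, and apply_chat_template is the standard transformers API, not something introduced by this commit:

# Hypothetical usage sketch, not part of this commit: build the prompt with
# generate_prompt, then let the tokenizer's chat template re-apply the
# Mistral "[INST] ... [/INST]" wrapping that prompt_template used to add.
prompt = generate_prompt(
    "What causes tides?",                                          # example question
    "The gravitational pull of the Moon and Sun on the oceans.",   # expected answer
    "The Moon's gravity pulls on the ocean.",                      # student answer
)

# tokenize=False returns the formatted string instead of token ids;
# for Mistral-style templates the result ends with [/INST], ready for generation.
chat_prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}],
    tokenize=False,
)

response = generate_response(chat_prompt)

Leaning on the chat template keeps the formatting in sync with whatever template ships with the tokenizer, rather than hard-coding the [INST] markers as the old code did.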