miguelcastroe committed
Commit 100ce82 (verified)
Parent: 374a7f5

Update app.py

Files changed (1)
  1. app.py +64 -101
app.py CHANGED
@@ -32,70 +32,43 @@ def evaluar_prompt(prompt):
         outputs = model.generate(inputs["input_ids"], max_length=150)  # Limit max_length for faster results
         analysis = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-        # Initialize feedback
-        feedback = f"Análisis del prompt '{prompt}':\n\n"
-
         # Initialize variables to store detailed advice
-        clarity_advice = ""
-        logic_advice = ""
-        relevance_advice = ""
-        evidence_advice = ""
+        claridad_consejo = ""
+        logica_consejo = ""
+        relevancia_consejo = ""
+        evidencia_consejo = ""
 
         # Evaluate clarity
         if len(prompt.split()) < 5:
-            feedback += "Clarity: The prompt is too brief and may not be clear. Consider adding more details to clarify your request.\n"
-            clarity_advice = "Your prompt is very short, which can lead to ambiguity. Try expanding it to provide more context or specifics."
-        else:
-            feedback += "Clarity: The prompt is clear and well-defined.\n"
-
+            claridad_consejo = "Tu prompt es muy corto, lo que puede generar ambigüedad. Intenta expandirlo para proporcionar más contexto o detalles específicos."
         # Evaluate logic
-        if any(word in prompt.lower() for word in ["best", "worst", "only"]):
-            feedback += (
-                "Logic: The prompt uses subjective terms like 'best' which could introduce bias. "
-                "Consider rephrasing to improve objectivity.\n"
-            )
-            logic_advice = f"Words like '{', '.join([w for w in ['best', 'worst', 'only'] if w in prompt.lower()])}' suggest a subjective perspective. Rephrase to focus on objective criteria."
-        else:
-            feedback += "Logic: The prompt is logical and avoids subjective terms.\n"
-
+        if any(word in prompt.lower() for word in ["mejor", "peor", "único"]):
+            logica_consejo = f"Palabras como '{', '.join([w for w in ['mejor', 'peor', 'único'] if w in prompt.lower()])}' sugieren una perspectiva subjetiva. Reformula el prompt para enfocarte en criterios objetivos."
         # Evaluate relevance
-        if "talk about" in prompt.lower() or "describe" in prompt.lower():
-            feedback += "Relevance: The prompt is relevant and aligned with the request for information.\n"
-        else:
-            feedback += "Relevance: The relevance of the prompt could be improved to better align with the goal. Ensure your prompt directly addresses the core question or objective.\n"
-            relevance_advice = "Your prompt might not directly address the core issue. Ensure that it is directly relevant to the information or action you seek."
-
+        if not any(word in prompt.lower() for word in ["hablar", "describe", "explica", "analiza"]):
+            relevancia_consejo = "Tu prompt podría no abordar directamente el problema central. Asegúrate de que esté directamente relacionado con la información o acción que buscas."
         # Evaluate evidence request
-        if any(word in prompt.lower() for word in ["evidence", "proof", "demonstrate"]):
-            feedback += "Evidence: The prompt requests evidence, which is positive for a well-founded analysis.\n"
-        else:
-            feedback += (
-                "Evidence: The prompt does not explicitly request evidence, which could limit the depth of analysis. "
-                "Consider asking for evidence to support the response.\n"
-            )
-            evidence_advice = "Asking for evidence strengthens your prompt by grounding it in verifiable facts. Consider explicitly requesting evidence or examples."
+        if not any(word in prompt.lower() for word in ["evidencia", "pruebas", "demuestra"]):
+            evidencia_consejo = "Solicitar evidencia fortalece tu prompt al basarlo en hechos verificables. Considera pedir explícitamente evidencia o ejemplos."
 
-        # Summary of the evaluation
-        final_score = 65  # Example score
-
-        feedback += f"\n**Final Score:** {final_score}\n"
-        feedback += "\n### Suggestions for Improvement:\n"
+        # Compile suggestions
+        sugerencias = "### Sugerencias para Mejorar:\n"
 
         # Provide specific advice based on the issues identified
-        if clarity_advice:
-            feedback += f"\n* **Clarity:** {clarity_advice}"
-        if logic_advice:
-            feedback += f"\n* **Logic:** {logic_advice}"
-        if relevance_advice:
-            feedback += f"\n* **Relevance:** {relevance_advice}"
-        if evidence_advice:
-            feedback += f"\n* **Evidence:** {evidence_advice}"
+        if claridad_consejo:
+            sugerencias += f"\n* **Claridad:** {claridad_consejo}"
+        if logica_consejo:
+            sugerencias += f"\n* **Lógica:** {logica_consejo}"
+        if relevancia_consejo:
+            sugerencias += f"\n* **Relevancia:** {relevancia_consejo}"
+        if evidencia_consejo:
+            sugerencias += f"\n* **Evidencia:** {evidencia_consejo}"
 
         # If no advice is necessary, indicate that the prompt is strong
-        if not any([clarity_advice, logic_advice, relevance_advice, evidence_advice]):
-            feedback += "\nYour prompt is well-constructed with no major areas for improvement."
+        if not any([claridad_consejo, logica_consejo, relevancia_consejo, evidencia_consejo]):
+            sugerencias += "\nTu prompt está bien construido y no presenta áreas importantes para mejorar."
 
-        return feedback, final_score
+        return sugerencias, 65  # Returning only the suggestions and an arbitrary score
 
     except Exception as e:
         return str(e), "Error"
@@ -103,96 +76,86 @@ def evaluar_prompt(prompt):
 def interfaz():
     with gr.Blocks(css="""
     body {
-        font-family: Arial, sans-serif;
-        background-color: #f4f4f4;
+        font-family: 'Helvetica Neue', Arial, sans-serif;
+        background-color: #f7f7f7;
         color: #333333;
         margin: 0;
         padding: 0;
     }
     .container {
-        max-width: 1200px;
+        max-width: 800px;
         margin: 0 auto;
-        padding: 20px;
+        padding: 40px;
         display: flex;
         flex-direction: column;
         align-items: center;
+        background-color: #ffffff;
+        border-radius: 8px;
+        box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
     }
     .hero-section {
-        background-color: white;
         text-align: center;
-        padding: 50px 0;
-        width: 100%;
+        margin-bottom: 40px;
     }
     .hero-section h1 {
-        font-size: 48px;
+        font-size: 36px;
         font-weight: bold;
         color: #007BFF;
-        margin: 0;
+        margin-bottom: 10px;
     }
     .hero-section p {
-        font-size: 20px;
+        font-size: 18px;
         color: #666;
-        margin: 20px 0 0;
+        margin-top: 0;
     }
     .evaluation-section {
-        background-color: #333333;
-        color: white;
-        padding: 50px;
         width: 100%;
         display: flex;
-        justify-content: space-between;
-        align-items: center.
+        flex-direction: column;
+        gap: 20px;
     }
     .evaluation-section h2 {
-        font-size: 36px;
+        font-size: 28px;
         font-weight: bold;
-        margin-bottom: 20px.
-    }
-    .evaluation-section .left,
-    .evaluation-section .right {
-        width: 45%.
+        color: #333;
+        margin-bottom: 10px;
     }
-    .evaluation-section .left p,
-    .evaluation-section .right p {
+    .evaluation-section .gr-textbox,
+    .evaluation-section .gr-number {
+        width: 100%;
+        padding: 12px;
         font-size: 16px;
-        line-height: 1.6.
-    }
-    .evaluation-section .right {
-        text-align: left.
+        border: 1px solid #ddd;
+        border-radius: 4px;
+        box-sizing: border-box;
+        transition: border-color 0.2s;
     }
-    .evaluation-section .button-container {
-        margin-top: 20px.
+    .evaluation-section .gr-textbox:focus,
+    .evaluation-section .gr-number:focus {
+        border-color: #007BFF;
+        outline: none;
     }
-    .evaluation-section .button-container .gr-button {
+    .evaluation-section .gr-button {
         background-color: #007BFF;
         color: white;
-        padding: 15px 30px;
-        font-size: 18px;
+        padding: 12px 20px;
+        font-size: 16px;
+        font-weight: bold;
         border: none;
-        cursor: pointer.
-    }
-    .evaluation-section .button-container .gr-button:hover {
-        background-color: #0056b3.
+        border-radius: 4px;
+        cursor: pointer;
+        transition: background-color 0.2s;
+        text-align: center;
     }
-    .evaluation-section .gr-textbox,
-    .evaluation-section .gr-number {
-        width: 100%;
-        padding: 15px;
-        font-size: 16px;
-        margin-top: 10px;
-        box-sizing: border-box.
+    .evaluation-section .gr-button:hover {
+        background-color: #0056b3;
     }
     """) as demo:
         with gr.Row(elem_id="hero-section"):
-            gr.Markdown("<h1>Alfred</h1>", elem_classes="hero-section")
-            gr.Markdown("<p>Research & Development phase.</p>", elem_classes="hero-section")
+            gr.Markdown("<div class='container'><h1>Evaluación de Prompts</h1><p>Ingrese su prompt a continuación para recibir sugerencias de mejora basadas en principios de pensamiento crítico.</p></div>", elem_classes="hero-section")
 
         with gr.Row(elem_id="evaluation-section"):
            with gr.Column(elem_id="left"):
-                gr.Markdown("<h2>Análisis del Prompt</h2>", elem_classes="evaluation-section")
-                gr.Markdown("<p>Ingrese su prompt a continuación para recibir una evaluación detallada basada en principios de pensamiento crítico.</p>", elem_classes="evaluation-section")
-
-            with gr.Column(elem_id="right"):
                prompt_input = gr.Textbox(label="Escribe tu prompt aquí:", placeholder="Escribe tu prompt...", elem_classes="gr-textbox")
                feedback_output = gr.Textbox(label="Retroalimentación:", interactive=False, elem_classes="gr-textbox")
                calificacion_output = gr.Number(label="Calificación Final:", interactive=False, elem_classes="gr-number")
 
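Note: these hunks change only the evaluation logic and the CSS/layout; the event wiring that connects the components to evaluar_prompt is outside this diff. A minimal sketch of that wiring, assuming a standard gr.Button click handler (the button, its label, and the demo.launch() call are assumptions, not shown in this commit):

import gradio as gr

def evaluar_prompt(prompt):
    # Stub standing in for the function modified in this commit;
    # the real version returns (sugerencias, score).
    return "### Sugerencias para Mejorar:\n", 65

with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label="Escribe tu prompt aquí:", placeholder="Escribe tu prompt...")
    feedback_output = gr.Textbox(label="Retroalimentación:", interactive=False)
    calificacion_output = gr.Number(label="Calificación Final:", interactive=False)
    evaluar_btn = gr.Button("Evaluar")  # assumed button; not part of this diff

    # The two outputs match the two return values of evaluar_prompt.
    evaluar_btn.click(fn=evaluar_prompt, inputs=prompt_input, outputs=[feedback_output, calificacion_output])

demo.launch()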