baconnier committed on
Commit
681acf9
1 Parent(s): 56e3500

Update app.py

Files changed (1)
  1. app.py +54 -48
app.py CHANGED
@@ -104,43 +104,42 @@ class GradioInterface:
     def __init__(self, prompt_refiner: PromptRefiner):
         self.prompt_refiner = prompt_refiner
 
-        # Define custom CSS for containers and to override input styling
+        # Define custom CSS for containers with blue theme
         custom_css = """
         .container {
-            border: 2px solid var(--primary-500);
+            border: 2px solid #2196F3; /* Blue border */
             border-radius: 10px;
             padding: 20px;
             margin: 15px;
-            background: var(--background-fill-primary);
-            box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+            background: white;
+            box-shadow: 0 4px 6px rgba(33, 150, 243, 0.1);
             position: relative;
         }
 
-        /* Add container titles */
-        .title-container::before { content: 'PROMPT++'; }
-        .input-container::before { content: 'PROMPT REFINEMENT'; }
-        .analysis-container::before { content: 'ANALYSIS & REFINEMENT'; }
-        .model-container::before { content: 'MODEL APPLICATION'; }
-        .results-container::before { content: 'RESULTS'; }
-        .examples-container::before { content: 'EXAMPLES'; }
-
-        /* Style for container titles */
+        /* Container titles */
         .container::before {
            position: absolute;
            top: -12px;
            left: 20px;
-            background: var(--background-fill-primary);
+            background: white;
            padding: 0 10px;
-            color: var(--primary-500);
+            color: #2196F3;
            font-weight: bold;
            font-size: 1.2em;
         }
 
-        /* Override input styling for output elements */
-        .output-text {
-            border: none !important;
-            background: transparent !important;
+        /* Remove default Gradio input styling */
+        .remove-gradio-styles {
+            border: 1px solid #ddd !important;
+            background: white !important;
         }
+
+        .title-container::before { content: 'PROMPT++'; }
+        .input-container::before { content: 'PROMPT REFINEMENT'; }
+        .analysis-container::before { content: 'ANALYSIS & REFINEMENT'; }
+        .model-container::before { content: 'MODEL APPLICATION'; }
+        .results-container::before { content: 'RESULTS'; }
+        .examples-container::before { content: 'EXAMPLES'; }
         """
 
         with gr.Blocks(css=custom_css) as self.interface:
@@ -152,26 +151,31 @@ class GradioInterface:
 
             # Input Container
             with gr.Column(elem_classes=["container", "input-container"]):
-                with gr.Row():
-                    prompt_text = gr.Textbox(label="Type the prompt (or let it empty to see metaprompt)")
+                prompt_text = gr.Textbox(
+                    label="Type the prompt (or let it empty to see metaprompt)",
+                    elem_classes="remove-gradio-styles"
+                )
                 with gr.Accordion("Meta Prompt explanation", open=False):
                     gr.Markdown(explanation_markdown)
-                with gr.Row():
-                    meta_prompt_choice = gr.Radio(
-                        ["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"],
-                        label="Choose Meta Prompt",
-                        value="star"
-                    )
-                    refine_button = gr.Button("Refine Prompt")
+                meta_prompt_choice = gr.Radio(
+                    ["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"],
+                    label="Choose Meta Prompt",
+                    value="star",
+                    elem_classes="remove-gradio-styles"
+                )
+                refine_button = gr.Button("Refine Prompt")
 
             # Analysis Container
             with gr.Column(elem_classes=["container", "analysis-container"]):
                 gr.Markdown("### Initial prompt analysis")
-                analysis_evaluation = gr.Markdown() # Removed label
+                analysis_evaluation = gr.Markdown()
                 gr.Markdown("### Refined Prompt")
-                refined_prompt = gr.Textbox(interactive=False, elem_classes="output-text") # Added elem_classes
+                refined_prompt = gr.Textbox(
+                    interactive=False,
+                    elem_classes="remove-gradio-styles"
+                )
                 gr.Markdown("### Explanation of Refinements")
-                explanation_of_refinements = gr.Markdown() # Removed label
+                explanation_of_refinements = gr.Markdown()
 
                 with gr.Accordion("Full Response JSON", open=False, visible=False):
                     full_response_json = gr.JSON()
@@ -179,28 +183,28 @@ class GradioInterface:
             # Model Application Container
             with gr.Column(elem_classes=["container", "model-container"]):
                 gr.Markdown("## See MetaPrompt Impact")
-                with gr.Row():
-                    apply_model = gr.Dropdown(
-                        [
-                            "Qwen/Qwen2.5-72B-Instruct",
-                            "meta-llama/Meta-Llama-3-70B-Instruct",
-                            "meta-llama/Llama-3.1-8B-Instruct",
-                            "NousResearch/Hermes-3-Llama-3.1-8B",
-                            "HuggingFaceH4/zephyr-7b-alpha",
-                            "meta-llama/Llama-2-7b-chat-hf",
-                            "microsoft/Phi-3.5-mini-instruct"
-                        ],
-                        value="meta-llama/Meta-Llama-3-70B-Instruct",
-                        label="Choose the Model to apply to the prompts (the one you will used)"
-                    )
-                    apply_button = gr.Button("Apply MetaPrompt")
+                apply_model = gr.Dropdown(
+                    [
+                        "Qwen/Qwen2.5-72B-Instruct",
+                        "meta-llama/Meta-Llama-3-70B-Instruct",
+                        "meta-llama/Llama-3.1-8B-Instruct",
+                        "NousResearch/Hermes-3-Llama-3.1-8B",
+                        "HuggingFaceH4/zephyr-7b-alpha",
+                        "meta-llama/Llama-2-7b-chat-hf",
+                        "microsoft/Phi-3.5-mini-instruct"
+                    ],
+                    value="meta-llama/Meta-Llama-3-70B-Instruct",
+                    label="Choose the Model to apply to the prompts (the one you will used)",
+                    elem_classes="remove-gradio-styles"
+                )
+                apply_button = gr.Button("Apply MetaPrompt")
 
             # Results Container
             with gr.Column(elem_classes=["container", "results-container"]):
                 with gr.Tab("Original Prompt Output"):
-                    original_output = gr.Markdown() # Removed label
+                    original_output = gr.Markdown()
                 with gr.Tab("Refined Prompt Output"):
-                    refined_output = gr.Markdown() # Removed label
+                    refined_output = gr.Markdown()
 
             # Examples Container
             with gr.Column(elem_classes=["container", "examples-container"]):
@@ -221,6 +225,8 @@ class GradioInterface:
                 inputs=[prompt_text, meta_prompt_choice]
             )
 
+    # Rest of the class methods remain the same...
+
     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
         input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
         result = self.prompt_refiner.refine_prompt(input_data)
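Note on how the classes introduced in this commit are used: gr.Blocks(css=custom_css) injects the stylesheet, and elem_classes attaches the container / remove-gradio-styles classes to individual components so the blue borders and the ::before section titles render around each block. The following minimal, runnable sketch illustrates that interaction; the trimmed-down CSS, the echo_refine placeholder, and the .click() wiring are illustrative assumptions and are not part of this commit (the diff is truncated before the real event handlers).

import gradio as gr

# Assumption: a trimmed-down version of the commit's CSS, for demonstration only.
demo_css = """
.container { border: 2px solid #2196F3; border-radius: 10px; padding: 20px;
             margin: 15px; background: white; position: relative; }
.container::before { position: absolute; top: -12px; left: 20px; background: white;
                     padding: 0 10px; color: #2196F3; font-weight: bold; }
.input-container::before { content: 'PROMPT REFINEMENT'; }
.remove-gradio-styles { border: 1px solid #ddd !important; background: white !important; }
"""

def echo_refine(prompt: str, meta_prompt_choice: str) -> str:
    # Stand-in for PromptRefiner.refine_prompt(); the real app returns the refined prompt here.
    return f"[{meta_prompt_choice}] {prompt}"

with gr.Blocks(css=demo_css) as demo:
    # elem_classes ties each component's wrapper to the CSS classes defined above.
    with gr.Column(elem_classes=["container", "input-container"]):
        prompt_text = gr.Textbox(label="Prompt", elem_classes="remove-gradio-styles")
        meta_prompt_choice = gr.Radio(["star", "done"], value="star", label="Meta Prompt")
        refine_button = gr.Button("Refine Prompt")
    refined_prompt = gr.Textbox(interactive=False, elem_classes="remove-gradio-styles")

    # Hypothetical event wiring; in the real app this would call self.refine_prompt.
    refine_button.click(fn=echo_refine,
                        inputs=[prompt_text, meta_prompt_choice],
                        outputs=refined_prompt)

if __name__ == "__main__":
    demo.launch()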