baconnier committed on
Commit
08814a4
1 Parent(s): f760ad3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -16
app.py CHANGED
@@ -307,12 +307,12 @@ class GradioInterface:
307
  outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
308
  )
309
 
310
- # In the __init__ method, modify the click event:
311
  apply_button.click(
312
  fn=self.apply_prompts,
313
  inputs=[prompt_text, refined_prompt, apply_model],
314
  outputs=[original_output, refined_output],
315
- queue=True # Enable queuing for streaming
316
  )
317
 
318
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
@@ -329,21 +329,18 @@ class GradioInterface:
329
  )
330
 
331
  def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
332
- async def stream_generator():
333
- original_output = ""
334
- refined_output = ""
 
 
335
 
336
- # Stream original prompt
337
- for response in self.prompt_refiner.apply_prompt(original_prompt, model):
338
- original_output = response
339
- yield original_output, refined_output
340
-
341
- # Stream refined prompt
342
- for response in self.prompt_refiner.apply_prompt(refined_prompt, model):
343
- refined_output = response
344
- yield original_output, refined_output
345
-
346
- return stream_generator()
347
 
348
 
349
 
 
307
  outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
308
  )
309
 
310
+ # In the __init__ method of GradioInterface class:
311
  apply_button.click(
312
  fn=self.apply_prompts,
313
  inputs=[prompt_text, refined_prompt, apply_model],
314
  outputs=[original_output, refined_output],
315
+ api_name="apply_prompts" # Optional: adds API endpoint
316
  )
317
 
318
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
 
329
  )
330
 
331
  def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
332
+ try:
333
+ # Process original prompt
334
+ original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
335
+ # Process refined prompt
336
+ refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
337
 
338
+ # Return both outputs directly
339
+ return original_output, refined_output
340
+
341
+ except Exception as e:
342
+ # Return error messages for both outputs in case of failure
343
+ return f"Error: {str(e)}", f"Error: {str(e)}"
 
 
 
 
 
344
 
345
 
346