yxmnjxzx committed on
Commit
05fe86b
·
verified ·
1 Parent(s): aaf8578

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -6
app.py CHANGED
@@ -367,15 +367,22 @@ class GradioInterface:
367
  )
368
 
369
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
 
370
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
371
- # Get the RefinementOutput object
372
  result = self.prompt_refiner.refine_prompt(input_data)
373
 
374
- # Now properly extract values from the RefinementOutput object
375
- analysis_evaluation = f"\n\n{result.initial_prompt_evaluation}" if result.initial_prompt_evaluation else ""
376
- refined_prompt = result.refined_prompt if result.refined_prompt else ""
377
- explanation_refinements = result.explanation_of_refinements if result.explanation_of_refinements else ""
378
- full_response = result.raw_content if result.raw_content else {}
 
 
 
 
 
 
 
379
 
380
  return (
381
  analysis_evaluation,
@@ -383,6 +390,16 @@ class GradioInterface:
383
  explanation_refinements,
384
  full_response
385
  )
 
 
 
 
 
 
 
 
 
 
386
 
387
  def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
388
  original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
 
367
  )
368
 
369
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
370
+ try:
371
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
 
372
  result = self.prompt_refiner.refine_prompt(input_data)
373
 
374
+ # Ensure all values are strings or None
375
+ analysis_evaluation = str(result.initial_prompt_evaluation) if result.initial_prompt_evaluation else ""
376
+ refined_prompt = str(result.refined_prompt) if result.refined_prompt else ""
377
+ explanation_refinements = str(result.explanation_of_refinements) if result.explanation_of_refinements else ""
378
+
379
+ # Create a safe JSON-serializable dictionary
380
+ full_response = {
381
+ "initial_prompt_evaluation": analysis_evaluation,
382
+ "refined_prompt": refined_prompt,
383
+ "explanation_of_refinements": explanation_refinements,
384
+ "raw_content": str(result.raw_content) if result.raw_content else ""
385
+ }
386
 
387
  return (
388
  analysis_evaluation,
 
390
  explanation_refinements,
391
  full_response
392
  )
393
+ except Exception as e:
394
+ # Return safe default values in case of any error
395
+ error_response = {
396
+ "error": str(e),
397
+ "initial_prompt_evaluation": "",
398
+ "refined_prompt": "",
399
+ "explanation_of_refinements": "",
400
+ "raw_content": ""
401
+ }
402
+ return "", "", "", error_response
403
 
404
  def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
405
  original_output = self.prompt_refiner.apply_prompt(original_prompt, model)