Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -367,39 +367,39 @@ class GradioInterface:
|
|
367 |
)
|
368 |
|
369 |
def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
|
399 |
-
|
400 |
-
|
401 |
-
|
402 |
-
|
403 |
|
404 |
def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
|
405 |
original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
|
|
|
367 |
)
|
368 |
|
369 |
def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
    """Refine *prompt* using the selected meta-prompt strategy.

    Args:
        prompt: Raw user prompt to refine.
        meta_prompt_choice: Name of the meta-prompt strategy to apply.

    Returns:
        A 4-tuple ``(initial_prompt_evaluation, refined_prompt,
        explanation_of_refinements, full_response)`` where the first three
        items are plain strings and ``full_response`` is a JSON-serializable
        dict. On any failure the three strings are empty and the dict
        carries an ``"error"`` key, so the UI always receives displayable
        values instead of an exception.
    """
    def _safe_str(value) -> str:
        # Coerce to str, mapping None/falsy values to "" so every field
        # is guaranteed JSON-serializable and safe to render.
        return str(value) if value else ""

    try:
        input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
        result = self.prompt_refiner.refine_prompt(input_data)

        # Ensure all values are strings (never None) before returning.
        analysis_evaluation = _safe_str(result.initial_prompt_evaluation)
        refined_prompt = _safe_str(result.refined_prompt)
        explanation_refinements = _safe_str(result.explanation_of_refinements)

        # Safe JSON-serializable dictionary mirroring the tuple fields.
        full_response = {
            "initial_prompt_evaluation": analysis_evaluation,
            "refined_prompt": refined_prompt,
            "explanation_of_refinements": explanation_refinements,
            "raw_content": _safe_str(result.raw_content),
        }

        return (
            analysis_evaluation,
            refined_prompt,
            explanation_refinements,
            full_response,
        )
    except Exception as e:
        # Deliberate broad catch: this feeds a Gradio callback, which must
        # always get safe default values rather than a traceback.
        error_response = {
            "error": str(e),
            "initial_prompt_evaluation": "",
            "refined_prompt": "",
            "explanation_of_refinements": "",
            "raw_content": "",
        }
        return "", "", "", error_response
|
403 |
|
404 |
def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
|
405 |
original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
|