Applied LCEL chain for analyzers.
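This change replaces the hand-rolled response parsing in the two analyzer nodes (stripping ```json / ``` fences and then calling json.loads) with an LCEL chain that pipes each node's prompt template into its LLM and into JsonOutputParser. A minimal sketch of the pattern outside the graph — the prompt text, model choice, and input keys below are placeholders, not taken from this repository:

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI  # placeholder model; any chat model works

# Illustrative prompt; in the repo the real templates live in self.prompt_templates[...]
prompt = ChatPromptTemplate.from_messages([
    ("system", "Compare the two outputs against the expectation and answer as JSON "
               "with keys 'analysis' and 'closerOutputID'."),
    ("human", "Expected:\n{expected_output}\n\nOutput A:\n{best_output}\n\nOutput B:\n{output}"),
])
llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model name

# prompt | llm | parser: JsonOutputParser turns the model's text (including JSON
# wrapped in markdown fences) into a Python dict, so no manual stripping is needed.
chain = prompt | llm | JsonOutputParser()

analysis_dict = chain.invoke({
    "expected_output": "42",
    "best_output": "41",
    "output": "42",
})
closer_output_id = analysis_dict["closerOutputID"]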
meta_prompt/meta_prompt.py  CHANGED  (+14 -24)
@@ -7,6 +7,7 @@ from langgraph.checkpoint.memory import MemorySaver
 from langgraph.errors import GraphRecursionError
 from langgraph.graph import StateGraph, START, END
 from langchain_core.runnables.base import RunnableLike
+from langchain_core.output_parsers import JsonOutputParser
 from pydantic import BaseModel
 from typing import Annotated, Dict, Optional, Union, TypedDict
 from .consts import *
@@ -448,24 +449,17 @@ class MetaPromptGraph:
             'message': message.content
         })

-
-
+        chain = (
+            self.prompt_templates[NODE_OUTPUT_HISTORY_ANALYZER] | self.llms[NODE_OUTPUT_HISTORY_ANALYZER] | JsonOutputParser()
+        )
+        analysis_dict = chain.invoke(state)

         logger.debug({
             'node': NODE_OUTPUT_HISTORY_ANALYZER,
             'action': 'response',
-            '
-            'message': response.content
+            'message': json.dumps(analysis_dict)
         })

-        response_content = response.content.strip()
-        if response_content.startswith('```json') and response_content.endswith('```'):
-            response_content = response_content[7:-3].strip()
-        elif response_content.startswith('```') and response_content.endswith('```'):
-            response_content = response_content[3:-3].strip()
-        analysis_dict = json.loads(response_content)
-
-        analysis = analysis_dict["analysis"]
         closer_output_id = analysis_dict["closerOutputID"]

         if (state["best_output"] is None or
@@ -515,24 +509,20 @@ class MetaPromptGraph:
             'message': message.content
         })

-
+        chain = (
+            self.prompt_templates[NODE_PROMPT_ANALYZER] | self.llms[NODE_PROMPT_ANALYZER] | JsonOutputParser()
+        )
+        result = chain.invoke(state)
+
         logger.debug({
             'node': NODE_PROMPT_ANALYZER,
             'action': 'response',
-            '
-            'message': response.content
+            'message': json.dumps(result)
         })

-        response_content = response.content.strip()
-        if response_content.startswith('```json') and response_content.endswith('```'):
-            response_content = response_content[7:-3].strip()
-        elif response_content.startswith('```') and response_content.endswith('```'):
-            response_content = response_content[3:-3].strip()
-        analysis_dict = json.loads(response_content)
-
         result_dict = {
-            "analysis":
-            "accepted":
+            "analysis": json.dumps(result),
+            "accepted": result["Accept"] == "Yes"
         }
         logger.debug("Accepted: %s", result_dict["accepted"])

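For reference, the deleted branches in both nodes amounted to the helper sketched below; JsonOutputParser performs the same markdown-fence handling on its own, which is also why the debug logging now serializes the parsed dict with json.dumps(...) instead of logging response.content. One behavioral note visible in the diff: the old code pulled analysis_dict["analysis"] into a local variable, while the new code keeps only the closerOutputID lookup and the Accept check, storing the full parsed result as a JSON string in result_dict["analysis"].

import json

def parse_fenced_json(text: str) -> dict:
    # Reproduction of the removed parsing logic: strip an optional
    # ```json ... ``` or ``` ... ``` wrapper, then parse the payload.
    text = text.strip()
    if text.startswith('```json') and text.endswith('```'):
        text = text[7:-3].strip()
    elif text.startswith('```') and text.endswith('```'):
        text = text[3:-3].strip()
    return json.loads(text)

print(parse_fenced_json('```json\n{"Accept": "Yes"}\n```'))  # -> {'Accept': 'Yes'}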