"""Prompt templates for the Key Issue identification pipeline (Cypher generation, document grading, planning, summarization, and structured Key Issue output)."""
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from .schemas import KeyIssue  # Pydantic model describing one structured Key Issue
# --- Cypher Generation ---
# Text-to-Cypher prompt; template variables: {schema}, {question}.
CYPHER_GENERATION_TEMPLATE = """Task: Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Limit the number of elements retrieved to 10.
Schema:
{schema}
Note: Do not include explanations or apologies. Respond only with the Cypher statement.
Do not respond to questions unrelated to Cypher generation.
The question is:
{question}"""
# Compiled prompt; input variables are inferred from the template ({schema}, {question}).
CYPHER_GENERATION_PROMPT = PromptTemplate.from_template(CYPHER_GENERATION_TEMPLATE)
# --- Concept Selection (for 'guided' cypher gen) ---
# Asks the LLM to pick exactly one concept name; template variables: {concepts}, {question}.
CONCEPT_SELECTION_TEMPLATE = """Task: Select the most relevant Concept from the list below for the user's question.
Instructions:
Output ONLY the name of the single most relevant concept. No explanations.
Concepts:
{concepts}
User Question:
{question}"""
# Compiled prompt for concept selection ({concepts}, {question}).
CONCEPT_SELECTION_PROMPT = PromptTemplate.from_template(CONCEPT_SELECTION_TEMPLATE)
# --- Document Relevance Grading ---
# System prompt for a coarse yes/no relevance gate over retrieved documents.
BINARY_GRADER_TEMPLATE = """Assess the relevance of the retrieved document to the user question.
Goal is to filter out clearly erroneous retrievals.
If the document contains keywords or semantic meaning related to the question, grade as relevant.
Output 'yes' or 'no'."""
# Chat prompt pairing the binary-grader system message with the document/question
# human message; template variables: {document}, {question}.
BINARY_GRADER_PROMPT = ChatPromptTemplate.from_messages([
    ("system", BINARY_GRADER_TEMPLATE),
    ("human", "Retrieved document:\n\n{document}\n\nUser question: {question}"),
])
# System prompt for a continuous [0, 1] relevance score with rationale-first output.
SCORE_GRADER_TEMPLATE = """Analyze the query and the document. Quantify the relevance.
Provide rationale before the score.
Output a score between 0 (irrelevant) and 1 (completely relevant)."""
# Chat prompt for scored grading; template variables: {document}, {query}.
# NOTE(review): this uses {query} while the binary grader uses {question} — confirm callers.
SCORE_GRADER_PROMPT = ChatPromptTemplate.from_messages([
    ("system", SCORE_GRADER_TEMPLATE),
    ("human", "Passage:\n\n{document}\n\nUser query: {query}"),
])
# --- Planning ---
# System prompt that asks the LLM to generate a fresh step-by-step plan;
# the plan must start with 'Plan:' and terminate with the '<END_OF_PLAN>' sentinel.
PLAN_GENERATION_TEMPLATE = """You are a standardization expert planning to identify NEW and INNOVATIVE Key Issues related to a technical requirement.
Devise a concise, step-by-step plan to achieve this.
Consider steps like: Understanding the core problem, Researching existing standards/innovations, Identifying potential gaps/challenges, Formulating Key Issues, and Refining/Detailing them.
Output the plan starting with 'Plan:' and numbering each step. End the plan with '<END_OF_PLAN>'."""
# System prompt that asks the LLM to adapt a fixed generic plan template instead of
# generating one from scratch; same 'Plan:' / '<END_OF_PLAN>' output contract as above.
PLAN_MODIFICATION_TEMPLATE = """You are a standardization expert planning to identify NEW and INNOVATIVE Key Issues related to a technical requirement.
Adapt the following generic plan template to the specific requirement. Keep it concise.
### PLAN TEMPLATE ###
Plan:
1. **Understand Core Requirement**: Analyze the user query to define the scope.
2. **Gather Context**: Retrieve relevant specifications, standards, and recent research papers.
3. **Identify Gaps & Challenges**: Based on context, brainstorm potential new issues and challenges.
4. **Formulate Key Issues**: Structure the findings into distinct Key Issues.
5. **Refine & Detail**: Elaborate on each Key Issue, outlining specific challenges.
<END_OF_PLAN>
### END OF PLAN TEMPLATE ###
Output the adapted plan starting with 'Plan:' and numbering each step. End with '<END_OF_PLAN>'."""
# --- Document Processing ---
# Short technical summarization prompt; template variable: {document}.
SUMMARIZER_TEMPLATE = """You are a 3GPP standardization expert.
Summarize the key information in the provided document in 2 or 3 simple technical English sentences, relevant to identifying potential Key Issues. Focus on challenges, gaps, or novel aspects.
Document:
{document}"""
# Compiled chat prompt for document summarization ({document}).
SUMMARIZER_PROMPT = ChatPromptTemplate.from_template(SUMMARIZER_TEMPLATE)
# --- Key Issue Structuring (New) ---
# Guides the LLM to output a JSON list of objects matching the KeyIssue Pydantic
# model (see .schemas). This is a plain string, NOT an f-string: the original
# f-prefix had no interpolations and only forced quadruple-brace escaping.
# Literal braces in the JSON example are doubled ('{{' / '}}') so that
# ChatPromptTemplate renders them as single braces; {user_query} and {context}
# remain the template's input variables.
KEY_ISSUE_STRUCTURING_TEMPLATE = """Based on the provided context (summaries of relevant documents, research findings, etc.), identify and formulate distinct Key Issues related to the original user query.
For each Key Issue identified, provide the following information in the exact JSON format described below. Output a JSON list containing multiple KeyIssue objects.
JSON Schema for each Key Issue object:
[{{
"id": "Sequential integer ID starting from 1",
"title": "Concise title for the key issue (max 15 words)",
"description": "Detailed description of the key issue (2-4 sentences)",
"challenges": ["List of specific challenges related to this issue (strings)", "Each challenge as a separate string"],
"potential_impact": "Brief description of the potential impact if not addressed (optional, max 30 words)"
}}]
User Query: {user_query}
Context: {context}
Generate the JSON list of Key Issues based *only* on the provided context and user query. Ensure the output is a valid JSON list.
"""
# Compiled chat prompt for structured Key Issue output ({user_query}, {context}).
KEY_ISSUE_STRUCTURING_PROMPT = ChatPromptTemplate.from_template(KEY_ISSUE_STRUCTURING_TEMPLATE)
# --- Initial Prompt Selection ---
def get_initial_planner_prompt(plan_method: str, user_query: str) -> ChatPromptTemplate:
    """Build the initial planner chat prompt for the chosen planning strategy.

    Args:
        plan_method: Either "generation" (plan from scratch) or "modification"
            (adapt the generic plan template).
        user_query: The raw user query, inserted as the human message.

    Returns:
        A ChatPromptTemplate with the selected system template and the query.

    Raises:
        ValueError: If plan_method is not "generation" or "modification".
    """
    if plan_method == "generation":
        template = PLAN_GENERATION_TEMPLATE
    elif plan_method == "modification":
        template = PLAN_MODIFICATION_TEMPLATE
    else:
        raise ValueError(
            f"Invalid plan_method: {plan_method!r} (expected 'generation' or 'modification')"
        )
    # Escape braces so a user query containing '{' or '}' is treated as literal
    # text rather than parsed by the template engine as input variables.
    escaped_query = user_query.replace("{", "{{").replace("}", "}}")
    # Return as ChatPromptTemplate for consistency
    return ChatPromptTemplate.from_messages([
        ("system", template),
        ("human", escaped_query),
    ])