|
import os |
|
import json |
|
from typing import Dict, List, Optional |
|
import base64 |
|
from PIL import Image |
|
import PyPDF2 |
|
import docx |
|
import pytesseract |
|
import gradio as gr |
|
from huggingface_hub import InferenceClient |
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Prompt templates and static configuration for the synthesis engine.
# ---------------------------------------------------------------------------

# System prompt for the very first synthesis of a session ("Genesis"): the
# model must answer with exactly one JSON object in the schema embedded below
# (summary, synthesis_log, nodes[], edges[]).
GENESIS_PROMPT = """

You are Synapsera, a hyper-advanced Thought Synthesis Engine. Your primary function is to transform unstructured human thought artifacts into a structured, machine-readable, and evolving cognitive model. Your output MUST be a single, valid JSON object, and nothing else.



A user has submitted their first set of artifacts. Your task is to perform a **Genesis Synthesis**. Analyze the provided text and image descriptions to create the foundational "Semantic Thought Graph".



The JSON object MUST conform to this exact structure:

{

  "summary": "A concise, one-sentence summary of the core thought model.",

  "synthesis_log": "Initial synthesis complete. The foundational thought model has been established based on the provided artifacts.",

  "nodes": [

    {

      "id": "Concept1",

      "label": "User's Goal",

      "type": "Concept",

      "content": "The user wants to build a medical application. This likely involves features for patient data management, appointment scheduling, and possibly telemedicine.",

      "importance": 0.9,

      "mirror_analysis": "The initial concept is broad. Key assumptions include the need for HIPAA compliance, the availability of medical APIs, and a clear target user (patients, doctors, or both). The main bias might be underestimating the complexity of medical software regulations."

    }

  ],

  "edges": []

}

"""

# System prompt for every later synthesis round: the model receives the
# current graph JSON plus new artifacts and must return the merged graph,
# rewriting 'synthesis_log' to describe the delta.
EVOLUTION_PROMPT = """

You are Synapsera, a Thought Synthesis Engine. Your task is to EVOLVE an existing cognitive model with new information. You will be given the CURRENT thought model as a JSON object and a new set of 'thought artifacts'.



Your task is to perform an **Evolutionary Synthesis**. Merge the new artifacts into the existing model by adding new nodes, adding new edges, or updating the 'content' and 'importance' of existing nodes.



You MUST respond with ONLY a single, valid, updated JSON object. Update the 'synthesis_log' to describe the changes (e.g., 'Merged market report. Added 2 Data nodes, which challenged Belief3.').

"""

# Persona analysis prompt; {persona_name} and {persona_directive} are filled
# from AI_PERSONAS via str.format in apply_persona_analysis.
PERSONA_PROMPT_TEMPLATE = """

You are Synapsera, but you are currently embodying the persona of **{persona_name}**.



**Your Persona Directive:** {persona_directive}



You have been given a user's complete 'Semantic Thought Graph' as a JSON object. Provide a deep, insightful analysis of the entire model from your persona's unique perspective. Your response should be a concise, hard-hitting, and actionable analysis in Markdown format.

"""

# Persona display name -> behavioural directive injected into the template.
AI_PERSONAS = {

    "The Critic": "Ruthlessly identify logical fallacies, cognitive biases, weak assumptions, and internal contradictions.",

    "The Visionary": "Identify emergent patterns, latent potential, and high-impact 'what if' scenarios.",

    "The Pragmatist": "Ground the model in reality. Identify actionable steps and resource requirements.",

}

# Chat models selectable in the UI: 'name' is the display label, 'id' the
# Hugging Face Hub repo id passed to InferenceClient.chat_completion.
AVAILABLE_MODELS = [

    { "name": "Mixtral 8x22B Instruct", "id": "mistralai/Mixtral-8x22B-Instruct-v0.1" },

    { "name": "Llama 3 70B Instruct", "id": "meta-llama/Meta-Llama-3-70B-Instruct" },

]
|
|
|
|
|
# Hugging Face Inference API client. HF_TOKEN may be unset (None), in which
# case the client runs unauthenticated and is heavily rate-limited.
HF_TOKEN = os.getenv('HF_TOKEN')

client = InferenceClient(token=HF_TOKEN)
|
|
|
|
|
def extract_text_from_file(file: Optional[gr.File]) -> str:
    """Extract plain text from an uploaded PDF, DOCX, or text-like file.

    Returns "" when no file is given, an "Unsupported file type" notice for
    unknown extensions, and a human-readable error string on read failure —
    this function never raises, so the synthesis pipeline keeps going.
    """
    if not file:
        return ""
    # gr.File(type="filepath") hands the handler a plain string path, while
    # file-object values expose the path via .name — accept both.
    file_path = file if isinstance(file, str) else file.name
    # BUG FIX: os.path.splitext returns a (root, ext) tuple; the original
    # called .lower() on that tuple, raising AttributeError *outside* the
    # try block below, so every upload crashed the handler.
    _, ext = os.path.splitext(file_path)
    ext = ext.lower()
    try:
        if ext == ".pdf":
            with open(file_path, "rb") as f:
                reader = PyPDF2.PdfReader(f)
                return "\n".join(p.extract_text() or "" for p in reader.pages)
        elif ext == ".docx":
            doc = docx.Document(file_path)
            return "\n".join(p.text for p in doc.paragraphs)
        elif ext in [".txt", ".md", ".csv"]:
            with open(file_path, "r", encoding="utf-8") as f:
                return f.read()
        return f"Unsupported file type: {ext}"
    except Exception as e:
        return f"Error reading file '{os.path.basename(file_path)}': {e}"
|
|
|
def extract_text_from_image(image: Optional[Image.Image]) -> str:
    """OCR an uploaded image with Tesseract and return the recognised text."""
    if not image:
        return ""
    try:
        # Probe the Tesseract binary first, and OCR inside the same try, so
        # a missing/broken install becomes a friendly message rather than a
        # traceback bubbling up into the Gradio handler.
        pytesseract.get_tesseract_version()
        recognised = pytesseract.image_to_string(image)
    except Exception:
        return "OCR Error: Tesseract may not be installed or configured correctly. The image was not processed."
    return f"--- OCR Text from Image ---\n{recognised}"
|
|
|
|
|
def render_mermaid_graph(graph_data: Optional[Dict] = None):
    """Render a Semantic Thought Graph dict as a fenced Mermaid ``graph TD``.

    Falls back to a placeholder diagram when no graph (or an empty node
    list) is supplied. Hyphens are stripped from node ids because Mermaid
    does not accept them in identifiers.
    """
    if not graph_data or not graph_data.get('nodes'):
        return "```mermaid\ngraph TD\n    A[Cognitive Canvas Awaiting Genesis Synthesis...]\n```"

    parts = ["```mermaid\ngraph TD\n"]

    # Declare every node as:  id["label (type)"]
    for node in graph_data['nodes']:
        safe_id = node['id'].replace('-', '')
        parts.append(f"    {safe_id}[\"{node['label']} ({node['type']})\"]\n")

    # Then the labelled edges:  src -->|"label"| dst
    for edge in graph_data.get('edges', []):
        src = edge['source'].replace('-', '')
        dst = edge['target'].replace('-', '')
        parts.append(f"    {src} -->|\"{edge['label']}\"| {dst}\n")

    parts.append("```")
    return "".join(parts)
|
|
|
def synthesize_and_evolve(
    text_input: str, image_input: Optional[Image.Image], file_input: Optional[gr.File],
    session_graph_data: Optional[Dict], model_info: Dict,
    progress=gr.Progress(track_tqdm=True)
):
    """Run one synthesis round and return values for the five wired outputs.

    Bundles the raw text, OCR text from the image, and extracted document
    text into one artifact string, then asks the selected model either to
    create a brand-new graph (genesis) or to evolve the existing one.

    Returns a 5-tuple for (session_graph_state, synthesis_log_output,
    mermaid_graph_output, node_selector, focus_panel); the error path
    returns the same shape so Gradio's output wiring always matches.
    """
    progress(0, desc="Gathering Artifacts...")

    artifacts_context = text_input

    if image_input: artifacts_context += f"\n\n{extract_text_from_image(image_input)}"

    if file_input: artifacts_context += f"\n\n--- DOCUMENT ARTIFACT ---\n{extract_text_from_file(file_input)}"

    # Genesis when there is no prior model (or it lacks a 'nodes' key).
    is_genesis = not session_graph_data or 'nodes' not in session_graph_data

    system_prompt = GENESIS_PROMPT if is_genesis else EVOLUTION_PROMPT

    # NOTE(review): on genesis this still embeds json.dumps(None) as the
    # "current model"; harmless, but it could be skipped for that case.
    user_content_text = f"Current Model (for evolution only):\n{json.dumps(session_graph_data)}\n\nNew Artifacts to Synthesize:\n{artifacts_context}"

    messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_content_text}]

    progress(0.5, desc=f"Synthesizing with {model_info['name']}...")

    try:
        # response_format asks the endpoint for a strict-JSON reply.
        response = client.chat_completion(model=model_info["id"], messages=messages, max_tokens=8192, response_format={"type": "json_object"})

        raw_json = response.choices[0].message.content

        new_graph_data = json.loads(raw_json)

        if 'nodes' not in new_graph_data: raise ValueError("Invalid JSON structure.")

    except Exception as e:
        # raw_json is unbound when the request itself failed — hence the
        # locals() guard. The old graph is kept untouched on any error.
        error_msg = f"Synthesis Error: {e}\nModel Response:\n{raw_json if 'raw_json' in locals() else 'No response.'}"

        return session_graph_data, error_msg, gr.update(), gr.update(choices=[], value=None), gr.update(visible=False)

    progress(0.9, desc="Rendering Canvas...")

    # Node labels double as the focus dropdown's choices.
    node_choices = [node['label'] for node in new_graph_data['nodes']]

    return (
        new_graph_data,

        f"**Synthesis Log:** {new_graph_data.get('synthesis_log', 'Evolution complete.')}",

        render_mermaid_graph(new_graph_data),

        gr.update(choices=node_choices, value=None),

        gr.update(visible=False)
    )
|
|
|
def show_node_details(graph_data: Dict, selected_node_label: str):
    """Populate the Focus panel for the node whose label was selected.

    Always returns a 4-tuple matching the handler's wired outputs:
    (focus_panel visibility, focus_title, focus_content, focus_analysis).
    """
    # BUG FIX: the original early return yielded a single gr.update() while
    # node_selector.change() is wired to four outputs, so Gradio reported a
    # missing-output-values error whenever the selection was cleared.
    if not graph_data or not selected_node_label:
        return gr.update(visible=False), "", "", ""

    node = next((n for n in graph_data['nodes'] if n['label'] == selected_node_label), None)
    if node:
        return gr.update(visible=True), node.get('label'), node.get('content'), node.get('mirror_analysis')
    return gr.update(visible=False), "", "", ""
|
|
|
def apply_persona_analysis(graph_data: Dict, persona_name: str, model_info: Dict):
    """Ask the selected model to critique the whole graph as one persona.

    Returns the persona's Markdown analysis, or a human-readable failure
    message (never raises into the Gradio handler).
    """
    if not graph_data:
        return "Please synthesize a graph first."

    system_prompt = PERSONA_PROMPT_TEMPLATE.format(
        persona_name=persona_name,
        persona_directive=AI_PERSONAS[persona_name],
    )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"Analyze this thought model:\n\n{json.dumps(graph_data)}"},
    ]

    try:
        response = client.chat_completion(model=model_info["id"], messages=messages, max_tokens=2048)
        return response.choices[0].message.content
    except Exception as e:
        return f"Persona analysis failed: {e}"
|
|
|
|
|
# ---------------------------------------------------------------------------
# UI definition: left column = inputs / personas / session controls,
# right column = synthesis log, graph canvas, and node focus panel.
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple"), title="Synapsera") as demo:

    # Per-browser-session state: the current graph dict and selected model.
    session_graph_state = gr.State(None)

    default_model_info = AVAILABLE_MODELS[0]

    current_model_state = gr.State(default_model_info)

    gr.Markdown("# 🧠 Synapsera: The Universal Cognitive Canvas")

    with gr.Row(equal_height=False):

        with gr.Column(scale=1, min_width=450):
            gr.Markdown("### 🚀 Control Deck")
            # 1) Artifact inputs feeding synthesize_and_evolve.
            with gr.Accordion("1. Cognitive Input Mesh", open=True):
                text_input = gr.Textbox(lines=5, label="Raw Thoughts & Notes", placeholder="I want to build a medical app")
                with gr.Row():
                    image_input = gr.Image(type="pil", label="Upload Diagram/Sketch", sources=['upload'])
                    # NOTE(review): type="filepath" delivers a str path to the
                    # handler, not a file object — confirm against the
                    # installed Gradio version.
                    file_input = gr.File(label="Upload Document", type="filepath")
                synthesize_btn = gr.Button("Synthesize & Evolve Thought", variant="primary")

            # 2) One button per persona; all share apply_persona_analysis.
            with gr.Accordion("2. AI Persona Analysis", open=True):
                persona_output = gr.Markdown("Summon a Persona to get a critical, visionary, or pragmatic analysis of your entire model.")
                with gr.Row():
                    critic_btn = gr.Button("The Critic")
                    visionary_btn = gr.Button("The Visionary")
                    pragmatist_btn = gr.Button("The Pragmatist")

            # 3) Model picker + full session reset.
            with gr.Accordion("3. Session Controls", open=False):
                model_dropdown = gr.Dropdown(choices=[m['name'] for m in AVAILABLE_MODELS], value=default_model_info['name'], label="Cognitive Core")
                clear_btn = gr.Button("⚠️ Clear Entire Session")

        with gr.Column(scale=2):
            with gr.Accordion("📜 Synthesis Log", open=True):
                synthesis_log_output = gr.Markdown("Synthesis Log standing by... The log will record the evolution of your thought model.")

            # Canvas shows the placeholder diagram until the first synthesis.
            with gr.Accordion("🎨 Cognitive Canvas (Mermaid Graph)", open=True):
                mermaid_graph_output = gr.Markdown(render_mermaid_graph())

            # Node drill-down; the panel stays hidden until a node is chosen.
            with gr.Accordion("🔬 Focus & Compose", open=True):
                gr.Markdown("Select a node from the dropdown to analyze it in detail.")
                node_selector = gr.Dropdown(label="Select Node to Focus On", choices=[])
                with gr.Group(visible=False) as focus_panel:
                    focus_title = gr.Textbox(label="Node", interactive=False)
                    focus_content = gr.Textbox(label="Content", lines=3, interactive=False)
                    focus_analysis = gr.Textbox(label="🪞 Mirror Mode Analysis", lines=3, interactive=False)
|
def update_model_state(model_name: str): |
|
for model in AVAILABLE_MODELS: |
|
if model['name'] == model_name: return model |
|
return AVAILABLE_MODELS[0] |
|
|
|
    # Keep current_model_state in sync with the dropdown selection.
    model_dropdown.change(update_model_state, inputs=model_dropdown, outputs=current_model_state)

    # Main pipeline: artifacts + current graph -> evolved graph + UI refresh.
    synthesize_btn.click(
        synthesize_and_evolve,
        inputs=[text_input, image_input, file_input, session_graph_state, current_model_state],
        outputs=[session_graph_state, synthesis_log_output, mermaid_graph_output, node_selector, focus_panel]
    )

    # Populate the focus panel whenever a node is picked from the dropdown.
    node_selector.change(
        show_node_details,
        inputs=[session_graph_state, node_selector],
        outputs=[focus_panel, focus_title, focus_content, focus_analysis]
    )

    # The three persona buttons share one handler; the persona name is
    # injected as a constant gr.State input.
    critic_btn.click(apply_persona_analysis, inputs=[session_graph_state, gr.State("The Critic"), current_model_state], outputs=persona_output)

    visionary_btn.click(apply_persona_analysis, inputs=[session_graph_state, gr.State("The Visionary"), current_model_state], outputs=persona_output)

    pragmatist_btn.click(apply_persona_analysis, inputs=[session_graph_state, gr.State("The Pragmatist"), current_model_state], outputs=persona_output)
|
|
|
def clear_all(): |
|
return None, "Session cleared. Synthesis Log standing by...", render_mermaid_graph(), gr.update(choices=[], value=None), False, "", None, None |
|
|
|
clear_btn.click(clear_all, outputs=[session_graph_state, synthesis_log_output, mermaid_graph_output, node_selector, focus_panel, text_input, image_input, file_input]) |
|
|
|
|
|
if __name__ == "__main__":
    # Queue requests so long-running syntheses don't block one another,
    # then start the dev server with debug logging enabled.
    app = demo.queue()
    app.launch(debug=True)