File size: 1,359 Bytes
3d539ca
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# mcp/orchestrator.py

import asyncio

from mcp.arxiv import fetch_arxiv
from mcp.nlp import extract_keywords
from mcp.openai_utils import ai_qa, ai_summarize
from mcp.openfda import fetch_drug_safety
from mcp.pubmed import fetch_pubmed
from mcp.umls import lookup_umls

async def orchestrate_search(query: str) -> dict:
    """Run the full literature-search pipeline for *query*.

    Fetches papers from arXiv and PubMed, extracts keywords from their
    summaries, enriches keywords via UMLS and openFDA drug-safety lookups,
    and produces an AI-generated summary.

    Args:
        query: Free-text search query.

    Returns:
        dict with keys:
            "papers": combined list of paper dicts (each with at least
                'summary' and 'link' keys — assumed from usage below),
            "umls": UMLS lookup results, one per extracted keyword,
            "drug_safety": openFDA results, one per extracted keyword,
            "ai_summary": AI-generated summary of all paper summaries,
            "suggested_reading": links of the first three papers.
    """
    # The two sources are independent — fetch them concurrently
    # (the original awaited them sequentially despite the comment).
    arxiv_results, pubmed_results = await asyncio.gather(
        fetch_arxiv(query),
        fetch_pubmed(query),
    )
    all_papers = arxiv_results + pubmed_results
    # Semantic ranking (use OpenAI embeddings or similar)
    # ...
    # Build the concatenated-summaries text once; it is reused for both
    # keyword extraction and summarization.
    combined_text = " ".join(p['summary'] for p in all_papers)
    # NLP: extract keywords/drugs
    keywords = extract_keywords(combined_text)
    # UMLS enrichment — per-keyword lookups are independent, run them
    # concurrently. gather() with no awaitables yields [] for no keywords.
    umls_results = list(await asyncio.gather(
        *(lookup_umls(k) for k in keywords)
    ))
    # Drug safety — same concurrent pattern.
    drug_data = list(await asyncio.gather(
        *(fetch_drug_safety(k) for k in keywords)
    ))
    # Summarization
    summary = await ai_summarize(combined_text)
    # Suggest reading (top links)
    links = [p['link'] for p in all_papers[:3]]
    return {
        "papers": all_papers,
        "umls": umls_results,
        "drug_safety": drug_data,
        "ai_summary": summary,
        "suggested_reading": links,
    }

async def answer_ai_question(question: str, context: str = "") -> dict:
    """Answer *question* via the AI Q&A backend, with optional *context*.

    Returns:
        dict with a single "answer" key holding the backend's response.
    """
    return {"answer": await ai_qa(question, context)}