# mcp/orchestrator.py

from mcp.arxiv import fetch_arxiv
from mcp.pubmed import fetch_pubmed
from mcp.nlp import extract_keywords
from mcp.umls import lookup_umls
from mcp.openfda import fetch_drug_safety
from mcp.openai_utils import ai_summarize, ai_qa

import asyncio

async def orchestrate_search(query: str):
    """Search arXiv and PubMed for a query, enrich the results with UMLS
    concepts and OpenFDA drug-safety data, and return an AI-generated summary."""
    # Fetch from arXiv and PubMed in parallel
    arxiv_task = asyncio.create_task(fetch_arxiv(query))
    pubmed_task = asyncio.create_task(fetch_pubmed(query))
    arxiv_results, pubmed_results = await asyncio.gather(arxiv_task, pubmed_task)
    all_papers = arxiv_results + pubmed_results
    paper_text = " ".join([p['summary'] for p in all_papers])
    keywords = extract_keywords(paper_text)[:8]  # Limit for speed
    # Look up UMLS concepts and OpenFDA drug-safety data; both groups of
    # lookups are gathered together so they actually run in parallel
    umls_tasks = [lookup_umls(k) for k in keywords]
    drug_tasks = [fetch_drug_safety(k) for k in keywords]
    umls_results, drug_data = await asyncio.gather(
        asyncio.gather(*umls_tasks),
        asyncio.gather(*drug_tasks),
    )
    summary = await ai_summarize(paper_text)
    links = [p['link'] for p in all_papers[:3]]
    return {
        "papers": all_papers,
        "umls": umls_results,
        "drug_safety": drug_data,
        "ai_summary": summary,
        "suggested_reading": links,
    }

async def answer_ai_question(question: str, context: str = ""):
    """Answer a follow-up question, optionally grounded in previously retrieved context."""
    answer = await ai_qa(question, context)
    return {"answer": answer}