|
|
|
|
|
import asyncio

from mcp.arxiv import fetch_arxiv
from mcp.nlp import extract_keywords
from mcp.openai_utils import ai_summarize, ai_qa
from mcp.openfda import fetch_drug_safety
from mcp.pubmed import fetch_pubmed
from mcp.umls import lookup_umls
|
|
|
async def orchestrate_search(query: str) -> dict:
    """Run a federated literature search and enrichment pipeline for *query*.

    Fetches papers from arXiv and PubMed concurrently, extracts keywords
    from the combined abstracts, enriches each keyword with UMLS concepts
    and openFDA drug-safety data (also concurrently), and produces an
    AI-generated summary of the combined abstracts.

    Args:
        query: Free-text search string passed to both literature sources.

    Returns:
        dict with keys:
            "papers": combined list of paper records from both sources,
            "umls": one UMLS lookup result per extracted keyword,
            "drug_safety": one openFDA result per extracted keyword,
            "ai_summary": AI summary of all paper abstracts,
            "suggested_reading": links of the first three papers.
    """
    # The two literature fetches are independent — run them concurrently
    # instead of awaiting one after the other.
    arxiv_results, pubmed_results = await asyncio.gather(
        fetch_arxiv(query), fetch_pubmed(query)
    )
    all_papers = arxiv_results + pubmed_results

    # Build the combined abstract text once; it is reused for both keyword
    # extraction and summarization. .get() tolerates papers missing a
    # "summary" field instead of raising KeyError.
    combined_text = " ".join(p.get("summary", "") for p in all_papers)

    keywords = extract_keywords(combined_text)

    # Per-keyword enrichment calls are independent of each other — fan
    # them out concurrently rather than awaiting one keyword at a time.
    umls_results, drug_data = await asyncio.gather(
        asyncio.gather(*(lookup_umls(k) for k in keywords)),
        asyncio.gather(*(fetch_drug_safety(k) for k in keywords)),
    )

    summary = await ai_summarize(combined_text)

    # Suggest the first three papers (in source order) as reading links.
    links = [p["link"] for p in all_papers[:3]]

    return {
        "papers": all_papers,
        "umls": umls_results,
        "drug_safety": drug_data,
        "ai_summary": summary,
        "suggested_reading": links,
    }
|
|
|
async def answer_ai_question(question: str, context: str = "") -> dict:
    """Answer *question* via the AI QA helper, using optional *context*.

    Returns a dict with a single "answer" key holding the model's reply.
    """
    response = await ai_qa(question, context)
    return {"answer": response}
|
|