mgbam committed on
Commit 90abc98 · verified · 1 Parent(s): e173f95

Update mcp/openai_utils.py

Files changed (1):
  1. mcp/openai_utils.py +64 -27
mcp/openai_utils.py CHANGED
@@ -1,32 +1,69 @@
- # mcp/openai_utils.py
 
  import openai
- import os
 
- openai.api_key = os.environ.get("OPENAI_API_KEY")
 
- async def ai_summarize(text: str, prompt: str = None):
      if not prompt:
-         prompt = "Summarize these biomedical search results, highlighting key findings and future research directions:"
-     client = openai.AsyncOpenAI(api_key=openai.api_key)
-     response = await client.chat.completions.create(
-         model="gpt-4o",
-         messages=[
-             {"role": "system", "content": "You are an expert biomedical research assistant."},
-             {"role": "user", "content": f"{prompt}\n{text}"}
-         ],
-         max_tokens=350
-     )
-     return response.choices[0].message.content
-
- async def ai_qa(question: str, context: str = ""):
-     client = openai.AsyncOpenAI(api_key=openai.api_key)
-     response = await client.chat.completions.create(
-         model="gpt-4o",
-         messages=[
-             {"role": "system", "content": "You are an advanced biomedical research agent."},
-             {"role": "user", "content": f"Question: {question}\nContext: {context}"}
-         ],
-         max_tokens=350
-     )
-     return response.choices[0].message.content
+ #!/usr/bin/env python3
+ """MedGenesis – OpenAI async helpers (summary + QA).
+
+ Changes vs. legacy version
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Centralised **`_client()`** getter with singleton cache (avoids TLS overhead).
+ * Exponential back-off retry (2 s, then 4 s) for transient API errors.
+ * Supports model override (`model="gpt-4o-mini"`, etc.).
+ * Allows temperature & max_tokens tuning via kwargs.
+ * Returns *str* (content) directly; orchestrator wraps if needed.
+ """
+ from __future__ import annotations
+
+ import os, asyncio, functools
+
  import openai
+
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ if not openai.api_key:
+     raise RuntimeError("OPENAI_API_KEY not set in environment")
+
+ # ---------------------------------------------------------------------
+ # Internal client helper (cached)
+ # ---------------------------------------------------------------------
+ @functools.lru_cache(maxsize=1)
+ def _client() -> openai.AsyncOpenAI:
+     return openai.AsyncOpenAI(api_key=openai.api_key)
+
+
+ async def _chat(messages: list[dict[str, str]], *, model: str, max_tokens: int, temperature: float = 0.2, retries: int = 3) -> str:
+     delay = 2
+     for attempt in range(retries):
+         try:
+             resp = await _client().chat.completions.create(
+                 model=model,
+                 messages=messages,
+                 max_tokens=max_tokens,
+                 temperature=temperature,
+             )
+             return resp.choices[0].message.content.strip()
+         except openai.OpenAIError:
+             if attempt == retries - 1:  # last attempt: surface the error
+                 raise
+             await asyncio.sleep(delay)
+             delay *= 2
+     return "[OpenAI request failed]"  # unreachable safety net
+
+ # ---------------------------------------------------------------------
+ # Public helpers
+ # ---------------------------------------------------------------------
+ async def ai_summarize(text: str, *, prompt: str | None = None, model: str = "gpt-4o", max_tokens: int = 350) -> str:
+     """LLM summariser tuned for biomedical search blobs."""
      if not prompt:
+         prompt = (
+             "Summarize the following biomedical search results. Highlight key findings, "
+             "significant genes/drugs/trials, and suggest future research directions."
+         )
+     system = {"role": "system", "content": "You are an expert biomedical research assistant."}
+     user = {"role": "user", "content": f"{prompt}\n\n{text}"}
+     return await _chat([system, user], model=model, max_tokens=max_tokens)
+
+
+ async def ai_qa(question: str, *, context: str = "", model: str = "gpt-4o", max_tokens: int = 350) -> str:
+     """One-shot QA against provided *context*."""
+     system = {"role": "system", "content": "You are an advanced biomedical research agent."}
+     user = {"role": "user", "content": f"Answer the question using the given context.\n\nQuestion: {question}\nContext: {context}"}
+     return await _chat([system, user], model=model, max_tokens=max_tokens)
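
For quick verification of the new keyword-only signatures, a minimal usage sketch (illustrative only, not part of the commit; it assumes the module is importable as mcp.openai_utils per the diff header, that OPENAI_API_KEY is exported, and the input string is a placeholder):

import asyncio

from mcp.openai_utils import ai_qa, ai_summarize  # import path assumed from the diff header


async def main() -> None:
    results = "...biomedical search results text..."  # placeholder input
    # Defaults: gpt-4o and the built-in summary prompt.
    summary = await ai_summarize(results, max_tokens=200)
    # Per-call model override; context is keyword-only in the new signatures.
    answer = await ai_qa("Which genes are implicated?", context=results, model="gpt-4o-mini")
    print(summary)
    print(answer)


if __name__ == "__main__":
    asyncio.run(main())

Both public helpers funnel through _chat, so retry and temperature behaviour stays consistent across call sites.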