Upload 5 files

- .gitignore        +7  -0
- README.md         +28 -16
- api.py            +22 -0
- app.py            +98 -0
- requirements.txt  +16 -3

.gitignore
ADDED
@@ -0,0 +1,7 @@
+__pycache__/
+.env
+*.pyc
+.DS_Store

README.md
CHANGED
@@ -1,19 +1,31 @@
----
-title: MedSynapticGPT
-emoji: π
-colorFrom: red
-colorTo: red
-sdk: docker
-app_port: 8501
-tags:
-- streamlit
-pinned: false
-short_description: SynestheticGPT – The Multimodal Dream
----

+# MedSynapticGPT – Multimodal Clinical Reasoner

+**Tagline:** *“Read the scan. Hear the patient. Answer like an expert.”*

+MedSynapticGPT brings the creative power of GPT-4o to **clinical imaging, notes, and speech**. Upload a chest X-ray (DICOM/PNG), paste a discharge summary, or record symptoms, and get structured impressions with **SNOMED-CT codes, guideline citations, and treatment suggestions**.

+## Core Modules
+| Tab | What it does | Key APIs |
+| --- | --- | --- |
+| **Radiology AI** | Vision interpretation, abnormality detection, TNM staging | `gpt-4o` vision, `pydicom` |
+| **Clinical Note Q&A** | Summarize or answer free-form questions | `gpt-4o` |
+| **Voice Triage** | Transcribe symptoms, suggest a differential | `whisper-1`, `gpt-4o` |
+| **UMLS Lookup** | Search concepts, synonyms, codes | UMLS REST + caching |
+| **GraphRAG Explorer** | Prototype biomedical KG Q&A | `networkx`, toy graph |

+## Quickstart
+```bash
+git clone <repo>
+cd MedSynapticGPT
+pip install -r requirements.txt
+export OPENAI_API_KEY="sk-…"
+streamlit run app.py
+```
+Deploy on Spaces and add `OPENAI_API_KEY` as a secret. For on-prem use, launch the API with `uvicorn api:app` (a minimal launch sketch follows this file).

+## Monetization
+1. **Freemium** – 3 free studies/day, then Stripe pay-per-usage.
+2. **Pro** ($49/mo) – unlimited studies, PDF reports, HL7 export.
+3. **Enterprise API** – hospital integration, HIPAA BAA.

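Not part of the upload: a minimal on-prem launch sketch for the `uvicorn api:app` step mentioned above, assuming `api.py` sits at the repository root and port 8000 is free (both assumptions). The Streamlit UI itself is started separately with `streamlit run app.py`, as in the Quickstart.

```python
# run_api.py: hypothetical launcher for the FastAPI service (not in this upload).
# Host and port are assumptions; adjust them for your deployment.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("api:app", host="0.0.0.0", port=8000)
```
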
api.py
ADDED
@@ -0,0 +1,22 @@
+"""FastAPI micro-service for enterprise EHR integration."""
+from fastapi import FastAPI, UploadFile, File
+import openai, pydicom, numpy as np, io, os, base64
+from PIL import Image

+app = FastAPI()
+openai.api_key = os.getenv("OPENAI_API_KEY")

+@app.post("/radiology")
+async def radiology(file: UploadFile = File(...)):
+    data = await file.read()
+    if file.filename.endswith(".dcm"):
+        # Window the DICOM pixel data into an 8-bit grayscale image
+        ds = pydicom.dcmread(io.BytesIO(data))
+        arr = ds.pixel_array.astype(np.float32)
+        arr -= arr.min()
+        if arr.max() > 0:
+            arr /= arr.max()
+        img = Image.fromarray((arr * 255).astype(np.uint8))
+    else:
+        img = Image.open(io.BytesIO(data))
+    # The chat completions API has no `images=` argument; the image is sent
+    # as a base64-encoded data URL inside the message content instead.
+    buf = io.BytesIO()
+    img.convert("RGB").save(buf, format="PNG")
+    b64 = base64.b64encode(buf.getvalue()).decode()
+    resp = openai.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[{"role": "user", "content": [
+            {"type": "text", "text": "Analyze this study."},
+            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
+        ]}],
+    )
+    return {"report": resp.choices[0].message.content}

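For reference, a client-side sketch of calling the `/radiology` endpoint defined above; the base URL and the `chest.dcm` sample path are assumptions, not part of the upload. The `python-multipart` entry in requirements.txt is what lets FastAPI parse this multipart upload.

```python
# Hypothetical client for the /radiology endpoint (assumes `uvicorn api:app` on localhost:8000).
import requests

with open("chest.dcm", "rb") as f:  # sample DICOM path; replace with a real study
    resp = requests.post(
        "http://localhost:8000/radiology",
        files={"file": ("chest.dcm", f, "application/dicom")},
        timeout=120,
    )
resp.raise_for_status()
print(resp.json()["report"])
```
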
app.py
ADDED
@@ -0,0 +1,98 @@
+"""MedSynapticGPT – Streamlit UI"""
+import os, io, tempfile, json, base64
+from pathlib import Path
+import streamlit as st
+import openai, pydicom, numpy as np
+from PIL import Image
+from pydub import AudioSegment
+import requests

+openai.api_key = os.getenv("OPENAI_API_KEY")

+st.set_page_config(page_title="MedSynapticGPT", layout="wide")
+st.title("🩺 MedSynapticGPT – Multimodal Clinical Reasoner")

+if "credits" not in st.session_state:
+    st.session_state.credits = 3  # daily free quota

+def charge_credit():
+    st.session_state.credits -= 1
+    if st.session_state.credits < 0:
+        st.error("Free quota reached. Upgrade for more cases.")
+        st.stop()

+# ───────── Helpers ─────────
+@st.cache_data(show_spinner=False)
+def gpt(system, user):
+    resp = openai.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[{"role": "system", "content": system}, {"role": "user", "content": user}],
+        max_tokens=700,
+    )
+    return resp.choices[0].message.content.strip()

+def gpt_vision(system, user, img):
+    # gpt() is text-only, so the study is passed here as a base64 PNG data URL
+    # in the message content; otherwise the model never sees the image.
+    buf = io.BytesIO()
+    img.convert("RGB").save(buf, format="PNG")
+    b64 = base64.b64encode(buf.getvalue()).decode()
+    resp = openai.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {"role": "system", "content": system},
+            {"role": "user", "content": [
+                {"type": "text", "text": user},
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
+            ]},
+        ],
+        max_tokens=700,
+    )
+    return resp.choices[0].message.content.strip()

+@st.cache_data(show_spinner=False)
+def whisper_transcribe(audio_bytes):
+    # Note: the temp file keeps a .wav suffix even for mp3/m4a/ogg uploads.
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
+        tmp.write(audio_bytes)
+        tmp.flush()
+    with open(tmp.name, "rb") as f:
+        return openai.audio.transcriptions.create(model="whisper-1", file=f).text

+@st.cache_data(show_spinner=False)
+def dicom_to_png(ds):
+    arr = ds.pixel_array.astype(np.float32)
+    arr -= arr.min()
+    if arr.max() > 0:  # avoid division by zero on blank studies
+        arr /= arr.max()
+    return Image.fromarray((arr * 255).astype(np.uint8))

+# ───────── Tabs ─────────
+tabs = st.tabs(["Radiology AI", "Clinical Note Q&A", "Voice Triage", "UMLS Lookup", "GraphRAG"])

+# Radiology
+with tabs[0]:
+    st.header("📷 Radiology AI")
+    up = st.file_uploader("Upload DICOM or PNG/JPG", type=["dcm", "png", "jpg", "jpeg"])
+    if up and st.button("Analyze"):
+        charge_credit()
+        if up.name.endswith(".dcm"):
+            ds = pydicom.dcmread(up)
+            img = dicom_to_png(ds)
+        else:
+            img = Image.open(up)
+        st.image(img, caption="Preview", use_column_width=True)
+        report = gpt_vision("You are a board-certified radiologist. Provide an impression with SNOMED codes.",
+                            "Describe the findings and give TNM staging if cancer is present.", img)
+        st.markdown("#### Report")
+        st.write(report)

+# Clinical Note Q&A
+with tabs[1]:
+    st.header("Clinical Note Q&A")
+    txt = st.text_area("Paste clinical note")
+    q = st.text_input("Ask a question (or leave blank for a summary)")
+    if txt and st.button("Process"):
+        charge_credit()
+        prompt = f"Here is the clinical note:\n{txt}\n\nQuestion: {q or 'Summarize'}"
+        ans = gpt("You are an expert physician.", prompt)
+        st.write(ans)

+# Voice Triage
+with tabs[2]:
+    st.header("🎤 Voice Triage")
+    aud = st.file_uploader("Upload patient symptoms recording", type=["wav", "mp3", "m4a", "ogg"])
+    if aud and st.button("Infer"):
+        charge_credit()
+        t = whisper_transcribe(aud.read())
+        st.write("**Transcript:**", t)
+        diff = gpt("You are a triage nurse.", f"Patient says: {t}\nProvide likely differentials with SNOMED codes and an urgency level.")
+        st.write(diff)

+# UMLS Lookup
+with tabs[3]:
+    st.header("UMLS Concept Lookup")
+    term = st.text_input("Term or CUI")
+    if term:
+        # params= URL-encodes the search term; results are re-fetched on every rerun
+        # (a cached variant is sketched after this file).
+        resp = requests.get("https://uts-ws.nlm.nih.gov/rest/search/current",
+                            params={"string": term, "apiKey": "sample"})
+        st.json(resp.json())

+# GraphRAG Explorer (toy demo)
+with tabs[4]:
+    st.header("🧠 GraphRAG Explorer")
+    st.info("Prototype biomedical KG Q&A – coming soon.")

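The README advertises "UMLS REST + caching", but the UMLS tab above re-queries the endpoint on every rerun with a placeholder API key. A cached helper along these lines would match that claim; the `UMLS_API_KEY` environment variable and the one-hour TTL are assumptions, not part of the upload.

```python
# Sketch of a cached UMLS search helper; UMLS_API_KEY is an assumed environment variable.
import os
import requests
import streamlit as st

@st.cache_data(show_spinner=False, ttl=3600)  # reuse identical lookups for an hour
def umls_search(term: str) -> dict:
    resp = requests.get(
        "https://uts-ws.nlm.nih.gov/rest/search/current",
        params={"string": term, "apiKey": os.getenv("UMLS_API_KEY", "")},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()
```
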
requirements.txt
CHANGED
@@ -1,3 +1,16 @@
+streamlit>=1.35.0
+openai>=1.30.0
+pydicom
+Pillow
+numpy
+pydub
+soundfile
+ffmpeg-python
+stripe
+requests
+python-multipart
+fastapi
+uvicorn
+networkx