mgbam committed on
Commit
0d1ed61
·
verified ·
1 Parent(s): 859cb1f

Upload 5 files

Browse files
Files changed (5) hide show
  1. .gitignore +7 -0
  2. README.md +28 -16
  3. api.py +22 -0
  4. app.py +98 -0
  5. requirements.txt +16 -3
.gitignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ .env
3
+ *.pyc
4
+ .DS_Store
5
+
6
+ # ---------------------------------------------------------------------
7
+ # End of MedSynapticGPT codebase
README.md CHANGED
@@ -1,19 +1,31 @@
1
- ---
2
- title: MedSynapticGPT
3
- emoji: πŸš€
4
- colorFrom: red
5
- colorTo: red
6
- sdk: docker
7
- app_port: 8501
8
- tags:
9
- - streamlit
10
- pinned: false
11
- short_description: SynestheticGPT – The Multimodal Dream
12
- ---
13
 
14
- # Welcome to Streamlit!
15
 
16
- Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
17
 
18
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
19
- forums](https://discuss.streamlit.io).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MedSynapticGPT – Multimodal Clinical Reasoner
 
 
 
 
 
 
 
 
 
 
 
2
 
3
+ **Tagline:** *β€œRead the scan. Hear the patient. Answer like an expert.”*
4
 
5
+ MedSynapticGPT brings the creative power of GPT‑4o to **clinical imaging, notes, and speech**. Upload a chest X‑ray (DICOM/PNG), paste a discharge summary, or record symptomsβ€”get structured impressions with **SNOMED‑CT codes, guideline citations, and treatment suggestions**.
6
 
7
+ ## πŸš‘ Core Modules
8
+ | Tab | What it does | Key APIs |
9
+ | --- | --- | --- |
10
+ | **Radiology AI** | Vision interpretation, abnormality detection, TNM staging | `gpt‑4o vision`, `pydicom` |
11
+ | **Clinical Note Q&A** | Summarize or answer free‑form questions | `gpt‑4o` |
12
+ | **Voice Triage** | Transcribe symptoms, suggest differential | `whisper‑1`, `gpt‑4o` |
13
+ | **UMLS Lookup** | Search concepts, synonyms, codes | UMLS REST + caching |
14
+ | **GraphRAG Explorer** | Prototype biomedical KG Q&A | `networkx`, toy graph |
15
+
16
+ ## Quickstart
17
+ ```bash
18
+ git clone <repo>
19
+ cd MedSynapticGPT
20
+ pip install -r requirements.txt
21
+ export OPENAI_API_KEY="sk-..."
22
+ streamlit run app.py
23
+ ```
24
+ Deploy on Spaces β†’ add secrets. For on‑prem, launch `uvicorn api:app`.
25
+
26
+ ## Monetization
27
+ 1. **Freemium 3 studies/day** β†’ Stripe pay‑per‑usage.
28
+ 2. **Pro** ($49/mo) – unlimited, PDF reports, HL7 export.
29
+ 3. **Enterprise API** – hospital integration, HIPAA BAA.
30
+
31
+ ---------------------------------------------------------------------
api.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""FastAPI micro-service for enterprise EHR integration."""
import base64
import io
import os

import numpy as np
import openai
import pydicom
from fastapi import FastAPI, File, UploadFile
from PIL import Image

app = FastAPI()
openai.api_key = os.getenv("OPENAI_API_KEY")


def _to_png_bytes(data: bytes, filename: str) -> bytes:
    """Convert an uploaded study (DICOM or standard image) to PNG bytes."""
    if filename.endswith(".dcm"):
        ds = pydicom.dcmread(io.BytesIO(data))
        arr = ds.pixel_array.astype(np.float32)
        lo, hi = arr.min(), arr.max()
        # Window pixel values to 0-255; guard uniform images against /0.
        if hi > lo:
            arr = (arr - lo) / (hi - lo) * 255.0
        else:
            arr = np.zeros_like(arr)
        img = Image.fromarray(arr.astype(np.uint8))
    else:
        img = Image.open(io.BytesIO(data)).convert("RGB")
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return buf.getvalue()


@app.post("/radiology")
async def radiology(file: UploadFile = File(...)):
    """Analyze an uploaded imaging study and return a GPT-generated report.

    Accepts DICOM (.dcm) or standard image uploads. The pixels are rendered
    to PNG and passed to the vision model inside the message content as a
    base64 data URL — the chat completions API has no ``images=`` parameter,
    so the original call could never have worked.
    """
    data = await file.read()
    png = _to_png_bytes(data, file.filename or "")
    b64 = base64.b64encode(png).decode("ascii")
    resp = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": "Analyze this study."},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/png;base64,{b64}"}},
            ],
        }],
    )
    return {"report": resp.choices[0].message.content}
app.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""MedSynapticGPT – Streamlit UI"""
import os, io, tempfile, json, base64
from pathlib import Path
import streamlit as st
import openai, pydicom, numpy as np
from PIL import Image
from pydub import AudioSegment
import requests

# API key is injected via environment (Spaces secret / local .env).
openai.api_key = os.getenv("OPENAI_API_KEY")

st.set_page_config(page_title="MedSynapticGPT", layout="wide")
st.title("🩺 MedSynapticGPT – Multimodal Clinical Reasoner")

# Seed the per-session free quota exactly once per browser session.
if "credits" not in st.session_state:
    st.session_state.credits = 3  # daily free quota
18
def charge_credit():
    """Consume one free-tier credit; halt this run once the quota is spent."""
    remaining = st.session_state.credits - 1
    st.session_state.credits = remaining
    if remaining < 0:
        st.error("Free quota reached. Upgrade for more cases.")
        st.stop()
23
+
24
+ # ───────── Helpers ─────────
25
@st.cache_data(show_spinner=False)
def gpt(system, user):
    """Run one system+user chat turn on gpt-4o-mini; return the trimmed reply."""
    messages = [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ]
    completion = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        max_tokens=700,
    )
    return completion.choices[0].message.content.strip()
29
+
30
@st.cache_data(show_spinner=False)
def whisper_transcribe(audio_bytes):
    """Transcribe raw audio bytes with Whisper and return the text.

    The bytes are spooled to a named temp file because the OpenAI client
    infers the upload format from the file name. Unlike the original, the
    read handle is closed and the ``delete=False`` temp file is removed
    even when the API call raises — the original leaked both.
    """
    # NOTE(review): suffix is always .wav even for mp3/m4a/ogg uploads;
    # Whisper usually sniffs the container, but confirm with callers.
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
    try:
        tmp.write(audio_bytes)
        tmp.close()
        with open(tmp.name, "rb") as fh:
            return openai.audio.transcriptions.create(model="whisper-1", file=fh).text
    finally:
        os.unlink(tmp.name)
37
+
38
@st.cache_data(show_spinner=False)
def dicom_to_png(ds):
    """Render a DICOM dataset's pixel data as an 8-bit grayscale PIL image.

    Min-max normalizes the pixel values to 0-255. A uniform image
    (max == min) maps to all zeros instead of dividing by zero — the
    original's ``arr /= arr.max()`` produced NaNs in that case.
    """
    arr = ds.pixel_array.astype(np.float32)
    arr -= arr.min()
    peak = arr.max()
    if peak > 0:
        arr *= 255.0 / peak
    return Image.fromarray(arr.astype(np.uint8))
43
+
44
# ───────── Tabs ─────────
tabs = st.tabs(["Radiology AI", "Clinical Note Q&A", "Voice Triage", "UMLS Lookup", "GraphRAG"])

# Radiology: preview the uploaded study, then ask for a structured report.
with tabs[0]:
    st.header("📷 Radiology AI")
    up = st.file_uploader("Upload DICOM or PNG/JPG", type=["dcm", "png", "jpg", "jpeg"])
    if up and st.button("Analyze"):
        charge_credit()
        if up.name.endswith(".dcm"):
            ds = pydicom.dcmread(up)
            img = dicom_to_png(ds)
        else:
            img = Image.open(up)
        st.image(img, caption="Preview", use_column_width=True)
        # NOTE(review): the prompt never includes the image — the model only
        # sees this text. Wire the pixels in via a vision message to make
        # the "interpretation" real.
        report = gpt("You are a board‑certified radiologist. Provide an impression with SNOMED codes.", "Describe findings and give TNM if cancer present.")
        st.markdown("#### Report")
        st.write(report)

# Clinical note summarization / free-form Q&A.
with tabs[1]:
    st.header("📄 Clinical Note Q&A")
    txt = st.text_area("Paste clinical note")
    q = st.text_input("Ask a question (or leave blank for summary)")
    if txt and st.button("Process"):
        charge_credit()
        prompt = f"Here is the clinical note:\n{txt}\n\nQuestion: {q or 'Summarize'}"
        ans = gpt("You are an expert physician.", prompt)
        st.write(ans)

# Voice triage: transcribe symptoms, then ask for differentials.
with tabs[2]:
    st.header("🎤 Voice Triage")
    aud = st.file_uploader("Upload patient symptoms recording", type=["wav", "mp3", "m4a", "ogg"])
    if aud and st.button("Infer"):
        charge_credit()
        t = whisper_transcribe(aud.read())
        st.write("**Transcript:**", t)
        diff = gpt("You are a triage nurse.", f"Patient says: {t}\nProvide likely differentials with SNOMED codes and urgency level.")
        st.write(diff)

# UMLS concept lookup via the NLM UTS REST search endpoint.
with tabs[3]:
    st.header("🔍 UMLS Concept Lookup")
    term = st.text_input("Term or CUI")
    if term:
        # Let requests URL-encode the query, and read the API key from the
        # environment instead of shipping the hard-coded placeholder
        # ("sample" remains the fallback, so default behavior is unchanged).
        resp = requests.get(
            "https://uts-ws.nlm.nih.gov/rest/search/current",
            params={"string": term, "apiKey": os.getenv("UMLS_API_KEY", "sample")},
        )
        st.json(resp.json())

# GraphRAG Explorer (toy demo)
with tabs[4]:
    st.header("🧠 GraphRAG Explorer")
    st.info("Prototype biomedical KG Q&A – coming soon.")

# ---------------------------------------------------------------------
# End of app.py (the original bare dash line was a Python syntax error).
requirements.txt CHANGED
@@ -1,3 +1,16 @@
1
- altair
2
- pandas
3
- streamlit
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit>=1.35.0
2
+ openai>=1.30.0
3
+ pydicom
4
+ Pillow
5
+ numpy
6
+ pydub
7
+ soundfile
8
+ ffmpeg-python
9
+ stripe
10
+ requests
11
+ python-multipart
12
+ fastapi
13
+ uvicorn
14
+ networkx
15
+
16
+ # ---------------------------------------------------------------------