from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Optional, Dict
import gradio as gr
import json
from enum import Enum
import re
import os
import time
from huggingface_hub import hf_hub_download

# We'll import llama_cpp in a way that provides better error messages
try:
    from llama_cpp import Llama
    LLAMA_IMPORT_ERROR = None
except Exception as e:
    LLAMA_IMPORT_ERROR = str(e)
    print(f"Warning: Failed to import llama_cpp: {e}")
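    # Most common cause: the llama-cpp-python package (which provides the
    # llama_cpp module) is missing or its wheel does not match this platform;
    # reinstalling usually fixes the import:
    #   pip install llama-cpp-python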

class ConsultationState(Enum):
    INITIAL = "initial"
    GATHERING_INFO = "gathering_info"
    DIAGNOSIS = "diagnosis"
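
# State machine used by process_message below: INITIAL -> GATHERING_INFO
# (ask the five assessment questions one at a time) -> DIAGNOSIS (run the
# model once on the gathered answers, then reset to INITIAL).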

class Message(BaseModel):
    role: str
    content: str

class ChatRequest(BaseModel):
    messages: List[Message]

class ChatResponse(BaseModel):
    response: str
    finished: bool
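
# Example /chat payload matching ChatRequest (illustrative values only):
# {
#   "messages": [
#     {"role": "user", "content": "I have had a headache and fever for two days."}
#   ]
# }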

# Standard health assessment questions
HEALTH_ASSESSMENT_QUESTIONS = [
    "What are your current symptoms and how long have you been experiencing them?",
    "Do you have any pre-existing medical conditions or chronic illnesses?",
    "Are you currently taking any medications? If yes, please list them.",
    "Is there any relevant family medical history I should know about?",
    "Have you had any similar symptoms in the past? If yes, what treatments worked?"
]

NURSE_OGE_IDENTITY = """
You are Nurse Oge, a medical AI assistant focused on serving patients in Nigeria. Always be empathetic, 
professional, and thorough in your assessments. When asked about your identity, explain that you are 
Nurse Oge, a medical AI assistant serving Nigerian communities. Remember that you must gather complete 
health information before providing any medical advice.
"""

class NurseOgeAssistant:
    def __init__(self):
        if LLAMA_IMPORT_ERROR:
            raise ImportError(f"Cannot initialize NurseOgeAssistant due to llama_cpp import error: {LLAMA_IMPORT_ERROR}")
            
        # Download the GGUF model file from the Hugging Face Hub
        # (downloads are cached and resumed automatically by huggingface_hub,
        # so the deprecated resume_download flag is not needed)
        try:
            model_path = hf_hub_download(
                repo_id="mradermacher/Llama3-Med42-8B-GGUF",
                filename="Llama3-Med42-8B.IQ3_M.gguf",
            )
            
            # Initialize the model with the downloaded file
            self.llm = Llama(
                model_path=model_path,
                n_ctx=2048,  # Context window
                n_threads=4   # Number of CPU threads to use
            )
            
        except Exception as e:
            raise RuntimeError(f"Failed to initialize the model: {e}") from e
            
        self.consultation_states = {}
        self.gathered_info = {}

    def _is_identity_question(self, message: str) -> bool:
        identity_patterns = [
            r"who are you",
            r"what are you",
            r"your name",
            r"what should I call you",
            r"tell me about yourself"
        ]
        return any(re.search(pattern, message.lower()) for pattern in identity_patterns)

    def _is_location_question(self, message: str) -> bool:
        location_patterns = [
            r"where are you",
            r"which country",
            r"your location",
            r"where do you work",
            r"where are you based"
        ]
        return any(re.search(pattern, message.lower()) for pattern in location_patterns)

    def _get_next_assessment_question(self, conversation_id: str) -> Optional[str]:
        if conversation_id not in self.gathered_info:
            self.gathered_info[conversation_id] = []
        
        questions_asked = len(self.gathered_info[conversation_id])
        if questions_asked < len(HEALTH_ASSESSMENT_QUESTIONS):
            return HEALTH_ASSESSMENT_QUESTIONS[questions_asked]
        return None

    async def process_message(self, conversation_id: str, message: str, history: List[Dict]) -> ChatResponse:
        # Initialize state if new conversation
        if conversation_id not in self.consultation_states:
            self.consultation_states[conversation_id] = ConsultationState.INITIAL

        # Handle identity questions
        if self._is_identity_question(message):
            return ChatResponse(
                response="I am Nurse Oge, a medical AI assistant dedicated to helping patients in Nigeria. "
                        "I'm here to provide medical guidance while ensuring I gather all necessary health information "
                        "for accurate assessments.",
                finished=True
            )

        # Handle location questions
        if self._is_location_question(message):
            return ChatResponse(
                response="I am based in Nigeria and specifically trained to serve Nigerian communities, "
                        "taking into account local healthcare contexts and needs.",
                finished=True
            )

        # Start health assessment if it's a medical query
        if self.consultation_states[conversation_id] == ConsultationState.INITIAL:
            self.consultation_states[conversation_id] = ConsultationState.GATHERING_INFO
            next_question = self._get_next_assessment_question(conversation_id)
            return ChatResponse(
                response=f"Before I can provide any medical advice, I need to gather some important health information. "
                        f"{next_question}",
                finished=False
            )

        # Continue gathering information
        if self.consultation_states[conversation_id] == ConsultationState.GATHERING_INFO:
            self.gathered_info[conversation_id].append(message)
            next_question = self._get_next_assessment_question(conversation_id)
            
            if next_question:
                return ChatResponse(
                    response=f"Thank you for that information. {next_question}",
                    finished=False
                )
            else:
                self.consultation_states[conversation_id] = ConsultationState.DIAGNOSIS
                context = "\n".join([
                    f"Q: {q}\nA: {a}" for q, a in 
                    zip(HEALTH_ASSESSMENT_QUESTIONS, self.gathered_info[conversation_id])
                ])
                
                messages = [
                    {"role": "system", "content": NURSE_OGE_IDENTITY},
                    {"role": "user", "content": f"Based on the following patient information, provide a thorough assessment and recommendations:\n\n{context}\n\nMost recent patient message: {message}"}
                ]
                
                response = self.llm.create_chat_completion(
                    messages=messages,
                    max_tokens=1024,
                    temperature=0.7
                )
                
                self.consultation_states[conversation_id] = ConsultationState.INITIAL
                self.gathered_info[conversation_id] = []
                
                return ChatResponse(
                    response=response['choices'][0]['message']['content'],
                    finished=True
                )

        # Defensive fallback: without this, an unexpected state would make the
        # coroutine implicitly return None and break response validation.
        self.consultation_states[conversation_id] = ConsultationState.INITIAL
        return ChatResponse(
            response="I'm sorry, I lost track of our conversation. Could you please restate your concern?",
            finished=True
        )

# Initialize FastAPI
app = FastAPI()

# Create a global variable for our assistant
nurse_oge = None

@app.on_event("startup")
async def startup_event():
    global nurse_oge
    try:
        nurse_oge = NurseOgeAssistant()
    except Exception as e:
        print(f"Failed to initialize NurseOgeAssistant: {e}")
        # We'll continue running but the /chat endpoint will return errors

@app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    if nurse_oge is None:
        raise HTTPException(
            status_code=503,
            detail="The medical assistant is not available at the moment. Please try again later."
        )
    
    conversation_id = "default"
    
    if not request.messages:
        raise HTTPException(status_code=400, detail="No messages provided")
    
    latest_message = request.messages[-1].content
    
    response = await nurse_oge.process_message(
        conversation_id=conversation_id,
        message=latest_message,
        history=request.messages[:-1]
    )
    
    return response
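
# Example request (assuming the server is running locally on port 8000, as
# configured below):
#   curl -X POST http://localhost:8000/chat \
#     -H "Content-Type: application/json" \
#     -d '{"messages": [{"role": "user", "content": "I have a persistent cough."}]}'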

# Gradio interface. process_message is a coroutine, so this handler must be
# async and await it (gr.ChatInterface supports async functions); calling it
# without await would return a coroutine object with no .response attribute.
async def gradio_chat(message, history):
    if nurse_oge is None:
        return "The medical assistant is not available at the moment. Please try again later."

    response = await nurse_oge.process_message("gradio_user", message, history)
    return response.response

demo = gr.ChatInterface(
    fn=gradio_chat,
    title="Nurse Oge",
    description="A fine-tuned Llama 3 (Med42-8B) assistant for medical consultations. This is a demo only.",
    theme="soft"
)

# Mount both FastAPI and Gradio
app = gr.mount_gradio_app(app, demo, path="/gradio")
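# The Gradio UI is now served at /gradio (e.g. http://localhost:8000/gradio
# with the settings below), while the JSON API remains available at /chat.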

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
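
# To run locally (assuming this file is saved as app.py):
#   python app.py
# or, equivalently:
#   uvicorn app:app --host 0.0.0.0 --port 8000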