Haseeb-001 committed on
Commit 6ea4317 · verified · 1 Parent(s): 87aac0f

Create app.py

Files changed (1)
  1. app.py +64 -0
app.py ADDED
@@ -0,0 +1,64 @@
+ import os
+ import streamlit as st
+ from langchain_google_genai import GoogleGenerativeAI
+
+ # Initialize the Gemini LLM
+ llm = GoogleGenerativeAI(
+     model="gemini-1.5-flash",
+     google_api_key=os.environ.get("GOOGLE_API_KEY"),  # read the key from the environment instead of hardcoding it
+     temperature=0.7,
+     top_p=0.9,
+     max_tokens=1000,
+     frequency_penalty=0.5,
+     presence_penalty=0.3,
+     stop_sequences=["User:", "Assistant:"]
+ )
+
+ # Assistant role and requirements
+ instruction = "Answer the user's (patient's) questions politely and provide accurate information."
+ context = (
+     "You are a medical assistant doctor named DoctorX. "
+     "Ask the user, who may be a patient, about their health, gather their symptoms, predict the likely disease, and suggest a good, quick remedy or prescription based on their condition. "
+     "Continue asking about their condition or health, and recommend a doctor if required. "
+     "Also give the user a reassuring line to reduce their tension."
+ )
+ input_data = (
+     "Additional information:\n"
+     "Hospital Address: John Smith Hospital\n"
+     "Ambulance Call: 1111\n"
+     "Doctor Number: +3100-1000-100"
+ )
+
+ # Streamlit UI setup
+ st.set_page_config(page_title="DoctorX: HealthCare", layout="wide")
+ st.title("🧠 DoctorX: HealthCare")
+
+ # Chatbot interaction
+ if prompt := st.chat_input("Type your question here..."):
+     with st.chat_message("user"):
+         st.markdown(prompt)
+
+     with st.chat_message("assistant"):
+         with st.spinner("🤖 Thinking..."):
+             try:
+                 # Construct the prompt from the assistant's instructions, context, and the user's query
+                 enhanced_prompt = (
+                     f"{instruction}\n\n"
+                     f"{context}\n\n"
+                     f"{input_data}\n\n"
+                     f"User's Query: {prompt}\n\n"
+                     "Follow-up: Ask the user about their symptoms, provide a possible diagnosis, and suggest remedies or prescriptions."
+                 )
+
+                 # Stream the response from the LLM, updating a single placeholder in place
+                 response = ""
+                 placeholder = st.empty()
+                 for chunk in llm.stream(enhanced_prompt):
+                     response += chunk
+                     placeholder.markdown(response)
+
+                 # Add stress-relief advice
+                 st.markdown("\n**Stress-Relief Tip:** Remember to take deep breaths and stay hydrated.")
+
+             except Exception as e:
+                 st.error(f"⚠️ Error processing query: {e}")