Update app.py
app.py CHANGED

@@ -4,10 +4,18 @@ from huggingface_hub import InferenceClient
 from textblob import TextBlob
 from langchain.prompts import PromptTemplate
 from dotenv import load_dotenv
+import pandas as pd
 
 # Load environment variables
 load_dotenv()
 
+# Load the drug-side effects dataset
+df = pd.read_csv('./drugs_side_effects_drugs_com.csv')
+df = df[['drug_name', 'side_effects']].dropna()
+
+# Create a set of valid drug names to compare user queries
+valid_drugs = set(df['drug_name'].str.lower())
+
 # Configure Hugging Face API
 client = InferenceClient(
     "microsoft/Phi-3-mini-4k-instruct",
@@ -39,9 +47,9 @@ prompt_template = PromptTemplate(
     template="{system_prompt}\n\nUser: {user_input}\nAssistant:"
 )
 
-
 st.title("CareBot: AI Medical Assistant for Drug Information and Side Effects")
 
+# Initialize the session state
 if "messages" not in st.session_state:
     st.session_state["messages"] = [
         {
@@ -61,21 +69,15 @@ if "messages" not in st.session_state:
         }
     ]
 
-# if "messages" not in st.session_state:
-# st.session_state["messages"] = [
-# {"role": "assistant", "content": "Hi there! I'm Treasure, your friendly pharmacist.
-# This AI-powered chatbot provides reliable information about drugs, their side effects, and related medical conditions. Powered by the Groq API and LangChain, it delivers real-time, accurate responses.
-# Example Questions:
-# What are the side effects of aspirin?
-# Can ibuprofen cause dizziness?
-# Disclaimer: This chatbot is for informational purposes only and not a substitute for professional medical advice.
-# How can I help you today?"}
-# ]
-
 # Display previous messages
 for msg in st.session_state.messages:
     st.chat_message(msg["role"]).write(msg["content"])
 
+# Function to check if the user's query is relevant to the dataset
+def is_relevant_query(query):
+    query_tokens = set(query.lower().split())
+    return bool(valid_drugs.intersection(query_tokens))
+
 # Chat input and processing
 if prompt := st.chat_input():
     # Append user message to the session state
@@ -85,33 +87,37 @@ if prompt := st.chat_input():
     # Sentiment Analysis
     user_sentiment = TextBlob(prompt).sentiment.polarity
 
-    # Craft System Prompt based on sentiment
-    system_prompt = SYSTEM_PROMPT_GENERAL
-    if user_sentiment < 0:  # User expresses negative sentiment
-        system_prompt = f"""{system_prompt}
-        The user seems to be feeling down. Prioritize empathetic responses and open-ended questions."""
-
-    # Format prompt using LangChain's PromptTemplate
-    formatted_prompt = prompt_template.format(
-        system_prompt=system_prompt,
-        user_input=prompt
-    )
-
-    # Generate a response using Hugging Face API
-    response = ""
-    for message in client.chat_completion(
-        messages=[{"role": "user", "content": formatted_prompt}],
-        max_tokens=500,
-        stream=True,
-    ):
-        response += message.choices[0].delta.content
-
-    # Process response for specific tokens
-    if "Ashley:" in response:
-        response = response.split("Treasure:")[1].strip()
-    elif "User:" in response:
-        response = response.split("Assistant:")[1].strip()
+    # Check if the query is relevant to the drug-side effects dataset
+    if not is_relevant_query(prompt):
+        response = "I'm sorry, but your question seems to be out of my scope. Please ask about drugs, side effects, or related medical conditions."
+    else:
+        # Craft System Prompt based on sentiment
+        system_prompt = SYSTEM_PROMPT_GENERAL
+        if user_sentiment < 0:  # User expresses negative sentiment
+            system_prompt = f"""{system_prompt}
+            The user seems to be feeling down. Prioritize empathetic responses and open-ended questions."""
+
+        # Format prompt using LangChain's PromptTemplate
+        formatted_prompt = prompt_template.format(
+            system_prompt=system_prompt,
+            user_input=prompt
+        )
+
+        # Generate a response using Hugging Face API
+        response = ""
+        for message in client.chat_completion(
+            messages=[{"role": "user", "content": formatted_prompt}],
+            max_tokens=500,
+            stream=True,
+        ):
+            response += message.choices[0].delta.content
+
+        # Process response for specific tokens
+        if "Ashley:" in response:
+            response = response.split("Treasure:")[1].strip()
+        elif "User:" in response:
+            response = response.split("Assistant:")[1].strip()
 
     # Append assistant message to the session state
     st.session_state.messages.append({"role": "assistant", "content": response.strip()})
-    st.chat_message("assistant").write(response.strip())
+    st.chat_message("assistant").write(response.strip())
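
Note on the relevance gate added in this commit: it lowercases the question, splits it on whitespace, and checks whether any token is an exact drug name from the CSV. Below is a minimal standalone sketch of that logic, using a small inline table as a placeholder for drugs_side_effects_drugs_com.csv (the file itself is not part of this diff):

# Minimal sketch of the keyword-based relevance gate.
# The inline records below are placeholders standing in for the real
# drugs_side_effects_drugs_com.csv, which is not included in this diff.
import pandas as pd

df = pd.DataFrame({
    "drug_name": ["aspirin", "ibuprofen", "metformin"],
    "side_effects": ["upset stomach", "dizziness", "nausea"],
})
df = df[["drug_name", "side_effects"]].dropna()

# Same construction as in app.py: a set of lowercased drug names.
valid_drugs = set(df["drug_name"].str.lower())

def is_relevant_query(query):
    # Lowercase, split on whitespace, and check for any overlap with known drug names.
    query_tokens = set(query.lower().split())
    return bool(valid_drugs.intersection(query_tokens))

print(is_relevant_query("Can ibuprofen cause dizziness?"))  # True: "ibuprofen" is a token
print(is_relevant_query("What helps with a sore back?"))    # False: no known drug name appears

Because the split is whitespace-only, a drug name immediately followed by punctuation (e.g. "aspirin?" in "What are the side effects of aspirin?") does not match the set; stripping punctuation from the tokens before the intersection would be one possible refinement.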
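
Note on the sentiment gate in the last hunk: TextBlob's sentiment.polarity is a float in [-1.0, 1.0], and the code treats any value below 0 as negative sentiment. A rough sketch of the prompt adjustment in isolation; SYSTEM_PROMPT_GENERAL below is a stand-in, since the real prompt is defined earlier in app.py and does not appear in this diff:

# Sketch of the sentiment-based prompt adjustment.
# SYSTEM_PROMPT_GENERAL is a placeholder; the real prompt text is defined
# earlier in app.py and is not shown in this diff.
from textblob import TextBlob

SYSTEM_PROMPT_GENERAL = "You are CareBot, a friendly pharmacist assistant."

def build_system_prompt(user_input: str) -> str:
    # sentiment.polarity is a float in [-1.0, 1.0]; app.py treats
    # anything below 0 as negative sentiment.
    polarity = TextBlob(user_input).sentiment.polarity
    system_prompt = SYSTEM_PROMPT_GENERAL
    if polarity < 0:
        system_prompt = f"""{system_prompt}
        The user seems to be feeling down. Prioritize empathetic responses and open-ended questions."""
    return system_prompt

print(build_system_prompt("These side effects are making me feel terrible."))
print(build_system_prompt("Please list the side effects of metformin."))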