File size: 4,711 Bytes
ec43d7b
 
 
 
 
 
05516c5
ec43d7b
 
 
 
05516c5
 
 
 
 
 
 
ec43d7b
 
 
 
 
 
 
af3ec2d
4eaec19
a16548e
4eaec19
 
 
 
 
 
 
a16548e
4eaec19
 
 
 
 
af3ec2d
ec43d7b
 
 
 
 
 
 
0c51d7e
ec43d7b
05516c5
ec43d7b
 
44a8eb7
 
 
 
 
 
 
 
 
 
 
c8161e7
44a8eb7
 
 
ec43d7b
 
 
 
 
 
05516c5
 
 
 
 
ec43d7b
 
 
 
 
 
 
 
 
05516c5
f765bd6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a16548e
ec43d7b
 
05516c5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import streamlit as st
import os
from huggingface_hub import InferenceClient
from textblob import TextBlob
from langchain.prompts import PromptTemplate  
from dotenv import load_dotenv
import pandas as pd

# Load environment variables from a local .env file (expects HF_API_KEY).
load_dotenv()

# Load the drug-side effects dataset.
# NOTE(review): path is relative to the current working directory — the app
# must be launched from the project root for this to resolve; confirm.
df = pd.read_csv('./drugs_side_effects_drugs_com.csv')
# Keep only the two columns the app uses; drop rows missing either value.
df = df[['drug_name', 'side_effects']].dropna()

# Create a set of valid drug names to compare user queries
# (lower-cased so matching against user text is case-insensitive).
valid_drugs = set(df['drug_name'].str.lower())

# Configure Hugging Face API
# Uses the hosted Phi-3-mini instruct model; authentication comes from the
# HF_API_KEY environment variable loaded above (None if unset — calls will fail).
client = InferenceClient(
    "microsoft/Phi-3-mini-4k-instruct",
    token=os.getenv("HF_API_KEY"),
)

# Define System Prompts
# Persona and behavior contract prepended to every request sent to the model.
SYSTEM_PROMPT_GENERAL = """
You are CareBot, a pharmacist and medical expert known as Treasure. Your goal is to provide empathetic, supportive, and detailed responses tailored to the user's needs.
Behavior Guidelines:
1. Introduction: Greet the user as Treasure during the first interaction.
2. Personalization: Adapt responses to the user's tone and emotional state.
3. Empathy: Respond warmly to the user's concerns and questions.
4. Evidence-Based: Use reliable sources to answer queries. For missing data, advise seeking professional consultation.
5. Focus: Avoid providing off-topic information; address the user's query specifically.
6. Encouragement: Balance acknowledging concerns with actionable and constructive suggestions.
7. Context Integration: Use the given context to deliver accurate and relevant answers without repeating the context explicitly.
Objective:
Deliver thoughtful, empathetic, and medically sound advice based on the user’s query.
Response Style:
- Detailed but concise
- Professional, empathetic tone
- Clear and actionable guidance
"""

# Define LangChain Prompt Template
# Combines the system prompt with the user's message into the single-string
# "User: ... / Assistant:" format passed to the completion endpoint below.
prompt_template = PromptTemplate(
    input_variables=["system_prompt", "user_input"],
    template="{system_prompt}\n\nUser: {user_input}\nAssistant:"
)

st.title("CareBot: AI Medical Assistant for Drug Information and Side Effects")

# Initialize the chat history once per session with the assistant's greeting.
# Streamlit reruns the whole script on every interaction, so the guard keeps
# the greeting from being re-appended.
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {
            "role": "assistant",
            "content": (
                "Hi there! I'm Treasure, your friendly pharmacist. "
                "This AI-powered chatbot provides reliable information about drugs, their side effects, "
                # Fixed: the app calls the Hugging Face Inference API (see the
                # InferenceClient setup above), not the Groq API as previously claimed.
                "and related medical conditions. Powered by the Hugging Face Inference API and LangChain, "
                "it delivers real-time, accurate responses.\n\n"
                "Example Questions:\n"
                "- What are the side effects of aspirin?\n"
                "- Can ibuprofen cause dizziness?\n\n"
                "Disclaimer: This chatbot is for informational purposes only and not a substitute for professional "
                "medical advice.\n\n\n"
                "How can I help you today?"
            )
        }
    ]

# Replay the stored conversation so history survives Streamlit reruns.
# (Use the same subscript access style as the initialization above.)
for msg in st.session_state["messages"]:
    st.chat_message(msg["role"]).write(msg["content"])

# Function to check if the user's query is relevant to the dataset
def is_relevant_query(query):
    """Return True if *query* mentions any drug name present in the dataset.

    Fixes two matching defects in the original token-intersection approach:
    - trailing punctuation made tokens like "aspirin?" miss "aspirin";
    - multi-word drug names can never equal a single whitespace token, so
      they are matched by substring search as a fallback.

    Args:
        query: Raw user input string.

    Returns:
        bool: True when at least one known drug name is mentioned.
    """
    lowered = query.lower()
    # Strip common punctuation so "aspirin?" still matches "aspirin".
    tokens = {tok.strip(".,!?;:()[]\"'") for tok in lowered.split()}
    if valid_drugs & tokens:
        return True
    # Multi-word names (those containing a space) fall back to substring search.
    return any(" " in name and name in lowered for name in valid_drugs)

# Chat input and processing
if prompt := st.chat_input():
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Sentiment Analysis: polarity in [-1, 1]; negative values signal distress.
    user_sentiment = TextBlob(prompt).sentiment.polarity

    # Augment the system prompt when the user sounds down.
    system_prompt = SYSTEM_PROMPT_GENERAL
    if user_sentiment < 0:  # User expresses negative sentiment
        system_prompt = f"""{system_prompt} 
        The user seems to be feeling down. Prioritize empathetic responses and open-ended questions."""

    # Format prompt using LangChain's PromptTemplate
    formatted_prompt = prompt_template.format(
        system_prompt=system_prompt,
        user_input=prompt
    )

    # Generate a response by streaming from the Hugging Face API.
    # Fixed: streamed chunks can carry delta.content == None (e.g. role-only
    # or finish chunks); concatenating None raised TypeError before.
    response = ""
    for message in client.chat_completion(
        messages=[{"role": "user", "content": formatted_prompt}],
        max_tokens=500,
        stream=True,
    ):
        response += message.choices[0].delta.content or ""

    # Strip any leaked speaker label from the model output.
    # Fixed: the original checked for one marker ("Ashley:"/"User:") but split
    # on a different one ("Treasure:"/"Assistant:"), raising IndexError
    # whenever the split marker was absent. Split on the marker actually
    # found, at most once, keeping the text after it.
    for marker in ("Treasure:", "Assistant:"):
        if marker in response:
            response = response.split(marker, 1)[1]
            break
    response = response.strip()

    # Append assistant message to the session state and render it.
    st.session_state.messages.append({"role": "assistant", "content": response})
    st.chat_message("assistant").write(response)