import os

import pandas as pd
import streamlit as st
from textblob import TextBlob
from langchain_core.prompts import PromptTemplate
from langchain_groq import ChatGroq
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
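
# Assumed dependencies (exact package names may vary by version):
#   pip install streamlit textblob pandas langchain-core langchain-groq \
#       langchain-chroma langchain-huggingface sentence-transformers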

# Cache heavyweight setup so Streamlit does not re-read the CSV, re-embed the
# rows, and re-add them to the vector store on every rerun of this script
@st.cache_resource
def load_resources():
    # Load the dataset and keep only the columns used for retrieval
    df = pd.read_csv('./drugs_side_effects_drugs_com.csv')
    df = df[['drug_name', 'medical_condition', 'side_effects']]
    df.dropna(inplace=True)

    # Prepare context data for the vector store: one pipe-delimited string per row
    context_data = [
        " | ".join(f"{col}: {row[col]}" for col in df.columns)
        for _, row in df.iterrows()
    ]

    # Set up the Groq LLM and the Chroma vector store
    groq_key = os.environ.get('gloq_key')
    llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_key)
    embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
    vectorstore = Chroma(
        collection_name="medical_dataset_store",
        embedding_function=embed_model,
        persist_directory="./"
    )
    vectorstore.add_texts(context_data)
    return llm, vectorstore.as_retriever()

llm, retriever = load_resources()
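
# Optional: retrieval can be capped at the top-k matches inside load_resources(),
# e.g. vectorstore.as_retriever(search_kwargs={"k": 3}); the k value here is an
# illustrative assumption, not from the original code (langchain defaults to k=4)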

# Define the system prompt and the RAG prompt template
SYSTEM_PROMPT_GENERAL = """
You are CareBot, a pharmacist and medical expert known as Treasure. Your goal is to provide empathetic, supportive, and detailed responses tailored to the user's needs.
Behavior Guidelines:
1. Introduction: Greet the user as Treasure during the first interaction.
2. Personalization: Adapt responses to the user's tone and emotional state.
3. Empathy: Respond warmly to the user's concerns and questions.
4. Evidence-Based: Use reliable sources to answer queries. For missing data, advise seeking professional consultation.
5. Focus: Avoid providing off-topic information; address the user's query specifically.
6. Encouragement: Balance acknowledging concerns with actionable and constructive suggestions.
7. Context Integration: Use the given context to deliver accurate and relevant answers without repeating the context explicitly.

Objective:
Deliver thoughtful, empathetic, and medically sound advice based on the user’s query.

Response Style:
- Detailed but concise
- Professional, empathetic tone
- Clear and actionable guidance
"""

rag_prompt_template = PromptTemplate(
    input_variables=["system_prompt", "context", "user_input"],
    template="""{system_prompt}

Context: {context}

User: {user_input}
Assistant:"""
)
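
# Illustrative usage (the values below are placeholders, not from the original code):
#   rag_prompt_template.format(system_prompt=SYSTEM_PROMPT_GENERAL,
#                              context="drug_name: ...", user_input="...")
# returns a single formatted string that can be passed directly to llm.stream()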

st.title("CareBot: Your AI Medical Assistant")

# Initialize session state for the chat history (st.session_state persists
# across Streamlit reruns, so the conversation survives each interaction)
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "Hi there! I'm Treasure, your friendly pharmacist. How can I help you today?"}
    ]

# Display chat history
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# User input
if user_query := st.chat_input("Ask me a medical question, or share your concerns."):
    # Add user message to the session state
    st.session_state.messages.append({"role": "user", "content": user_query})
    st.chat_message("user").write(user_query)

    # Sentiment analysis: TextBlob polarity ranges from -1.0 (most negative)
    # to 1.0 (most positive)
    sentiment = TextBlob(user_query).sentiment.polarity

    # Modify prompt based on sentiment
    system_prompt = SYSTEM_PROMPT_GENERAL
    if sentiment < 0:
        system_prompt += "\nThe user seems upset or worried. Prioritize empathy and reassurance."
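    # Note: polarity sits near 0 for neutral text, so a stricter cutoff
    # (e.g. sentiment < -0.3) may reduce false positives here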

    # Retrieve relevant context from the vector store (invoke() is the current
    # retriever API; get_relevant_documents() is deprecated)
    context_results = retriever.invoke(user_query)
    context = "\n".join(result.page_content for result in context_results)

    # Format the prompt
    formatted_prompt = rag_prompt_template.format(
        system_prompt=system_prompt,
        context=context,
        user_input=user_query
    )

    # Stream the response from the Groq LLM; each chunk is an AIMessageChunk,
    # so accumulate its .content rather than the chunk object itself
    response = ""
    for chunk in llm.stream(formatted_prompt):
        response += chunk.content

    # Add assistant response to the session state
    st.session_state.messages.append({"role": "assistant", "content": response.strip()})
    st.chat_message("assistant").write(response.strip())