# (removed: non-source scrape residue — file-size banner, commit-hash fragments,
#  and a line-number gutter that preceded the actual script)
import pandas as pd


# Load the drugs/side-effects dataset and keep only the columns used to build
# the RAG context strings.
df = pd.read_csv('./drugs_side_effects_drugs_com.csv')

df = df[['drug_name', 'medical_condition', 'side_effects']]
# Re-assign instead of dropna(inplace=True): the column selection above returns
# a new frame, and mutating it in place triggers pandas' chained-assignment
# warning (and is ineffective under copy-on-write).
df = df.dropna()

# Flatten each row into a single "col: value | col: value" string for embedding.
# NOTE(review): only the first 2 rows are indexed — presumably a debug/demo
# limit; raise N_CONTEXT_ROWS (e.g. to len(df)) to index the full dataset.
N_CONTEXT_ROWS = 2
context_data = [
    " | ".join(f"{col}: {row[col]}" for col in df.columns)
    for _, row in df.head(N_CONTEXT_ROWS).iterrows()
]

import os

from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings

# The Groq API key is read from the environment (e.g. a deployment secret).
# NOTE(review): the variable is named 'gloq_key' — looks like a typo for
# 'groq_key'; confirm it matches the configured secret name before renaming.
groq_key = os.getenv('gloq_key')

# Chat model that generates the final answers in the RAG chain.
llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_key)

# Sentence-embedding model used to vectorize the context snippets.
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")

from langchain_chroma import Chroma

# Chroma vector store that backs the retriever; the collection is persisted
# into the current working directory.
vectorstore = Chroma(
    persist_directory="./",
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
)

# Embed and index the prepared context strings.
vectorstore.add_texts(context_data)

retriever = vectorstore.as_retriever()

from langchain_core.prompts import PromptTemplate

# Answer prompt for the RAG chain; {context} and {question} are filled in by
# the chain at query time.
template = """You are a pharmacist and medical expert. 
    Use the provided context to answer the question.
    If the question is related to medical condition, drug name 
    and side effects that are not in the context, look online and answer them.
    If you don't know the answer, say so. Explain your answer in detail.
    Do not discuss the context in your response; just provide the answer directly.

    Context: {context}

    Question: {question}

    Answer:"""

rag_prompt = PromptTemplate.from_template(template)

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# LCEL pipeline: fetch context for the question, fill the prompt, call the
# LLM, and reduce the model output to a plain string.
_chain_inputs = {"context": retriever, "question": RunnablePassthrough()}
rag_chain = _chain_inputs | rag_prompt | llm | StrOutputParser()


import gradio as gr

# # Function to stream responses
# def rag_memory_stream(message, history):
#     partial_text = ""
#     for new_text in rag_chain.stream(message):  # Assuming rag_chain is pre-defined
#         partial_text += new_text
#         yield partial_text

# Streams responses from the RAG chain while keeping a simple chat history.
def rag_memory_stream(messages, history=None):
    """
    Generator that streams the RAG chain's answer for the latest user message.

    Args:
        messages: Either the new user message as a plain string (what
            gr.ChatInterface passes to fn) or a list of message strings, in
            which case the most recent entry is answered.
        history: Optional list of {"user": ..., "bot": ...} dicts, appended to
            in place. Defaults to a fresh list on each call.

    Yields:
        str: The progressively accumulated answer text.

    Raises:
        ValueError: If ``messages`` is neither a string nor a list of strings.
    """
    # A mutable default ([]) would be shared across calls; build one per call.
    if history is None:
        history = []

    # gr.ChatInterface passes the new message as a plain string; a list of
    # strings is also accepted for backward compatibility.
    if isinstance(messages, str):
        user_message = messages
    elif isinstance(messages, list) and all(isinstance(msg, str) for msg in messages):
        user_message = messages[-1]  # latest user message
    else:
        raise ValueError("Expected messages to be a string or a list of strings.")

    partial_text = ""
    history.append({"user": user_message, "bot": ""})  # add turn to history

    # Stream tokens from the chain, updating the running answer and history.
    for new_text in rag_chain.stream(user_message):
        partial_text += new_text
        history[-1]["bot"] = partial_text  # keep history in sync
        yield partial_text

# Sample questions shown as one-click examples in the chat UI.
examples = [
    "What are the side effects of aspirin?",
    "Can ibuprofen cause dizziness?"
]

# Title and description for the app
title = "CareBot: AI Medical Assistant for Drug Information and Side Effects"
# Markdown description rendered under the title in the Gradio interface.
description = """
This AI-powered chatbot provides reliable information about drugs, their side effects, and related medical conditions. 
Powered by the Groq API and LangChain, it delivers real-time, accurate responses.

Example Questions:
- What are the side effects of aspirin?
- Can ibuprofen cause dizziness?

Disclaimer: This chatbot is for informational purposes only and not a substitute for professional medical advice.
"""

# Customizing Gradio interface for a better look
# (Earlier plain-textbox gr.Interface version kept for reference.)
# demo = gr.Interface(
#     fn=rag_memory_stream,
#     inputs=gr.Textbox(
#         lines=5, 
#         placeholder="Type your medical question here...", 
#         label="Your Medical Question"
#     ),
#     outputs=gr.Textbox(
#         lines=15,  # Reduced line count for better layout
#         label="AI Response"
#     ),
#     title=title,
#     description=description,
#     allow_flagging="never"
# )

# Streaming chat UI wired to the generator function above.
# NOTE(review): Gradio's ChatInterface documents `type` as "messages" or
# "tuples" — `type="list"` may be rejected depending on the installed Gradio
# version; verify against the version pinned for this deployment.
demo = gr.ChatInterface(fn=rag_memory_stream,
                        type="list",
                        title=title,
                        description=description,
                        fill_height=True,
                        examples=examples,
                        theme="glass",
)

# Launch the app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()