import pandas as pd


df = pd.read_csv('./drugs_side_effects_drugs_com.csv')

# Keep only the columns used for retrieval and drop incomplete rows.
df = df[['drug_name', 'medical_condition', 'side_effects']]
df = df.dropna()

# Build one "column: value | column: value" context string per row.
context_data = []
for i in range(len(df)):
    context = " | ".join(f"{col}: {df.iloc[i][col]}" for col in df.columns)
    context_data.append(context)
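
# Optional sanity check (a sketch, not part of the original script): inspect one
# formatted context string to confirm the "column: value | ..." layout before embedding.
# print(len(context_data), "rows formatted; example:", context_data[0])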

import os

# Get the secret key from the environment
groq_key = os.environ.get('gloq_key')

## LLM used for RAG
from langchain_groq import ChatGroq

llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_key)

## Embedding model!
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
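
# Optional check (a sketch, not part of the original script): embed one short string
# to confirm the model downloads/loads correctly and to see the vector length.
# print(len(embed_model.embed_query("aspirin")))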

# create vector store!
from langchain_chroma import Chroma

vectorstore = Chroma(
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
    persist_directory="./",
)

# Add the formatted context strings to the vector store.
vectorstore.add_texts(context_data)

retriever = vectorstore.as_retriever()
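
# Optional check (a sketch, not part of the original script): the retriever is a
# LangChain Runnable, so it can be queried directly to verify that relevant rows
# come back before wiring it into the chain.
# for doc in retriever.invoke("doxycycline side effects"):
#     print(doc.page_content)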

from langchain_core.prompts import PromptTemplate

template = ("""You are a pharmacist and medical expert. 
    Use the provided context to answer the question.
    If the question is related to medical condition, drug name 
    and side effects that are not in the context, look online and answer them.
    If you don't know the answer, say so. Explain your answer in detail.
    Do not discuss the context in your response; just provide the answer directly.

    Context: {context}

    Question: {question}

    Answer:""")

rag_prompt = PromptTemplate.from_template(template)

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
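
# Minimal usage sketch (assumes a valid Groq API key is configured): run the chain
# once outside the UI. This makes a real API call, so it is left commented out.
# print(rag_chain.invoke("What are the side effects of aspirin?"))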


import gradio as gr

# Stream the chain's answer incrementally so the chat UI updates as tokens arrive.
# gr.ChatInterface with type="messages" calls this with the new message and the chat history.
def rag_memory_stream(message, history):
    partial_text = ""
    for new_text in rag_chain.stream(message):
        partial_text += new_text
        yield partial_text

# Title and description for the app
title = "AI Medical Assistant for Drug Information and Side Effects"
description = """
This AI-powered chatbot provides reliable information about drugs, their side effects, and related medical conditions. 
Powered by the Groq API and LangChain, it delivers real-time, accurate responses.

Example Questions:
- What are the side effects of aspirin?
- What is the disease for constant fatigue and muscle weakness?

Disclaimer: This chatbot is for informational purposes only and not a substitute for professional medical advice.
"""

# Earlier single-turn gr.Interface layout, kept here for reference:
# demo = gr.Interface(
#     fn=rag_memory_stream,
#     inputs=gr.Textbox(
#         lines=5, 
#         placeholder="Type your medical question here...", 
#         label="Your Medical Question"
#     ),
#     outputs=gr.Textbox(
#         lines=15,  # Reduced line count for better layout
#         label="AI Response"
#     ),
#     title=title,
#     description=description,
#     allow_flagging="never"
# )

demo = gr.ChatInterface(
    fn=rag_memory_stream,
    type="messages",
    title=title,
    description=description,
    fill_height=True,
    theme="glass",
)

if __name__ == "__main__":
    demo.launch()