import pandas as pd


# Load the dataset and flatten the first three columns of each row into a
# single "column: value" string to embed as retrieval context.
df = pd.read_csv('./medical_data.csv')

context_data = []
for i in range(len(df)):
    context = ""
    for j in range(3):
        context += df.columns[j]
        context += ": "
        context += str(df.iloc[i, j])  # cast to str so non-string cells concatenate cleanly
        context += " "
    context_data.append(context)


import os

# Get the secret key from the environment
groq_key = os.environ.get('groq_api_keys')
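
# Optional guard (a small addition): fail fast with a clear message if the
# key is unset. 'groq_api_keys' is just the variable name this script expects.
if groq_key is None:
    raise RuntimeError("Set the 'groq_api_keys' environment variable before running.")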

## LLM used for RAG
from langchain_groq import ChatGroq

llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_key)

## Embedding model!
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
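# (The model weights are downloaded from the Hugging Face Hub on first run and cached locally.)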

# create vector store!
from langchain_chroma import Chroma

vectorstore = Chroma(
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
)
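
# Note: without a persist_directory, this Chroma collection lives in memory
# only, so the texts below are re-embedded on every run.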

# Add the prepared context strings to the vector store
vectorstore.add_texts(context_data)

retriever = vectorstore.as_retriever()

from langchain_core.prompts import PromptTemplate

template = ("""You are a medical expert.
    Use the provided context to answer the question.
    If you don't know the answer, say so. Explain your answer in detail.
    Do not discuss the context in your response; just provide the answer directly.

    Context: {context}

    Question: {question}

    Answer:""")

rag_prompt = PromptTemplate.from_template(template)

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
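
# Helper used in the chain below (the standard LangChain RAG pattern): join
# the retrieved Documents into plain text so the prompt receives only their
# page content rather than the Document objects' repr.
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)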

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
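
# Optional sanity check outside the UI (uncomment to try):
# print(rag_chain.invoke("What could cause persistent fatigue?"))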

import gradio as gr

def rag_memory_stream(message, history):
    # 'history' is required by gr.ChatInterface but unused here; the chain
    # answers each question independently, streaming tokens as they arrive.
    partial_text = ""
    for new_text in rag_chain.stream(message):
        partial_text += new_text
        yield partial_text

examples = [
    "I feel dizzy",
    "What are the possible causes of fatigue?",
]

description = "Real-time AI app using the Groq API and LangChain to answer medical questions"


title = "Medical Expert :) Try me!"
demo = gr.ChatInterface(fn=rag_memory_stream,
                        type="messages",
                        title=title,
                        description=description,
                        fill_height=True,
                        examples=examples,
                        theme="glass",
)
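
# Gradio serves locally by default (http://127.0.0.1:7860); pass share=True
# to demo.launch() below to expose a temporary public URL.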


if __name__ == "__main__":
    demo.launch()