net2asif committed (verified)
Commit 2cc1867 · Parent(s): 62ab84e

Update app.py

Files changed (1)
  1. app.py +48 -29
app.py CHANGED
@@ -25,6 +25,10 @@ qdrant_api_key = os.getenv('qdrant_api_key')
 
 
 
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import AIMessage, HumanMessage
+
+
 #csv loader
 loader = CSVLoader(file_path='data.csv')
 data=loader.load()
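Note: the lines above 25 are outside this hunk. A minimal sketch of the preamble the file appears to assume (API keys from the environment plus the loader and embedding imports); the getenv key names for the OpenAI key and the Qdrant URL are assumptions, only qdrant_api_key is visible in the hunk header:

# Sketch only, not part of the commit: assumed preamble above this hunk.
import os
from langchain.document_loaders import CSVLoader    # legacy import path, matching the rest of the file
from langchain.embeddings import OpenAIEmbeddings

openai_api_key = os.getenv('openai_api_key')         # assumed key name
qdrant_url = os.getenv('qdrant_url')                 # assumed key name
qdrant_api_key = os.getenv('qdrant_api_key')         # visible in the hunk header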
@@ -37,6 +41,7 @@ texts = text_splitter.split_documents(data)
 #embeding
 embeding=OpenAIEmbeddings(openai_api_key=openai_api_key, model="text-embedding-3-small")
 
+
 #import quantization
 
 from langchain.vectorstores import Qdrant
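The hunk context references texts = text_splitter.split_documents(data), but the splitter is configured outside the visible hunks. A sketch of that step; the splitter class and chunk sizes are assumptions, not the committed values:

# Sketch only: splitter class and sizes are illustrative assumptions.
from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
texts = text_splitter.split_documents(data)   # `data` comes from the CSV loader above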
@@ -52,7 +57,7 @@ qdrant = Qdrant.from_documents(
     url=qdrant_url,
     prefer_grpc=True,
     api_key=qdrant_api_key,
-    collection_name="llm_app_01",
+    collection_name="llm_app",
     quantization_config=models.BinaryQuantization(
         binary=models.BinaryQuantizationConfig(
             always_ram=True,
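Reassembled, the vector-store call this hunk edits reads roughly as below. The two leading positional arguments (the split documents and the embedding object) sit outside the hunk and are assumed; quantization_config is passed through to Qdrant so the collection keeps compact binary codes in RAM:

# Sketch of the full call after this commit; positional args are assumed.
from langchain.vectorstores import Qdrant
from qdrant_client import models

qdrant = Qdrant.from_documents(
    texts,                      # assumed: the split documents
    embeding,                   # assumed: the OpenAIEmbeddings instance (spelling as in app.py)
    url=qdrant_url,
    prefer_grpc=True,
    api_key=qdrant_api_key,
    collection_name="llm_app",  # value introduced by this commit
    quantization_config=models.BinaryQuantization(
        binary=models.BinaryQuantizationConfig(
            always_ram=True,
        ),
    ),
)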
@@ -60,6 +65,7 @@ qdrant = Qdrant.from_documents(
     )
 )
 
+
 #qdrant client
 qdrant_client = QdrantClient(
     url=qdrant_url,
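Only a blank line is added in this block. For completeness, a sketch of the client construction it belongs to; any argument after url is an assumption:

# Sketch only: arguments after url are assumed from the surrounding code.
from qdrant_client import QdrantClient

qdrant_client = QdrantClient(
    url=qdrant_url,
    api_key=qdrant_api_key,
)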
@@ -72,12 +78,17 @@ from re import search
 retriver=qdrant.as_retriever( search_type="similarity", search_kwargs={"k":2})
 
 
+#search query
+query="show me a best darmatology doctor in peshawar "
+docs=retriver.get_relevant_documents(query)
+
+
 from langchain import PromptTemplate
 
 prompt = PromptTemplate(
     template="""
 # Your Role
-You are a highly skilled AI specialized in healthcare and medical information retrieval. Your expertise lies in understanding the medical needs of patients and accurately matching them with the most suitable healthcare professionals based on the given context.
+You are a highly skilled AI specialized in healthcare and medical information retrieval. Your expertise lies in understanding the medical needs of patients and accurately matching them with the most suitable healthcare professionals, including but not limited to surgeons, dentists, dermatologists, cardiologists, neurologists, etc., based on the user's query and the provided context.
 
 # Instruction
 Your task is to answer the question using the following pieces of retrieved context delimited by XML tags.
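The commit adds a quick retrieval check right after the retriever is created. A sketch of the same step; in recent LangChain releases get_relevant_documents is deprecated in favour of retriever.invoke, which returns the same list of Documents:

# Sketch mirroring the committed lines; the print is illustrative only.
retriver = qdrant.as_retriever(search_type="similarity", search_kwargs={"k": 2})

query = "show me a best darmatology doctor in peshawar "   # query string exactly as committed
docs = retriver.get_relevant_documents(query)               # newer releases: retriver.invoke(query)
print(f"retrieved {len(docs)} documents")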
@@ -88,26 +99,26 @@ prompt = PromptTemplate(
 </retrieved context>
 
 # Constraint
-1. Carefully consider the user's question:
+1. Carefully analyze the user's question:
 User's question:\n{question}\n
-Analyze the intent behind the question, particularly in relation to the medical context, and provide a precise and helpful answer.
-- Reflect on why the question was asked and provide an appropriate response based on the context you understand.
+Your goal is to understand the user's needs and match them with the most relevant healthcare professional(s) from the provided context.
+- Reflect on why the question was asked, and deliver an appropriate response based on the context you understand.
 2. Select the most relevant information (the key details directly related to the question) from the retrieved context and use it to formulate an answer.
-3. Generate a concise, logical, and medically accurate answer. When generating the answer, include the following details about the doctor in a bulleted format:
-Doctor Name: Dr. Shahzad Rashid Awan
-• City: Peshawar
-• Specialization: Dermatologist
-• Qualification: MBBS, MCPS (Dermatology)
-Experience: 12 years
-• Patient Satisfaction Rate: 93%
-Avg Time to Patients: 13 mins
-• Wait Time: 10 mins
-• Hospital Address: Rahim Medical Center And Hospital, Hasht Nagri, Peshawar
-• Fee: PKR 1000
-• Profile Link: https://www.marham.pk/doctors/peshawar/dermatologist/dr-shahzad-rashid-awan#reviews-scroll
-4. If the retrieved context does not contain information relevant to the question, or if the documents are irrelevant, respond with 'I can't find the answer to that question in the material I have'.
-5. Limit the answer to five sentences maximum. Ensure the answer is concise, logical, and medically appropriate.
-6. At the end of the response, provide the doctor's profile metadata as shown in the relevant documents, ensuring all bullet points are clearly mentioned.
+3. Generate a comprehensive, logical, and medically accurate answer. When generating the answer, include the following details about the healthcare professional:
+• Name of the Professional
+• City
+• Specialization (e.g., Surgeon, Dentist, Cardiologist, etc.)
+• Qualification (e.g., MBBS, FCPS, etc.)
+Years of Experience
+• Patient Satisfaction Rate (if available)
+Average Time Spent with Patients (if available)
+• Wait Time (if available)
+• Hospital/Clinic Address
+Consultation Fee
+• Profile Link (if available)
+4. If the retrieved context does not contain enough relevant information, or if the documents are irrelevant, respond with 'I can't find the answer to that question in the material I have'.
+5. Provide a complete answer to the user. Do not limit the information if there is more useful data available in the retrieved context.
+6. At the end of the response, do not include any unnecessary metadata (such as Source, Row, or _id). Only focus on the healthcare professional's information relevant to the user's query.
 
 # Question:
 {question}""",
@@ -115,8 +126,10 @@ prompt = PromptTemplate(
 )
 
 
-from langchain.chat_models import ChatOpenAI
-llm = ChatOpenAI(model_name="gpt-4o", temperature=0, openai_api_key=openai_api_key)
+#import ChatOpenAI
+# llm = ChatOpenAI(model_name="gpt-4o", temperature=0, openai_api_key=openai_api_key)
+
+
 
 def format_docs(docs):
     formatted_docs = []
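The body of format_docs sits outside the visible hunks; an implementation consistent with the visible pieces (an accumulator list joined with blank lines) might look like the following, with the metadata handling being an assumption rather than the committed code:

# Sketch only: loop body and metadata handling are assumptions.
def format_docs(docs):
    formatted_docs = []
    for doc in docs:
        # page_content carries the CSV row text; metadata is kept for reference
        formatted_docs.append(f"{doc.page_content}\n(metadata: {doc.metadata})")
    # Join all formatted documents with double newlines
    return "\n\n".join(formatted_docs)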
@@ -133,7 +146,9 @@ def format_docs(docs):
     # Join all formatted documents with double newlines
     return "\n\n".join(formatted_docs)
 
-from langchain_core.output_parsers import StrOutputParser
+#import strw
+
+from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
 rag_chain = (
     {"context": retriver| format_docs, "question": RunnablePassthrough()}
@@ -142,17 +157,13 @@ rag_chain = (
     | StrOutputParser()
 )
 
-
-
-
-
 from langchain.chat_models import ChatOpenAI
 from langchain.schema import AIMessage, HumanMessage
 import openai
 import os
 import gradio as gr
 
-llm = ChatOpenAI(temperature=1.0, model='gpt-4o', openai_api_key=openai_api_key)
+llm = ChatOpenAI(temperature=0.5, model='gpt-4o', openai_api_key=openai_api_key)
 
 def reg(message, history):
     history_langchain_format = []
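The commit replaces the eagerly created gpt-4o model (temperature 1.0) with one at temperature 0.5. On current LangChain versions the equivalent object comes from the langchain-openai package; a sketch mirroring the committed arguments:

# Sketch: same configuration via the maintained langchain-openai package.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0.5, model="gpt-4o", openai_api_key=openai_api_key)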
@@ -162,6 +173,14 @@ def reg(message, history):
     history_langchain_format.append(HumanMessage(content=message))
     gpt_response = llm(history_langchain_format)
     return rag_chain.invoke(message)
+# Gradio ChatInterface
+demo = gr.ChatInterface(
+    fn=reg,
+    title="Doctors Appointments Assistant",
+    theme="soft",
+)
+
+demo.launch(show_api=False)
+
 
-gr.ChatInterface(reg).launch()
 
 
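Assembled, the chat wiring added by this commit reads as below. The history loop inside reg is outside the visible hunks and is an assumed reconstruction; note that gpt_response is computed from the raw history but never used, since the returned answer comes from rag_chain.invoke(message):

# Sketch of the committed wiring; the history loop is an assumption.
import gradio as gr
from langchain.schema import AIMessage, HumanMessage

def reg(message, history):
    history_langchain_format = []
    for human, ai in history:                        # assumed (user, assistant) tuple pairs
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    gpt_response = llm(history_langchain_format)     # computed but unused in the committed code
    return rag_chain.invoke(message)

# Gradio ChatInterface
demo = gr.ChatInterface(
    fn=reg,
    title="Doctors Appointments Assistant",
    theme="soft",
)

demo.launch(show_api=False)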