eudoxie committed
Commit e875df3 · verified · 1 parent: 3b6f65b

Update app.py

Files changed (1)
  1. app.py +5 -62
app.py CHANGED
@@ -1,13 +1,5 @@
 import pandas as pd
 
-df = pd.read_csv("./drugs_side_effects_drugs_com.csv")
-df.info()
-
-df = df[['drug_name', 'medical_condition', 'side_effects']]
-df.dropna(inplace=True)
-
-df.info()
-
 context_data = pd.read_csv("drugs_side_effects_drugs_com.csv")
 
 import os
@@ -28,15 +20,9 @@ vectorstore = Chroma(
     persist_directory="./",
 )
 
-vectorstore.get().keys()
-
 # add data to vector nstore
 vectorstore.add_texts(context_data)
 
-query = "What drug that causes these side effects hives ; difficulty breathing; swelling of your face, lips, tongue, or throat."
-docs = vectorstore.similarity_search(query)
-print(docs[0].page_content)
-
 retriever = vectorstore.as_retriever()
 
 from langchain_core.prompts import PromptTemplate
@@ -45,11 +31,8 @@ template = ("""You are a medical expert.
 Use the provided context to answer the question.
 If you don't know the answer, say so. Explain your answer in detail.
 Do not discuss the context in your response; just provide the answer directly.
-
 Context: {context}
-
 Question: {question}
-
 Answer:""")
 
 rag_prompt = PromptTemplate.from_template(template)
@@ -64,20 +47,6 @@ rag_chain = (
     | StrOutputParser()
 )
 
-from IPython.display import display, Markdown
-
-response = rag_chain.invoke("What drug that causes these side effects hives ; difficulty breathing; swelling of your face, lips, tongue, or throat")
-Markdown(response)
-
-from IPython.display import display, Markdown
-
-response = rag_chain.invoke("What is Capital of Greece?")
-Markdown(response)
-
-"""# Deployment
-
-"""
-
 import gradio as gr
 
 def rag_memory_stream(text):
@@ -86,6 +55,10 @@ def rag_memory_stream(text):
         partial_text += new_text
         yield partial_text
 
+examples = ['I feel dizzy', 'what is the possible sickness for fatigue']
+
+
+
 
 title = "MediGuide ChatBot"
 demo = gr.Interface(
@@ -93,6 +66,7 @@ demo = gr.Interface(
     fn=rag_memory_stream,
     inputs="text",
     outputs="text",
+    examples=examples,
     allow_flagging="never",
 )
 
@@ -100,36 +74,5 @@ demo = gr.Interface(
 if __name__ == "__main__":
     demo.launch()
 
-"""# Evaluating Using Blue Score and Rouge Score"""
-
-# qa_pair = []
-# for i in range(len(context_data)):
-#     drug_name = str(context_data['drug_name'][i])
-#     medical_condition = str(context_data['medical_condition'][i])
-#     side_effects = str(context_data['side_effects'][i])
-
-#     Question = f"What are the side effect of {drug_name} ?"
-#     Answer = f"Side Effects: {side_effects}"
-
-#     qa_pair.append([Question,Answer])
-
-# df = pd.DataFrame(qa_pair, columns=['Questions', 'Answers'])
-
-# question = [df['Questions'][0]]
-
-# import sacrebleu
-# from rouge_score import rouge_scorer
-
-# predicted_answer = rag_chain.invoke("What are the side effects of doxycycline?")
-# predicted_answer
-
-# reference_answer =df['Answers'][0]
-# reference_answer
-
-# blue_score = sacrebleu.corpus_bleu([predicted_answer], reference_answer).score
-# blue_score
 
-# scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True)
-# rouge_score = scorer.score(reference_answer, predicted_answer)
-# rouge_score
 
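A side note on the evaluation block removed above: it was fully commented out, and its sacrebleu call passed a bare reference string where corpus_bleu expects a list of reference lists. If that check is ever revived, a minimal sketch as a separate script might look like the following. This assumes the rag_chain object is importable from app.py, the sacrebleu and rouge-score packages are installed, and the CSV columns are those used in the removed code; the file name eval_rag.py and the single QA pair are illustrative only.

# eval_rag.py -- hypothetical standalone evaluation script, not part of this commit
import pandas as pd
import sacrebleu
from rouge_score import rouge_scorer

from app import rag_chain  # assumes app.py builds the chain at import time

context_data = pd.read_csv("drugs_side_effects_drugs_com.csv")

# Build one reference QA pair from the first row, mirroring the removed code.
drug_name = str(context_data["drug_name"][0])
reference_answer = f"Side Effects: {context_data['side_effects'][0]}"
question = f"What are the side effects of {drug_name}?"

predicted_answer = rag_chain.invoke(question)

# sacrebleu expects a list of hypotheses and a list of reference lists.
bleu = sacrebleu.corpus_bleu([predicted_answer], [[reference_answer]]).score

# ROUGE-L F-measure between the reference answer and the prediction.
scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
rouge_l = scorer.score(reference_answer, predicted_answer)["rougeL"].fmeasure

print(f"BLEU: {bleu:.2f}  ROUGE-L F1: {rouge_l:.3f}")

Keeping the evaluation in its own script avoids pulling evaluation-only dependencies into the deployed Gradio app.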