rishabh5752 committed
Commit 740d871 · 1 Parent(s): 9da0b8d

Update app.py

Files changed (1)
  1. app.py +54 -56
app.py CHANGED
@@ -1,8 +1,8 @@
-
+import gradio as gr
 import chainlit as cl
-from langchain import OpenAI, LLMChain
+from langchain import OpenAI, LLMChain
 from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
-from langchain.memory import ConversationBufferWindowMemory
+from langchain.memory import ConversationBufferWindowMemory
 from langchain.prompts import StringPromptTemplate
 from langchain.tools import DuckDuckGoSearchRun
 from typing import List, Union
@@ -15,8 +15,7 @@ from main1 import predict
 from PIL import Image
 import io
 
-
-OPENAI_API_KEY='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'
+OPENAI_API_KEY = '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'
 
 search = DuckDuckGoSearchRun()
 
@@ -26,26 +25,25 @@ def duck_wrapper(input_text):
 
 tools = [
     Tool(
-        name = "Search",
+        name="Search",
         func=duck_wrapper,
-        description="useful for when you need to answer medical and pharmalogical questions"
+        description="useful for when you need to answer medical and pharmaceutical questions"
     )
 ]
 
-
 def call_detection_model(index):
     results = [
         {
-            "has_cancer":False,
-            "chances_of_having_cancer":8.64
+            "has_cancer": False,
+            "chances_of_having_cancer": 8.64
         },
         {
-            "has_cancer":True,
-            "chances_of_having_cancer":97.89
+            "has_cancer": True,
+            "chances_of_having_cancer": 97.89
         },
         {
-            "has_cancer":False,
-            "chances_of_having_cancer":2.78
+            "has_cancer": False,
+            "chances_of_having_cancer": 2.78
         }
     ]
     return results[index]
@@ -55,7 +53,7 @@ class CustomPromptTemplate(StringPromptTemplate):
     template: str
    # The list of tools available
     tools: List[Tool]
-
+
     def format(self, **kwargs) -> str:
         # Get the intermediate steps (AgentAction, Observation tuples)
         # Format them in a particular way
@@ -77,7 +75,7 @@ class CustomOutputParser(AgentOutputParser):
         # Check if agent should finish
         if "Final Answer:" in llm_output:
             return AgentFinish(
-                # Return values is generally always a dictionary with a single `output` key
+                # Return values are generally always a dictionary with a single `output` key
                 # It is not recommended to try anything else at the moment :)
                 return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
                 log=llm_output,
@@ -94,74 +92,74 @@ class CustomOutputParser(AgentOutputParser):
 
 template = None
 prompt_with_history = None
-from contextlib import redirect_stdout
-
 
 @cl.on_chat_start
 async def main():
     cl.user_session.set("index", 0)
     cl.user_session.set("has_uploaded_image", False)
-    await cl.Message("Upload image of your condition").send()
 
-@cl.on_file_upload(accept=['image/png'])
-async def main(file:any):
-    index = cl.user_session.get("index")
-    file = file[0]["content"]
-    image_stream = io.BytesIO(file)
+def analyze_image(file, user_session):
+    index = user_session.get("index")
+    image_stream = io.BytesIO(file.read())
     image = Image.open(image_stream)
     image = image.convert('RGB')
     image = image.resize((150, 150))
     image.save("image.png", 'png')
     results = call_detection_model(index)
-    cl.user_session.set("index", index+1)
+    user_session.set("index", index + 1)
     image.close()
-    cl.user_session.set("results", results)
+    user_session.set("results", results)
     if results["has_cancer"]:
-        cl.user_session.set("template", template_for_has_cancer)
+        user_session.set("template", template_for_has_cancer)
     else:
-        cl.user_session.set("template", template_for_does_not_have_cancer)
+        user_session.set("template", template_for_does_not_have_cancer)
     prompt_with_history = CustomPromptTemplate(
-        template=cl.user_session.get("template"),
+        template=user_session.get("template"),
         tools=tools,
         input_variables=["input", "intermediate_steps", "history"]
     )
-    llm_chain = LLMChain(prompt = prompt_with_history,llm=OpenAI(temperature=1.2,streaming=True),verbose=True)
+    llm_chain = LLMChain(prompt=prompt_with_history, llm=OpenAI(temperature=1.2, streaming=True), verbose=True)
     tool_names = [tool.name for tool in tools]
     output_parser = CustomOutputParser()
     agent = LLMSingleActionAgent(
-        llm_chain=llm_chain,
+        llm_chain=llm_chain,
         output_parser=output_parser,
-        stop=["\nObservation:"],
+        stop=["\nObservation:"],
         allowed_tools=tool_names
     )
-    memory=ConversationBufferWindowMemory(k=2)
+    memory = ConversationBufferWindowMemory(k=2)
     agent_executor = AgentExecutor.from_agent_and_tools(
-        agent=agent,
-        tools=tools,
-        verbose=True,
+        agent=agent,
+        tools=tools,
+        verbose=True,
         memory=memory
     )
-    cl.user_session.set("agent_executor", agent_executor)
-    cl.user_session.set("has_uploaded_image", True)
-    await cl.Message("Image has been uploaded and analyzing...\n").send()
-
-
+    user_session.set("agent_executor", agent_executor)
+    user_session.set("has_uploaded_image", True)
 
-@cl.on_message
-async def main(message : str):
-    has_uploaded_image = cl.user_session.get("has_uploaded_image")
-    results = cl.user_session.get("results")
+def get_result(user_session, message):
+    has_uploaded_image = user_session.get("has_uploaded_image")
+    results = user_session.get("results")
     if has_uploaded_image == False:
-        await cl.Message("Please upload a relevent image to proceed with this conversation").send()
-        return
-    if "result" in message or "results" in message:
-        msg = f"These results are a good estimation but its not meant to replace human medical intervention and should be taken with a grain of salt. According to the image uploaded, your chances of having skin cancer are {results['chances_of_having_cancer']}% and your condition lies in the {cancer_category(results['chances_of_having_cancer'])} range. "
+        return "Please upload a relevant image to proceed with this conversation"
+    if "result" in message or "results" in message:
+        msg = f"These results are a good estimation but it's not meant to replace human medical intervention and should be taken with a grain of salt. According to the image uploaded, your chances of having skin cancer are {results['chances_of_having_cancer']}% and your condition lies in the {cancer_category(results['chances_of_having_cancer'])} range. "
         if cancer_category(results["chances_of_having_cancer"]) != "Pre Benign":
-            msg += "You should consider vising the doctor for a complete checkup."
-        await cl.Message(msg).send()
-        return
-    agent_executor = cl.user_session.get("agent_executor")
+            msg += "You should consider visiting the doctor for a complete checkup."
+        return msg
+    agent_executor = user_session.get("agent_executor")
     res = agent_executor.run(message)
-    await cl.Message(res).send()
-
-
+    return res
+
+iface = gr.Interface(
+    analyze_image,
+    inputs=gr.inputs.File(label="Upload image of your condition", type="file", accept="image/png"),
+    outputs=gr.outputs.Textbox(),
+    live=True,
+    capture_session=True,
+    fn_kwargs={"user_session": cl.user_session},
+    title="Skin Condition Analyzer",
+    description="Upload an image of your skin condition and interact with the AI to get information about it."
+)
+
+iface.launch()
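
Editor's note: both revisions keep a literal OpenAI key in app.py, and this commit re-adds it, so the key shown in the diff is exposed in the repository history and should be treated as compromised. A minimal sketch of the usual fix, assuming the key is exported in the environment that runs the app:

import os

# Read the key from the environment instead of hardcoding it in app.py.
# (Assumes OPENAI_API_KEY is set in the shell that launches the app.)
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]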
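The main effect of the refactor is that the upload and chat logic moves out of Chainlit's @cl.on_file_upload and @cl.on_message handlers into plain functions (analyze_image, get_result) that take the session object as a parameter. Because the helpers only call .get and .set on that object, they can be exercised without a running Chainlit app. A minimal sketch, where DictSession is a hypothetical stand-in for cl.user_session (not part of the commit):

class DictSession:
    """Hypothetical dict-backed stand-in for cl.user_session."""

    def __init__(self):
        self._store = {"index": 0, "has_uploaded_image": False}

    def get(self, key):
        return self._store.get(key)

    def set(self, key, value):
        self._store[key] = value


session = DictSession()

# Before any image is uploaded, get_result short-circuits:
print(get_result(session, "what are my results?"))
# -> "Please upload a relevant image to proceed with this conversation"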
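The new gr.Interface block is worth flagging: gr.inputs.File and gr.outputs.Textbox belong to the pre-3.0 Gradio namespaces, and accept, capture_session, and fn_kwargs do not appear in the documented signatures of Interface or File in recent Gradio releases, so this block likely fails at runtime as committed. A hedged sketch of roughly equivalent wiring on Gradio 3/4, where analyze_and_report and the module-level session reuse the hypothetical DictSession above and are assumptions, not the author's code:

import gradio as gr

session = DictSession()  # hypothetical stand-in from the sketch above

def analyze_and_report(file):
    # On Gradio 3 the File component passes a tempfile-like object with a
    # .name path; on Gradio 4 it may pass a plain filepath string instead.
    path = file if isinstance(file, str) else file.name
    with open(path, "rb") as f:
        analyze_image(f, session)  # analyze_image reads the stream via file.read()
    return "Image has been uploaded and analyzed."

demo = gr.Interface(
    fn=analyze_and_report,
    inputs=gr.File(label="Upload image of your condition", file_types=[".png"]),
    outputs=gr.Textbox(),
    title="Skin Condition Analyzer",
    description="Upload an image of your skin condition and interact with the AI to get information about it.",
)

demo.launch()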