KeerthiVM committed
Commit 8927e5b · 1 Parent(s): 1914e12

Prompt change

Files changed (2):
  1. SkinGPT.py +12 -2
  2. app.py +4 -4
SkinGPT.py CHANGED
@@ -10,6 +10,8 @@ import torch
 MODEL_DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32
 token = os.getenv("HF_TOKEN")
 import streamlit as st
+import re
+
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 class Blip2QFormer(nn.Module):
@@ -250,8 +252,16 @@ class SkinGPT4(nn.Module):
 
         response = response.strip('"').strip()
 
-        print("Processed response:", response)  # Debug print
-        return response
+        cleaned = re.sub(r'^[ \t]+', '', response, flags=re.MULTILINE)
+
+        # Remove excessive blank lines (keep at most one blank line between paragraphs)
+        cleaned = re.sub(r'\n\s*\n', '\n\n', cleaned)
+
+        print(cleaned)
+        return cleaned
+
+        # print("Processed response:", response)  # Debug print
+        # return response
         # print("Split parts:", full_output.split("### Response:"))
         # # response = full_output.split("### Response:")[-1].strip()
         #
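The new cleanup step normalizes the model output before it reaches the UI: the first re.sub strips leading spaces and tabs from every line (indentation that Markdown can otherwise render as a code block), and the second collapses runs of blank lines to a single blank line. A minimal standalone sketch of the same two substitutions, using a made-up response string:

import re

# Hypothetical raw model output: indented lines plus extra blank lines,
# the kind of string the cleanup in SkinGPT.py receives.
response = "  Diagnosis:\n\n\n    Likely eczema.\n\n\n  Advice:\n    See a dermatologist."

# Strip leading spaces/tabs from each line; MULTILINE anchors ^ at every line start.
cleaned = re.sub(r'^[ \t]+', '', response, flags=re.MULTILINE)

# Collapse two or more consecutive newlines (with optional whitespace between)
# down to a single blank line.
cleaned = re.sub(r'\n\s*\n', '\n\n', cleaned)

print(cleaned)
# Diagnosis:
#
# Likely eczema.
#
# Advice:
# See a dermatologist.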
app.py CHANGED
@@ -103,8 +103,8 @@ if uploaded_file is not None and uploaded_file != st.session_state.current_image
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         # st.markdown(remove_code_blocks(message["content"]))
-        # st.markdown(message["content"])
-        st.text(message["content"])
+        st.markdown(message["content"])
+        # st.text(message["content"])
 
 # for message in st.session_state.messages:
 #     role = "You" if message["role"] == "user" else "assistant"
@@ -135,8 +135,8 @@ if prompt := st.chat_input("Ask a follow-up question..."):
     result = classifier.predict(image, user_input=prompt, reuse_embeddings=False)
 
     # st.markdown(remove_code_blocks(result["diagnosis"]))
-    # st.markdown(result["diagnosis"])
-    st.text(result["diagnosis"])
+    st.markdown(result["diagnosis"])
+    # st.text(result["diagnosis"])
     st.session_state.messages.append({"role": "assistant", "content": result["diagnosis"]})
 
 if st.session_state.messages and st.button("📄 Download Chat as PDF"):
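Both call sites swap st.text for st.markdown: st.text prints the string literally in fixed-width type (asterisks and all), while st.markdown renders Markdown markup such as bold text and bullet lists. A minimal sketch of the difference, with a made-up diagnosis string:

import streamlit as st

# Hypothetical model output containing Markdown markup.
diagnosis = "**Likely diagnosis:** eczema\n\n- Keep the skin moisturized\n- Avoid known irritants"

# st.text(diagnosis)    # would show the markup verbatim in monospace
st.markdown(diagnosis)  # renders bold text and a bullet list

This pairs with the SkinGPT.py cleanup above: once leading indentation is stripped from the response, st.markdown no longer risks rendering indented lines as code blocks.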