Increased token limit
SkinGPT.py +15 -5
SkinGPT.py
CHANGED
@@ -239,14 +239,24 @@ class SkinGPT4(nn.Module):
         # response = full_output.split("### Response:")[-1].strip()
 
         print("Full output:", full_output)
-        print("Split parts:", full_output.split("### Response:"))
-        # response = full_output.split("### Response:")[-1].strip()
 
-
-
+        if "### Response:" in full_output:
+            response = full_output.split("### Response:")[-1].strip()
+        else:
+            response = full_output.strip()
+
+        response = response.strip('"').strip()
 
-        print("
+        print("Processed response:", response)  # Debug print
         return response
+        # print("Split parts:", full_output.split("### Response:"))
+        # # response = full_output.split("### Response:")[-1].strip()
+        #
+        # response_parts = full_output.split("### Response:")
+        # response = response_parts[-1].strip() if len(response_parts) > 1 else full_output
+        #
+        # print("Final response:", response_parts)
+        # return response
 
         # return response
 
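For reference, the new extraction logic can be exercised on its own. The sketch below is a minimal standalone version of the parsing added in this commit; the extract_response helper name and the sample string are hypothetical, while in SkinGPT.py the same steps run inline on the decoded model output.

# Minimal sketch of the response-extraction logic added in this commit.
# extract_response and the sample string are hypothetical; in SkinGPT.py
# the same steps run inline on the decoded model output.

def extract_response(full_output: str) -> str:
    # Keep only the text after the last "### Response:" marker, if present.
    if "### Response:" in full_output:
        response = full_output.split("### Response:")[-1].strip()
    else:
        response = full_output.strip()
    # Drop surrounding double quotes and any leftover whitespace.
    return response.strip('"').strip()


if __name__ == "__main__":
    sample = '### Instruction: Describe the skin lesion.\n### Response: "A raised, pigmented papule."'
    print(extract_response(sample))  # -> A raised, pigmented papule.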