Spaces:
Runtime error
Runtime error
Make exception messages shorter so they don't (completely) bloat the UI.
Browse files
chat.py
CHANGED
@@ -1,8 +1,7 @@
|
|
1 |
import json
|
2 |
import re
|
3 |
import time
|
4 |
-
import
|
5 |
-
|
6 |
import gradio as gr
|
7 |
|
8 |
import chat_client
|
@@ -84,9 +83,9 @@ def _generate(
|
|
84 |
state["client"] = chat_client.ModelClient(CHAT_URL)
|
85 |
state["client"].open_session(f"bigscience/{model}-petals", max_length)
|
86 |
state["model"] = model
|
87 |
-
except Exception:
|
88 |
-
print(
|
89 |
-
raise gr.Error(
|
90 |
|
91 |
else:
|
92 |
context = ""
|
@@ -185,13 +184,13 @@ def _generate(
|
|
185 |
|
186 |
print("Broken session!")
|
187 |
raise
|
188 |
-
except Exception:
|
189 |
client.close_session()
|
190 |
state["client"] = None
|
191 |
state["model"] = None
|
192 |
|
193 |
-
print(
|
194 |
-
raise gr.Error(
|
195 |
|
196 |
|
197 |
def reset(state):
|
|
|
1 |
import json
|
2 |
import re
|
3 |
import time
|
4 |
+
from datetime import datetime
|
|
|
5 |
import gradio as gr
|
6 |
|
7 |
import chat_client
|
|
|
83 |
state["client"] = chat_client.ModelClient(CHAT_URL)
|
84 |
state["client"].open_session(f"bigscience/{model}-petals", max_length)
|
85 |
state["model"] = model
|
86 |
+
except Exception as e:
|
87 |
+
print(datetime.now(), str(e)[-500:])
|
88 |
+
raise gr.Error(str(e)[-500:])
|
89 |
|
90 |
else:
|
91 |
context = ""
|
|
|
184 |
|
185 |
print("Broken session!")
|
186 |
raise
|
187 |
+
except Exception as e:
|
188 |
client.close_session()
|
189 |
state["client"] = None
|
190 |
state["model"] = None
|
191 |
|
192 |
+
print(datetime.now(), str(e)[-500:])
|
193 |
+
raise gr.Error(str(e)[-500:])
|
194 |
|
195 |
|
196 |
def reset(state):
|
prompt.py
CHANGED
@@ -1,6 +1,5 @@
|
|
1 |
import time
|
2 |
-
import
|
3 |
-
|
4 |
import gradio as gr
|
5 |
|
6 |
import chat_client
|
@@ -50,9 +49,9 @@ def _generate(
|
|
50 |
try:
|
51 |
client = chat_client.ModelClient(CHAT_URL)
|
52 |
client.open_session(f"bigscience/{model}-petals", max_length)
|
53 |
-
except Exception:
|
54 |
-
print(
|
55 |
-
raise gr.Error(
|
56 |
|
57 |
if add_stoptoken:
|
58 |
prompt += "</s>" if "bloomz" in model else "\n\n"
|
@@ -112,9 +111,9 @@ def _generate(
|
|
112 |
|
113 |
# Prints final result w/o statistics
|
114 |
yield state, prompt2, output
|
115 |
-
except Exception:
|
116 |
-
print(
|
117 |
-
raise gr.Error(
|
118 |
|
119 |
|
120 |
def stop(state):
|
|
|
1 |
import time
|
2 |
+
from datetime import datetime
|
|
|
3 |
import gradio as gr
|
4 |
|
5 |
import chat_client
|
|
|
49 |
try:
|
50 |
client = chat_client.ModelClient(CHAT_URL)
|
51 |
client.open_session(f"bigscience/{model}-petals", max_length)
|
52 |
+
except Exception as e:
|
53 |
+
print(datetime.now(), str(e)[-500:])
|
54 |
+
raise gr.Error(str(e)[-500:])
|
55 |
|
56 |
if add_stoptoken:
|
57 |
prompt += "</s>" if "bloomz" in model else "\n\n"
|
|
|
111 |
|
112 |
# Prints final result w/o statistics
|
113 |
yield state, prompt2, output
|
114 |
+
except Exception as e:
|
115 |
+
print(datetime.now(), str(e)[-500:])
|
116 |
+
raise gr.Error(str(e)[-500:])
|
117 |
|
118 |
|
119 |
def stop(state):
|