Improve logging for normal operation
Files changed:
- app.py (+35, -27)
- log_chat.py (+25, -23)
- utils.py (+22, -6)
app.py
CHANGED
```diff
@@ -5,8 +5,8 @@ from openai import OpenAI
 import gradio as gr
 
 from theme import apriel
-from utils import COMMUNITY_POSTFIX_URL, get_model_config,
-    logged_event_handler, DEBUG_MODEL
+from utils import COMMUNITY_POSTFIX_URL, get_model_config, check_format, models_config, \
+    logged_event_handler, DEBUG_MODEL, log_debug, log_info, log_error
 from log_chat import log_chat
 
 MODEL_TEMPERATURE = 0.8
@@ -15,8 +15,6 @@ DEFAULT_OPT_OUT_VALUE = False
 
 DEFAULT_MODEL_NAME = "Apriel-Nemotron-15b-Thinker" if not DEBUG_MODEL else "Apriel-5b"
 
-print(f"Gradio version: {gr.__version__}")
-
 BUTTON_ENABLED = gr.update(interactive=True)
 BUTTON_DISABLED = gr.update(interactive=False)
 INPUT_ENABLED = gr.update(interactive=True)
@@ -37,7 +35,7 @@ openai_client = None
 def app_loaded(state, request: gr.Request):
     message_html = setup_model(DEFAULT_MODEL_NAME, intial=False)
     state['session'] = request.session_hash if request else uuid4().hex
-
+    log_debug(f"app_loaded() --> Session: {state['session']}")
     return state, message_html
 
 
@@ -50,7 +48,7 @@ def update_model_and_clear_chat(model_name):
 def setup_model(model_name, intial=False):
     global model_config, openai_client
     model_config = get_model_config(model_name)
-
+    log_debug(f"update_model() --> Model config: {model_config}")
     openai_client = OpenAI(
         api_key=model_config.get('AUTH_TOKEN'),
         base_url=model_config.get('VLLM_API_URL')
@@ -60,7 +58,7 @@ def setup_model(model_name, intial=False):
     _link = f"<a href='{model_config.get('MODEL_HF_URL')}{COMMUNITY_POSTFIX_URL}' target='_blank'>{_model_hf_name}</a>"
     _description = f"We'd love to hear your thoughts on the model. Click here to provide feedback - {_link}"
 
-
+    log_debug(f"Switched to model {_model_hf_name}")
 
     if intial:
         return
@@ -95,14 +93,20 @@ def run_chat_inference(history, message, state):
     state["is_streaming"] = True
     state["stop_flag"] = False
     error = None
+    model_name = model_config.get('MODEL_NAME')
 
     if len(history) == 0:
         state["chat_id"] = uuid4().hex
 
+    if openai_client is None:
+        log_info("Client UI is stale, letting user know to refresh the page")
+        gr.Warning("Client UI is stale, please refresh the page")
+        return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
+
     # outputs: model_dropdown, user_input, send_btn, stop_btn, clear_btn, session_state
-
-
-
+    log_debug(f"{'-' * 80}")
+    log_debug(f"chat_fn() --> Message: {message}")
+    log_debug(f"chat_fn() --> History: {history}")
 
     try:
         # Check if the message is empty
@@ -112,16 +116,16 @@ def run_chat_inference(history, message, state):
             return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
 
         chat_start_count = chat_start_count + 1
-
-
+        log_info(f"chat_start_count: {chat_start_count}, turns: {int(len(history if history else []) / 3)}, "
+                 f"model: {model_name}")
 
         is_reasoning = model_config.get("REASONING")
 
         # Remove any assistant messages with metadata from history for multiple turns
-
+        log_debug(f"Initial History: {history}")
         check_format(history, "messages")
         history.append({"role": "user", "content": message})
-
+        log_debug(f"History with user message: {history}")
         check_format(history, "messages")
 
         # Create the streaming response
@@ -131,18 +135,18 @@ def run_chat_inference(history, message, state):
                 item.get("role") == "assistant" and
                 isinstance(item.get("metadata"), dict) and
                 item.get("metadata", {}).get("title") is not None)]
-
+            log_debug(f"Updated History: {history_no_thoughts}")
             check_format(history_no_thoughts, "messages")
-
+            log_debug(f"history_no_thoughts with user message: {history_no_thoughts}")
 
             stream = openai_client.chat.completions.create(
-                model=
+                model=model_name,
                 messages=history_no_thoughts,
                 temperature=MODEL_TEMPERATURE,
                 stream=True
             )
         except Exception as e:
-
+            log_error(f"Error: {e}")
            error = str(e)
            yield ([{"role": "assistant",
                     "content": "😔 The model is unavailable at the moment. Please try again later."}],
@@ -150,14 +154,14 @@ def run_chat_inference(history, message, state):
            if state["opt_out"] is not True:
                log_chat(chat_id=state["chat_id"],
                         session_id=state["session"],
-                         model_name=
+                         model_name=model_name,
                         prompt=message,
                        history=history,
                        info={"is_reasoning": model_config.get("REASONING"), "temperature": MODEL_TEMPERATURE,
                              "stopped": True, "error": str(e)},
                        )
            else:
-
+                log_info(f"User opted out of chat history. Not logging chat. model: {model_name}")
            return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
 
        if is_reasoning:
@@ -166,21 +170,21 @@ def run_chat_inference(history, message, state):
                content="Thinking...",
                metadata={"title": "🧠 Thought"}
            ))
-
+            log_debug(f"History added thinking: {history}")
            check_format(history, "messages")
        else:
            history.append(gr.ChatMessage(
                role="assistant",
                content="",
            ))
-
+            log_debug(f"History added empty assistant: {history}")
            check_format(history, "messages")
 
        output = ""
        completion_started = False
        for chunk in stream:
            if state["stop_flag"]:
-
+                log_debug(f"chat_fn() --> Stopping streaming...")
                break  # Exit the loop if the stop flag is set
            # Extract the new content from the delta field
            content = getattr(chunk.choices[0].delta, "content", "")
@@ -224,28 +228,31 @@ def run_chat_inference(history, message, state):
            # log_message(f"Yielding messages: {history}")
            yield history, INPUT_DISABLED, SEND_BUTTON_DISABLED, STOP_BUTTON_ENABLED, BUTTON_DISABLED, state
 
-
+        log_debug(f"Final History: {history}")
        check_format(history, "messages")
        yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
    finally:
        if error is None:
-
+            log_debug(f"chat_fn() --> Finished streaming. {chat_start_count} chats started.")
            if state["opt_out"] is not True:
                log_chat(chat_id=state["chat_id"],
                         session_id=state["session"],
-                         model_name=
+                         model_name=model_name,
                         prompt=message,
                        history=history,
                        info={"is_reasoning": model_config.get("REASONING"), "temperature": MODEL_TEMPERATURE,
                              "stopped": state["stop_flag"]},
                        )
+
            else:
-
+                log_info(f"User opted out of chat history. Not logging chat. model: {model_name}")
        state["is_streaming"] = False
        state["stop_flag"] = False
        return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
 
 
+log_info(f"Gradio version: {gr.__version__}")
+
 title = None
 description = None
 theme = apriel
@@ -371,3 +378,4 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
     )
 
 demo.queue(default_concurrency_limit=2).launch(ssr_mode=False, show_api=False)
+log_info("Gradio app launched")
```
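One detail worth a standalone look: before each completion call, `run_chat_inference` drops assistant messages whose `metadata` carries a `title` (the "🧠 Thought" entries), so reasoning traces are not resent as model context. A minimal sketch of that same filter; the sample `history` here is invented for illustration:

```python
# Sketch of the thought-stripping filter from run_chat_inference above.
# The sample history is made up for illustration.
history = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Thinking...", "metadata": {"title": "🧠 Thought"}},
    {"role": "assistant", "content": "Hello! How can I help?"},
]

history_no_thoughts = [item for item in history
                       if not (item.get("role") == "assistant" and
                               isinstance(item.get("metadata"), dict) and
                               item.get("metadata", {}).get("title") is not None)]

print(history_no_thoughts)
# [{'role': 'user', 'content': 'Hi'},
#  {'role': 'assistant', 'content': 'Hello! How can I help?'}]
```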
log_chat.py
CHANGED
```diff
@@ -10,7 +10,7 @@ from gradio import ChatMessage
 from huggingface_hub import HfApi, hf_hub_download
 
 from timer import Timer
-from utils import log_warning,
+from utils import log_warning, log_info, log_debug, log_error
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
 DATASET_REPO_ID = os.environ.get("APRIEL_PROMPT_DATASET")
@@ -18,6 +18,7 @@ CSV_FILENAME = "train.csv"
 
 
 def log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, history: list[str], info: dict) -> None:
+    log_info(f"log_chat() called for chat: {chat_id}, queue size: {log_chat_queue.qsize()}, model: {model_name}")
     log_chat_queue.put((chat_id, session_id, model_name, prompt, history, info))
 
 
@@ -28,12 +29,13 @@ def _log_chat_worker():
         try:
             _log_chat(chat_id, session_id, model_name, prompt, history, info)
         except Exception as e:
-
+            log_error(f"Error logging chat: {e}")
         finally:
             log_chat_queue.task_done()
 
 
 def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, history: list[str], info: dict) -> bool:
+    log_info(f"_log_chat() storing chat {chat_id}")
     if DATASET_REPO_ID is None:
         log_warning("No dataset repo ID provided. Skipping logging of prompt.")
         return False
@@ -50,9 +52,9 @@ def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, histo
     # Check if the dataset repo exists, if not, create it
     try:
         repo_info = api.repo_info(repo_id=DATASET_REPO_ID, repo_type="dataset")
-
+        log_debug(f"log_chat() --> Dataset repo found: {repo_info.id} private={repo_info.private}")
     except Exception:  # Create new dataset if none exists
-
+        log_debug(f"log_chat() --> No dataset repo found, creating a new one...")
         api.create_repo(repo_id=DATASET_REPO_ID, repo_type="dataset", private=True)
 
     # Ensure messages are in the correct format
@@ -97,7 +99,7 @@ def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, histo
             )
             pd.read_csv(csv_path)
             file_exists = True
-
+            log_debug(f"log_chat() --> Downloaded existing CSV with {len(pd.read_csv(csv_path))} rows")
             break  # Success, exit the loop
         except Exception as e:
             retry_count += 1
@@ -127,10 +129,10 @@ def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, histo
             dump_hub_csv()
             file_exists = False
         else:
-
+            log_debug(f"log_chat() --> CSV {csv_path} has expected headers: {existing_headers}")
 
     # Write out the new row to the CSV file (append isn't working in HF container, so recreate each time)
-
+    log_debug(f"log_chat() --> Writing CSV file, file_exists={file_exists}")
     try:
         with open(CSV_FILENAME, "w", newline="\n") as f:
             writer = csv.DictWriter(f, fieldnames=new_row.keys())
@@ -140,10 +142,10 @@ def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, histo
                 writer.writerow(row.to_dict())  # Write existing rows
             writer.writerow(new_row)  # Write the new row
 
-
+        log_debug("log_chat() --> Wrote out CSV with new row")
         # dump_local_csv()
     except Exception as e:
-
+        log_error(f"log_chat() --> Error writing to CSV: {e}")
         return False
 
     # Upload updated CSV
@@ -156,8 +158,8 @@ def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, histo
     )
     log_timer.add_step("Uploaded updated CSV")
     log_timer.end()
-
-
+    log_debug("log_chat() --> Finished logging chat")
+    log_debug(log_timer.formatted_result())
 
     return True
 
@@ -172,23 +174,23 @@ def dump_hub_csv():
             token=HF_TOKEN  # Only needed if not already logged in
         )
         df = pd.read_csv(csv_path)
-
+        log_info(df)
         if (df.empty):
             # show raw contents of downloaded csv file
-
+            log_info("Raw file contents:")
             with open(csv_path, 'r') as f:
                 print(f.read())
     except Exception as e:
-
+        log_error(f"Error loading CSV from hub: {e}")
 
 
 def dump_local_csv():
     # Verify the file contents by loading it from the local file and printing it out
     try:
         df = pd.read_csv(CSV_FILENAME)
-
+        log_info(df)
     except Exception as e:
-
+        log_error(f"Error loading CSV from local file: {e}")
 
 
 def test_log_chat():
@@ -207,21 +209,21 @@ def test_log_chat():
     ]
     info = {"additional_info": "Some extra data"}
 
-
+    log_debug("Starting test_log_chat()")
     dump_hub_csv()
     log_chat(chat_id, session_id, model_name, prompt, history, info)
-
+    log_debug("log_chat 1 returned")
     log_chat(chat_id, session_id, model_name, prompt + " + 2", history, info)
-
+    log_debug("log_chat 2 returned")
     log_chat(chat_id, session_id, model_name, prompt + " + 3", history, info)
-
+    log_debug("log_chat 3 returned")
     log_chat(chat_id, session_id, model_name, prompt + " + 4", history, info)
-
+    log_debug("log_chat 4 returned")
 
     sleep_seconds = 10
-
+    log_debug(f"Sleeping {sleep_seconds} seconds to let it finish and log the result.")
     time.sleep(sleep_seconds)
-
+    log_debug("Finished sleeping.")
     dump_hub_csv()
 
 
```
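The pattern behind these changes: `log_chat()` is a non-blocking producer that enqueues onto `log_chat_queue`, while `_log_chat_worker()` drains the queue in the background so CSV downloads and hub uploads never stall a chat turn. A minimal self-contained sketch of that producer/worker shape, assuming the worker runs as a daemon thread (the thread startup itself is outside this diff):

```python
import queue
import threading
import time

log_chat_queue: queue.Queue = queue.Queue()

def _log_chat(chat_id, session_id, model_name, prompt, history, info):
    # Stand-in for the real work: CSV download, append, hub upload.
    time.sleep(0.1)
    print(f"stored chat {chat_id} for model {model_name}")

def log_chat(chat_id, session_id, model_name, prompt, history, info):
    # Producer: enqueue and return immediately; callers never block on I/O.
    log_chat_queue.put((chat_id, session_id, model_name, prompt, history, info))

def _log_chat_worker():
    # Consumer: each get() is paired with task_done() so join() can work.
    while True:
        item = log_chat_queue.get()
        try:
            _log_chat(*item)
        except Exception as e:
            print(f"Error logging chat: {e}")
        finally:
            log_chat_queue.task_done()

threading.Thread(target=_log_chat_worker, daemon=True).start()
log_chat("chat-1", "sess-1", "Apriel-5b", "Hello", [], {})
log_chat_queue.join()  # wait for the worker to drain (for this demo only)
```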
utils.py
CHANGED
```diff
@@ -1,5 +1,6 @@
 import os
 import sys
+import time
 from functools import wraps
 from typing import Any, Literal
 
@@ -42,13 +43,28 @@ def get_model_config(model_name: str) -> dict:
     return config
 
 
-def
+def _log_message(prefix, message, icon=""):
+    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    if len(icon) > 0:
+        icon = f"{icon} "
+    print(f"{timestamp}: {prefix} {icon}{message}")
+
+
+def log_debug(message):
     if DEBUG_MODE is True:
-
+        _log_message("DEBUG", message)
+
+
+def log_info(message):
+    _log_message("INFO ", message)
 
 
 def log_warning(message):
-
+    _log_message("WARN ", message, "⚠️")
+
+
+def log_error(message):
+    _log_message("ERROR", message, "‼️")
 
 
 # Gradio 5.0.1 had issues with checking the message formats. 5.29.0 does not!
@@ -95,8 +111,8 @@ def logged_event_handler(log_msg='', event_handler=None, log_timer=None, clear_t
         if log_timer:
             if clear_timer:
                 log_timer.clear()
-            log_timer.add_step(f"Start: {
-
+            log_timer.add_step(f"Start: {log_debug}")
+        log_debug(f"::: Before event: {log_msg}")
 
         # Call the original event handler
         result = event_handler(*args, **kwargs)
@@ -104,7 +120,7 @@ def logged_event_handler(log_msg='', event_handler=None, log_timer=None, clear_t
         # Log after
         if log_timer:
             log_timer.add_step(f"Completed: {log_msg}")
-
+        log_debug(f"::: After event: {log_msg}")
 
         return result
 
```
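All four helpers funnel through `_log_message`, so every line gets a local timestamp and a fixed-width level prefix. A quick usage sketch (the timestamps in the comments are illustrative, and `log_debug` prints only when `DEBUG_MODE` is true):

```python
from utils import log_debug, log_info, log_warning, log_error

log_info("chat_start_count: 1")              # 2025-05-21 14:03:07: INFO  chat_start_count: 1
log_warning("No dataset repo ID provided.")  # 2025-05-21 14:03:07: WARN  ⚠️ No dataset repo ID provided.
log_error("Error writing to CSV")            # 2025-05-21 14:03:08: ERROR ‼️ Error writing to CSV
log_debug("Final History: []")               # printed only if DEBUG_MODE is True
```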