Remove message logging for privacy
app.py CHANGED
@@ -1,21 +1,24 @@
 import os
 import sys
+import datetime
 
 from openai import OpenAI
 import gradio as gr
 from gradio.components.chatbot import ChatMessage, Message
 from typing import (
-    TYPE_CHECKING,
     Any,
     Literal,
-    Optional,
-    Union,
-    cast,
 )
 
+print(f"Gradio version: {gr.__version__}")
+
+DEBUG_LOG = False or os.environ.get("DEBUG_LOG") == "True"
+
 title = None  # "ServiceNow-AI Chat"  # modelConfig.get('MODE_DISPLAY_NAME')
 description = None
 
+chat_start_count = 0
+
 model_config = {
     "MODEL_NAME": os.environ.get("MODEL_NAME"),
     "MODE_DISPLAY_NAME": os.environ.get("MODE_DISPLAY_NAME"),
@@ -31,6 +34,12 @@ client = OpenAI(
 )
 
 
+def log_message(message):
+    if DEBUG_LOG is True:
+        print(message)
+
+
+# Gradio 5.0.1 had issues with checking the message formats. 5.29.0 does not!
 def _check_format(messages: Any, type: Literal["messages", "tuples"] = "messages") -> None:
     if type == "messages":
         all_valid = all(
@@ -52,8 +61,8 @@ def _check_format(messages: Any, type: Literal["messages", "tuples"] = "messages
             raise Exception(
                 "Data incompatible with messages format. Each message should be a dictionary with 'role' and 'content' keys or a ChatMessage object."
             )
-        else:
-            print("_check_format() --> All messages are valid.")
+        # else:
+        #     print("_check_format() --> All messages are valid.")
     elif not all(
         isinstance(message, (tuple, list)) and len(message) == 2
         for message in messages
@@ -64,24 +73,26 @@ def _check_format(messages: Any, type: Literal["messages", "tuples"] = "messages
 
 
 def chat_fn(message, history):
-    print(f"{'-' * 80}\nchat_fn() --> Message: {message}")
+    log_message(f"{'-' * 80}\nchat_fn() --> Message: {message}")
+
+    global chat_start_count
+    chat_start_count = chat_start_count + 1
+    print(
+        f"{datetime.datetime.now()}: chat_start_count: {chat_start_count}, turns: {int(len(history if history else []) / 3)}")
+
     # Remove any assistant messages with metadata from history for multiple turns
-    print(f"Original History: {history}")
+    log_message(f"Original History: {history}")
     _check_format(history, "messages")
     history = [item for item in history if
                not (isinstance(item, dict) and
                     item.get("role") == "assistant" and
                     isinstance(item.get("metadata"), dict) and
                     item.get("metadata", {}).get("title") is not None)]
-    print(f"Updated History: {history}")
+    log_message(f"Updated History: {history}")
     _check_format(history, "messages")
 
-    # messages = history + [{"role": "user", "content": message}]
-    # print(f"Messages: {messages}")
-    # _check_format(messages, "messages")
-
     history.append({"role": "user", "content": message})
-    print(f"History with user message: {history}")
+    log_message(f"History with user message: {history}")
     _check_format(history, "messages")
 
     # Create the streaming response
@@ -97,7 +108,7 @@ def chat_fn(message, history):
             content="Thinking...",
             metadata={"title": "🧠 Thought"}
         ))
-    print(f"History added thinking: {history}")
+    log_message(f"History added thinking: {history}")
     _check_format(history, "messages")
 
     output = ""
@@ -137,7 +148,7 @@ def chat_fn(message, history):
                 # _check_format(messages_to_yield, "messages")
                 yield messages_to_yield
 
-    print(f"Final History: {history}")
+    log_message(f"Final History: {history}")
    _check_format(history, "messages")
 
 
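The substance of the commit: every print of chat content now goes through a DEBUG_LOG-gated helper, so message text only reaches the Space logs when the operator explicitly opts in, while the one unconditional print that remains records a timestamp and counters rather than message content. Below is a minimal, self-contained sketch of that pattern; the DEBUG_LOG and log_message names mirror the diff, and the __main__ demo lines are illustrative only:

import os

# Off by default: chat content is only logged when the process is
# started with DEBUG_LOG=True in its environment.
DEBUG_LOG = os.environ.get("DEBUG_LOG") == "True"


def log_message(message):
    # Gate all message-content logging behind the opt-in flag.
    if DEBUG_LOG:
        print(message)


if __name__ == "__main__":
    # Silent unless DEBUG_LOG=True was exported before launch.
    log_message("Original History: [{'role': 'user', 'content': 'hi'}]")
    # Content-free aggregate stats can still be printed unconditionally.
    print("chat_start_count: 1, turns: 0")

On a Hugging Face Space the flag would typically be set as a Space variable (Settings -> Variables and secrets) rather than exported in a shell, so debug logging can be toggled without a code change.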