Update app.py
app.py CHANGED
Previous version (removed lines are marked with "-"):

@@ -4,84 +4,47 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 
 # --- Configuration Constants ---
-
-FIXED_MAX_TOKENS = 1000 # Note: This is a very high value, typical values are much lower (e.g., 512, 1024, 2048, 4096 for many models)
-
 
 # --- Initialize the InferenceClient ---
-
-# The specific model will be specified in the API call (e.g., chat_completion).
-API_BASE_URL = "https://vulture-awake-probably.ngrok-free.app/v1/chat/completions" # Base URL for the custom API
 
 try:
-    # Initialize the client with the base URL of your API.
-    # If your API requires an authentication token, you might need to pass it here,
-    # e.g., client = InferenceClient(base_url=API_BASE_URL, token="YOUR_API_TOKEN")
-    # or ensure it's set as an environment variable if the client/API supports that.
     client = InferenceClient(base_url=API_BASE_URL)
     print(f"InferenceClient initialized with base_url: {API_BASE_URL}")
 except Exception as e:
     print(f"Error initializing InferenceClient with base_url '{API_BASE_URL}': {e}")
-    # Handle the error appropriately, e.g., by exiting or using a fallback
     raise RuntimeError(
         "Could not initialize InferenceClient. "
         f"Please check the API base URL ('{API_BASE_URL}') and ensure the server is accessible. "
         f"Error: {e}"
     )
 
-
 # --- Core Chatbot Logic ---
 def respond(message, history):
-    """
-    This function processes the user's message and the chat history to generate a response
-    from the language model using the custom API.
-
-    Args:
-        message (str): The latest message from the user.
-        history (list of lists): A list where each inner list contains a pair of
-                                 [user_message, ai_message].
-
-    Yields:
-        str: The generated response token by token (for streaming).
-    """
-    # Initialize the messages list
     messages = []
-
-    # Append past interactions from the history to the messages list
-    # This provides context to the language model
     for user_message, ai_message in history:
-        if user_message:
             messages.append({"role": "user", "content": user_message})
-        if ai_message:
             messages.append({"role": "assistant", "content": ai_message})
-
-    # Append the current user's message to the messages list
     messages.append({"role": "user", "content": message})
 
-    # Initialize an empty string to accumulate the response
     response_text = ""
-
     try:
-        # Make a streaming call to the language model's chat completions endpoint.
-        # The `model` parameter specifies which model to use at the endpoint.
         stream = client.chat_completion(
-            messages=messages,
-            max_tokens=FIXED_MAX_TOKENS,
-            stream=True,
         )
-
         for chunk in stream:
-            # Check if the chunk contains content and the content is not None
-            # The exact structure of the chunk can vary based on the model/endpoint
             if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
-                token = chunk.choices[0].delta.content
-                response_text += token
-                yield response_text
-
     except Exception as e:
-        # If any error occurs during the API call, yield an error message
         error_message = f"An error occurred during model inference: {e}"
-        print(error_message)
         yield error_message
 
 # --- Gradio Interface Definition ---

@@ -89,8 +52,8 @@ def respond(message, history):
 # URL for the header image
 header_image_path = "https://cdn-uploads.huggingface.co/production/uploads/6540a02d1389943fef4d2640/j61iZTDaK9g0UW3aWGwWi.gif"
 
-# Ko-fi widget script
-kofi_script = """
 <script src='https://storage.ko-fi.com/cdn/scripts/overlay-widget.js'></script>
 <script>
 kofiWidgetOverlay.draw('sonnydesorbo', {

@@ -104,57 +67,163 @@ kofi_script = """
 
 # Ko-fi button HTML
 kofi_button_html = """
-<div style="text-align: center; padding: 20px;">
 <a href='https://ko-fi.com/Z8Z51E5TIG' target='_blank'>
 <img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi5.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' />
 </a>
 </div>
 """
 
-#
-
-
-
-
     gr.Image(
-        value=header_image_path,
-        label="Chatbot Header",
-        show_label=False,
-        interactive=False,
-        height=150,
-        elem_id="chatbot-logo"
     )
 
     # Create the chat interface component
     gr.ChatInterface(
-        fn=respond,
-        chatbot=gr.Chatbot(
-            height=650
         ),
-        #
-        #
-        # description="Chat with Xortron7, your AI assistant.",
-        # examples=[["Hello!", None], ["What is Gradio?", None]],
-        # retry_btn=None, # Removes the retry button
-        # undo_btn="Delete Previous", # Customizes the undo button
-        # clear_btn="Clear Chat", # Customizes the clear button
     )
 
     # Add the Ko-fi button at the bottom
-    gr.HTML(kofi_button_html)
 
 # --- Application Entry Point ---
 if __name__ == "__main__":
-    # Launch the Gradio web server
-    # show_api=False disables the API documentation page
-    # share=False prevents creating a public Gradio link (for local development)
     try:
         demo.launch(show_api=False, share=False)
     except NameError as ne:
-        # This might happen if 'client' was not defined due to an error during initialization
         print(f"Gradio demo could not be launched. 'client' might not have been initialized: {ne}")
     except RuntimeError as re:
-        # This catches the RuntimeError raised if client initialization failed explicitly
         print(f"Gradio demo could not be launched due to an error during client initialization: {re}")
     except Exception as e:
         print(f"An unexpected error occurred when trying to launch Gradio demo: {e}")
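The docstring removed above is where the history format expected by respond() is spelled out: a list of [user_message, ai_message] pairs, which both the previous and the updated version unpack the same way. A minimal, hypothetical sketch of driving respond() directly with that format (assumes the file is saved as app.py and the API_BASE_URL endpoint is actually reachable):

# Hypothetical local check: drive respond() outside the Gradio UI.
# Assumes app.py is importable and the custom endpoint defined by API_BASE_URL is up.
from app import respond

history = [["Hi there", "Hello! How can I help you today?"]]  # [user_message, ai_message] pairs
final = ""
for partial in respond("What can you do?", history):
    final = partial  # each yield is the accumulated reply streamed so far
print(final)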
Updated version (added lines are marked with "+"):

@@ -4,84 +4,47 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 
 # --- Configuration Constants ---
+FIXED_MAX_TOKENS = 1000
 
 # --- Initialize the InferenceClient ---
+API_BASE_URL = "https://vulture-awake-probably.ngrok-free.app/v1/chat/completions"
 
 try:
     client = InferenceClient(base_url=API_BASE_URL)
     print(f"InferenceClient initialized with base_url: {API_BASE_URL}")
 except Exception as e:
     print(f"Error initializing InferenceClient with base_url '{API_BASE_URL}': {e}")
     raise RuntimeError(
         "Could not initialize InferenceClient. "
         f"Please check the API base URL ('{API_BASE_URL}') and ensure the server is accessible. "
         f"Error: {e}"
     )
 
 # --- Core Chatbot Logic ---
 def respond(message, history):
     messages = []
     for user_message, ai_message in history:
+        if user_message:
             messages.append({"role": "user", "content": user_message})
+        if ai_message:
             messages.append({"role": "assistant", "content": ai_message})
     messages.append({"role": "user", "content": message})
 
     response_text = ""
     try:
         stream = client.chat_completion(
+            messages=messages,
+            max_tokens=FIXED_MAX_TOKENS,
+            stream=True,
         )
         for chunk in stream:
             if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
+                token = chunk.choices[0].delta.content
+                response_text += token
+                yield response_text
     except Exception as e:
         error_message = f"An error occurred during model inference: {e}"
+        print(error_message)
         yield error_message
 
 # --- Gradio Interface Definition ---

@@ -89,8 +52,8 @@ def respond(message, history):
 # URL for the header image
 header_image_path = "https://cdn-uploads.huggingface.co/production/uploads/6540a02d1389943fef4d2640/j61iZTDaK9g0UW3aWGwWi.gif"
 
+# Ko-fi widget script (original)
+kofi_script_original = """
 <script src='https://storage.ko-fi.com/cdn/scripts/overlay-widget.js'></script>
 <script>
 kofiWidgetOverlay.draw('sonnydesorbo', {

@@ -104,57 +67,163 @@ kofi_script = """
 
 # Ko-fi button HTML
 kofi_button_html = """
+<div style="text-align: center; padding: 10px 20px 20px 20px;">
 <a href='https://ko-fi.com/Z8Z51E5TIG' target='_blank'>
 <img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi5.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' />
 </a>
 </div>
 """
 
+# HTML, CSS, and JavaScript for Matrix Rain
+matrix_rain_html_script = """
+<canvas id="matrixCanvas"></canvas>
+<style>
+    body {
+        /* overflow: hidden; */ /* Might hide Gradio scrollbars, be careful */
+        /* Gradio manages its own background, so direct body changes might be tricky.
+           The canvas will be fixed behind everything. */
+    }
+    #matrixCanvas {
+        position: fixed;
+        top: 0;
+        left: 0;
+        width: 100%;
+        height: 100%;
+        z-index: -1; /* Place it behind all Gradio content */
+        display: block;
+    }
+    /* Ensure Gradio's main content area is above the canvas and has a transparent background
+       if you want the rain to be visible *through* it, or a solid dark background
+       if you want it *around* it. Gradio theming should handle the component backgrounds.
+    */
+    gradio-app > .main, gradio-app > .main > .wrap, gradio-app { /* Attempt to make Gradio app background transparent */
+        /* background: transparent !important; */ /* This can be aggressive, use with caution */
+    }
+    /* You might need to adjust specific Gradio component backgrounds if they are not dark by default with the theme */
+    .gradio-container {
+        /* background: transparent !important; */ /* Example */
+    }
+
+</style>
+<script>
+document.addEventListener('DOMContentLoaded', (event) => {
+    const canvas = document.getElementById('matrixCanvas');
+    if (!canvas) {
+        console.error("Matrix canvas not found");
+        return;
+    }
+    const ctx = canvas.getContext('2d');
+
+    canvas.width = window.innerWidth;
+    canvas.height = window.innerHeight;
+
+    const katakana = 'アァカサタナハマヤャラワガザダバパイィキシチニヒミリヰギジヂビピウゥクスツヌフムユュルグズブヅプエェケセテネヘメレヱゲゼデベペオォコソトノホモヨョロヲゴゾドボポヴッン';
+    const latin = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
+    const nums = '0123456789';
+    const characters = katakana + latin + nums;
+
+    const fontSize = 16;
+    let columns = Math.floor(canvas.width / fontSize);
+    let drops = [];
+
+    function initializeDrops() {
+        columns = Math.floor(canvas.width / fontSize);
+        drops = [];
+        for (let x = 0; x < columns; x++) {
+            drops[x] = 1 + Math.floor(Math.random() * (canvas.height / fontSize));
+        }
+    }
+    initializeDrops();
+
+    function drawMatrix() {
+        ctx.fillStyle = 'rgba(0, 0, 0, 0.04)'; // Black BG for the canvas with a little blur
+        ctx.fillRect(0, 0, canvas.width, canvas.height);
+
+        // Use a color that fits the dark theme, e.g., a neon green or the theme's primary color
+        // For Gradio, you might try to pick up CSS variables if available, or hardcode.
+        ctx.fillStyle = '#0F0'; // Classic green
+        // Example: Try to use Gradio's primary color if possible (might require more complex JS)
+        // const gradioPrimaryColor = getComputedStyle(document.documentElement).getPropertyValue('--primary-500') || '#0F0';
+        // ctx.fillStyle = gradioPrimaryColor;
+
+        ctx.font = fontSize + 'px monospace';
+
+        for (let i = 0; i < drops.length; i++) {
+            const text = characters.charAt(Math.floor(Math.random() * characters.length));
+            ctx.fillText(text, i * fontSize, drops[i] * fontSize);
+
+            if (drops[i] * fontSize > canvas.height && Math.random() > 0.975) {
+                drops[i] = 0;
+            }
+            drops[i]++;
+        }
+    }
+
+    let matrixInterval = setInterval(drawMatrix, 45); // Adjusted speed
+
+    window.addEventListener('resize', () => {
+        const oldWidth = canvas.width;
+        const oldHeight = canvas.height;
+
+        canvas.width = window.innerWidth;
+        canvas.height = window.innerHeight;
+
+        if (canvas.width !== oldWidth || canvas.height !== oldHeight) {
+            initializeDrops();
+        }
+    });
+
+    // Optional: A slight delay for Gradio to finish rendering before starting matrix
+    // setTimeout(() => {
+    //     if (matrixInterval) clearInterval(matrixInterval);
+    //     initializeDrops();
+    //     matrixInterval = setInterval(drawMatrix, 45);
+    // }, 500);
+});
+</script>
+"""
+
+# Combine Ko-fi script with Matrix Rain script for the 'head'
+combined_head_script = kofi_script_original + matrix_rain_html_script
+
+
+# Create a Gradio Blocks layout
+# Use a dark theme and add the combined scripts to the head
+# theme = gr.themes.Soft() # Original theme
+theme = gr.themes.Base(primary_hue=gr.themes.colors.green, neutral_hue=gr.themes.colors.slate).dark() # Example dark theme
+
+with gr.Blocks(theme=theme, head=combined_head_script) as demo:
+    # Display an image at the top
     gr.Image(
+        value=header_image_path,
+        label="Chatbot Header",
+        show_label=False,
+        interactive=False,
+        height=150,
+        elem_id="chatbot-logo"
     )
 
     # Create the chat interface component
     gr.ChatInterface(
+        fn=respond,
+        chatbot=gr.Chatbot(
+            height=650,
+            # elem_id="chatbot_messages_container" # You can add an ID for more specific CSS
         ),
+        # title="XORTRON AI", # You can set a title
+        # description="Engage with the digital rain." # And a description
     )
 
     # Add the Ko-fi button at the bottom
+    gr.HTML(kofi_button_html)
 
 # --- Application Entry Point ---
 if __name__ == "__main__":
     try:
         demo.launch(show_api=False, share=False)
     except NameError as ne:
         print(f"Gradio demo could not be launched. 'client' might not have been initialized: {ne}")
     except RuntimeError as re:
         print(f"Gradio demo could not be launched due to an error during client initialization: {re}")
     except Exception as e:
         print(f"An unexpected error occurred when trying to launch Gradio demo: {e}")
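For a quick check of the custom endpoint itself, independent of the Gradio UI, a minimal sketch along the same lines as respond() (assumptions: the ngrok URL above is reachable and serves an OpenAI-compatible chat-completions API; the prompt string and max_tokens value here are arbitrary, and a token would need to be passed to InferenceClient if the server requires one):

# Hypothetical smoke test for the custom chat-completions endpoint.
# Mirrors the streaming call used in respond(); swap in your own base_url/token as needed.
from huggingface_hub import InferenceClient

client = InferenceClient(base_url="https://vulture-awake-probably.ngrok-free.app/v1/chat/completions")
stream = client.chat_completion(
    messages=[{"role": "user", "content": "Reply with one short sentence."}],
    max_tokens=64,
    stream=True,
)
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()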