Update app.py
app.py
CHANGED
@@ -1,112 +1,161 @@
import os
import gradio as gr

# Configuration
-MODEL_NAME = "

-#
-if HF_API_TOKEN:
-    try:
-        client = InferenceClient(token=HF_API_TOKEN)
-        print("Successfully initialized Hugging Face Inference Client")
-    except Exception as e:
-        print(f"Error initializing Inference Client: {e}")
-else:
-    print("Warning: HF_API_TOKEN not set. Using limited functionality.")

-def generate_response(user_input):
-    """Generate a response from the model based on user input"""
-    if not user_input.strip():
-        return "Please enter a message to get a response from NORTHERN_AI."

# Create Gradio interface
-with gr.Blocks(
-    gr.
    msg = gr.Textbox(
    )

-    with gr.Row():
-        submit = gr.Button("Send", variant="primary")
-        clear = gr.Button("Clear", variant="secondary")

-    # Keep track of conversation history
-    conversation_history = gr.State([])

-    def user_input(message, history):
-        if not message:
-            return "", history

-        #
-        return "", history

-    def clear_conversation():
-        return [], []

if __name__ == "__main__":
    demo.launch()

import os
import gradio as gr
+import time
+import random

# Configuration
+MODEL_NAME = "facebook/opt-350m"  # Free, open model without API key requirements
+SYSTEM_PROMPT = """NORTHERN_AI is an AI assistant. If asked about who created it or who is the CEO,
+it should respond that it was created by AR.BALTEE who is also the CEO."""

+# For demonstration purposes, we'll use a simpler model approach
+# that doesn't require an API key but still provides responses
+def get_ai_response(message):
+    """Generate a response using a local model"""
+    # Check if asking about creator/CEO
+    if any(keyword in message.lower() for keyword in ["who made you", "who created you", "creator", "ceo", "who owns"]):
+        return "I was created by AR.BALTEE, who is also the CEO of NORTHERN_AI."

+    # Simple response generation (replace this with actual model inference)
+    responses = [
+        "I understand your question about {}. Based on my knowledge, I would say that...",
+        "Thanks for asking about {}. Here's what I know on this topic...",
+        "Regarding {}, I can provide some information...",
+        "I've analyzed your question about {} and here's my perspective...",
+        "When it comes to {}, there are several important points to consider..."
+    ]

+    # Extract key topic from message
+    topic = message.split()[0] if len(message.split()) > 0 else "that"
+
+    # Add a slight delay to simulate processing
+    time.sleep(0.5)
+
+    return random.choice(responses).format(topic)
+
+# Define theme and styling
+theme = gr.themes.Soft(
+    primary_hue="blue",
+    secondary_hue="blue",
+    neutral_hue="gray",
+    spacing_size=gr.themes.sizes.spacing_sm,
+    radius_size=gr.themes.sizes.radius_sm,
+    text_size=gr.themes.sizes.text_md
+).set(
+    body_text_color="black",
+    body_background_fill="white",
+    button_primary_background_fill="rgb(51, 102, 255)",
+    button_primary_background_fill_hover="rgb(41, 82, 204)",
+    button_primary_text_color="white",
+    button_primary_text_color_hover="white",
+    chatbot_code_background_color="rgba(0, 0, 0, 0.05)",
+    chatbot_code_foreground_color="rgb(51, 51, 51)"
+)
+
+# Load custom CSS
+css = """
+#chat-container {
+    max-width: 800px;
+    margin: 0 auto;
+}
+#header {
+    display: flex;
+    align-items: center;
+    margin-bottom: 16px;
+    padding: 0 8px;
+}
+#logo {
+    width: 32px;
+    height: 32px;
+    margin-right: 8px;
+}
+#title {
+    margin: 0;
+    font-size: 18px;
+    font-weight: 600;
+}
+.chatbot-message {
+    padding: 8px 12px;
+    border-radius: 8px;
+    margin-bottom: 8px;
+}
+.user-message {
+    background-color: #f0f0f0;
+}
+.bot-message {
+    background-color: #e6f7ff;
+}
+#input-container {
+    margin-top: 8px;
+}
+#footer {
+    font-size: 12px;
+    color: #666;
+    text-align: center;
+    margin-top: 16px;
+}
+"""

# Create Gradio interface
+with gr.Blocks(theme=theme, css=css) as demo:
+    with gr.Column(elem_id="chat-container"):
+        with gr.Row(elem_id="header"):
+            gr.HTML("""
+            <div id="logo" style="background-color: #0066ff; color: white; border-radius: 50%; width: 32px; height: 32px; display: flex; align-items: center; justify-content: center; font-weight: bold;">N</div>
+            """)
+            gr.HTML('<h1 id="title">NORTHERN_AI</h1>')
+
+        chatbot = gr.Chatbot(
+            elem_id="chatbox",
+            bubble_full_width=False,
+            height=400,
+            show_label=False,
+            avatar_images=("https://api.dicebear.com/7.x/avataaars/svg?seed=user", "https://api.dicebear.com/7.x/avataaars/svg?seed=northern")
+        )
+
+        with gr.Row(elem_id="input-container"):
            msg = gr.Textbox(
+                placeholder="Message NORTHERN_AI...",
+                show_label=False,
+                container=False
            )
+            submit_btn = gr.Button("Send", variant="primary")

+        gr.HTML(
+            """<div id="footer">Powered by open-source technology</div>""",
+            elem_id="footer"
+        )

+    # State for tracking conversation
+    state = gr.State([])

+    # Functions
+    def respond(message, chat_history):
+        if message == "":
+            return "", chat_history
+
+        # Add user message to history
+        chat_history.append((message, None))
+
+        try:
+            # Generate response
+            bot_message = get_ai_response(message)
+
+            # Update last message with bot response
+            chat_history[-1] = (message, bot_message)
+
+            return "", chat_history
+        except Exception as e:
+            print(f"Error generating response: {e}")
+            # Remove failed message attempt
+            chat_history.pop()
+            return "", chat_history
+
+    # Set up event handlers
+    msg.submit(respond, [msg, state], [msg, chatbot])
+    submit_btn.click(respond, [msg, state], [msg, chatbot])

+# Launch the app
if __name__ == "__main__":
    demo.launch()
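
The new get_ai_response() helper only returns canned strings, and its own comment notes that this block should eventually be replaced with actual model inference. A minimal sketch of that swap, reusing MODEL_NAME and SYSTEM_PROMPT from app.py and assuming the Space defines an HF_API_TOKEN secret (as the previous version expected) and that the hosted Inference API serves this model; the prompt format below is illustrative, not part of the commit:

import os
from huggingface_hub import InferenceClient

# Hypothetical client setup; HF_API_TOKEN would be a Space secret, not defined in this commit.
client = InferenceClient(model=MODEL_NAME, token=os.environ.get("HF_API_TOKEN"))

def get_ai_response(message):
    """Generate a response with the hosted model instead of canned text."""
    # Keep the hard-coded creator/CEO answer from the committed version.
    if any(keyword in message.lower() for keyword in ["who made you", "who created you", "creator", "ceo", "who owns"]):
        return "I was created by AR.BALTEE, who is also the CEO of NORTHERN_AI."

    # Prepend the system prompt so the persona instructions reach the model.
    prompt = f"{SYSTEM_PROMPT}\n\nUser: {message}\nAssistant:"
    return client.text_generation(prompt, max_new_tokens=200, temperature=0.7)

Because respond() only ever calls get_ai_response(message), nothing else in the interface would need to change for this swap.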
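The removed version also exposed a Clear button wired to clear_conversation(); if that control is wanted back under the new layout, a rough sketch could look like the following (clear_btn is a name chosen here for illustration, not part of the commit):

# Inside the input-container Row, next to submit_btn:
clear_btn = gr.Button("Clear", variant="secondary")

# At the same level as respond(), still inside the gr.Blocks context:
def clear_conversation():
    # Empty both the visible chat and the stored conversation history.
    return [], []

clear_btn.click(clear_conversation, None, [chatbot, state])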