import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load an open instruction-tuned model (OpenAssistant's Pythia-12B SFT)
MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
device = "cuda" if torch.cuda.is_available() else "cpu"
# Use float16 on GPU to halve memory; fall back to float32 on CPU, where
# half-precision inference is poorly supported
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
model.to(device)
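# NOTE: this 12B model needs roughly 24 GB of memory in float16, more than
# most free-tier hardware offers. A hedged alternative sketch, assuming the
# optional `accelerate` and `bitsandbytes` packages are installed, is to load
# the weights with 8-bit quantization instead:
#
#   model = AutoModelForCausalLM.from_pretrained(
#       MODEL_NAME, device_map="auto", load_in_8bit=True
#   )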
# System prompt for the AI
SYSTEM_PROMPT = """NORTHERN_AI is an AI assistant. If asked who created it or who the CEO is,
it should respond that it was created by AR.BALTEE, who is also the CEO."""
# Function to generate AI responses
def get_ai_response(message):
    try:
        # Hard-coded answer for questions about the creator/CEO
        if any(keyword in message.lower() for keyword in ["who made you", "who created you", "creator", "ceo", "who owns"]):
            return "I was created by AR.BALTEE, who is also the CEO of NORTHERN_AI."
        # Prepare input for the model
        input_text = f"{SYSTEM_PROMPT}\n\nUser: {message}\nAI:"
        inputs = tokenizer(input_text, return_tensors="pt").to(device)
        # Generate response; max_new_tokens bounds the reply length on its own,
        # whereas max_length would also count the prompt's tokens and could
        # leave no room for a reply
        with torch.no_grad():
            outputs = model.generate(
                inputs.input_ids,
                attention_mask=inputs.attention_mask,
                max_new_tokens=200,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
            )
        # Decode and keep only the text after the final "AI:" marker
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        response = response.split("AI:")[-1].strip()
        return response
    except Exception as e:
        print(f"Error generating response: {e}")
        return "Sorry, I encountered an error while generating a response. Please try again."
# Custom CSS for a beautiful UI
css = """
.gradio-container {
    max-width: 800px !important;
    margin: 0 auto !important;
    background: linear-gradient(135deg, #f0f4f8, #d9e2ec) !important;
    padding: 20px !important;
    border-radius: 15px !important;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
}
#header-container {
    display: flex !important;
    align-items: center !important;
    margin-bottom: 1.5rem !important;
    background-color: transparent !important;
    padding: 0.5rem 1rem !important;
}
#logo {
    background-color: #0066ff !important;
    color: white !important;
    border-radius: 50% !important;
    width: 40px !important;
    height: 40px !important;
    display: flex !important;
    align-items: center !important;
    justify-content: center !important;
    font-weight: bold !important;
    margin-right: 10px !important;
    font-size: 20px !important;
}
#title {
    margin: 0 !important;
    font-size: 24px !important;
    font-weight: 600 !important;
    color: #333 !important;
}
#chatbot {
    background-color: white !important;
    border-radius: 15px !important;
    padding: 20px !important;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1) !important;
    height: 400px !important;
    overflow-y: auto !important;
}
#footer {
    font-size: 12px !important;
    color: #666 !important;
    text-align: center !important;
    margin-top: 1.5rem !important;
    padding: 0.5rem !important;
}
.textbox {
    border-radius: 15px !important;
    border: 1px solid #ddd !important;
    padding: 10px !important;
    font-size: 14px !important;
    width: 100% !important;
}
.button {
    background-color: #0066ff !important;
    color: white !important;
    border-radius: 15px !important;
    padding: 10px 20px !important;
    font-size: 14px !important;
    border: none !important;
    cursor: pointer !important;
    transition: background-color 0.3s ease !important;
}
.button:hover {
    background-color: #0052cc !important;
}
"""
# Create Gradio interface
with gr.Blocks(css=css) as demo:
    with gr.Column():
        # Custom header
        with gr.Row(elem_id="header-container"):
            gr.HTML('<div id="logo">N</div>')
            gr.HTML('<h1 id="title">NORTHERN_AI</h1>')
        # Chat interface
        chatbot = gr.Chatbot(elem_id="chatbot")
        with gr.Row():
            msg = gr.Textbox(
                placeholder="Message NORTHERN_AI...",
                show_label=False,
                container=False,
                elem_classes="textbox",
            )
            submit_btn = gr.Button("Send", elem_classes="button")
        gr.HTML('<div id="footer">Powered by open-source technology</div>')
    # The Chatbot component's value is the conversation history itself, so no
    # separate gr.State is needed; the handlers below read and write it directly.
    # Event handler: appends the user message, generates a reply, and returns
    # the cleared textbox plus the updated history
    def respond(message, chat_history):
        if message == "":
            return "", chat_history
        # Add user message to history
        chat_history.append((message, None))
        try:
            # Generate response
            bot_message = get_ai_response(message)
            # Update last message with bot response
            chat_history[-1] = (message, bot_message)
            return "", chat_history
        except Exception as e:
            print(f"Error generating response: {e}")
            # Show an error message in place of the failed reply
            chat_history[-1] = (message, "Sorry, something went wrong. Please try again.")
            return "", chat_history
    # Set up event handlers (the Chatbot value doubles as the history)
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    submit_btn.click(respond, [msg, chatbot], [msg, chatbot])
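    # Optional sketch (not part of the layout above): a clear button added to
    # this Blocks context could reset the conversation, e.g.
    #   clear_btn = gr.Button("Clear")
    #   clear_btn.click(lambda: [], None, chatbot, queue=False)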
# Launch the app
if __name__ == "__main__":
    demo.launch()
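# Usage note: on Hugging Face Spaces the bare launch() above is enough; when
# running locally, demo.launch(share=True) would also create a temporary
# public link (share is a standard gradio parameter).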