# NOTE(review): the lines below were Hugging Face Spaces web-page residue
# (status badges, file size, commit hashes, and a line-number gutter) captured
# when this file was scraped; they are not Python and are commented out so the
# module parses.
# Spaces: Sleeping / Sleeping / File size: 5,093 Bytes
# 8eb685b ee1ee46 8eb685b ee1ee46 8eb685b ee1ee46 8eb685b ee1ee46 8eb685b
import os
import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the Hugging Face InferenceClient with your API key.
# NOTE(review): reads the key from the API_KEY environment variable; if it is
# unset, os.getenv returns None and every API call will fail with an auth
# error — set API_KEY in the Space's secrets before deploying.
client = InferenceClient(
    provider="sambanova",
    api_key=os.getenv("API_KEY")  # Replace with your actual API key.
)
# Define a system message that contains the specialized event details.
system_message = {
"role": "system",
"content": (
"You are an AI chat assistant specialized in providing detailed information about "
"the Bhasha Bandhu Regional Ideathon @ SGSITS. Please always include event details, dates, "
"and relevant links (if available) in your responses.\n\n"
"Event Details:\n"
"Bhasha Bandhu Regional Ideathon @ SGSITS\n"
"Date: 22nd February 2025\n"
"Time: 9:00 AM - 3:00 PM\n"
"Venue: SGSITS, Indore\n\n"
"Join the Bhasha Bandhu Regional Ideathon!\n\n"
"Bhasha Bandhu, in collaboration with Bhashini and Microsoft, is organizing an exciting "
"Regional Ideathon at SGSITS, Indore, on 22nd February. This is a unique opportunity for "
"students, professionals, developers, and entrepreneurs to brainstorm and innovate solutions "
"that bridge India's linguistic digital divide.\n\n"
"Why Participate?\n"
"- Gain industry mentorship from experts in AI & language technology\n"
"- Work on real-world problem statements with open-source AI models\n"
"- Hands-on experience with Bhashini API, OpenAI, and GitHub Copilot\n"
"- Swags and Certificates for regional winners and participants\n"
"- Opportunity to get shortlisted for the main Hackathon with Microsoft & Bhashini\n\n"
"Event Agenda:\n"
"- 9:00 AM - 9:30 AM: Registration & Introduction\n"
"- 9:30 AM - 10:00 AM: Mentor Session on Bhashini API, OpenAI, GitHub Copilot\n"
"- 10:00 AM - 10:30 AM: Problem Statements Explained + Q&A\n"
"- 10:30 AM - 12:30 PM: Brainstorming & Ideation (PPT preparation on Ideathon Day)\n"
"- 12:30 PM - 2:00 PM: Mentor Evaluation & Regional Winner Selection\n"
"- 2:00 PM - 3:00 PM: Winner Announcement & Closing Ceremony\n\n"
"How to Participate:\n"
"- Form a team (or participate solo)\n"
"- Register for the event in advance\n"
"- Prepare a PPT on Ideathon Day covering:\n"
" • Problem Statement & Solution (using Bhashini API & OpenAI)\n"
" • Unique Selling Proposition & Business Potential\n"
" • Tech Stack & Implementation Plan\n"
"- Present your idea to the jury\n\n"
"Important Notes:\n"
"- Offline participation is mandatory\n"
"- Lunch will not be provided\n"
"- Winning at the regional hackathon does not guarantee a win in the main event, but all "
"submitted ideas will be considered.\n\n"
"For Queries: Contact Arpit at +91 95718 45422\n\n"
"Let's build a digitally inclusive India together!"
)
}
# Global conversation history (starting with the system message)
# NOTE(review): this module-level list is shared by every concurrent Gradio
# session and grows without bound — one user's turns can leak into another's.
# Prefer deriving the message list from the per-session chat_history inside
# the handler instead of mutating this global.
conversation = [system_message]
def generate_response(user_message, chat_history):
    """
    Handle one chat turn for the Gradio interface.

    Rebuilds the model conversation from the per-session ``chat_history``
    (system prompt + prior turns + the new user message), calls the
    inference API, and appends the assistant's reply to the history.

    Parameters
    ----------
    user_message : str
        The text the user just submitted.
    chat_history : list[tuple[str, str]]
        Gradio Chatbot history as (user, assistant) pairs; mutated in place.

    Returns
    -------
    tuple[str, list]
        An empty string (clears the input textbox) and the updated history.
    """
    # Build the message list from this session's history instead of the
    # module-level `conversation` global: the global was shared by every
    # concurrent Gradio session (cross-user leakage) and grew without bound.
    messages = [system_message]
    for user_turn, assistant_turn in chat_history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_message})

    try:
        # Call the Hugging Face chat completions API.
        completion = client.chat.completions.create(
            model="meta-llama/Llama-3.3-70B-Instruct",
            messages=messages,
            max_tokens=500,
        )
        assistant_message = completion.choices[0].message
        # Depending on the client version, the message may be a dict, an
        # object with a .content attribute, or a bare string. The original
        # code returned the raw object (its repr) in the non-dict case.
        if isinstance(assistant_message, dict):
            assistant_text = assistant_message.get("content", "")
        elif hasattr(assistant_message, "content"):
            assistant_text = assistant_message.content or ""
        else:
            assistant_text = str(assistant_message)
    except Exception as exc:
        # Surface API/network failures in the chat instead of crashing the
        # event handler and leaving the UI unresponsive.
        assistant_text = f"Sorry, the assistant is unavailable right now ({exc})."

    # Update the (user, assistant) tuples shown by the Gradio Chatbot.
    chat_history.append((user_message, assistant_text))
    # First return value clears the input textbox.
    return "", chat_history
# ---- Gradio user interface ----
# Lays out a header, a chat display, and a single-row input box, then wires
# the textbox's Enter key to the chat handler.
with gr.Blocks() as demo:
    gr.Markdown("# Bhasha Bandhu Ideathon Chat Assistant")
    gr.Markdown(
        "Ask any questions or request details about the Bhasha Bandhu Regional Ideathon @ SGSITS. "
        "The assistant will provide detailed answers with event dates, agenda, and links where applicable."
    )

    chat_window = gr.Chatbot()

    with gr.Row():
        # .style(container=False) was removed here (dropped in newer Gradio).
        message_box = gr.Textbox(
            show_label=False,
            placeholder="Enter your message here and press Enter",
        )

    # On Enter: send (textbox, chat history) to generate_response, which
    # returns ("", updated_history) — clearing the box and refreshing the chat.
    message_box.submit(
        generate_response,
        [message_box, chat_window],
        [message_box, chat_window],
    )

# Launch the Gradio demo.
demo.launch()
# (end of file — trailing scrape artifact removed)