import gradio as gr
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
from bertopic import BERTopic
from sentence_transformers import SentenceTransformer
# Retrieve the Hugging Face access token from environment variables
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')

# Use the token with from_pretrained to access the gated Llama-2 weights
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
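# A full float32 load of Llama-2-7b needs roughly 28 GB of RAM. On smaller
# hardware a half-precision load is a common alternative; a sketch, assuming
# torch is available (not the app's original configuration):
#
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       "meta-llama/Llama-2-7b-chat-hf",
#       token=huggingface_token,
#       torch_dtype=torch.float16,
#   )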
# Function to load bad words (one per line) from a file
def load_bad_words(filepath):
    with open(filepath, 'r', encoding='utf-8') as file:
        return [line.strip().lower() for line in file]

# Load the bad words list
bad_words = load_bad_words('badwords.txt')  # Adjust the path to your bad words file
# List of topics for the dropdown
topics_list = ['Aviation', 'Science', 'Education', 'Air Force Pilot', 'Space Exploration', 'Technology']

# Load the BERTopic model with a sentence-transformer embedding backend
sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
topic_model = BERTopic(embedding_model=sentence_model)
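# Note: BERTopic must be fitted before transform() can assign topics to new
# messages, and nothing below fits it. A minimal sketch of preparing the model
# on a seed corpus (fit_topic_model and its argument are illustrative, not
# part of the original app):
def fit_topic_model(seed_docs):
    # BERTopic's default UMAP/HDBSCAN pipeline needs a reasonably large
    # corpus (dozens of documents or more) to discover stable topics.
    topic_model.fit(seed_docs)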
def is_inappropriate_or_offtopic(message, selected_topics):
    # The original body was elided here; a minimal stand-in that flags
    # any message containing a loaded bad word.
    return any(bad_word in message.lower() for bad_word in bad_words)
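# A fuller sketch of the off-topic side of the check, assuming topic_model
# has been fitted (see fit_topic_model above). The keyword-overlap heuristic
# is an assumption, not the app's original logic.
def is_offtopic(message, selected_topics):
    # Predict the message's topic, then collect that topic's keywords.
    topic_id = topic_model.transform([message])[0][0]
    keywords = {word for word, _ in (topic_model.get_topic(topic_id) or [])}
    # Off-topic when no selected topic word overlaps the predicted keywords.
    return not any(topic.lower() in keywords for topic in selected_topics)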
def suggest_questions():
    predefined_prompt = "Hello! My name is Andrea. So you want to learn how to become an Air Force pilot? Select a question and let's start your learning journey."
    questions = [
        "What are the basic requirements to become an Air Force pilot?",
        "How long does it take to train as an Air Force pilot?",
        "Can you describe a day in the life of an Air Force pilot?"
    ]
    return predefined_prompt, questions
def generate_response(selected_question, selected_topics):
    # Placeholder response; a real deployment would generate the answer
    # with the Llama model loaded above (see the sketch below).
    response = "This is a placeholder response for the question: " + selected_question
    return response
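# A minimal sketch of replacing the placeholder with a real generation call,
# using the tokenizer and model loaded above. The [INST] prompt format is
# standard for the Llama-2 chat variant; the generation parameters here are
# assumptions, not the app's original settings.
def generate_llama_response(selected_question):
    prompt = f"[INST] {selected_question} [/INST]"
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)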
def main():
    with gr.Blocks() as demo:
        gr.Markdown("### Child-Safe Chatbot | BETA")
        gr.Markdown("This chatbot uses BERTopic to identify topics in your messages and ensures the conversation stays relevant and safe.")
        with gr.Row():
            start_btn = gr.Button("Start Conversation")
        questions_output = gr.Radio(label="Select a question", choices=[])
        submit_btn = gr.Button("Submit Question")
        response_output = gr.Textbox(label="Bot Response")
        def update_questions():
            prompt, questions = suggest_questions()
            # Updated choices must be returned as an output; calling
            # .update() on the component inside a handler has no effect.
            return prompt, gr.update(choices=questions)

        start_btn.click(
            fn=update_questions,
            inputs=[],
            outputs=[response_output, questions_output]
        )
        submit_btn.click(
            fn=generate_response,
            # A plain Python list is not a valid Gradio input; wrap the
            # topics in gr.State so they are passed to the handler.
            inputs=[questions_output, gr.State(topics_list)],
            outputs=response_output
        )
    demo.launch()

if __name__ == "__main__":
    main()