import os

import gradio as gr
from bertopic import BERTopic
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer
# Retrieve the token from environment variables
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
# Use the token with from_pretrained
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
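# A 7B model usually needs half precision and a GPU to fit in memory. A
# hedged alternative load (assumes torch and accelerate are installed):
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "meta-llama/Llama-2-7b-chat-hf", token=huggingface_token,
#         torch_dtype=torch.float16, device_map="auto")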
# Function to load bad words from a file
def load_bad_words(filepath):
    with open(filepath, 'r', encoding='utf-8') as file:
        return [line.strip().lower() for line in file]

# Load bad words list
bad_words = load_bad_words('badwords.txt')  # Adjust the path to your bad words file
# List of topics for the dropdown
topics_list = ['Aviation', 'Science', 'Education', 'Air Force Pilot', 'Space Exploration', 'Technology']
# Load BERTopic model (it must be fitted on a corpus before it can assign topics)
sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
topic_model = BERTopic(embedding_model=sentence_model)
def is_inappropriate_or_offtopic(message, selected_topics):
    # Minimal check: flag any message containing a loaded bad word.
    lowered = message.lower()
    return any(bad_word in lowered for bad_word in bad_words)
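# A hedged sketch of how topic_model could back an off-topic check (not wired
# into the demo; assumes a reference corpus large enough to fit on, since an
# unfitted BERTopic instance cannot call transform()):
def assign_topic(message, reference_docs):
    # Fit on the reference corpus, then assign a topic to the new message.
    topic_model.fit_transform(reference_docs)
    topics, _ = topic_model.transform([message])
    # get_topic() returns the top (word, weight) pairs for that topic.
    return topic_model.get_topic(topics[0])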
def suggest_questions():
    predefined_prompt = "Hello! My name is Andrea. So you want to learn how to become an Air Force pilot? Select a question and let's start your learning journey."
    questions = [
        "What are the basic requirements to become an Air Force pilot?",
        "How long does it take to train as an Air Force pilot?",
        "Can you describe a day in the life of an Air Force pilot?"
    ]
    return predefined_prompt, questions
def generate_response(selected_question, selected_topics):
    # Placeholder: echo the selected question. In a real scenario, you'd
    # integrate this with the Llama model to generate a detailed response.
    return "This is a placeholder response for the question: " + selected_question
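# A hedged sketch of real generation with the loaded Llama model (not used by
# the demo below; the generation settings are illustrative assumptions):
def generate_response_with_llama(selected_question):
    # Wrap the question in Llama-2-chat's instruction format.
    prompt = f"[INST] {selected_question} [/INST]"
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    # Decode only the newly generated tokens, skipping the prompt.
    generated = output_ids[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True)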
def main():
    with gr.Blocks() as demo:
        gr.Markdown("### Child-Safe Chatbot | BETA")
        gr.Markdown("This chatbot uses BERTopic to identify topics in your messages and ensures the conversation stays relevant and safe.")
        with gr.Row():
            start_btn = gr.Button("Start Conversation")
        # Dropdown for the topics defined above (a raw Python list is not a
        # valid Gradio input component)
        topics_output = gr.Dropdown(label="Topics", choices=topics_list, multiselect=True)
        questions_output = gr.Radio(label="Select a question", choices=[])
        submit_btn = gr.Button("Submit Question")
        response_output = gr.Textbox(label="Bot Response")

        def update_questions():
            # Return the greeting and repopulate the radio choices; component
            # updates must be returned as outputs, not set inside the handler.
            prompt, questions = suggest_questions()
            return prompt, gr.update(choices=questions)

        start_btn.click(
            fn=update_questions,
            inputs=[],
            outputs=[response_output, questions_output]
        )
        submit_btn.click(
            fn=generate_response,
            inputs=[questions_output, topics_output],
            outputs=response_output
        )
    demo.launch()

if __name__ == "__main__":
    main()