# app.py — Child-Safe Chatbot (Hugging Face Space by peterkros, commit dec9d6f)
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import os
# Retrieve the token from environment variables
# NOTE(review): the env var name "LLAMA_ACCES_TOKEN" looks misspelled ("ACCES");
# it must match the secret configured in the deployment — confirm before renaming.
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
# Use the token with from_pretrained
# Llama-2 is a gated repo: both tokenizer and model downloads need an authorized token.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
# Load a content moderation pipeline
# Text-classification pipeline consumed by check_content() to flag unsafe messages.
moderation_pipeline = pipeline("text-classification", model="typeform/mobilebert-uncased-mnli")
# Function to load bad words from a file
def load_bad_words(filepath):
    """Read a newline-delimited blocklist and return lowercased words.

    Blank lines are skipped: an empty string would otherwise end up in the
    list, and ``'' in text`` is always True, which would make the substring
    check in is_inappropriate_or_offtopic() flag every message.

    Args:
        filepath: Path to a UTF-8 text file with one banned word per line.

    Returns:
        list[str]: Non-empty, whitespace-stripped, lowercased words.
    """
    with open(filepath, 'r', encoding='utf-8') as file:
        return [line.strip().lower() for line in file if line.strip()]
# Load bad words list
# Module-level blocklist consumed by is_inappropriate_or_offtopic().
bad_words = load_bad_words('badwords.txt') # Adjust the path to your bad words file
# List of topics for the dropdown
# Allowed conversation topics offered in the UI; also used for the on-topic check.
topics_list = ['Aviation', 'Science', 'Education', 'Air Force Pilot', 'Space Exploration', 'Technology']
def is_inappropriate_or_offtopic(message, selected_topics):
    """Return True when the message contains a blocked word, or when topics
    were selected but none of them is mentioned in the message.

    The comparison is case-insensitive substring matching on both checks.
    """
    text = message.lower()
    # Blocklist screen first: any banned word anywhere in the message.
    for banned in bad_words:
        if banned in text:
            return True
    # Topic screen: only enforced when the caller actually selected topics.
    if selected_topics:
        mentions_a_topic = any(topic.lower() in text for topic in selected_topics if topic)
        if not mentions_a_topic:
            return True
    return False
def check_content(message):
    """Classify the message with the moderation pipeline.

    Returns True when the top prediction's label is 'LABEL_1' (flagged);
    False otherwise. Label mapping depends on the underlying model.
    """
    top_prediction = moderation_pipeline(message)[0]
    return top_prediction['label'] == 'LABEL_1'  # Adjust based on the model's output
def generate_response(message, selected_topics):
    """Produce the bot's reply for a user message.

    Screens the message twice — blocklist/off-topic check, then the
    moderation classifier — returning a canned deflection if either trips.
    Otherwise generates a completion with the Llama-2 chat model.

    Args:
        message: Raw user input text.
        selected_topics: Topics chosen in the UI dropdown (list of str).

    Returns:
        str: The generated (or canned) response text.
    """
    if is_inappropriate_or_offtopic(message, selected_topics):
        return "Sorry, let's try to keep our conversation focused on positive and relevant topics!"
    if check_content(message):
        return "I'm here to provide a safe and friendly conversation. Let's talk about something else."
    inputs = tokenizer.encode(message, return_tensors="pt")
    outputs = model.generate(inputs, max_length=50, do_sample=True)
    # Decode only the newly generated tokens: generate() returns the prompt
    # followed by the continuation, so decoding the full sequence would echo
    # the user's message back at the start of every reply.
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    return response
def main():
    """Build the Gradio UI and launch the app."""
    with gr.Blocks() as demo:
        gr.Markdown("### Child-Safe Chatbot BETA")
        with gr.Row():
            user_message = gr.Textbox(label="Your Message")
            topic_selector = gr.Dropdown(choices=topics_list, label="Select Topics", multiselect=True)
        send_button = gr.Button("Send")
        bot_reply = gr.Textbox(label="Bot Response")
        # Wire the button to the generator; the multiselect dropdown already
        # yields a list, so it is passed through as-is.
        send_button.click(
            fn=generate_response,
            inputs=[user_message, topic_selector],
            outputs=bot_reply,
        )
        demo.launch()
# Run the app
# Standard script entry point: launch the UI only when executed directly.
if __name__ == "__main__":
    main()