|
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the pretrained GPT-2 model and its tokenizer
model_name = "gpt2"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
|
|
def filter_explicit(content, filter_on):
    # Placeholder keyword list; replace with a real blocklist as needed
    explicit_keywords = ["badword1", "badword2"]
    if filter_on:
        # Redact each flagged keyword in the generated text
        for word in explicit_keywords:
            content = content.replace(word, "[CENSORED]")
    return content
|
|
def generate_response(prompt, explicit_filter):
    # Encode the prompt and generate a continuation with GPT-2
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(inputs, max_length=100, num_return_sequences=1,
                             pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Apply the optional keyword filter before returning the text
    return filter_explicit(response, explicit_filter)
|
|
iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(lines=2, placeholder="Type your message here..."),
        gr.Checkbox(label="Enable Explicit Content Filter"),
    ],
    outputs="text",
    title="Chatbot with Explicit Content Filter",
)
|
|
if __name__ == "__main__":
    iface.launch()
|
|