peterkros committed on
Commit
c25c819
·
verified ·
1 Parent(s): ed4b793

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -38
app.py CHANGED
@@ -29,56 +29,55 @@ sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
29
  topic_model = BERTopic(embedding_model=sentence_model)
30
 
31
def is_inappropriate_or_offtopic(message, selected_topics):
    """Return True when `message` is off-topic for `selected_topics` or contains a bad word.

    NOTE(review): relies on module-level `topic_model` and `bad_words`
    (the original comment says bad-word loading lives elsewhere) — confirm
    both are defined before this is called.
    """
    topics, probabilities = topic_model.transform([message])

    # -1 is BERTopic's outlier class; keep only meaningful topic ids.
    relevant = [t for t in topics if t != -1]

    # Presume off-topic until one predicted topic's keywords match a selection.
    off_topic = True
    for topic_id in relevant:
        info = topic_model.get_topic(topic_id)
        if not info:
            continue
        keywords = [word for word, _ in info]
        if any(chosen.lower() in keywords for chosen in selected_topics):
            off_topic = False
            break  # one match is enough to count as on-topic

    contains_bad_word = any(bad_word in message.lower() for bad_word in bad_words)
    return off_topic or contains_bad_word
49
 
50
def generate_response(message, selected_topics):
    """Produce a bot reply plus a comma-joined list of identified topic labels.

    Refuses with a canned message when the input is inappropriate or off-topic;
    otherwise generates text with the language model.
    """
    if is_inappropriate_or_offtopic(message, selected_topics):
        response = "Sorry, let's try to keep our conversation focused on positive and relevant topics!"
    else:
        encoded = tokenizer.encode(message, return_tensors="pt")
        generated = model.generate(encoded, max_length=50, do_sample=True)
        response = tokenizer.decode(generated[0], skip_special_tokens=True)

    # Re-run topic prediction for display purposes (could reuse the earlier
    # prediction from is_inappropriate_or_offtopic as an optimization).
    topics, _ = topic_model.transform([message])
    topic_names = [topic_model.get_topic(t)[0][0] for t in topics if t != -1]

    return response, ", ".join(topic_names)
66
 
67
def main():
    """Assemble the free-text chat UI and launch the Gradio app."""
    with gr.Blocks() as demo:
        gr.Markdown("### Child-Safe Chatbot | BETA")
        gr.Markdown("This chatbot uses BERTopic to identify topics in your messages and ensures the conversation stays relevant.")

        with gr.Row():
            message_input = gr.Textbox(label="Your Message")
            topics_dropdown = gr.Dropdown(choices=topics_list, label="Select Topics", multiselect=True)
            submit_btn = gr.Button("Send")

        response_output = gr.Textbox(label="Bot Response")
        topics_output = gr.Textbox(label="Identified Topics", placeholder="Topics will be displayed here...")

        # generate_response returns (reply, joined_topic_names), matching
        # the two output components in order.
        submit_btn.click(
            fn=generate_response,
            inputs=[message_input, topics_dropdown],
            outputs=[response_output, topics_output],
        )

        demo.launch()
 
 
 
 
29
  topic_model = BERTopic(embedding_model=sentence_model)
30
 
31
def is_inappropriate_or_offtopic(message, selected_topics):
    """Return True when `message` is off-topic for `selected_topics` or contains a bad word.

    Fix: the committed stub contained only a comment ("# Function logic as
    before"), which is a SyntaxError in Python — a function body must hold at
    least one statement. The previous revision's logic is restored here.

    NOTE(review): relies on module-level `topic_model` and `bad_words` —
    confirm both are defined earlier in the file.
    """
    topics, probabilities = topic_model.transform([message])
    # -1 is BERTopic's outlier class; keep only meaningful topics.
    relevant_topics = [topic for topic in topics if topic != -1]

    # Default to off-topic; any keyword match with a selected topic clears it.
    is_offtopic = True
    for topic_num in relevant_topics:
        topic_info = topic_model.get_topic(topic_num)
        if topic_info:
            topic_keywords = [word for word, _ in topic_info]
            if any(selected_topic.lower() in topic_keywords for selected_topic in selected_topics):
                is_offtopic = False
                break  # one matching topic is enough

    return is_offtopic or any(bad_word in message.lower() for bad_word in bad_words)
33
 
34
def suggest_questions():
    """Return the assistant's opening prompt and three starter questions.

    Returns:
        tuple[str, list[str]]: (greeting prompt, list of suggested questions).
    """
    opening_prompt = (
        "Hello! My name is Andrea. So you want to learn how to become an "
        "airforce pilot? Select a question and let's start your learning journey."
    )
    starter_questions = [
        "What are the basic requirements to become an airforce pilot?",
        "How long does it take to train as an airforce pilot?",
        "Can you describe a day in the life of an airforce pilot?",
    ]
    return opening_prompt, starter_questions
 
 
 
 
 
 
 
42
 
43
def generate_response(selected_question, selected_topics):
    """Return a (placeholder) answer for the user's selected question.

    Args:
        selected_question: Question string chosen in the UI, or None/"" when
            nothing has been selected yet.
        selected_topics: Accepted for interface compatibility; not used by the
            placeholder implementation. A real implementation would feed the
            question (and topics) to the Llama model.

    Returns:
        str: The bot's response text.
    """
    # Fix: a gr.Radio yields None until the user picks an option, and the
    # original unconditional string concatenation would raise TypeError.
    if not selected_question:
        return "Please select a question first."

    # Placeholder until the Llama model is wired in to generate a detailed response.
    return "This is a placeholder response for the question: " + selected_question
52
 
53
def main():
    """Assemble the question-driven chat UI and launch the Gradio app.

    Fixes over the committed version:
    - `questions_output.update(choices=...)` called inside a callback does not
      modify the live component; callbacks must *return* `gr.update(...)` for
      the component listed in `outputs`.
    - `inputs=[questions_output, topics_list]` passed a plain Python list where
      Gradio requires components; `topics_list` is module-level state and is
      now closed over in a wrapper instead.
    """
    with gr.Blocks() as demo:
        gr.Markdown("### Child-Safe Chatbot | BETA")
        gr.Markdown("This chatbot uses BERTopic to identify topics in your messages and ensures the conversation stays relevant and safe.")

        with gr.Row():
            start_btn = gr.Button("Start Conversation")
            questions_output = gr.Radio(label="Select a question", choices=[])
            submit_btn = gr.Button("Submit Question")

        response_output = gr.Textbox(label="Bot Response")

        def update_questions():
            # Return gr.update so the Radio's choices actually refresh.
            prompt, questions = suggest_questions()
            return gr.update(choices=questions), prompt

        start_btn.click(
            fn=update_questions,
            inputs=[],
            outputs=[questions_output, response_output],
        )

        def on_submit(selected_question):
            # topics_list comes from module scope; Gradio inputs must be
            # components, so pass it through here rather than in `inputs`.
            return generate_response(selected_question, topics_list)

        submit_btn.click(
            fn=on_submit,
            inputs=[questions_output],
            outputs=response_output,
        )

    demo.launch()


if __name__ == "__main__":
    main()