peterkros committed on
Commit
f8de097
·
verified ·
1 Parent(s): c25c819

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -56
app.py CHANGED
@@ -9,75 +9,79 @@ from sentence_transformers import SentenceTransformer
9
  # Retrieve the token from environment variables
10
  huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
11
 
 
 
 
 
 
 
 
 
 
 
12
  # Use the token with from_pretrained
13
  tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
14
  model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
15
 
16
- # Function to load bad words from a file
17
- def load_bad_words(filepath):
18
- with open(filepath, 'r', encoding='utf-8') as file:
19
- return [line.strip().lower() for line in file]
20
-
21
- # Load bad words list
22
- bad_words = load_bad_words('badwords.txt') # Adjust the path to your bad words file
23
-
24
- # List of topics for the dropdown
25
- topics_list = ['Aviation', 'Science', 'Education', 'Air Force Pilot', 'Space Exploration', 'Technology']
26
-
27
- #Load BerTopic model
28
  sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
29
  topic_model = BERTopic(embedding_model=sentence_model)
30
 
31
- def is_inappropriate_or_offtopic(message, selected_topics):
32
- # Function logic as before
33
 
34
- def suggest_questions():
35
- predefined_prompt = "Hello! My name is Andrea. So you want to learn how to become an airforce pilot? Select a question and let's start your learning journey."
36
- questions = [
37
- "What are the basic requirements to become an airforce pilot?",
38
- "How long does it take to train as an airforce pilot?",
39
- "Can you describe a day in the life of an airforce pilot?"
40
- ]
41
- return predefined_prompt, questions
42
 
43
- def generate_response(selected_question, selected_topics):
44
- # Function logic for generating a response based on the selected question
45
-
46
- # Example of generating a response based on the selected question
47
- response = "This is a placeholder response for the question: " + selected_question
48
-
49
- # In a real scenario, you'd integrate this with the Llama model to generate a detailed response
50
-
51
- return response
52
 
53
- def main():
54
- with gr.Blocks() as demo:
55
- gr.Markdown("### Child-Safe Chatbot | BETA")
56
- gr.Markdown("This chatbot uses BERTopic to identify topics in your messages and ensures the conversation stays relevant and safe.")
57
- with gr.Row():
58
- start_btn = gr.Button("Start Conversation")
59
- questions_output = gr.Radio(label="Select a question", choices=[])
60
- submit_btn = gr.Button("Submit Question")
61
- response_output = gr.Textbox(label="Bot Response")
62
 
63
- def update_questions():
64
- prompt, questions = suggest_questions()
65
- questions_output.update(choices=questions)
66
- return prompt
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
- start_btn.click(
69
- fn=update_questions,
70
- inputs=[],
71
- outputs=response_output
72
- )
 
 
 
 
 
 
73
 
74
- submit_btn.click(
75
- fn=generate_response,
76
- inputs=[questions_output, topics_list], # Assuming topics_list is used to filter topics or for future use
77
- outputs=response_output
78
- )
79
 
80
- demo.launch()
 
 
 
 
 
81
 
82
  if __name__ == "__main__":
83
- main()
 
import os

import gradio as gr
from bertopic import BERTopic
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face access token for the gated Llama-2 checkpoint.
# NOTE(review): the env var is spelled 'LLAMA_ACCES_TOKEN' (single 'S') —
# keep as-is unless the deployment secret is renamed to match.
# Fix: this lookup previously ran before `import os` and was duplicated;
# imports now come first and the token is read exactly once.
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')

# Load tokenizer and model once at module import (heavyweight, network-bound).
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)

# Sentence-embedding backbone + BERTopic model used to tag topics in replies.
sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
topic_model = BERTopic(embedding_model=sentence_model)
30
 
31
def print_like_dislike(x: gr.LikeData):
    """Log a like/dislike event from the Chatbot component to stdout."""
    # Echo the event's message position, text, and liked flag.
    event_fields = (x.index, x.value, x.liked)
    print(*event_fields)
33
 
34
def add_text(history, text):
    """Append the user's message with a canned bot reply to the chat history.

    Mutates and returns *history* (a list of (user, bot) tuples), as
    Gradio callback wiring expects.
    """
    canned_reply = "**That's cool!**"
    history += [(text, canned_reply)]
    return history
 
 
 
 
 
37
 
38
def add_file(history, file):
    """Record an uploaded file in the chat history.

    Appends a (message, reply) pair announcing the file's name with an
    empty bot reply, then returns the mutated history list.
    """
    history.append((f"Uploaded file: {file.name}", ""))
    return history
 
 
 
 
43
 
44
def initialize_chat():
    """Return the chatbot's opening history: a single bot-only greeting.

    The (None, text) pair renders as a bot message with no user turn.
    Fix: corrected typos in the user-facing greeting ("wth" -> "with",
    "jurney" -> "journey", "Select question from Below" -> "Select a
    question from below").
    """
    greeting = (
        "Hello, my name is Andrea. I'm a FunDoo Chatbot and will help you "
        "with your learning journey. Select a question from below to start!"
    )
    return [(None, greeting)]
 
 
 
 
 
 
47
 
48
def generate_response(selected_question):
    """Generate a Llama-2 reply to *selected_question* and tag its topics.

    Returns a single-element list of (user_message, bot_message) tuples —
    the format the Gradio Chatbot component expects. Topic analysis is
    best-effort: on failure the reply is still returned with a placeholder
    topics string.
    """
    prompt = selected_question  # Gradio passes the textbox value as a string.
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    # Fix: use max_new_tokens so the cap applies to generated tokens only.
    # The previous max_length=100 counted prompt tokens too, and the prompt
    # alone (up to 512 tokens) could already exceed it.
    outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True, top_p=0.95, top_k=50)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    try:
        topics, _ = topic_model.transform([response])
        # Keep the five highest-weight words per topic; skip -1 (outlier topic).
        topic_names = [
            ", ".join(word for word, _ in topic_model.get_topic(topic)[:5])
            for topic in topics
            if topic != -1
        ]
        topics_str = "; ".join(topic_names[:10])
    except Exception as e:
        # Topic tagging must never break the chat reply; log and fall back.
        topics_str = "Topic analysis not available"
        print(f"Error during topic analysis: {e}")

    # Chatbot component expects a list of (user, bot) tuples.
    return [(prompt, response + "\n\nTopics: " + topics_str)]
64
 
65
with gr.Blocks() as demo:
    # Chat display, seeded with the greeting turn and a custom bot avatar.
    chat_window = gr.Chatbot(
        initialize_chat(),
        elem_id="chatbot",
        bubble_full_width=False,
        avatar_images=(None, os.path.join(os.getcwd(), "avatar.png")),
    )

    with gr.Row():
        question_box = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Select Question",
            container=False,
            interactive=True,  # the user may edit a selected question
        )
        submit_btn = gr.Button("Submit")

    # Clicking Submit runs the model and writes the reply into the chat.
    submit_btn.click(fn=generate_response, inputs=[question_box], outputs=chat_window)

    # Starter questions: clicking one fills the textbox.
    starter_questions = [
        ["What are the basic requirements to become an airforce pilot?"],
        ["How long does it take to train as an airforce pilot?"],
        ["Can you describe a day in the life of an airforce pilot?"],
    ]
    gr.Examples(starter_questions, inputs=[question_box], outputs=[chat_window], label="Select Question")

if __name__ == "__main__":
    demo.launch()