Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -9,75 +9,79 @@ from sentence_transformers import SentenceTransformer
|
|
9 |
# Retrieve the token from environment variables
|
10 |
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
|
11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
# Use the token with from_pretrained
|
13 |
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
|
14 |
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
|
15 |
|
16 |
-
#
|
17 |
-
|
18 |
-
with open(filepath, 'r', encoding='utf-8') as file:
|
19 |
-
return [line.strip().lower() for line in file]
|
20 |
-
|
21 |
-
# Load bad words list
|
22 |
-
bad_words = load_bad_words('badwords.txt') # Adjust the path to your bad words file
|
23 |
-
|
24 |
-
# List of topics for the dropdown
|
25 |
-
topics_list = ['Aviation', 'Science', 'Education', 'Air Force Pilot', 'Space Exploration', 'Technology']
|
26 |
-
|
27 |
-
#Load BerTopic model
|
28 |
sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
|
29 |
topic_model = BERTopic(embedding_model=sentence_model)
|
30 |
|
31 |
-
def
|
32 |
-
|
33 |
|
34 |
-
def
|
35 |
-
|
36 |
-
|
37 |
-
"What are the basic requirements to become an airforce pilot?",
|
38 |
-
"How long does it take to train as an airforce pilot?",
|
39 |
-
"Can you describe a day in the life of an airforce pilot?"
|
40 |
-
]
|
41 |
-
return predefined_prompt, questions
|
42 |
|
43 |
-
def
|
44 |
-
#
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
# In a real scenario, you'd integrate this with the Llama model to generate a detailed response
|
50 |
-
|
51 |
-
return response
|
52 |
|
53 |
-
def
|
54 |
-
with
|
55 |
-
|
56 |
-
gr.Markdown("This chatbot uses BERTopic to identify topics in your messages and ensures the conversation stays relevant and safe.")
|
57 |
-
with gr.Row():
|
58 |
-
start_btn = gr.Button("Start Conversation")
|
59 |
-
questions_output = gr.Radio(label="Select a question", choices=[])
|
60 |
-
submit_btn = gr.Button("Submit Question")
|
61 |
-
response_output = gr.Textbox(label="Bot Response")
|
62 |
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
|
74 |
-
|
75 |
-
fn=generate_response,
|
76 |
-
inputs=[questions_output, topics_list], # Assuming topics_list is used to filter topics or for future use
|
77 |
-
outputs=response_output
|
78 |
-
)
|
79 |
|
80 |
-
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
if __name__ == "__main__":
|
83 |
-
|
|
|
9 |
# Retrieve the token from environment variables
|
10 |
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
|
11 |
|
12 |
+
import gradio as gr
|
13 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
14 |
+
import os
|
15 |
+
from bertopic import BERTopic
|
16 |
+
from sentence_transformers import SentenceTransformer
|
17 |
+
|
18 |
+
# Assuming necessary initializations and model loading here
|
19 |
+
# Retrieve the token from environment variables
|
20 |
+
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
|
21 |
+
|
22 |
# Use the token with from_pretrained
|
23 |
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
|
24 |
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
|
25 |
|
26 |
+
# Assuming BERTopic and other necessary components are initialized here
|
27 |
+
# Initialize your BERTopic model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
|
29 |
topic_model = BERTopic(embedding_model=sentence_model)
|
30 |
|
31 |
+
def print_like_dislike(x: gr.LikeData):
    """Log a like/dislike reaction coming from the Chatbot UI to stdout."""
    # Same console line as before: message index, message text, liked flag,
    # space-separated.
    idx, val, liked = x.index, x.value, x.liked
    print(idx, val, liked)
|
33 |
|
34 |
+
def add_text(history, text):
    """Append the user's message to the chat history with a canned bot reply.

    Mutates ``history`` in place and returns the same list, as the Gradio
    event wiring expects.
    """
    canned_reply = "**That's cool!**"
    history.append((text, canned_reply))
    return history
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
+
def add_file(history, file):
    """Record an uploaded file in the chat history.

    Only the file's name is displayed on the user side of the bubble; the
    bot side is left empty.  Mutates ``history`` in place and returns it.
    """
    entry = (f"Uploaded file: {file.name}", "")
    history.append(entry)
    return history
|
|
|
|
|
|
|
|
|
43 |
|
44 |
+
def initialize_chat():
    """Return the opening chat history shown when the app loads.

    Returns:
        A single-element list of ``(user, bot)`` tuples — the value format
        the gr.Chatbot component expects.  ``None`` on the user side renders
        a bot-only greeting bubble.
    """
    # BUGFIX: corrected typos in the user-facing greeting
    # ("wth" -> "with", "jurney" -> "journey", added missing articles).
    greeting = (
        "Hello, my name is Andrea. I'm a FunDoo Chatbot and will help you "
        "with your learning journey. Select a question from below to start!"
    )
    return [(None, greeting)]
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
|
48 |
+
def generate_response(selected_question):
    """Generate a Llama-2 reply for the selected question and tag its topics.

    Args:
        selected_question: The question text chosen in the UI (a string).

    Returns:
        A single-element list of ``(user_message, bot_message)`` tuples —
        the value format the gr.Chatbot component expects.  The bot message
        has a "Topics: ..." summary appended.
    """
    prompt = selected_question  # Gradio passes the textbox value as a plain string
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    # BUGFIX: the original used max_length=100, which counts prompt +
    # completion tokens — any prompt longer than 100 tokens left no room
    # to generate.  max_new_tokens always allows 100 fresh tokens.
    outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True, top_p=0.95, top_k=50)
    # NOTE(review): decoding outputs[0] includes the prompt tokens, so the
    # reply echoes the question text — confirm this is intended.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    try:
        topics, _ = topic_model.transform([response])
        # Summarize each detected topic by its top five words; -1 is
        # BERTopic's outlier bucket and is skipped.
        topic_names = [
            ", ".join(word for word, _ in topic_model.get_topic(topic)[:5])
            for topic in topics
            if topic != -1
        ]
        topics_str = "; ".join(topic_names[:10])
    except Exception as e:
        # topic_model is created at module level but never fitted before
        # transform(), so this path can fire on a fresh model — degrade
        # gracefully instead of crashing the request.
        topics_str = "Topic analysis not available"
        print(f"Error during topic analysis: {e}")

    # The Chatbot component expects a list of (user, bot) tuples.
    return [(prompt, response + "\n\nTopics: " + topics_str)]
|
64 |
|
65 |
+
# ---- Gradio UI wiring --------------------------------------------------
with gr.Blocks() as demo:
    # Chat window, pre-populated with the greeting from initialize_chat().
    chatbot = gr.Chatbot(
        initialize_chat(),
        elem_id="chatbot",
        bubble_full_width=False,
        avatar_images=(None, os.path.join(os.getcwd(), "avatar.png")),
    )

    with gr.Row():
        question_box = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Select Question",
            container=False,
            interactive=True,
        )
        submit_btn = gr.Button("Submit")

    # Route the typed/selected question through the LLM pipeline; the
    # returned (user, bot) list replaces the chatbot's current value.
    submit_btn.click(fn=generate_response, inputs=[question_box], outputs=chatbot)

    # Clickable starter questions rendered below the input row.
    example_questions = [
        ["What are the basic requirements to become an airforce pilot?"],
        ["How long does it take to train as an airforce pilot?"],
        ["Can you describe a day in the life of an airforce pilot?"],
    ]
    gr.Examples(example_questions, inputs=[question_box], outputs=[chatbot], label="Select Question")

if __name__ == "__main__":
    demo.launch()
|