File size: 4,198 Bytes
144e4aa
 
1883a62
45fd4eb
 
 
59718f7
 
1883a62
 
 
 
41d6bd4
 
 
 
27725f6
 
144e4aa
f8de097
 
41d6bd4
 
45fd4eb
f8de097
 
144e4aa
f8de097
 
 
45fd4eb
f8de097
 
 
 
 
b58de5b
f8de097
 
916c578
 
397c8a2
c25c819
f8de097
397c8a2
f8de097
 
 
 
397c8a2
 
f8de097
41d6bd4
 
 
 
 
 
 
f8de097
 
ee3634d
397c8a2
 
 
 
c25c819
f8de097
a848e10
 
ee3634d
 
a848e10
 
f8de097
 
 
 
4bbaefc
f8de097
 
 
 
4e1da0f
f8de097
b1746f2
f8de097
bc27e82
f8de097
 
 
 
 
 
c25c819
41d6bd4
 
c25c819
f8de097
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import os
from bertopic import BERTopic
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from sentence_transformers import SentenceTransformer

# Hugging Face access token for gated models (e.g. Llama 2).
# NOTE(review): env var is spelled 'LLAMA_ACCES_TOKEN' (missing an 'S') —
# presumably intentional to match the deployment secret; confirm before renaming.
huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')

# Use the token with from_pretrained (disabled: Llama 2 is gated; GPT-2 XL is used instead)
#tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)
#model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token=huggingface_token)

# Load the tokenizer and model (downloads weights on first run; happens at import time)
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-xl")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-xl")

# Assuming BERTopic and other necessary components are initialized here
# Initialize your BERTopic model (disabled — generate_response shows a placeholder instead)
#sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
#topic_model = BERTopic(embedding_model=sentence_model)

def print_like_dislike(x: gr.LikeData):
    """Debug hook: log the position, text, and liked flag of a rated chat message."""
    index, value, liked = x.index, x.value, x.liked
    print(index, value, liked)

def add_text(history, text):
    """Append the user's message paired with a canned bot reply, in place.

    Returns the same (mutated) history list for the Gradio output binding.
    """
    canned_reply = "**That's cool!**"
    history.append((text, canned_reply))
    return history

def add_file(history, file):
    """Record an uploaded file in the chat history, in place.

    Only the file's name is shown on the user side; the bot side stays empty.
    Returns the same (mutated) history list.
    """
    entry = ("Uploaded file: " + file.name, "")
    history.append(entry)
    return history

def initialize_chat():
    """Return the opening chat history: a single bot greeting with no user turn."""
    greeting = (
        "Hello, my name is <strong>Andrea</strong>, I'm a <em>Friendly Chatbot</em>"
        " and will help you with your learning journey. <br>Select a question from"
        " below to start!"
    )
    # (user, bot) tuple; the user side is None for a bot-initiated message.
    return [(None, greeting)]

chat_history = initialize_chat()

def generate_response(selected_question):
    """Generate a model reply to the selected question and append it to the chat.

    Args:
        selected_question: The question text chosen by the user (a string).

    Returns:
        The full, updated chat history as a list of (user, bot) tuples,
        in the format expected by the gr.Chatbot component.
    """
    global chat_history
    inputs = tokenizer(selected_question, return_tensors="pt", max_length=512, truncation=True)
    # Sampled decoding. NOTE(review): GPT-2's generate() output begins with the
    # prompt itself, so the prompt text is echoed in the reply — confirm if intended.
    outputs = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.95, top_k=50)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Topic modelling is currently disabled (the BERTopic model is never
    # initialized), so a fixed placeholder is appended instead of real topics.
    topics_str = "Topic analysis not available"

    # The Chatbot component expects (user, bot) tuples; user side is None here
    # because the question was already surfaced via the examples/textbox.
    chat_history.append((None, response + "\n\nTopics: " + topics_str))
    return chat_history

# Build the Gradio UI: intro text, chatbot panel, question box + submit button,
# preset example questions, and a like/dislike callback.
with gr.Blocks() as demo:
    gr.Markdown(
    """
    # Child safe chatbot project !
   In the realm of digital communication, the development of an advanced chatbot that incorporates topic modeling represents a significant leap towards enhancing user interaction and maintaining focus during conversations. This innovative chatbot design is specifically engineered to streamline discussions by guiding users to select from a curated list of suggested questions. This approach is crafted to mitigate the risk of diverging into off-topic dialogues, which are common pitfalls in conventional chatbot systems.
    """)
    
    # Chat panel seeded with the greeting; bot avatar is loaded from ./avatar.png
    # (assumes the file exists in the current working directory).
    chatbot = gr.Chatbot(
        initialize_chat(),
        elem_id="chatbot",
        bubble_full_width=False,
        label= "Safe Chatbot v1",
        avatar_images=(None, os.path.join(os.getcwd(), "avatar.png"))
    )
    
    with gr.Row():
        # interactive=False: users cannot type free text — they can only pick a
        # question from the Examples below, which fills this box.
        txt = gr.Textbox(scale=4, show_label=False, placeholder="Select Question", container=False, interactive=False)  # Adjust based on need
        btn = gr.Button("Submit")

    # Submit sends the selected question through the model and refreshes the chat.
    btn.click(fn=generate_response, inputs=[txt], outputs=chatbot)

    # Curated questions — the only way for users to drive the conversation.
    examples = [
        ["What are the basic requirements to become an airforce pilot?"],
        ["How long does it take to train as an airforce pilot?"],
        ["Can you describe a day in the life of an airforce pilot?"]
    ]
    gr.Examples(examples, inputs=[txt], outputs=[chatbot], label="Select Question")

    # Like/dislike events on chat messages are logged to stdout only.
    chatbot.like(print_like_dislike, None, None)

# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()