peterkros committed on
Commit 397c8a2 · verified · 1 Parent(s): 4bbaefc

Update app.py
Files changed (1): app.py +9 -7
app.py CHANGED
@@ -9,12 +9,6 @@ from sentence_transformers import SentenceTransformer
 # Retrieve the token from environment variables
 huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
 
-import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import os
-from bertopic import BERTopic
-from sentence_transformers import SentenceTransformer
-
 # Assuming necessary initializations and model loading here
 # Retrieve the token from environment variables
 huggingface_token = os.getenv('LLAMA_ACCES_TOKEN')
@@ -45,12 +39,16 @@ def initialize_chat():
     # This function initializes the chat with a "Hello" message.
     return [(None, "Hello, my name is <strong>Andrea</strong>, I'm a <em>Friendly Chatbot</em> and will help you with your learning journey. <br>Select a question from below to start!")]
 
+chat_history = initialize_chat()
 
 def generate_response(selected_question):
+    global chat_history
     prompt = selected_question  # Ensure selected_question is a string
     inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
     outputs = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.95, top_k=50)
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+
 
     try:
         topics, _ = topic_model.transform([response])
@@ -61,7 +59,11 @@ def generate_response(selected_question):
         print(f"Error during topic analysis: {e}")
 
     # Adjusted to return a list of tuples as expected by the Chatbot component
-    return [(prompt, response + "\n\nTopics: " + topics_str)]
+    new_response = (prompt, response + "\n\nTopics: " + topics_str)
+    chat_history.append(new_response)
+
+
+    return chat_history
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(
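
For context, a minimal sketch of how the updated generate_response could be wired into the Blocks UI. This is an illustration only, not part of the commit: the question list, Dropdown, and Button are hypothetical, and it assumes initialize_chat, generate_response, tokenizer, model, and topic_model from app.py are already in scope.

import gradio as gr

questions = ["What is machine learning?", "How do neural networks learn?"]  # hypothetical

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(value=initialize_chat())  # seed the chat with the greeting
    question = gr.Dropdown(choices=questions, label="Select a question")
    ask = gr.Button("Ask")
    # generate_response now appends to the global chat_history and returns the
    # whole list, so its output can replace the Chatbot value directly.
    ask.click(fn=generate_response, inputs=question, outputs=chatbot)

demo.launch()

Returning the accumulated chat_history (rather than a single (prompt, response) tuple, as before this commit) means each click re-renders the full conversation in the Chatbot component instead of overwriting it with the latest exchange.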