Renegadesoffun committed on
Commit a71b8eb · 1 Parent(s): 4400abd

Updated for CPU eval

Files changed (1): app.py +21 -21
app.py CHANGED
@@ -1,29 +1,29 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
-# Use a smaller model
-model_name = "microsoft/DialoGPT-small"
 
-# Use the model's default GPT2 tokenizer
-tokenizer = AutoTokenizer.from_pretrained(model_name)
+# Use a medium sized model
+model_name = "microsoft/DialoGPT-medium"
+
 model = AutoModelForCausalLM.from_pretrained(model_name)
+model.eval()  # Make sure model is in evaluation mode
 
-# Streamlit UI
-st.title("Buddy Christ Chatbot 🌟")
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 
+st.title("Buddy Christ Chatbot")
+
 user_input = st.text_input("You:", "")
+
 if user_input:
-    # Prefix to make the model speak like Buddy Christ
-    buddy_christ_prefix = """
-    Buddy Christ: A beacon of light in a world seeking clarity. Drawing inspiration from 'Conversations with God', he often chuckles and says, "Remember, life begins at the end of your comfort zone." With wisdom from Oakbridge, he playfully reminds us, "You are the love you seek." Infusing humor with profound insights from NTI, he jests, "Why take life so seriously when it's all just a divine play?" Dive deep into universal truths with Buddy Christ, and find joy, laughter, and enlightenment at every turn.
-    """
-    # Tokenize the prefix and user input separately
-    prefix_tokens = tokenizer.encode(buddy_christ_prefix, return_tensors="pt", truncation=True)
-    input_tokens = tokenizer.encode(user_input, return_tensors="pt")
-
-    # Concatenate the tokens and generate a response
-    combined_tokens = torch.cat([prefix_tokens, input_tokens], dim=-1)
-    response = model.generate(combined_tokens, max_length=1000, temperature=0.8, top_k=50, pad_token_id=tokenizer.eos_token_id)
-
-    response_text = tokenizer.decode(response[0], skip_special_tokens=True)
-    st.write("Buddy Christ:", response_text[len(buddy_christ_prefix) + len(user_input):])
+
+    inputs = tokenizer.encode(user_input, return_tensors="pt")
+
+    response = model.generate(inputs,
+                              max_length=1000,
+                              temperature=0.8,
+                              top_k=50,
+                              pad_token_id=tokenizer.eos_token_id)
+
+    response_text = tokenizer.decode(response[0], skip_special_tokens=True)
+
+    st.write("Buddy Christ:", response_text)
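
Note: the commit message says "CPU eval", but the diff itself only toggles evaluation mode; from_pretrained already loads weights on CPU unless they are moved. A minimal sketch of fully CPU-pinned, gradient-free generation, assuming the same model_name as above (the explicit device placement, the torch.no_grad() block, and the do_sample=True flag are illustrative additions, not part of this commit; in transformers, temperature and top_k only take effect when do_sample=True):

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "microsoft/DialoGPT-medium"

# Pin the model to CPU explicitly and switch to evaluation mode
# (disables dropout and other train-time behavior).
device = torch.device("cpu")
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
model.eval()
tokenizer = AutoTokenizer.from_pretrained(model_name)

user_input = "Hello there"  # stand-in for the Streamlit text input
inputs = tokenizer.encode(user_input, return_tensors="pt").to(device)

# torch.no_grad() skips autograd bookkeeping, cutting memory use
# and latency for pure inference.
with torch.no_grad():
    response = model.generate(
        inputs,
        max_length=1000,
        do_sample=True,  # required for temperature/top_k to apply
        temperature=0.8,
        top_k=50,
        pad_token_id=tokenizer.eos_token_id,
    )

print(tokenizer.decode(response[0], skip_special_tokens=True))

In the Streamlit app itself, loading the model and tokenizer inside a function decorated with @st.cache_resource would also avoid re-initializing the weights on every rerun.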