Renegadesoffun committed on
Commit
00a0ef6
·
1 Parent(s): 2dbaebb

Updated for CPU usage3

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -1,14 +1,12 @@
1
  import streamlit as st
2
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
  # Use a smaller model
5
  model_name = "microsoft/DialoGPT-small"
6
 
7
  # Use the model's default GPT2 tokenizer
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
9
-
10
  model = AutoModelForCausalLM.from_pretrained(model_name)
11
- chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1, max_length=1000)
12
 
13
  # Streamlit UI
14
  st.title("Buddy Christ Chatbot 🌟")
@@ -21,7 +19,7 @@ if user_input:
21
  """
22
  # Tokenize the input with truncation
23
  inputs = tokenizer(buddy_christ_prefix + user_input, return_tensors="pt", truncation=True, max_length=1000)
24
- # Generate the response
25
- response = model.generate(**inputs)
26
  response_text = tokenizer.decode(response[0], skip_special_tokens=True)
27
  st.write("Buddy Christ:", response_text[len(buddy_christ_prefix):])
 
1
  import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
  # Use a smaller model
5
  model_name = "microsoft/DialoGPT-small"
6
 
7
  # Use the model's default GPT2 tokenizer
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
9
  model = AutoModelForCausalLM.from_pretrained(model_name)
 
10
 
11
  # Streamlit UI
12
  st.title("Buddy Christ Chatbot 🌟")
 
19
  """
20
  # Tokenize the input with truncation
21
  inputs = tokenizer(buddy_christ_prefix + user_input, return_tensors="pt", truncation=True, max_length=1000)
22
+ # Generate the response with temperature and top-k sampling
23
+ response = model.generate(**inputs, max_length=1000, temperature=0.8, top_k=50)
24
  response_text = tokenizer.decode(response[0], skip_special_tokens=True)
25
  st.write("Buddy Christ:", response_text[len(buddy_christ_prefix):])