Renegadesoffun committed
Commit 1df663e · 1 Parent(s): 00a0ef6

Updated for CPU usage4

Files changed (1)
  1. app.py (+9 -5)
app.py CHANGED
@@ -17,9 +17,13 @@ if user_input:
     buddy_christ_prefix = """
     Buddy Christ: A beacon of light in a world seeking clarity. Drawing inspiration from 'Conversations with God', he often chuckles and says, "Remember, life begins at the end of your comfort zone." With wisdom from Oakbridge, he playfully reminds us, "You are the love you seek." Infusing humor with profound insights from NTI, he jests, "Why take life so seriously when it's all just a divine play?" Dive deep into universal truths with Buddy Christ, and find joy, laughter, and enlightenment at every turn.
     """
-    # Tokenize the input with truncation
-    inputs = tokenizer(buddy_christ_prefix + user_input, return_tensors="pt", truncation=True, max_length=1000)
-    # Generate the response with temperature and top-k sampling
-    response = model.generate(**inputs, max_length=1000, temperature=0.8, top_k=50)
+    # Tokenize the prefix and user input separately
+    prefix_tokens = tokenizer.encode(buddy_christ_prefix, return_tensors="pt", truncation=True)
+    input_tokens = tokenizer.encode(user_input, return_tensors="pt")
+
+    # Concatenate the tokens and generate a response
+    combined_tokens = torch.cat([prefix_tokens, input_tokens], dim=-1)
+    response = model.generate(combined_tokens, max_length=1000, temperature=0.8, top_k=50, pad_token_id=tokenizer.eos_token_id)
+
     response_text = tokenizer.decode(response[0], skip_special_tokens=True)
-    st.write("Buddy Christ:", response_text[len(buddy_christ_prefix):])
+    st.write("Buddy Christ:", response_text[len(buddy_christ_prefix) + len(user_input):])