JamalAG committed
Commit 926f3d7
1 Parent(s): be59ddb

Update app.py

Files changed (1)
  1. app.py +11 -21
app.py CHANGED
@@ -1,33 +1,23 @@
  import streamlit as st
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import pipeline
  import torch
 
- # Load DialoGPT model and tokenizer
- tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium", padding_side='left')
- model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
+ # Load TinyLlama chatbot pipeline
+ pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
 
  # Streamlit app header
- st.set_page_config(page_title="Conversational Model Demo", page_icon="🤖")
- st.header("Conversational Model Demo")
-
- # Initialize chat history
- chat_history_ids = None
+ st.set_page_config(page_title="Chatbot Demo", page_icon="🤖")
+ st.header("Chatbot Demo")
 
  # Input for user message
  user_message = st.text_input("You:", "")
 
  if st.button("Send"):
-     # Encode the new user input, add the eos_token and return a tensor in PyTorch
-     new_user_input_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors='pt')
-
-     # Append the new user input tokens to the chat history
-     bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if chat_history_ids is not None else new_user_input_ids
-
-     # Generate a response while limiting the total chat history to 1000 tokens
-     chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
-
-     # Pretty print last output tokens from the bot
-     model_response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
+     # Use TinyLlama chatbot pipeline to generate a response
+     messages = [{"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
+                 {"role": "user", "content": user_message}]
+     prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     response = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)[0]["generated_text"]
 
      # Display the model's response
-     st.text_area("Model:", model_response, height=100)
+     st.text_area("Model Response:", response, height=100)