Update app.py
app.py CHANGED
@@ -1,33 +1,23 @@
 import streamlit as st
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import pipeline
 import torch

-# Load the DialoGPT model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
-model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
+# Load TinyLlama chatbot pipeline
+pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")

 # Streamlit app header
-st.set_page_config(page_title="
-st.header("
-
-# Initialize chat history
-chat_history_ids = None
+st.set_page_config(page_title="Chatbot Demo", page_icon="🤖")
+st.header("Chatbot Demo")

 # Input for user message
 user_message = st.text_input("You:", "")

 if st.button("Send"):
-    # Encode the new user input, add the eos_token and return a tensor
-    new_user_input_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors="pt")
-
-    # Append the new user input tokens to the chat history
-    bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if chat_history_ids is not None else new_user_input_ids
-
-    # Generate a response while limiting the total chat history to 1000 tokens
-    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
-
-    # Pretty print last output tokens from the bot
-    model_response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
+    # Use TinyLlama chatbot pipeline to generate a response
+    messages = [{"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
+                {"role": "user", "content": user_message}]
+    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    response = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)[0]["generated_text"]

     # Display the model's response
-    st.text_area("Model:", model_response)
+    st.text_area("Model Response:", response, height=100)
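Note: by default the text-generation pipeline puts the full prompt (chat template and system message included) ahead of the model's reply in generated_text, so the text area above echoes the prompt as well. A minimal sketch of how only the reply could be shown, assuming the same pipeline and prompt as in the commit; return_full_text is an existing pipeline option, but using it here is a suggestion, not part of this change:

    # Return only the newly generated tokens, not prompt + reply
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, return_full_text=False)
    reply = outputs[0]["generated_text"].strip()
    st.text_area("Model Response:", reply, height=100)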