Update app.py
app.py CHANGED

@@ -1,9 +1,8 @@
 import streamlit as st
 from transformers import pipeline
-import torch
 
 # Load TinyLlama chatbot pipeline
-pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=…
+pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype="float32", device_map="auto")
 
 # Streamlit app header
 st.set_page_config(page_title="Chatbot Demo", page_icon="🤖")
@@ -14,10 +13,12 @@ user_message = st.text_input("You:", "")
 
 if st.button("Send"):
     # Use TinyLlama chatbot pipeline to generate a response
-    messages = [{"role": "system", "content": "You are a friendly chatbot who always responds in the style of a …
+    messages = [{"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
                 {"role": "user", "content": user_message}]
     prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-…
+
+    # Generate response using TinyLlama
+    response = pipe(prompt, max_length=256, temperature=0.7, top_k=50, top_p=0.95)[0]["generated_text"]
 
     # Display the model's response
     st.text_area("Model Response:", response, height=100)
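For reference, a minimal sketch of the full app.py after this commit. The diff does not show lines 9-12 between the two hunks, so the st.title line below is an assumption; user_message = st.text_input("You:", "") is taken from the second hunk's context line. Treat this as a reconstruction, not the exact committed file.

import streamlit as st
from transformers import pipeline

# Load TinyLlama chatbot pipeline
# (device_map="auto" requires the `accelerate` package to be installed)
pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype="float32", device_map="auto")

# Streamlit app header
st.set_page_config(page_title="Chatbot Demo", page_icon="🤖")
st.title("Chatbot Demo")  # assumed: this line is not shown in the diff

# Text input for the user's message (taken from the diff's hunk context)
user_message = st.text_input("You:", "")

if st.button("Send"):
    # Use TinyLlama chatbot pipeline to generate a response
    messages = [{"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
                {"role": "user", "content": user_message}]
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # Generate response using TinyLlama
    # (temperature/top_k/top_p only take effect when sampling is enabled,
    #  i.e. do_sample=True, either explicitly or via the model's generation config)
    response = pipe(prompt, max_length=256, temperature=0.7, top_k=50, top_p=0.95)[0]["generated_text"]

    # Display the model's response
    st.text_area("Model Response:", response, height=100)

Run it locally with streamlit run app.py. Note that the text-generation pipeline's generated_text includes the prompt by default, so the text area will echo the chat template as well as the reply; passing return_full_text=False in the pipe(...) call would return only the model's completion.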