import streamlit as st
import torch
from transformers import pipeline
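

# Loading the ~1.1B-parameter model is expensive, so cache it across Streamlit
# reruns with st.cache_resource rather than re-creating the pipeline on every
# button click. (load_pipeline is a helper introduced here, not part of the
# original script; device_map="auto" additionally requires the `accelerate`
# package to be installed.)
@st.cache_resource
def load_pipeline():
    return pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )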
def main():
    # Set up the page
    st.set_page_config(page_title="Nudge Generator Demo - TinyLlama 1.1B", page_icon="orYx logo.png")

    # Title and logo
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title("Nudge Generator")
    with col2:
        st.image("orYx logo.png", use_column_width=True)

    # Chat interface
    st.markdown("---")
    st.header("Chat Interface")
    # Input for user-provided message
    user_message = st.text_area("Enter your message:")

    if st.button("Generate Response"):
        if user_message.strip():
            with st.spinner("Generating response..."):
                # Load the (cached) text-generation pipeline
                pipe = load_pipeline()
                # Define the message structure
                messages = [
                    {
                        "role": "system",
                        "content": "You are a personal corporate trainer that teaches me how to perform well in my organization.",
                    },
                    {"role": "user", "content": user_message},
                ]
                # Render the messages into the model's chat prompt format
                prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
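                # For TinyLlama-1.1B-Chat-v1.0 the rendered prompt follows a
                # Zephyr-style template, roughly:
                #   <|system|>\n{system}</s>\n<|user|>\n{user}</s>\n<|assistant|>\n
                # add_generation_prompt=True appends the trailing <|assistant|>
                # turn so the model continues as the assistant.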
                # Generate the response; return_full_text=False keeps the
                # prompt out of the returned text.
                outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, return_full_text=False)
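                # Sampling settings: temperature=0.7 softens the distribution,
                # top_k=50 / top_p=0.95 restrict sampling to likely tokens, and
                # max_new_tokens=256 caps the length of the reply.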
                # Display the response
                st.text_area("Chatbot Response:", outputs[0]["generated_text"], height=200)
        else:
            st.warning("Please enter a message to get a response.")


if __name__ == "__main__":
    main()
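
# Launch locally with:
#   streamlit run app.py
# ("app.py" is assumed here; substitute the actual filename of this script.)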