import streamlit as st
import torch
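from transformers import AutoModelForCausalLM, AutoTokenizer

# NOTE: the committed file references `model` and `tokenizer` without ever
# defining them, so generation would fail with a NameError on the first
# request. The block below is a minimal sketch of the missing loading step,
# assuming a Hugging Face transformers causal LM. MODEL_NAME is a placeholder;
# the original file does not show which checkpoint the app uses.
MODEL_NAME = "gpt2"  # hypothetical checkpoint -- swap in the intended model ID

@st.cache_resource  # load once and reuse across Streamlit reruns
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    if torch.cuda.is_available():
        model = model.to("cuda")  # move weights to the GPU when available
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()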
# Generate a response for a user prompt with the loaded model
def generate_response(prompt):
    if not prompt:
        return "Please enter a prompt."
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=512)
    # Decode the generated token IDs back into text
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
# Streamlit UI
st.title("AI Text Generator")
prompt = st.text_area("Enter your prompt:", placeholder="Type your question or prompt here...")
if st.button("Generate Response"):
    with st.spinner("Generating response..."):
        response = generate_response(prompt)
    st.text_area("Model Response:", value=response, height=200, disabled=True)
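
# To try the app locally (assuming streamlit, torch, and transformers are
# installed), launch it with Streamlit's CLI:
#   streamlit run app.py   (filename assumed from the commit context)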