# app.py
import streamlit as st
from PIL import Image
from transformers import pipeline
from gtts import gTTS
import tempfile

# —––––––– Page config
st.set_page_config(page_title="Storyteller for Kids", layout="centered")
st.title("🖼️ ➡️ 📖 Interactive Storyteller")

# —––––––– Cache model loading
@st.cache_resource
def load_pipelines():
    # 1) Image-to-text (captioning)
    captioner = pipeline(
        "image-to-text",
        model="Salesforce/blip-image-captioning-base"
    )
    # 2) Story generation with GPT-Neo 2.7B
    storyteller = pipeline(
        "text-generation",
        model="EleutherAI/gpt-neo-2.7B",
        device=-1  # -1 = CPU; use 0 for the first GPU if one is available
    )
    return captioner, storyteller

captioner, storyteller = load_pipelines()

# —––––––– Image upload
uploaded = st.file_uploader("Upload an image:", type=["jpg", "jpeg", "png"])
if uploaded:
    image = Image.open(uploaded).convert("RGB")
    st.image(image, caption="Your image", use_column_width=True)

    # —––––––– 1. Caption
    with st.spinner("🔍 Looking at the image..."):
        cap_outputs = captioner(image)
        cap = cap_outputs[0].get("generated_text", "").strip()
    st.markdown(f"**Caption:** {cap}")

    # —––––––– 2. Story generation
    prompt = (
        "Write a playful, 80–100 word story for 3–10 year-old children "
        f"based on this description:\n\n“{cap}”\n\nStory:"
    )
    with st.spinner("✍️ Writing a story..."):
        out = storyteller(
            prompt,
            max_new_tokens=120,        # allow space for ~100 words
            do_sample=True,
            top_p=0.9,
            temperature=0.8,
            num_return_sequences=1,
            return_full_text=False     # return only the continuation, not the prompt
        )
    story = out[0]["generated_text"].strip()
    st.markdown("**Story:**")
    st.write(story)

    # —––––––– 3. Text-to-Speech
    with st.spinner("🔊 Converting to speech..."):
        tts = gTTS(story, lang="en")
        # Close the temp file before handing its path to st.audio so it is readable on all platforms
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
            tts.write_to_fp(tmp)
            audio_path = tmp.name
    st.audio(audio_path, format="audio/mp3")
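
# Running it locally (a minimal sketch; the pip package names below are the usual ones,
# and exact versions are an assumption rather than something this script pins):
#   pip install streamlit pillow transformers torch gtts
#   streamlit run app.py
# Note: EleutherAI/gpt-neo-2.7B downloads several gigabytes of weights on first load and
# generates slowly on CPU; swapping in a smaller checkpoint such as EleutherAI/gpt-neo-1.3B
# is a reasonable trade-off for quicker experiments.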