|
import streamlit as st |
|
from transformers import pipeline |
|
from huggingface_hub import cached_download |
|
import torch |
|
from PIL import Image |
|
|
|
|
|
# ---------------------------------------------------------------------------
# Model loading.
#
# Streamlit re-executes this entire script on every user interaction, so the
# pipelines are built inside @st.cache_resource functions: each model is
# loaded (and downloaded, on first use) exactly once per server process.
# The previous version also force-re-downloaded every model on every rerun
# via the deprecated huggingface_hub.cached_download with an invalid
# repo_id ("huggingface/transformers/<model>" is not a Hub repo id);
# pipeline() already downloads and caches weights, so that step is dropped.
# ---------------------------------------------------------------------------

@st.cache_resource
def _load_text_generator():
    """Return a cached GPT-2 text-generation pipeline."""
    return pipeline("text-generation", model="gpt2")


@st.cache_resource
def _load_question_answering():
    """Return a cached extractive question-answering pipeline.

    The model id "distilbert-base-cased-squad2" does not exist on the
    Hugging Face Hub; the published DistilBERT SQuAD checkpoint is
    "distilbert-base-cased-distilled-squad".
    """
    return pipeline(
        "question-answering",
        model="distilbert-base-cased-distilled-squad",
    )


@st.cache_resource
def _load_image_generator():
    """Return an image-generation pipeline, or None when unavailable.

    "image-generation" is not a task supported by transformers.pipeline
    (text-to-image generation requires the `diffusers` library), and
    "sd-v1-diffusion" is not a valid Hub model id, so construction is
    expected to fail; the app degrades gracefully instead of crashing on
    startup.
    """
    try:
        return pipeline("image-generation", model="sd-v1-diffusion")
    except Exception:
        return None


text_generator = _load_text_generator()
question_answering = _load_question_answering()
image_generator = _load_image_generator()
|
|
|
|
|
# --- User input and story generation ---------------------------------------

prompt = st.text_input("Start your story with...", value="Once upon a time...")

# Generate the story for the current prompt. max_length counts the prompt
# tokens too and matches GPT-2's 1024-token context window.
generated_text = text_generator(prompt, max_length=1024)[0]["generated_text"]

# `speak` is defined later in this script; the lambda resolves the name
# lazily at click time, so the late definition is safe.
st.button("Speak the story", on_click=lambda: speak(generated_text))

# st.button returns True only on the rerun triggered by the click; the actual
# regeneration happens further down in the script. The previous on_click
# called a `generate_text` function that is never defined anywhere, raising
# NameError on every click — that broken callback is removed.
generated_text_2 = st.button("Generate different story")

question = st.text_input("Ask a question about the story...")

# Guard against an empty question: the QA pipeline raises on empty input,
# and st.text_input returns "" until the user types something.
if question:
    answer = question_answering(question=question, context=generated_text)["answer"]
else:
    answer = ""
|
|
|
|
|
# --- Illustration -----------------------------------------------------------
# The previous post-processing (torch.ByteStorage.from_buffer(...).read())
# was not a valid API: ByteStorage has no .read() method, and Image.open
# needs a path or file-like object. Image pipelines return PIL images
# directly, so the result is used as-is; any failure degrades to showing
# the story without an illustration.
image = None
try:
    if image_generator is not None:
        result = image_generator(prompt="Image related to the story:")
        candidate = result[0]["image"] if isinstance(result, list) else result
        if isinstance(candidate, Image.Image):
            image = candidate
except Exception as exc:  # best-effort: the story is still shown without art
    st.warning(f"Could not generate an illustration: {exc}")
|
|
|
|
|
# --- Output -----------------------------------------------------------------

st.title("Your Story")
st.write(generated_text)

# Only render an illustration when one was actually produced; st.image
# raises on a missing image.
if image is not None:
    st.image(image)

st.subheader("Ask and Learn")

# Skip the Q&A display until the user has actually typed a question.
if question:
    st.write(f"Question: {question}")
    st.write(f"Answer: {answer}")

# The "Generate different story" button triggered this rerun with
# generated_text_2 == True; sample a fresh continuation of the same prompt
# (GPT-2 generation samples, so a new call yields a different story).
if generated_text_2:
    generated_text_2 = text_generator(prompt, max_length=1024)[0]["generated_text"]
    st.write("**New Story:**")
    st.write(generated_text_2)
|
|
|
|
|
def speak(text):
    """Text-to-speech placeholder for the "Speak the story" button.

    Currently a no-op: no TTS backend is wired up. The button's lambda looks
    this name up lazily at click time, so defining it this late in the
    script is safe.
    TODO: integrate a TTS engine (e.g. pyttsx3 or gTTS) and play `text`.
    """

    pass
|
|
|
# Footer: static disclaimers about AI-generated content.
for disclaimer in (
    "* This app is still under development and may not always generate accurate or coherent results.",
    "* Please be mindful of the content generated by the AI models.",
):
    st.write(disclaimer)
|
|
|
|
|
|
|
|