#!/usr/bin/env python
# coding: utf-8

import os
import sys

# import numpy as np
import streamlit as st

# from PIL import Image
# import clip
# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# import gradio as gr
# from dalle.models import Dalle
# from dalle.utils.utils import clip_score, set_seed
device = "cpu"
# model = Dalle.from_pretrained("minDALL-E/1.3B") # This will automatically download the pretrained model.
# model.to(device=device)
# model_clip, preprocess_clip = clip.load("ViT-B/32", device=device)
# model_clip.to(device=device)
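
# The model loading above is commented out. If it is re-enabled, wrapping it in
# a cached helper keeps Streamlit from reloading the weights on every rerun.
# The sketch below is illustrative only: `load_models` is not part of the
# original app, and the (commented) decorator uses the legacy `st.cache` API
# that was current when this app was written.
# @st.cache(allow_output_mutation=True)
def load_models(device="cpu"):
    # Local imports so this file still runs while the heavy dependencies above
    # stay disabled; they are only needed if this helper is actually called.
    import clip
    from dalle.models import Dalle

    dalle_model = Dalle.from_pretrained("minDALL-E/1.3B")
    dalle_model.to(device=device)
    clip_model, clip_preprocess = clip.load("ViT-B/32", device=device)
    clip_model.to(device=device)
    return dalle_model, clip_model, clip_preprocess
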
# def sample(prompt):
# # Sampling
# images = (
# model.sampling(prompt=prompt, top_k=256, top_p=None, softmax_temperature=1.0, num_candidates=3, device=device)
# .cpu()
# .numpy()
# )
# images = np.transpose(images, (0, 2, 3, 1))
# # CLIP Re-ranking
# rank = clip_score(
# prompt=prompt, images=images, model_clip=model_clip, preprocess_clip=preprocess_clip, device=device
# )
# # Save images
# images = images[rank]
# # print(rank, images.shape)
# pil_images = []
# for i in range(len(images)):
# im = Image.fromarray((images[i] * 255).astype(np.uint8))
# pil_images.append(im)
# # im = Image.fromarray((images[0] * 255).astype(np.uint8))
# return pil_images
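
# `clip_score` above comes from `dalle.utils.utils` and is not shown in this
# file. The helper below is only an illustrative sketch of what CLIP re-ranking
# typically does (it is not the actual `clip_score` implementation): score each
# candidate image against the prompt with CLIP and return the candidate
# indices, best match first.
def rank_by_clip_similarity(prompt, pil_images, model_clip, preprocess_clip, device="cpu"):
    import clip
    import torch

    with torch.no_grad():
        # Encode and normalise the prompt.
        text = clip.tokenize([prompt]).to(device)
        text_features = model_clip.encode_text(text)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        # Encode and normalise the candidate images.
        image_input = torch.stack([preprocess_clip(im) for im in pil_images]).to(device)
        image_features = model_clip.encode_image(image_input)
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)

        # Cosine similarity between each image embedding and the prompt embedding.
        scores = (image_features @ text_features.T).squeeze(-1)

    # Candidate indices sorted from most to least similar to the prompt.
    return scores.argsort(descending=True).tolist()
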
# title = "Interactive demo: minDALL-E"
# description = "Demo for minDALL-E text-to-image generation. Type a prompt and click 'submit'; results will show up in a few seconds."
# article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2102.12092'>Zero-Shot Text-to-Image Generation</a> | <a href='https://github.com/kakaobrain/minDALL-E'>minDALL-E on GitHub</a></p>"
# iface = gr.Interface(
# fn=sample,
# inputs=[gr.inputs.Textbox(label="What would you like to see?")],
# outputs=gr.outputs.Image(type="pil", label="Model input + completions"),
# title=title,
# description=description,
# article=article,
# #examples=examples,
# enable_queue=True,
# )
# iface.launch(debug=True)
st.sidebar.markdown(
    """
<style>
.aligncenter {
    text-align: center;
}
</style>
<p class="aligncenter">
<img src="https://raw.githubusercontent.com/borisdayma/dalle-mini/main/img/logo.png"/>
</p>
""",
    unsafe_allow_html=True,
)
st.sidebar.markdown(
    """
___
<p style='text-align: center'>
DALL·E mini is an AI model that generates images from any prompt you give!
</p>
<p style='text-align: center'>
Created by Boris Dayma et al. 2021
<br/>
<a href="https://github.com/borisdayma/dalle-mini" target="_blank">GitHub</a> | <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA" target="_blank">Project Report</a>
</p>
""",
    unsafe_allow_html=True,
)
st.header("DALL·E mini")
st.subheader("Generate images from text")
prompt = st.text_input("What do you want to see?")
DEBUG = False
# if prompt != "":
# container = st.empty()
# container.markdown(
# f"""
# <style> p {{ margin:0 }} div {{ margin:0 }} </style>
# <div data-stale="false" class="element-container css-1e5imcs e1tzin5v1">
# <div class="stAlert">
# <div role="alert" data-baseweb="notification" class="st-ae st-af st-ag st-ah st-ai st-aj st-ak st-g3 st-am st-b8 st-ao st-ap st-aq st-ar st-as st-at st-au st-av st-aw st-ax st-ay st-az st-b9 st-b1 st-b2 st-b3 st-b4 st-b5 st-b6">
# <div class="st-b7">
# <div class="css-whx05o e13vu3m50">
# <div data-testid="stMarkdownContainer" class="css-1ekf893 e16nr0p30">
# <img src="https://raw.githubusercontent.com/borisdayma/dalle-mini/main/app/streamlit/img/loading.gif" width="30"/>
# Generating predictions for: <b>{prompt}</b>
# </div>
# </div>
# </div>
# </div>
# </div>
# </div>
# <small><i>Predictions may take up to 40s under high load. Please stand by.</i></small>
# """,
# unsafe_allow_html=True,
# )
# print(f"Getting selections: {prompt}")
# selected = sample(prompt)
# margin = 0.1 # for better position of zoom in arrow
# n_columns = 3
# cols = st.columns([1] + [margin, 1] * (n_columns - 1))
# for i, img in enumerate(selected):
# cols[(i % n_columns) * 2].image(img)
# container.markdown(f"**{prompt}**")
# st.button("Again!", key="again_button")
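
# The grid display code above is commented out. The helper below is an
# illustrative sketch (not part of the original app, and not called anywhere)
# of the same layout trick: interleave narrow spacer columns between the image
# columns so that images are not packed edge to edge.
def show_image_grid(images, n_columns=3, margin=0.1):
    # For n_columns=3 the column widths are [1, 0.1, 1, 0.1, 1]:
    # image, spacer, image, spacer, image.
    cols = st.columns([1] + [margin, 1] * (n_columns - 1))
    for i, img in enumerate(images):
        # Step by 2 to skip the spacer columns.
        cols[(i % n_columns) * 2].image(img)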