import s23_openai_clip
import gradio as gr
import zipfile

# Paths read by the CLIP helper module (presumably consumed via its config;
# TODO confirm s23_openai_clip actually reads these module-level names).
image_path = "./Images"
captions_path = "."

# Unpack the Flickr8k image archive before computing embeddings.
# NOTE(review): extraction target is 'Images' while image_path is './Images' —
# verify the zip's internal layout matches (no extra nested folder).
with zipfile.ZipFile('flickr8k.zip', 'r') as zip_ref:
    zip_ref.extractall('Images')

# Build the validation split and precompute image embeddings once at startup.
# Qualified with the module name: the file uses `import s23_openai_clip`,
# so bare `make_train_valid_dfs` / `get_image_embeddings` were undefined.
_, valid_df = s23_openai_clip.make_train_valid_dfs()
model, image_embeddings = s23_openai_clip.get_image_embeddings(valid_df, "best.pt")
def greet(query_text):
    """Run a CLIP text-to-image search for *query_text*.

    Args:
        query_text: Free-text query typed into the Gradio textbox
            (e.g. "dogs on the grass").

    Returns:
        Whatever s23_openai_clip.inference_CLIP2 produces — presumably a
        list of matching images for the Gallery output (TODO confirm).
    """
    # Qualified with the module name: the file only does
    # `import s23_openai_clip`, so a bare `inference_CLIP2` is undefined.
    return s23_openai_clip.inference_CLIP2(query_text)
# Output component: a 3x3 gallery of the retrieved images.
gallery = gr.Gallery(
    label="Generated images", show_label=True, elem_id="gallery",
    columns=[3], rows=[3], object_fit="contain", height="auto")

demo = gr.Interface(fn=greet, inputs="text",
                    outputs=gallery)

# BUG FIX: launch("debug") passed the string "debug" positionally as the
# first parameter of launch(), which is `share`, not `debug` — a truthy
# share value would try to create a public share link. The intended call
# enables debug mode via the keyword argument.
demo.launch(debug=True)