# Helpers from the CLIP project module (data split, embedding, and retrieval)
from s23_openai_clip import make_train_valid_dfs
from s23_openai_clip import get_image_embeddings
from s23_openai_clip import inference_CLIP

import gradio as gr
import zipfile
import pandas as pd
import subprocess

# query_text = "dogs on the grass"
image_path = "./Images"       # Flickr8k images extracted from the archive below
captions_path = "."           # captions.txt / captions.csv live in the working directory
data_source = 'flickr8k.zip'  # dataset archive expected next to this script

print("\n\n")
print("Going to unzip dataset")
with zipfile.ZipFile(data_source, 'r') as zip_ref:
    zip_ref.extractall('.')
print("unzip of dataset is done")

#=============================================
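# Sanity check: log the working directory and the extracted files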

cmd = "pwd"
output1 = subprocess.check_output(cmd, shell=True).decode("utf-8")
print("result of pwd command")
print(output1) # result => /home/user/app


# shell command to run
cmd = "ls -l"
output1 = subprocess.check_output(cmd, shell=True).decode("utf-8")
print("result of ls -l command")
print(output1)

#=============================================
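# Flickr8k provides 5 captions per image; rebuild captions.txt as captions.csv
# with an id column that groups each image's 5 captions together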

print("Going to prepare captions.csv")
df = pd.read_csv("captions.txt")
df['id'] = [id_ for id_ in range(df.shape[0] // 5) for _ in range(5)]
df.to_csv("captions.csv", index=False)
df = pd.read_csv("captions.csv")
print("Finished in preparing captions.csv")
print("\n\n")

print("Going to invoke make_train_valid_dfs")
_, valid_df = make_train_valid_dfs()
print("Going to invoke make_train_valid_dfs")
model, image_embeddings = get_image_embeddings(valid_df, "best.pt")


def greet(query_text):
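    # Assumption: inference_CLIP uses the model and image_embeddings prepared above
    # (e.g. via module-level state in s23_openai_clip) and returns the images to show in the gallery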
    print("Going to invoke inference_CLIP")
    return inference_CLIP(query_text)
    
gallery = gr.Gallery(
           label="Generated images", show_label=True, elem_id="gallery", 
           columns=[3], rows=[3], object_fit="contain", height="auto")
# btn = gr.Button("Generate images", scale=0)
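# Text query in, CLIP-retrieved image gallery out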
demo = gr.Interface(fn=greet, inputs="text", 
                    outputs=gallery)
print("Going to invoke demo.launch")
demo.launch(debug=True)