rayl-aoit committed on
Commit
aa638de
·
verified ·
1 Parent(s): ea1bb53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -40
app.py CHANGED
@@ -1,31 +1,17 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
- from diffusers import StableDiffusionPipeline
4
 
5
  playground = gr.Blocks()
6
 
7
  image_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
8
  summary_pipe = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
9
  ner_pipe = pipeline("ner", model="dslim/bert-base-NER")
10
- pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
11
- pipe = pipe.to("cuda")
12
 
13
  def launch_image_pipe(input):
14
  out = image_pipe(input)
15
  return out[0]['generated_text']
16
-
17
- def base64_to_pil(img_base64):
18
- base64_decoded = base64.b64decode(img_base64)
19
- byte_stream = io.BytesIO(base64_decoded)
20
- pil_image = Image.open(byte_stream)
21
- return pil_image
22
-
23
- def image_generate(prompt):
24
- # output = get_completion(prompt)
25
- image = pipe(prompt).images[0]
26
- result_image = base64_to_pil(image)
27
- return result_image
28
-
29
  def translate(input_text, source, target):
30
  try:
31
  model = f"Helsinki-NLP/opus-mt-{source}-{target}"
@@ -81,35 +67,53 @@ def create_playground_footer():
81
  **To Learn More about 🤗 Hugging Face, [Click Here](https://huggingface.co/docs)**
82
  """)
83
 
84
- def create_tabs_header(topic, description, references):
85
- with gr.Row():
86
- with gr.Column(scale=4):
87
- # reference_list = "> " + "\n> ".join(references)
88
- # content = f"## {topic}\n"
89
- # content += f"### {description}\n"
90
- # for ref in references:
91
- # content += f"> {ref}\n"
92
- # gr.Markdown(content)
93
- gr.Markdown("""
94
- ## Image Captioning
95
- ### Upload a image, check what AI understand and have vision on it.
96
- > category: Image-to-Text, model: [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base)
97
- """)
98
 
99
- with gr.Column(scale=1):
100
- test_pipeline_button = gr.Button(value="Start Process", variant="primary")
101
- return test_pipeline_button
102
 
103
  with playground:
104
  create_playground_header()
105
  with gr.Tabs():
106
  with gr.TabItem("Image"):
107
 
108
- topic = "Image Captioning"
109
- description = "Upload a image, check what AI understand and have vision on it."
110
- references = ["category: Image-to-Text",
111
- "model: [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base)"]
112
- image_pipeline_button = create_tabs_header(topic, description, references)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
 
114
  with gr.Row():
115
  with gr.Column():
@@ -123,8 +127,8 @@ with playground:
123
  with gr.Row():
124
  generated_image = gr.Image(label="Generated Image")
125
 
126
- image_pipeline_button.click(launch_image_pipe, inputs=[img], outputs=[generated_textbox])
127
- image_generation_button.click(image_generate, inputs=[generated_textbox], outputs=[generated_image])
128
 
129
  with gr.TabItem("Text"):
130
  with gr.Row():
 
1
  import gradio as gr
2
  from transformers import pipeline
 
3
 
4
  playground = gr.Blocks()
5
 
6
  image_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
7
  summary_pipe = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
8
  ner_pipe = pipeline("ner", model="dslim/bert-base-NER")
9
+
 
10
 
11
  def launch_image_pipe(input):
12
  out = image_pipe(input)
13
  return out[0]['generated_text']
14
+
 
 
 
 
 
 
 
 
 
 
 
 
15
  def translate(input_text, source, target):
16
  try:
17
  model = f"Helsinki-NLP/opus-mt-{source}-{target}"
 
67
  **To Learn More about 🤗 Hugging Face, [Click Here](https://huggingface.co/docs)**
68
  """)
69
 
70
+ # def create_tabs_header(topic, description, references):
71
+ # with gr.Row():
72
+ # with gr.Column(scale=4):
73
+ # # reference_list = "> " + "\n> ".join(references)
74
+ # # content = f"## {topic}\n"
75
+ # # content += f"### {description}\n"
76
+ # # for ref in references:
77
+ # # content += f"> {ref}\n"
78
+ # # gr.Markdown(content)
79
+ # gr.Markdown("""
80
+ # ## Image Captioning
81
+ # ### Upload a image, check what AI understand and have vision on it.
82
+ # > category: Image-to-Text, model: [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base)
83
+ # """)
84
 
85
+ # with gr.Column(scale=1):
86
+ # test_pipeline_button = gr.Button(value="Start Process", variant="primary")
87
+ # return test_pipeline_button
88
 
89
  with playground:
90
  create_playground_header()
91
  with gr.Tabs():
92
  with gr.TabItem("Image"):
93
 
94
+ # topic = "Image Captioning"
95
+ # description = "Upload a image, check what AI understand and have vision on it."
96
+ # references = ["category: Image-to-Text",
97
+ # "model: [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base)"]
98
+ # image_pipeline_button = create_tabs_header(topic, description, references)
99
+
100
+ with gr.Row():
101
+ with gr.Column(scale=4):
102
+ # reference_list = "> " + "\n> ".join(references)
103
+ # content = f"## {topic}\n"
104
+ # content += f"### {description}\n"
105
+ # for ref in references:
106
+ # content += f"> {ref}\n"
107
+ # gr.Markdown(content)
108
+ gr.Markdown("""
109
+ ## Image Captioning
110
+ ### Upload a image, check what AI understand and have vision on it.
111
+ > category: Image-to-Text, model: [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base)
112
+ """)
113
+
114
+ with gr.Column(scale=1):
115
+ ITT_button = gr.Button(value="Start Process", variant="primary")
116
+ ITT_Clear_button = gr.ClearButton(components=[img, generated_textbox], value="Clear")
117
 
118
  with gr.Row():
119
  with gr.Column():
 
127
  with gr.Row():
128
  generated_image = gr.Image(label="Generated Image")
129
 
130
+ ITT_button.click(launch_image_pipe, inputs=[img], outputs=[generated_textbox])
131
+ # image_generation_button.click(image_generate, inputs=[generated_textbox], outputs=[generated_image])
132
 
133
  with gr.TabItem("Text"):
134
  with gr.Row():