yulu2 committed
Commit: d7d871b
Parent: 734b226

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -17,13 +17,13 @@ import torch
 from PIL import Image, ImageOps
 from diffusers import StableDiffusionInstructPix2PixPipeline
 
-title = "InstructCV"
+title = "InstructCV: Instruction-Tuned Text-to-Image Diffusion Models as Vision Generalists"
 
 description = """
 <p style='text-align: center'> Yulu Gan, Sungwoo Park, Alex Schubert, Anthony Philippakis, Ahmed Alaa <br>
-<a href='https://huggingface.co/spaces/alaa-lab/InstructCV' target='_blank'>Project Page</a> | <a href='https://arxiv.org/abs/2310.00390'>Paper</a> | <a href='https://github.com/AlaaLab/InstructCV' target='_blank'>Code</a></p>
-Gradio demo for InstructCV: Instruction-Tuned Text-to-Image Diffusion Models As Vision Generalists. \n
-Please upload a new image and provide an instruction outlining the specific vision task you wish InstructCV to perform (e.g., “Segment the dog”, “Detect the dog”, “Estimate the depth map of this image”, etc.). \n
+<a href='https://arxiv.org/abs/2310.00390'>arXiv</a> | <a href='https://github.com/AlaaLab/InstructCV' target='_blank'>Code</a></p>
+We develop a <b>unified language interface</b> for computer vision tasks that abstracts away task-specific design choices and enables task execution by following natural language instructions. \n
+<b>Tips for using this demo</b>: Please upload a new image and provide an instruction outlining the specific vision task you wish InstructCV to perform (e.g., “Segment the dog”, “Detect the dog”, “Estimate the depth map of this image”, etc.). \n
 """ # noqa
 
 
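For context, here is a minimal sketch of how the imports touched by this diff are typically wired together in a demo like this one. It is illustrative only: the checkpoint id, instruction text, and generation parameters below are assumptions for illustration, not values taken from app.py.

import torch
from PIL import Image, ImageOps
from diffusers import StableDiffusionInstructPix2PixPipeline

# Assumed checkpoint id for illustration; the real id is set elsewhere in app.py.
MODEL_ID = "alaa-lab/InstructCV"

# Load the InstructPix2Pix-style pipeline in half precision on the GPU.
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    MODEL_ID, torch_dtype=torch.float16
).to("cuda")

# Normalize the orientation of the uploaded image, then run one
# natural-language instruction of the kind the description suggests.
image = ImageOps.exif_transpose(Image.open("input.jpg")).convert("RGB")
result = pipe(
    "Estimate the depth map of this image",
    image=image,
    num_inference_steps=50,
    image_guidance_scale=1.5,
).images[0]
result.save("output.png")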