Siwon123 committed
Commit b3a080b · 1 Parent(s): e744ad6
Files changed (3)
  1. app.py +30 -36
  2. inference.ipynb +33 -0
  3. requirements.txt +2 -1
app.py CHANGED
@@ -1,51 +1,45 @@
import gradio as gr
from inference import infer

-import os
-
-# run the setup.sh script
-os.system("install.sh")
-
-
def greet(image, prompt):
    restore_img = infer(img=image, text_prompt=prompt)
    return restore_img

-
-title = "🖼️ ICDR 🖼️"
-description = ''' ## ICDR: Image Restoration Framework for Composite Degradation following Human Instructions
-Our Github : https://github.com/
-
-Siwon Kim, Donghyeon Yoon
-
-Ajou Univ
-'''
-
-
-article = "<p style='text-align: center'><a href='https://github.com/' target='_blank'>ICDR</a></p>"
-
-#### Image,Prompts examples
-examples = [['input/00010.png', "I love this photo, could you remove the haze and more brighter?"],
-            ['input/00058.png', "I have to post an emotional shot on Instagram, but it was shot too foggy and too dark. Change it like a sunny day and brighten it up!"]]
-
-css = """
-.image-frame img, .image-container img {
-    width: auto;
-    height: auto;
-    max-width: none;
-}
-"""
-
-
-demo = gr.Interface(
-    fn=greet,
-    inputs=[gr.Image(type="pil", label="Input"),
-            gr.Text(label="Prompt")],
-    outputs=[gr.Image(type="pil", label="Output")],
-    title=title,
-    description=description,
-    article=article,
-    examples=examples,
-    css=css,
-)
-demo.launch(share=True)
+
+def main():
+    title = "🖼️ ICDR 🖼️"
+    description = ''' ## ICDR: Image Restoration Framework for Composite Degradation following Human Instructions
+    Our Github : https://github.com/
+
+    Siwon Kim, Donghyeon Yoon
+
+    Ajou Univ
+    '''
+
+    article = "<p style='text-align: center'><a href='https://github.com/' target='_blank'>ICDR</a></p>"
+
+    #### Image,Prompts examples
+    examples = [['input/00010.png', "I love this photo, could you remove the haze and more brighter?"],
+                ['input/00058.png', "I have to post an emotional shot on Instagram, but it was shot too foggy and too dark. Change it like a sunny day and brighten it up!"]]
+
+    css = """
+    .image-frame img, .image-container img {
+        width: auto;
+        height: auto;
+        max-width: none;
+    }
+    """
+
+    demo = gr.Interface(
+        fn=greet,
+        inputs=[gr.Image(type="pil", label="Input"),
+                gr.Text(label="Prompt")],
+        outputs=[gr.Image(type="pil", label="Output")],
+        title=title,
+        description=description,
+        article=article,
+        examples=examples,
+        css=css,
+    )
+    demo.launch(share=True)
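
Note: after this change, app.py only defines main() and never invokes it at import time, so running `python app.py` directly no longer launches the demo; the new inference.ipynb below is what calls main(). A minimal, hypothetical entry-point guard (not part of this commit) that would restore direct execution:

if __name__ == "__main__":
    # Hypothetical addition: launch the Gradio demo when app.py is run directly.
    main()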
inference.ipynb ADDED
@@ -0,0 +1,33 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install mmcv==2.2.0 -f https://download.openmmlab.com/mmcv/dist/cu121/torch2.4/index.html\n",
+    "!pip install git+https://github.com/openai/CLIP.git\n",
+    "!pip install scikit-video"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from app import main\n",
+    "\n",
+    "main()"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
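
For reference, the restoration itself comes from inference.infer, which app.py calls as infer(img=image, text_prompt=prompt) on a PIL image (gr.Image(type="pil")). A rough sketch of invoking it directly, outside Gradio, under the assumption (not stated in this commit) that infer both accepts and returns a PIL image:

from PIL import Image

from inference import infer

# One of the bundled example inputs and prompts from app.py.
image = Image.open("input/00010.png")
prompt = "I love this photo, could you remove the haze and more brighter?"

# Assumption: infer returns a PIL.Image that can be saved directly.
restored = infer(img=image, text_prompt=prompt)
restored.save("restored_00010.png")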
requirements.txt CHANGED
@@ -7,4 +7,5 @@ scikit-image
 scikit-video
 matplotlib
 tqdm
-git+https://github.com/openai/CLIP.git
+git+https://github.com/openai/CLIP.git
+mmcv-lite