daviddwlee84 committed on
Commit c8b4b8f · 1 Parent(s): a473a1e

Try to fix the flagging mode error

.specstory/history/2025-03-27_10-40-gradio-code-update-and-error-fix.md CHANGED
@@ -81,6 +81,44 @@ These changes maintain all the same functionality while using the current Gradio
 
 ---
 
+_**User**_
+
+Now the error becomes this:
+
+```
+/usr/local/lib/python3.10/site-packages/gradio/interface.py:415: UserWarning: The `allow_flagging` parameter in `Interface` is deprecated.Use `flagging_mode` instead.
+  warnings.warn(
+Traceback (most recent call last):
+  File "/home/user/app/app.py", line 73, in <module>
+    demo = gr.Interface(
+  File "/usr/local/lib/python3.10/site-packages/gradio/interface.py", line 425, in __init__
+    raise ValueError(
+ValueError: Invalid value for `flagging_mode` parameter.Must be: 'auto', 'manual', or 'never'.
+```
+
+---
+
+_**Assistant**_
+
+
+
+---
+
+```diff
+  outputs=gr.Image(type="numpy", label="Generated - Conditioned Images"),
+  examples=examples,
+- allow_flagging=False,
++ flagging_mode="never",
+  analytics_enabled=False,
+  title=title,
+```
+
+I've updated the code by replacing the deprecated `allow_flagging=False` parameter with the new `flagging_mode="never"` parameter. Newer versions of Gradio replace the boolean flag with a more flexible string parameter that supports three flagging modes: 'auto', 'manual', or 'never'.
+
+---
+
 
 
 ---
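
The same migration in isolation — a minimal runnable sketch, assuming a Gradio release new enough to accept `flagging_mode` (as the traceback above indicates); the echo lambda and `Textbox` components are placeholders for illustration, not part of this Space:

```python
import gradio as gr

# Sketch of the parameter migration (assumed mapping, roughly):
#   allow_flagging=False    -> flagging_mode="never"   (no flag button)
#   allow_flagging="manual" -> flagging_mode="manual"  (user-clickable flag button)
#   allow_flagging="auto"   -> flagging_mode="auto"    (every submission flagged)
demo = gr.Interface(
    fn=lambda text: text,  # placeholder echo function, not the Space's predict()
    inputs=gr.Textbox(),
    outputs=gr.Textbox(),
    flagging_mode="never",
    analytics_enabled=False,
)
```

Since `allow_flagging=False` was never one of the accepted string values, passing it through to the new validator is what raised the `ValueError` above; `"never"` is the drop-in replacement.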
app.py CHANGED
@@ -8,79 +8,109 @@ import gradio as gr
 from huggingface_hub import from_pretrained_keras
 
 model = from_pretrained_keras("keras-io/GauGAN-Image-generation")
 
+
 def predict(image_file, segmentation_png, bitmap_img):
 
     image_list = [segmentation_png, image_file, bitmap_img]
 
     image = tf.image.decode_png(tf.io.read_file(image_list[1]), channels=3)
     image = tf.cast(image, tf.float32) / 127.5 - 1
 
     segmentation_file = tf.image.decode_png(tf.io.read_file(image_list[0]), channels=3)
-    segmentation_file = tf.cast(segmentation_file, tf.float32)/127.5 - 1
+    segmentation_file = tf.cast(segmentation_file, tf.float32) / 127.5 - 1
 
     label_file = tf.image.decode_bmp(tf.io.read_file(image_list[2]), channels=0)
     # label_file = tf.image.rgb_to_grayscale(tf.image.decode_bmp(tf.io.read_file(image_list[2]), channels=3))
     # print("after decode_bmp --> ", label_file.shape, type(label_file))
     label_file = tf.squeeze(label_file)
 
     image_list = [segmentation_file, image, label_file]
 
     crop_size = tf.convert_to_tensor((256, 256))
 
     image_shape = tf.shape(image_list[1])[:2]
 
     margins = image_shape - crop_size
 
     y1 = tf.random.uniform(shape=(), maxval=margins[0], dtype=tf.int32)
     x1 = tf.random.uniform(shape=(), maxval=margins[1], dtype=tf.int32)
     y2 = y1 + crop_size[0]
     x2 = x1 + crop_size[1]
 
     cropped_images = []
     for img in image_list:
         cropped_images.append(img[y1:y2, x1:x2])
 
-    final_img_list = [tf.expand_dims(cropped_images[0], axis=0), tf.expand_dims(cropped_images[1], axis=0), tf.expand_dims(tf.one_hot(cropped_images[2], 12), axis=0)]
+    final_img_list = [
+        tf.expand_dims(cropped_images[0], axis=0),
+        tf.expand_dims(cropped_images[1], axis=0),
+        tf.expand_dims(tf.one_hot(cropped_images[2], 12), axis=0),
+    ]
 
     # print(final_img_list[0].shape)
     # print(final_img_list[1].shape)
     # print(final_img_list[2].shape)
 
     latent_vector = tf.random.normal(shape=(1, 256), mean=0.0, stddev=2.0)
 
     # Generate fake images
     fake_image = model.predict([latent_vector, final_img_list[2]])
     fake_img = tf.squeeze(fake_image, axis=0)
 
-    return np.array((fake_img+1)/2)
+    return np.array((fake_img + 1) / 2)
 
+
 # Define inputs with modern Gradio syntax
 ground_truth = gr.Image(type="filepath", label="Ground Truth - Real Image (jpg)")
 segmentation = gr.Image(type="filepath", label="Corresponding Segmentation (png)")
-bitmap = gr.Image(type="filepath", label="Corresponding bitmap image (bmp)", image_mode="L")
+bitmap = gr.Image(
+    type="filepath", label="Corresponding bitmap image (bmp)", image_mode="L"
+)
 
-examples = [["facades_data/cmp_b0010.jpg", "facades_data/cmp_b0010.png", "facades_data/cmp_b0010.bmp"],
-            ["facades_data/cmp_b0020.jpg", "facades_data/cmp_b0020.png", "facades_data/cmp_b0020.bmp"],
-            ["facades_data/cmp_b0030.jpg", "facades_data/cmp_b0030.png", "facades_data/cmp_b0030.bmp"],
-            ["facades_data/cmp_b0040.jpg", "facades_data/cmp_b0040.png", "facades_data/cmp_b0040.bmp"],
-            ["facades_data/cmp_b0050.jpg", "facades_data/cmp_b0050.png", "facades_data/cmp_b0050.bmp"]]
+examples = [
+    [
+        "facades_data/cmp_b0010.jpg",
+        "facades_data/cmp_b0010.png",
+        "facades_data/cmp_b0010.bmp",
+    ],
+    [
+        "facades_data/cmp_b0020.jpg",
+        "facades_data/cmp_b0020.png",
+        "facades_data/cmp_b0020.bmp",
+    ],
+    [
+        "facades_data/cmp_b0030.jpg",
+        "facades_data/cmp_b0030.png",
+        "facades_data/cmp_b0030.bmp",
+    ],
+    [
+        "facades_data/cmp_b0040.jpg",
+        "facades_data/cmp_b0040.png",
+        "facades_data/cmp_b0040.bmp",
+    ],
+    [
+        "facades_data/cmp_b0050.jpg",
+        "facades_data/cmp_b0050.png",
+        "facades_data/cmp_b0050.bmp",
+    ],
+]
 
 title = "GauGAN For Conditional Image Generation"
 description = "Upload an Image or take one from examples to generate realistic images that are conditioned on cue images and segmentation maps"
 
 # Create interface with modern Gradio syntax
 demo = gr.Interface(
     fn=predict,
     inputs=[ground_truth, segmentation, bitmap],
     outputs=gr.Image(type="numpy", label="Generated - Conditioned Images"),
     examples=examples,
-    allow_flagging=False,
+    flagging_mode="never",
     analytics_enabled=False,
     title=title,
     description=description,
-    article="<center>Space By: <u><a href='https://github.com/robotjellyzone'><b>Kavya Bisht</b></a></u> \n Based on <a href='https://keras.io/examples/generative/gaugan/'><b>this notebook</b></a></center>"
+    article="<center>Space By: <u><a href='https://github.com/robotjellyzone'><b>Kavya Bisht</b></a></u> \n Based on <a href='https://keras.io/examples/generative/gaugan/'><b>this notebook</b></a></center>",
 )
 
 if __name__ == "__main__":
     demo.launch(enable_queue=True, debug=True)
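
One remaining line worth flagging: `enable_queue` has also been removed from `launch()` in recent Gradio releases (queueing is configured with `.queue()` instead), so on the same Gradio version that requires `flagging_mode`, the final `demo.launch(enable_queue=True, ...)` call may fail next. A hedged sketch of the version-appropriate launch, reusing the placeholder `demo` pattern above rather than this Space's actual interface:

```python
import gradio as gr

# Placeholder Interface standing in for the one built in app.py.
demo = gr.Interface(
    fn=lambda text: text,  # illustrative echo function
    inputs=gr.Textbox(),
    outputs=gr.Textbox(),
    flagging_mode="never",
)

if __name__ == "__main__":
    # On recent Gradio, queueing is requested via .queue() before launching;
    # `launch(enable_queue=True)` is no longer an accepted argument.
    demo.queue()
    demo.launch(debug=True)
```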