Rohil Bansal committed
Commit d2a2bed · 1 Parent(s): ea39ccb

Moved repo. First commit.

.gitattributes CHANGED
@@ -25,7 +25,6 @@
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
@@ -33,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+colorization_release_v2.caffemodel filter=lfs diff=lfs merge=lfs -text
README copy.md ADDED
@@ -0,0 +1,13 @@
+---
+title: Black N White To Color
+emoji: 🦀
+colorFrom: pink
+colorTo: yellow
+sdk: gradio
+sdk_version: 3.20.1
+app_file: app.py
+pinned: false
+license: openrail
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,54 @@
+
+# Import statements
+import numpy as np
+import cv2
+import gradio as gr
+
+
+PROTOTXT = "colorization_deploy_v2.prototxt"
+POINTS = "pts_in_hull.npy"
+MODEL = "colorization_release_v2.caffemodel"
+
+# Load the model
+print("Load model")
+net = cv2.dnn.readNetFromCaffe(PROTOTXT, MODEL)
+pts = np.load(POINTS)
+
+# Load centers for ab channel quantization used for rebalancing.
+class8 = net.getLayerId("class8_ab")
+conv8 = net.getLayerId("conv8_313_rh")
+pts = pts.transpose().reshape(2, 313, 1, 1)
+net.getLayer(class8).blobs = [pts.astype("float32")]
+net.getLayer(conv8).blobs = [np.full([1, 313], 2.606, dtype="float32")]
+
+# Colorize the input image
+def colorizedTheImage(image):
+    scaled = image.astype("float32") / 255.0
+    lab = cv2.cvtColor(scaled, cv2.COLOR_BGR2LAB)
+
+    resized = cv2.resize(lab, (224, 224))
+    L = cv2.split(resized)[0]
+    L -= 50
+
+    print("Colorizing the image")
+    net.setInput(cv2.dnn.blobFromImage(L))
+    ab = net.forward()[0, :, :, :].transpose((1, 2, 0))
+
+    ab = cv2.resize(ab, (image.shape[1], image.shape[0]))
+
+    L = cv2.split(lab)[0]
+    colorized = np.concatenate((L[:, :, np.newaxis], ab), axis=2)
+
+    colorized = cv2.cvtColor(colorized, cv2.COLOR_LAB2BGR)
+    colorized = np.clip(colorized, 0, 1)
+
+    colorized = (255 * colorized).astype("uint8")
+    colorized = cv2.cvtColor(colorized, cv2.COLOR_RGB2BGR)
+    return colorized
+
+demo = gr.Interface(fn=colorizedTheImage,
+                    inputs=["image"],
+                    outputs=["image"],
+                    examples=[["einstein.jpg"], ["tiger.jpg"], ["building.jpg"], ["nature.jpg"]],
+                    title="Black&White To Color Image")
+demo.launch(debug=True)
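The pipeline above can also be exercised without the Gradio UI. Below is a minimal standalone sketch, not part of the commit: it repeats the relevant steps rather than importing app.py (which calls demo.launch() at import time and would block), and it drops the final RGB2BGR swap, which exists only because Gradio displays RGB while cv2.imwrite expects BGR. It uses einstein.jpg, one of the example images committed here; the output file name is a hypothetical choice.

```python
# Hypothetical local smoke test mirroring app.py's pipeline (not part of the commit).
import cv2
import numpy as np

# Build the net and patch in the cluster centers, exactly as app.py does.
net = cv2.dnn.readNetFromCaffe("colorization_deploy_v2.prototxt",
                               "colorization_release_v2.caffemodel")
pts = np.load("pts_in_hull.npy").transpose().reshape(2, 313, 1, 1)
net.getLayer(net.getLayerId("class8_ab")).blobs = [pts.astype("float32")]
net.getLayer(net.getLayerId("conv8_313_rh")).blobs = [np.full([1, 313], 2.606, dtype="float32")]

image = cv2.imread("einstein.jpg")  # committed example image
lab = cv2.cvtColor(image.astype("float32") / 255.0, cv2.COLOR_BGR2LAB)
L = cv2.split(cv2.resize(lab, (224, 224)))[0] - 50  # mean-centered lightness

net.setInput(cv2.dnn.blobFromImage(L))
ab = net.forward()[0].transpose((1, 2, 0))           # predicted ab channels
ab = cv2.resize(ab, (image.shape[1], image.shape[0]))

# Recombine the full-resolution L channel with the predicted ab channels.
out = np.concatenate((cv2.split(lab)[0][:, :, np.newaxis], ab), axis=2)
out = (255 * np.clip(cv2.cvtColor(out, cv2.COLOR_LAB2BGR), 0, 1)).astype("uint8")
cv2.imwrite("einstein_colorized.jpg", out)           # hypothetical output path
```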
building.jpg ADDED
colorization_deploy_v2.prototxt ADDED
@@ -0,0 +1,589 @@
+name: "LtoAB"
+
+layer {
+  name: "data_l"
+  type: "Input"
+  top: "data_l"
+  input_param {
+    shape { dim: 1 dim: 1 dim: 224 dim: 224 }
+  }
+}
+
+# *****************
+# ***** conv1 *****
+# *****************
+layer {
+  name: "bw_conv1_1"
+  type: "Convolution"
+  bottom: "data_l"
+  top: "conv1_1"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 64
+    pad: 1
+    kernel_size: 3
+  }
+}
+layer {
+  name: "relu1_1"
+  type: "ReLU"
+  bottom: "conv1_1"
+  top: "conv1_1"
+}
+layer {
+  name: "conv1_2"
+  type: "Convolution"
+  bottom: "conv1_1"
+  top: "conv1_2"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 64
+    pad: 1
+    kernel_size: 3
+    stride: 2
+  }
+}
+layer {
+  name: "relu1_2"
+  type: "ReLU"
+  bottom: "conv1_2"
+  top: "conv1_2"
+}
+layer {
+  name: "conv1_2norm"
+  type: "BatchNorm"
+  bottom: "conv1_2"
+  top: "conv1_2norm"
+  batch_norm_param{ }
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+}
+# *****************
+# ***** conv2 *****
+# *****************
+layer {
+  name: "conv2_1"
+  type: "Convolution"
+  # bottom: "conv1_2"
+  bottom: "conv1_2norm"
+  # bottom: "pool1"
+  top: "conv2_1"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 128
+    pad: 1
+    kernel_size: 3
+  }
+}
+layer {
+  name: "relu2_1"
+  type: "ReLU"
+  bottom: "conv2_1"
+  top: "conv2_1"
+}
+layer {
+  name: "conv2_2"
+  type: "Convolution"
+  bottom: "conv2_1"
+  top: "conv2_2"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 128
+    pad: 1
+    kernel_size: 3
+    stride: 2
+  }
+}
+layer {
+  name: "relu2_2"
+  type: "ReLU"
+  bottom: "conv2_2"
+  top: "conv2_2"
+}
+layer {
+  name: "conv2_2norm"
+  type: "BatchNorm"
+  bottom: "conv2_2"
+  top: "conv2_2norm"
+  batch_norm_param{ }
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+}
+# *****************
+# ***** conv3 *****
+# *****************
+layer {
+  name: "conv3_1"
+  type: "Convolution"
+  # bottom: "conv2_2"
+  bottom: "conv2_2norm"
+  # bottom: "pool2"
+  top: "conv3_1"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 256
+    pad: 1
+    kernel_size: 3
+  }
+}
+layer {
+  name: "relu3_1"
+  type: "ReLU"
+  bottom: "conv3_1"
+  top: "conv3_1"
+}
+layer {
+  name: "conv3_2"
+  type: "Convolution"
+  bottom: "conv3_1"
+  top: "conv3_2"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 256
+    pad: 1
+    kernel_size: 3
+  }
+}
+layer {
+  name: "relu3_2"
+  type: "ReLU"
+  bottom: "conv3_2"
+  top: "conv3_2"
+}
+layer {
+  name: "conv3_3"
+  type: "Convolution"
+  bottom: "conv3_2"
+  top: "conv3_3"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 256
+    pad: 1
+    kernel_size: 3
+    stride: 2
+  }
+}
+layer {
+  name: "relu3_3"
+  type: "ReLU"
+  bottom: "conv3_3"
+  top: "conv3_3"
+}
+layer {
+  name: "conv3_3norm"
+  type: "BatchNorm"
+  bottom: "conv3_3"
+  top: "conv3_3norm"
+  batch_norm_param{ }
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+}
+# *****************
+# ***** conv4 *****
+# *****************
+layer {
+  name: "conv4_1"
+  type: "Convolution"
+  # bottom: "conv3_3"
+  bottom: "conv3_3norm"
+  # bottom: "pool3"
+  top: "conv4_1"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    stride: 1
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu4_1"
+  type: "ReLU"
+  bottom: "conv4_1"
+  top: "conv4_1"
+}
+layer {
+  name: "conv4_2"
+  type: "Convolution"
+  bottom: "conv4_1"
+  top: "conv4_2"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    stride: 1
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu4_2"
+  type: "ReLU"
+  bottom: "conv4_2"
+  top: "conv4_2"
+}
+layer {
+  name: "conv4_3"
+  type: "Convolution"
+  bottom: "conv4_2"
+  top: "conv4_3"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    stride: 1
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu4_3"
+  type: "ReLU"
+  bottom: "conv4_3"
+  top: "conv4_3"
+}
+layer {
+  name: "conv4_3norm"
+  type: "BatchNorm"
+  bottom: "conv4_3"
+  top: "conv4_3norm"
+  batch_norm_param{ }
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+}
+# *****************
+# ***** conv5 *****
+# *****************
+layer {
+  name: "conv5_1"
+  type: "Convolution"
+  # bottom: "conv4_3"
+  bottom: "conv4_3norm"
+  # bottom: "pool4"
+  top: "conv5_1"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    stride: 1
+    pad: 2
+    dilation: 2
+  }
+}
+layer {
+  name: "relu5_1"
+  type: "ReLU"
+  bottom: "conv5_1"
+  top: "conv5_1"
+}
+layer {
+  name: "conv5_2"
+  type: "Convolution"
+  bottom: "conv5_1"
+  top: "conv5_2"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    stride: 1
+    pad: 2
+    dilation: 2
+  }
+}
+layer {
+  name: "relu5_2"
+  type: "ReLU"
+  bottom: "conv5_2"
+  top: "conv5_2"
+}
+layer {
+  name: "conv5_3"
+  type: "Convolution"
+  bottom: "conv5_2"
+  top: "conv5_3"
+  # param {lr_mult: 0 decay_mult: 0}
+  # param {lr_mult: 0 decay_mult: 0}
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    stride: 1
+    pad: 2
+    dilation: 2
+  }
+}
+layer {
+  name: "relu5_3"
+  type: "ReLU"
+  bottom: "conv5_3"
+  top: "conv5_3"
+}
+layer {
+  name: "conv5_3norm"
+  type: "BatchNorm"
+  bottom: "conv5_3"
+  top: "conv5_3norm"
+  batch_norm_param{ }
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+}
+# *****************
+# ***** conv6 *****
+# *****************
+layer {
+  name: "conv6_1"
+  type: "Convolution"
+  bottom: "conv5_3norm"
+  top: "conv6_1"
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    pad: 2
+    dilation: 2
+  }
+}
+layer {
+  name: "relu6_1"
+  type: "ReLU"
+  bottom: "conv6_1"
+  top: "conv6_1"
+}
+layer {
+  name: "conv6_2"
+  type: "Convolution"
+  bottom: "conv6_1"
+  top: "conv6_2"
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    pad: 2
+    dilation: 2
+  }
+}
+layer {
+  name: "relu6_2"
+  type: "ReLU"
+  bottom: "conv6_2"
+  top: "conv6_2"
+}
+layer {
+  name: "conv6_3"
+  type: "Convolution"
+  bottom: "conv6_2"
+  top: "conv6_3"
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    pad: 2
+    dilation: 2
+  }
+}
+layer {
+  name: "relu6_3"
+  type: "ReLU"
+  bottom: "conv6_3"
+  top: "conv6_3"
+}
+layer {
+  name: "conv6_3norm"
+  type: "BatchNorm"
+  bottom: "conv6_3"
+  top: "conv6_3norm"
+  batch_norm_param{ }
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+}
+# *****************
+# ***** conv7 *****
+# *****************
+layer {
+  name: "conv7_1"
+  type: "Convolution"
+  bottom: "conv6_3norm"
+  top: "conv7_1"
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu7_1"
+  type: "ReLU"
+  bottom: "conv7_1"
+  top: "conv7_1"
+}
+layer {
+  name: "conv7_2"
+  type: "Convolution"
+  bottom: "conv7_1"
+  top: "conv7_2"
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu7_2"
+  type: "ReLU"
+  bottom: "conv7_2"
+  top: "conv7_2"
+}
+layer {
+  name: "conv7_3"
+  type: "Convolution"
+  bottom: "conv7_2"
+  top: "conv7_3"
+  convolution_param {
+    num_output: 512
+    kernel_size: 3
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu7_3"
+  type: "ReLU"
+  bottom: "conv7_3"
+  top: "conv7_3"
+}
+layer {
+  name: "conv7_3norm"
+  type: "BatchNorm"
+  bottom: "conv7_3"
+  top: "conv7_3norm"
+  batch_norm_param{ }
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+  param {lr_mult: 0 decay_mult: 0}
+}
+# *****************
+# ***** conv8 *****
+# *****************
+layer {
+  name: "conv8_1"
+  type: "Deconvolution"
+  bottom: "conv7_3norm"
+  top: "conv8_1"
+  convolution_param {
+    num_output: 256
+    kernel_size: 4
+    pad: 1
+    dilation: 1
+    stride: 2
+  }
+}
+layer {
+  name: "relu8_1"
+  type: "ReLU"
+  bottom: "conv8_1"
+  top: "conv8_1"
+}
+layer {
+  name: "conv8_2"
+  type: "Convolution"
+  bottom: "conv8_1"
+  top: "conv8_2"
+  convolution_param {
+    num_output: 256
+    kernel_size: 3
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu8_2"
+  type: "ReLU"
+  bottom: "conv8_2"
+  top: "conv8_2"
+}
+layer {
+  name: "conv8_3"
+  type: "Convolution"
+  bottom: "conv8_2"
+  top: "conv8_3"
+  convolution_param {
+    num_output: 256
+    kernel_size: 3
+    pad: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "relu8_3"
+  type: "ReLU"
+  bottom: "conv8_3"
+  top: "conv8_3"
+}
+# *******************
+# ***** Softmax *****
+# *******************
+layer {
+  name: "conv8_313"
+  type: "Convolution"
+  bottom: "conv8_3"
+  top: "conv8_313"
+  convolution_param {
+    num_output: 313
+    kernel_size: 1
+    stride: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "conv8_313_rh"
+  type: "Scale"
+  bottom: "conv8_313"
+  top: "conv8_313_rh"
+  scale_param {
+    bias_term: false
+    filler { type: 'constant' value: 2.606 }
+  }
+}
+layer {
+  name: "class8_313_rh"
+  type: "Softmax"
+  bottom: "conv8_313_rh"
+  top: "class8_313_rh"
+}
+# ********************
+# ***** Decoding *****
+# ********************
+layer {
+  name: "class8_ab"
+  type: "Convolution"
+  bottom: "class8_313_rh"
+  top: "class8_ab"
+  convolution_param {
+    num_output: 2
+    kernel_size: 1
+    stride: 1
+    dilation: 1
+  }
+}
+layer {
+  name: "Silence"
+  type: "Silence"
+  bottom: "class8_ab"
+}
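cv2.dnn can build the graph from the prototxt alone, which makes it easy to confirm that the two layers app.py patches with the cluster centers and the 2.606 rebalancing constant, class8_ab and conv8_313_rh, are actually declared. A minimal sketch, not part of the commit:

```python
# Hypothetical sanity check: parse the prototxt (no weights needed) and
# verify the layers that app.py overwrites are present.
import cv2

net = cv2.dnn.readNetFromCaffe("colorization_deploy_v2.prototxt")
names = net.getLayerNames()
for layer in ("class8_ab", "conv8_313_rh"):
    print(layer, "found" if layer in names else "MISSING")
```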
colorization_release_v2.caffemodel ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5af1e602646328c792e1094f9876fe9cd4c09ac46fa886e5708a1abc89137b1
+size 128946764
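This is a Git LFS pointer rather than the weights themselves; the 128,946,764-byte caffemodel is materialized on checkout. A downloaded copy can be checked against the oid and size recorded above. A sketch, not part of the commit, assuming the file has been pulled into the working directory:

```python
# Hypothetical integrity check: compare a local copy of the model file
# against the sha256 oid and byte size from the LFS pointer above.
import hashlib
import os

path = "colorization_release_v2.caffemodel"
expected_oid = "f5af1e602646328c792e1094f9876fe9cd4c09ac46fa886e5708a1abc89137b1"
expected_size = 128946764

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

print("size ok:", os.path.getsize(path) == expected_size)
print("oid ok: ", digest.hexdigest() == expected_oid)
```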
einstein.jpg ADDED
nature.jpg ADDED
pts_in_hull.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5dec01315c34f43f1c8c089e84c45ae35d1838d8e77ed0e7ca930f79ffa450e
+size 5088
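Another LFS pointer. pts_in_hull.npy holds the 313 quantized ab-bin centers that app.py transposes and reshapes to (2, 313, 1, 1), so a pulled copy should load as a (313, 2) array. A quick inspection sketch, not part of the commit:

```python
# Hypothetical inspection: shape should be (313, 2), matching app.py's
# pts.transpose().reshape(2, 313, 1, 1).
import numpy as np

pts = np.load("pts_in_hull.npy")
print(pts.shape, pts.dtype)  # expected shape: (313, 2)
```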
requirements.txt ADDED
@@ -0,0 +1,2 @@
+opencv-python
+numpy
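gradio itself is absent here, presumably because Spaces installs the SDK from the README front matter (sdk: gradio, sdk_version: 3.20.1) and requirements.txt only needs the extra dependencies; a local run would need gradio installed separately. A small environment check, not part of the commit:

```python
# Hypothetical environment check: confirm everything app.py imports resolves.
import importlib.util

for mod in ("cv2", "numpy", "gradio"):
    found = importlib.util.find_spec(mod) is not None
    print(f"{mod}: {'ok' if found else 'missing'}")
```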
tiger.jpg ADDED