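/*
 * cost_layer.c — the "cost" (loss) layer: computes the training loss over
 * the network output against the ground truth, and the gradient (delta)
 * that backpropagation starts from.
 */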
#include "cost_layer.h"
#include "utils.h"
#include "cuda.h"
#include "blas.h"
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
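
/* Map a cost-type string (e.g. from a network config file) to the
 * COST_TYPE enum. Unrecognized strings fall back to SSE with a warning. */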
COST_TYPE get_cost_type(char *s)
{
    if (strcmp(s, "seg")==0) return SEG;
    if (strcmp(s, "sse")==0) return SSE;
    if (strcmp(s, "masked")==0) return MASKED;
    if (strcmp(s, "smooth")==0) return SMOOTH;
    if (strcmp(s, "L1")==0) return L1;
    if (strcmp(s, "wgan")==0) return WGAN;
    fprintf(stderr, "Couldn't find cost type %s, going with SSE\n", s);
    return SSE;
}
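
/* Inverse of get_cost_type: the canonical string for a COST_TYPE,
 * defaulting to "sse". */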
char *get_cost_string(COST_TYPE a)
{
    switch(a){
        case SEG:
            return "seg";
        case SSE:
            return "sse";
        case MASKED:
            return "masked";
        case SMOOTH:
            return "smooth";
        case L1:
            return "L1";
        case WGAN:
            return "wgan";
    }
    return "sse";
}
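
/* Allocate a cost layer: per-element output (loss terms), per-element
 * delta (gradient), and a single accumulated cost value. `scale`
 * multiplies the gradient in the backward pass. */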
cost_layer make_cost_layer(int batch, int inputs, COST_TYPE cost_type, float scale)
{
    fprintf(stderr, "cost %4d\n", inputs);
    cost_layer l = {0};
    l.type = COST;

    l.scale = scale;
    l.batch = batch;
    l.inputs = inputs;
    l.outputs = inputs;
    l.cost_type = cost_type;
    l.delta = calloc(inputs*batch, sizeof(float));
    l.output = calloc(inputs*batch, sizeof(float));
    l.cost = calloc(1, sizeof(float));

    l.forward = forward_cost_layer;
    l.backward = backward_cost_layer;
#ifdef GPU
    l.forward_gpu = forward_cost_layer_gpu;
    l.backward_gpu = backward_cost_layer_gpu;

    // device mirrors, each initialized from its matching host buffer
    l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
    l.output_gpu = cuda_make_array(l.output, inputs*batch);
#endif
    return l;
}
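
/* Illustrative sketch (not from the original source): building a
 * standalone SSE cost layer over a batch of 4 vectors of 1000 floats,
 * with unscaled gradients.
 *
 *     cost_layer l = make_cost_layer(4, 1000, SSE, 1.0f);
 *     // ... wire l into a network and run forward/backward ...
 */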
void resize_cost_layer(cost_layer *l, int inputs)
{
    l->inputs = inputs;
    l->outputs = inputs;
    l->delta = realloc(l->delta, inputs*l->batch*sizeof(float));
    l->output = realloc(l->output, inputs*l->batch*sizeof(float));
#ifdef GPU
    cuda_free(l->delta_gpu);
    cuda_free(l->output_gpu);
    l->delta_gpu = cuda_make_array(l->delta, inputs*l->batch);
    l->output_gpu = cuda_make_array(l->output, inputs*l->batch);
#endif
}
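
/* CPU forward pass. MASKED copies the SECRET_NUM sentinel from truth into
 * the input so masked elements contribute zero error; SMOOTH, L1 and the
 * default (SSE/L2) each fill l.output with per-element losses and l.delta
 * with per-element gradients, and the losses are summed into l.cost[0]. */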
void forward_cost_layer(cost_layer l, network net)
{
    if (!net.truth) return;
    if(l.cost_type == MASKED){
        int i;
        for(i = 0; i < l.batch*l.inputs; ++i){
            if(net.truth[i] == SECRET_NUM) net.input[i] = SECRET_NUM;
        }
    }
    if(l.cost_type == SMOOTH){
        smooth_l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
    }else if(l.cost_type == L1){
        l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
    } else {
        l2_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
    }
    l.cost[0] = sum_array(l.output, l.batch*l.inputs);
}
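
/* Backward pass: accumulate the scaled gradient into the network delta,
 * net.delta += l.scale * l.delta (axpy). */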
void backward_cost_layer(const cost_layer l, network net)
{
    axpy_cpu(l.batch*l.inputs, l.scale, l.delta, 1, net.delta, 1);
}

#ifdef GPU
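
/* Host/device synchronization helpers for the delta buffer. */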
void pull_cost_layer(cost_layer l)
{
    cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
}

void push_cost_layer(cost_layer l)
{
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.inputs);
}
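
/* qsort comparator: ascending order by absolute value. Used below to find
 * a magnitude threshold for the l.ratio delta suppression. */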
int float_abs_compare (const void * a, const void * b)
{
    float fa = *(const float*) a;
    if(fa < 0) fa = -fa;
    float fb = *(const float*) b;
    if(fb < 0) fb = -fb;
    return (fa > fb) - (fa < fb);
}
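
/* GPU forward pass. If label smoothing is enabled, truth is first blended
 * toward the uniform distribution:
 *     t_i <- (1 - smooth) * t_i + smooth / inputs
 * Then the per-element loss/gradient kernels run, followed by optional
 * SEG no-object rescaling, MASKED zeroing of the network delta,
 * ratio-based delta suppression, and thresholding, before the loss is
 * pulled back and summed on the host. */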
void forward_cost_layer_gpu(cost_layer l, network net)
{
    if (!net.truth) return;
    if(l.smooth){
        scal_gpu(l.batch*l.inputs, (1-l.smooth), net.truth_gpu, 1);
        add_gpu(l.batch*l.inputs, l.smooth * 1./l.inputs, net.truth_gpu, 1);
    }

    if(l.cost_type == SMOOTH){
        smooth_l1_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
    } else if (l.cost_type == L1){
        l1_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
    } else if (l.cost_type == WGAN){
        wgan_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
    } else {
        l2_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
    }

    if (l.cost_type == SEG && l.noobject_scale != 1) {
        scale_mask_gpu(l.batch*l.inputs, l.delta_gpu, 0, net.truth_gpu, l.noobject_scale);
        scale_mask_gpu(l.batch*l.inputs, l.output_gpu, 0, net.truth_gpu, l.noobject_scale);
    }
    if (l.cost_type == MASKED) {
        mask_gpu(l.batch*l.inputs, net.delta_gpu, SECRET_NUM, net.truth_gpu, 0);
    }

    if(l.ratio){
        cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
        qsort(l.delta, l.batch*l.inputs, sizeof(float), float_abs_compare);
        int n = (1-l.ratio) * l.batch*l.inputs;
        // zero out the (1-ratio) fraction of deltas with the smallest magnitude
        float thresh = l.delta[n];
        supp_gpu(l.batch*l.inputs, thresh, l.delta_gpu, 1);
    }

    if(l.thresh){
        supp_gpu(l.batch*l.inputs, l.thresh*1./l.inputs, l.delta_gpu, 1);
    }

    cuda_pull_array(l.output_gpu, l.output, l.batch*l.inputs);
    l.cost[0] = sum_array(l.output, l.batch*l.inputs);
}

void backward_cost_layer_gpu(const cost_layer l, network net)
{
    axpy_gpu(l.batch*l.inputs, l.scale, l.delta_gpu, 1, net.delta_gpu, 1);
}
#endif