syntax = "proto2";

package object_detection.protos;

// Configuration proto for the convolution op hyperparameters to use in the
// object detection pipeline.
message Hyperparams {
  // Operations affected by hyperparameters.
  enum Op {
    // Convolution, separable convolution, convolution transpose.
    CONV = 1;
    // Fully connected.
    FC = 2;
  }
  optional Op op = 1 [default = CONV];

  // Regularizer for the weights of the convolution op.
  optional Regularizer regularizer = 2;

  // Initializer for the weights of the convolution op.
  optional Initializer initializer = 3;

  // Type of activation to apply after convolution.
  enum Activation {
    // Use None (no activation)
    NONE = 0;
    // Use tf.nn.relu
    RELU = 1;
    // Use tf.nn.relu6
    RELU_6 = 2;
    // Use tf.nn.swish
    SWISH = 3;
  }
  optional Activation activation = 4 [default = RELU];

  // Note that if nothing below is selected, then no normalization is applied.
  oneof normalizer_oneof {
    // BatchNorm hyperparameters.
    BatchNorm batch_norm = 5;
    // GroupNorm hyperparameters. This is only supported on a subset of models.
    // Note that the current implementation of group norm, instantiated via
    // tf.contrib.layers.group_norm(), only supports the fixed_shape_resizer
    // for image preprocessing.
    GroupNorm group_norm = 7;
  }

  // Whether depthwise convolutions should be regularized. If this parameter is
  // NOT set then the conv hyperparams will default to the parent scope.
  optional bool regularize_depthwise = 6 [default = false];
}
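
// Example: a Hyperparams setting as it might appear, in text format, under a
// Hyperparams-typed field such as conv_hyperparams in a pipeline config. An
// illustrative sketch only; the field names come from this file, but the
// values are placeholders, not recommended defaults:
//
//   conv_hyperparams {
//     op: CONV
//     activation: RELU_6
//     regularizer {
//       l2_regularizer {
//         weight: 4e-5
//       }
//     }
//     initializer {
//       truncated_normal_initializer {
//         mean: 0.0
//         stddev: 0.03
//       }
//     }
//     batch_norm {
//       decay: 0.9997
//       scale: true
//     }
//   }
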
// Proto with one-of field for regularizers.
message Regularizer {
  oneof regularizer_oneof {
    L1Regularizer l1_regularizer = 1;
    L2Regularizer l2_regularizer = 2;
  }
}

// Configuration proto for L1 Regularizer.
// See https://www.tensorflow.org/api_docs/python/tf/contrib/layers/l1_regularizer
message L1Regularizer {
  optional float weight = 1 [default = 1.0];
}

// Configuration proto for L2 Regularizer.
// See https://www.tensorflow.org/api_docs/python/tf/contrib/layers/l2_regularizer
message L2Regularizer {
  optional float weight = 1 [default = 1.0];
}
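
// Example (illustrative weight): selecting the L1 branch of the
// regularizer_oneof above. Setting l1_regularizer and l2_regularizer
// together is invalid, since they share a oneof:
//
//   regularizer {
//     l1_regularizer {
//       weight: 1e-5
//     }
//   }
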
// Proto with one-of field for initializers.
message Initializer {
  oneof initializer_oneof {
    TruncatedNormalInitializer truncated_normal_initializer = 1;
    VarianceScalingInitializer variance_scaling_initializer = 2;
    RandomNormalInitializer random_normal_initializer = 3;
  }
}
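
// Example (illustrative values): selecting one branch of initializer_oneof.
// The initializer messages referenced here are defined below in this file:
//
//   initializer {
//     random_normal_initializer {
//       mean: 0.0
//       stddev: 0.01
//     }
//   }
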
// Configuration proto for truncated normal initializer. See
// https://www.tensorflow.org/api_docs/python/tf/truncated_normal_initializer
message TruncatedNormalInitializer {
  optional float mean = 1 [default = 0.0];
  optional float stddev = 2 [default = 1.0];
}

// Configuration proto for variance scaling initializer. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/
// variance_scaling_initializer
message VarianceScalingInitializer {
  optional float factor = 1 [default = 2.0];
  optional bool uniform = 2 [default = false];
  enum Mode {
    FAN_IN = 0;
    FAN_OUT = 1;
    FAN_AVG = 2;
  }
  optional Mode mode = 3 [default = FAN_IN];
}
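
// For reference: per the tf.contrib.layers.variance_scaling_initializer
// docs, the defaults above (factor = 2.0, mode = FAN_IN, uniform = false)
// correspond to the He/MSRA initialization of
// https://arxiv.org/abs/1502.01852, while factor = 1.0 with mode = FAN_AVG
// approximates Glorot/Xavier initialization. Example (illustrative):
//
//   initializer {
//     variance_scaling_initializer {
//       factor: 1.0
//       uniform: true
//       mode: FAN_AVG
//     }
//   }
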
// Configuration proto for random normal initializer. See
// https://www.tensorflow.org/api_docs/python/tf/random_normal_initializer
message RandomNormalInitializer {
  optional float mean = 1 [default = 0.0];
  optional float stddev = 2 [default = 1.0];
}

// Configuration proto for batch norm to apply after convolution op. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/batch_norm
message BatchNorm {
  optional float decay = 1 [default = 0.999];
  optional bool center = 2 [default = true];
  optional bool scale = 3 [default = false];
  optional float epsilon = 4 [default = 0.001];
  // Whether to train the batch norm variables. If this is set to false during
  // training, the current values of the batch_norm variables are used for
  // the forward pass but are never updated.
  optional bool train = 5 [default = true];
}
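
// Example (illustrative values of the kind found in public detection
// configs; not prescriptive):
//
//   batch_norm {
//     decay: 0.9997
//     center: true
//     scale: true
//     epsilon: 0.001
//     train: true
//   }
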
// Configuration proto for group normalization to apply after convolution op.
// https://arxiv.org/abs/1803.08494
message GroupNorm {
}
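
// Example: selecting group norm instead of batch norm in the
// normalizer_oneof of Hyperparams. GroupNorm currently has no tunable
// fields, so an empty message selects it:
//
//   group_norm {
//   }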